/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
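/* Added note: a load whose destination is $31 discards its result; such
   encodings serve as prefetch hints on real Alpha hardware.  gen_load_mem
   below therefore simply skips code generation when ra == 31.  */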
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
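/* Added note: cpu_lock models the load-locked/store-conditional pair.
   gen_qemu_ldl_l/ldq_l above record the locked address; the
   store-conditional helpers below perform the store and set t0 to 1 only
   if the store address still matches cpu_lock, set t0 to 0 otherwise, and
   clear the lock with -1 in either case.  This is a simplification of the
   architecture, which also loses the reservation on intervening stores.  */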
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }

    gen_bcond_pcload(ctx, disp, lab_true);
}
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */
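/* Added illustration: as 64-bit patterns, +0.0 is 0x0000000000000000 and
   -0.0 is 0x8000000000000000 (mzero below).  For EQ/NE it is enough to
   clear the sign bit with (mzero - 1) == 0x7fffffffffffffff, which maps
   both zeros to the integer value 0 before the compare.  */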
static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(tcg_invert_cond(cond), va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
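/* Added illustration: these bits live in the 11-bit function field (fn11)
   of FP operate instructions; the low 6 bits (fpfn) select the operation.
   For example, fn11 == 0x7C0 decodes as QUAL_S | QUAL_U | QUAL_I together
   with dynamic rounding (QUAL_RM_D), i.e. roughly an /SUID qualifier
   combination in the assembler mnemonics.  */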
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
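/* Added note: gen_fcvtql below implements CVTQL's register-format shuffle.
   Reading off the masks and shifts: source bits <31:30> land in result
   bits <63:62> (the masked high part shifted left 32) and source bits
   <29:0> land in bits <58:29> (the low part shifted left 29).  This
   appears to match the split layout used when 32-bit values are unpacked
   into FP registers elsewhere (the memory_to_f/s helpers).  */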
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(cvtlq)

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? Ought to expand these inline; simple masking operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
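/* Added illustration: each bit of LIT selects one byte to keep, so
   zapnot_mask(0x01) == 0x00000000000000ff (hence ext8u below),
   zapnot_mask(0x03) == 0x000000000000ffff (ext16u),
   zapnot_mask(0x0f) == 0x00000000ffffffff (ext32u), and e.g.
   zapnot_mask(0x81) == 0xff000000000000ff keeps only bytes 0 and 7.  */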
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */
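            /* Added illustration: for (B & 7) == 2 the desired shift is
               64 - 16 = 48; ~(16) & 63 == 47, and 47 + 1 == 48.  For
               (B & 7) == 0 this yields 63 + 1 == 64, i.e. a zero result,
               while keeping each individual shift count within 0..63.  */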
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
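/* Added note, inferred from the uses below: the return value of
   translate_one steers the main translation loop.  0 continues with the
   next instruction, 1 follows a jump that has already updated cpu_pc,
   2 exits the current TB (e.g. TRAPB/EXCB), and 3 is returned once an
   exception has been generated.  */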
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
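    /* Added illustration: the shift pairs above sign-extend the 21-bit
       and 12-bit displacement fields.  E.g. for disp21, a field value of
       0x100000 (bit 20 set) becomes 0x80000000 after << 11, and the
       arithmetic >> 11 then yields 0xFFF00000 == -1048576 == -2^20.  */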
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
2360 case 0x1A:
2361 if (rb != 31)
2362 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2363 else
2364 tcg_gen_movi_i64(cpu_pc, 0);
2365 if (ra != 31)
2366 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2367 /* These four jumps differ only in the branch prediction hint they encode; the generated code is identical. */
2368 switch (fn2) {
2369 case 0x0:
2370 /* JMP */
2371 break;
2372 case 0x1:
2373 /* JSR */
2374 break;
2375 case 0x2:
2376 /* RET */
2377 break;
2378 case 0x3:
2379 /* JSR_COROUTINE */
2380 break;
2381 }
2382 ret = 1;
2383 break;
2384 case 0x1B:
2385 /* HW_LD (PALcode) */
2386 #if defined (CONFIG_USER_ONLY)
2387 goto invalid_opc;
2388 #else
2389 if (!ctx->pal_mode)
2390 goto invalid_opc;
2391 if (ra != 31) {
2392 TCGv addr = tcg_temp_new();
2393 if (rb != 31)
2394 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2395 else
2396 tcg_gen_movi_i64(addr, disp12);
2397 switch ((insn >> 12) & 0xF) {
2398 case 0x0:
2399 /* Longword physical access (hw_ldl/p) */
2400 gen_helper_ldl_raw(cpu_ir[ra], addr);
2401 break;
2402 case 0x1:
2403 /* Quadword physical access (hw_ldq/p) */
2404 gen_helper_ldq_raw(cpu_ir[ra], addr);
2405 break;
2406 case 0x2:
2407 /* Longword physical access with lock (hw_ldl_l/p) */
2408 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
2409 break;
2410 case 0x3:
2411 /* Quadword physical access with lock (hw_ldq_l/p) */
2412 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
2413 break;
2414 case 0x4:
2415 /* Longword virtual PTE fetch (hw_ldl/v) */
2416 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2417 break;
2418 case 0x5:
2419 /* Quadword virtual PTE fetch (hw_ldq/v) */
2420 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2421 break;
2422 case 0x6:
2423 /* Invalid */
2424 goto invalid_opc;
2425 case 0x7:
2426 /* Invalid */
2427 goto invalid_opc;
2428 case 0x8:
2429 /* Longword virtual access (hw_ldl) */
2430 gen_helper_st_virt_to_phys(addr, addr);
2431 gen_helper_ldl_raw(cpu_ir[ra], addr);
2432 break;
2433 case 0x9:
2434 /* Quadword virtual access (hw_ldq) */
2435 gen_helper_st_virt_to_phys(addr, addr);
2436 gen_helper_ldq_raw(cpu_ir[ra], addr);
2437 break;
2438 case 0xA:
2439 /* Longword virtual access with protection check (hw_ldl/w) */
2440 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2441 break;
2442 case 0xB:
2443 /* Quadword virtual access with protection check (hw_ldq/w) */
2444 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2445 break;
2446 case 0xC:
2447 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2448 gen_helper_set_alt_mode();
2449 gen_helper_st_virt_to_phys(addr, addr);
2450 gen_helper_ldl_raw(cpu_ir[ra], addr);
2451 gen_helper_restore_mode();
2452 break;
2453 case 0xD:
2454 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2455 gen_helper_set_alt_mode();
2456 gen_helper_st_virt_to_phys(addr, addr);
2457 gen_helper_ldq_raw(cpu_ir[ra], addr);
2458 gen_helper_restore_mode();
2459 break;
2460 case 0xE:
2461 /* Longword virtual access with alternate access mode and
2462 * protection checks (hw_ldl/wa)
2463 */
2464 gen_helper_set_alt_mode();
2465 gen_helper_ldl_data(cpu_ir[ra], addr);
2466 gen_helper_restore_mode();
2467 break;
2468 case 0xF:
2469 /* Quadword virtual access with alternate access mode and
2470 * protection checks (hw_ldq/wa)
2471 */
2472 gen_helper_set_alt_mode();
2473 gen_helper_ldq_data(cpu_ir[ra], addr);
2474 gen_helper_restore_mode();
2475 break;
2476 }
2477 tcg_temp_free(addr);
2478 }
2479 break;
2480 #endif
2481 case 0x1C:
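/* Extension instructions; each subgroup below is guarded by the
   matching AMASK feature bit (BWX, CIX, MVI, FIX). */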
2482 switch (fn7) {
2483 case 0x00:
2484 /* SEXTB */
2485 if (!(ctx->amask & AMASK_BWX))
2486 goto invalid_opc;
2487 if (likely(rc != 31)) {
2488 if (islit)
2489 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2490 else
2491 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2492 }
2493 break;
2494 case 0x01:
2495 /* SEXTW */
2496 if (!(ctx->amask & AMASK_BWX))
2497 goto invalid_opc;
2498 if (likely(rc != 31)) {
2499 if (islit)
2500 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2501 else
2502 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2503 }
2504 break;
2505 case 0x30:
2506 /* CTPOP */
2507 if (!(ctx->amask & AMASK_CIX))
2508 goto invalid_opc;
2509 if (likely(rc != 31)) {
2510 if (islit)
2511 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2512 else
2513 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2514 }
2515 break;
2516 case 0x31:
2517 /* PERR */
2518 if (!(ctx->amask & AMASK_MVI))
2519 goto invalid_opc;
2520 gen_perr(ra, rb, rc, islit, lit);
2521 break;
2522 case 0x32:
2523 /* CTLZ */
2524 if (!(ctx->amask & AMASK_CIX))
2525 goto invalid_opc;
2526 if (likely(rc != 31)) {
2527 if (islit)
2528 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2529 else
2530 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2531 }
2532 break;
2533 case 0x33:
2534 /* CTTZ */
2535 if (!(ctx->amask & AMASK_CIX))
2536 goto invalid_opc;
2537 if (likely(rc != 31)) {
2538 if (islit)
2539 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2540 else
2541 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2542 }
2543 break;
2544 case 0x34:
2545 /* UNPKBW */
2546 if (!(ctx->amask & AMASK_MVI))
2547 goto invalid_opc;
2548 if (real_islit || ra != 31)
2549 goto invalid_opc;
2550 gen_unpkbw (rb, rc);
2551 break;
2552 case 0x35:
2553 /* UNPKBL */
2554 if (!(ctx->amask & AMASK_MVI))
2555 goto invalid_opc;
2556 if (real_islit || ra != 31)
2557 goto invalid_opc;
2558 gen_unpkbl (rb, rc);
2559 break;
2560 case 0x36:
2561 /* PKWB */
2562 if (!(ctx->amask & AMASK_MVI))
2563 goto invalid_opc;
2564 if (real_islit || ra != 31)
2565 goto invalid_opc;
2566 gen_pkwb (rb, rc);
2567 break;
2568 case 0x37:
2569 /* PKLB */
2570 if (!(ctx->amask & AMASK_MVI))
2571 goto invalid_opc;
2572 if (real_islit || ra != 31)
2573 goto invalid_opc;
2574 gen_pklb (rb, rc);
2575 break;
2576 case 0x38:
2577 /* MINSB8 */
2578 if (!(ctx->amask & AMASK_MVI))
2579 goto invalid_opc;
2580 gen_minsb8 (ra, rb, rc, islit, lit);
2581 break;
2582 case 0x39:
2583 /* MINSW4 */
2584 if (!(ctx->amask & AMASK_MVI))
2585 goto invalid_opc;
2586 gen_minsw4 (ra, rb, rc, islit, lit);
2587 break;
2588 case 0x3A:
2589 /* MINUB8 */
2590 if (!(ctx->amask & AMASK_MVI))
2591 goto invalid_opc;
2592 gen_minub8 (ra, rb, rc, islit, lit);
2593 break;
2594 case 0x3B:
2595 /* MINUW4 */
2596 if (!(ctx->amask & AMASK_MVI))
2597 goto invalid_opc;
2598 gen_minuw4 (ra, rb, rc, islit, lit);
2599 break;
2600 case 0x3C:
2601 /* MAXUB8 */
2602 if (!(ctx->amask & AMASK_MVI))
2603 goto invalid_opc;
2604 gen_maxub8 (ra, rb, rc, islit, lit);
2605 break;
2606 case 0x3D:
2607 /* MAXUW4 */
2608 if (!(ctx->amask & AMASK_MVI))
2609 goto invalid_opc;
2610 gen_maxuw4 (ra, rb, rc, islit, lit);
2611 break;
2612 case 0x3E:
2613 /* MAXSB8 */
2614 if (!(ctx->amask & AMASK_MVI))
2615 goto invalid_opc;
2616 gen_maxsb8 (ra, rb, rc, islit, lit);
2617 break;
2618 case 0x3F:
2619 /* MAXSW4 */
2620 if (!(ctx->amask & AMASK_MVI))
2621 goto invalid_opc;
2622 gen_maxsw4 (ra, rb, rc, islit, lit);
2623 break;
2624 case 0x70:
2625 /* FTOIT */
2626 if (!(ctx->amask & AMASK_FIX))
2627 goto invalid_opc;
2628 if (likely(rc != 31)) {
2629 if (ra != 31)
2630 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2631 else
2632 tcg_gen_movi_i64(cpu_ir[rc], 0);
2633 }
2634 break;
2635 case 0x78:
2636 /* FTOIS */
2637 if (!(ctx->amask & AMASK_FIX))
2638 goto invalid_opc;
2639 if (rc != 31) {
2640 TCGv_i32 tmp1 = tcg_temp_new_i32();
2641 if (ra != 31)
2642 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2643 else {
2644 TCGv tmp2 = tcg_const_i64(0);
2645 gen_helper_s_to_memory(tmp1, tmp2);
2646 tcg_temp_free(tmp2);
2647 }
2648 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2649 tcg_temp_free_i32(tmp1);
2650 }
2651 break;
2652 default:
2653 goto invalid_opc;
2654 }
2655 break;
2656 case 0x1D:
2657 /* HW_MTPR (PALcode) */
2658 #if defined (CONFIG_USER_ONLY)
2659 goto invalid_opc;
2660 #else
2661 if (!ctx->pal_mode)
2662 goto invalid_opc;
2663 else {
2664 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2665 if (ra != 31)
2666 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2667 else {
2668 TCGv tmp2 = tcg_const_i64(0);
2669 gen_helper_mtpr(tmp1, tmp2);
2670 tcg_temp_free(tmp2);
2671 }
2672 tcg_temp_free(tmp1);
2673 ret = 2;
2674 }
2675 break;
2676 #endif
2677 case 0x1E:
2678 /* HW_REI (PALcode) */
2679 #if defined (CONFIG_USER_ONLY)
2680 goto invalid_opc;
2681 #else
2682 if (!ctx->pal_mode)
2683 goto invalid_opc;
2684 if (rb == 31) {
2685 /* "Old" alpha */
2686 gen_helper_hw_rei();
2687 } else {
2688 TCGv tmp;
2690 if (ra != 31) {
2691 tmp = tcg_temp_new();
2692 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2693 } else
2694 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2695 gen_helper_hw_ret(tmp);
2696 tcg_temp_free(tmp);
2697 }
2698 ret = 2;
2699 break;
2700 #endif
2701 case 0x1F:
2702 /* HW_ST (PALcode) */
2703 #if defined (CONFIG_USER_ONLY)
2704 goto invalid_opc;
2705 #else
2706 if (!ctx->pal_mode)
2707 goto invalid_opc;
2708 else {
2709 TCGv addr, val;
2710 addr = tcg_temp_new();
2711 if (rb != 31)
2712 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2713 else
2714 tcg_gen_movi_i64(addr, disp12);
2715 if (ra != 31)
2716 val = cpu_ir[ra];
2717 else {
2718 val = tcg_temp_new();
2719 tcg_gen_movi_i64(val, 0);
2720 }
2721 switch ((insn >> 12) & 0xF) {
2722 case 0x0:
2723 /* Longword physical access */
2724 gen_helper_stl_raw(val, addr);
2725 break;
2726 case 0x1:
2727 /* Quadword physical access */
2728 gen_helper_stq_raw(val, addr);
2729 break;
2730 case 0x2:
2731 /* Longword physical access with lock */
2732 gen_helper_stl_c_raw(val, val, addr);
2733 break;
2734 case 0x3:
2735 /* Quadword physical access with lock */
2736 gen_helper_stq_c_raw(val, val, addr);
2737 break;
2738 case 0x4:
2739 /* Longword virtual access */
2740 gen_helper_st_virt_to_phys(addr, addr);
2741 gen_helper_stl_raw(val, addr);
2742 break;
2743 case 0x5:
2744 /* Quadword virtual access */
2745 gen_helper_st_virt_to_phys(addr, addr);
2746 gen_helper_stq_raw(val, addr);
2747 break;
2748 case 0x6:
2749 /* Invalid */
2750 goto invalid_opc;
2751 case 0x7:
2752 /* Invalid */
2753 goto invalid_opc;
2754 case 0x8:
2755 /* Invalid */
2756 goto invalid_opc;
2757 case 0x9:
2758 /* Invalid */
2759 goto invalid_opc;
2760 case 0xA:
2761 /* Invalid */
2762 goto invalid_opc;
2763 case 0xB:
2764 /* Invalid */
2765 goto invalid_opc;
2766 case 0xC:
2767 /* Longword virtual access with alternate access mode */
2768 gen_helper_set_alt_mode();
2769 gen_helper_st_virt_to_phys(addr, addr);
2770 gen_helper_stl_raw(val, addr);
2771 gen_helper_restore_mode();
2772 break;
2773 case 0xD:
2774 /* Quadword virtual access with alternate access mode */
2775 gen_helper_set_alt_mode();
2776 gen_helper_st_virt_to_phys(addr, addr);
2777 gen_helper_stq_raw(val, addr);
2778 gen_helper_restore_mode();
2779 break;
2780 case 0xE:
2781 /* Invalid */
2782 goto invalid_opc;
2783 case 0xF:
2784 /* Invalid */
2785 goto invalid_opc;
2786 }
2787 if (ra == 31)
2788 tcg_temp_free(val);
2789 tcg_temp_free(addr);
2790 }
2791 break;
2792 #endif
2793 case 0x20:
2794 /* LDF */
2795 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2796 break;
2797 case 0x21:
2798 /* LDG */
2799 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2800 break;
2801 case 0x22:
2802 /* LDS */
2803 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2804 break;
2805 case 0x23:
2806 /* LDT */
2807 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2808 break;
2809 case 0x24:
2810 /* STF */
2811 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2812 break;
2813 case 0x25:
2814 /* STG */
2815 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2816 break;
2817 case 0x26:
2818 /* STS */
2819 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2820 break;
2821 case 0x27:
2822 /* STT */
2823 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2824 break;
2825 case 0x28:
2826 /* LDL */
2827 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2828 break;
2829 case 0x29:
2830 /* LDQ */
2831 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2832 break;
2833 case 0x2A:
2834 /* LDL_L */
2835 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2836 break;
2837 case 0x2B:
2838 /* LDQ_L */
2839 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2840 break;
2841 case 0x2C:
2842 /* STL */
2843 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
2844 break;
2845 case 0x2D:
2846 /* STQ */
2847 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
2848 break;
2849 case 0x2E:
2850 /* STL_C */
2851 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
2852 break;
2853 case 0x2F:
2854 /* STQ_C */
2855 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
2856 break;
2857 case 0x30:
2858 /* BR */
2859 if (ra != 31)
2860 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2861 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2862 ret = 1;
2863 break;
2864 case 0x31: /* FBEQ */
2865 gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2866 ret = 1;
2867 break;
2868 case 0x32: /* FBLT */
2869 gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2870 ret = 1;
2871 break;
2872 case 0x33: /* FBLE */
2873 gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2874 ret = 1;
2875 break;
2876 case 0x34:
2877 /* BSR */
2878 if (ra != 31)
2879 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2880 tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
2881 ret = 1;
2882 break;
2883 case 0x35: /* FBNE */
2884 gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2885 ret = 1;
2886 break;
2887 case 0x36: /* FBGE */
2888 gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2889 ret = 1;
2890 break;
2891 case 0x37: /* FBGT */
2892 gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2893 ret = 1;
2894 break;
2895 case 0x38:
2896 /* BLBC */
2897 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2898 ret = 1;
2899 break;
2900 case 0x39:
2901 /* BEQ */
2902 gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2903 ret = 1;
2904 break;
2905 case 0x3A:
2906 /* BLT */
2907 gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2908 ret = 1;
2909 break;
2910 case 0x3B:
2911 /* BLE */
2912 gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2913 ret = 1;
2914 break;
2915 case 0x3C:
2916 /* BLBS */
2917 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2918 ret = 1;
2919 break;
2920 case 0x3D:
2921 /* BNE */
2922 gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2923 ret = 1;
2924 break;
2925 case 0x3E:
2926 /* BGE */
2927 gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2928 ret = 1;
2929 break;
2930 case 0x3F:
2931 /* BGT */
2932 gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2933 ret = 1;
2934 break;
2935 invalid_opc:
2936 gen_invalid(ctx);
2937 ret = 3;
2938 break;
2939 }
2941 return ret;
2942 }
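/* Note: the return value of translate_one steers the loop in
   gen_intermediate_code_internal below: 0 continues the current TB,
   1 follows a branch that has already written cpu_pc, 2 exits the TB
   with cpu_pc pointing at the next insn, and 3 flags an invalid
   opcode for which an OPCDEC exception has been generated. */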
2944 static inline void gen_intermediate_code_internal(CPUState *env,
2945 TranslationBlock *tb,
2946 int search_pc)
2947 {
2948 DisasContext ctx, *ctxp = &ctx;
2949 target_ulong pc_start;
2950 uint32_t insn;
2951 uint16_t *gen_opc_end;
2952 CPUBreakpoint *bp;
2953 int j, lj = -1;
2954 int ret;
2955 int num_insns;
2956 int max_insns;
2958 pc_start = tb->pc;
2959 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2960 ctx.pc = pc_start;
2961 ctx.amask = env->amask;
2962 ctx.env = env;
2963 #if defined (CONFIG_USER_ONLY)
2964 ctx.mem_idx = 0;
2965 #else
2966 ctx.mem_idx = ((env->ps >> 3) & 3);
2967 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
2968 #endif
2970 /* ??? Every TB begins with unset rounding mode, to be initialized on
2971 the first fp insn of the TB. Alternately we could define a proper
2972 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2973 to reset the FP_STATUS to that default at the end of any TB that
2974 changes the default. We could even (gasp) dynamically figure out
2975 what default would be most efficient given the running program. */
2976 ctx.tb_rm = -1;
2977 /* Similarly for flush-to-zero. */
2978 ctx.tb_ftz = -1;
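/* -1 is a sentinel for "not yet known"; the first FP insn that needs
   a rounding mode or flush-to-zero setting installs it and records it
   in tb_rm/tb_ftz so later insns in the TB can skip the setup. */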
2980 num_insns = 0;
2981 max_insns = tb->cflags & CF_COUNT_MASK;
2982 if (max_insns == 0)
2983 max_insns = CF_COUNT_MASK;
2985 gen_icount_start();
2986 for (ret = 0; ret == 0;) {
2987 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2988 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2989 if (bp->pc == ctx.pc) {
2990 gen_excp(&ctx, EXCP_DEBUG, 0);
2991 break;
2992 }
2993 }
2994 }
2995 if (search_pc) {
2996 j = gen_opc_ptr - gen_opc_buf;
2997 if (lj < j) {
2998 lj++;
2999 while (lj < j)
3000 gen_opc_instr_start[lj++] = 0;
3001 }
3002 gen_opc_pc[lj] = ctx.pc;
3003 gen_opc_instr_start[lj] = 1;
3004 gen_opc_icount[lj] = num_insns;
3005 }
3006 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3007 gen_io_start();
3008 insn = ldl_code(ctx.pc);
3009 num_insns++;
3011 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3012 tcg_gen_debug_insn_start(ctx.pc);
3013 }
3015 ctx.pc += 4;
3016 ret = translate_one(ctxp, insn);
3017 if (ret != 0)
3018 break;
3019 /* if we reach a page boundary or are single stepping, stop
3020 * generation
3021 */
3022 if (env->singlestep_enabled) {
3023 gen_excp(&ctx, EXCP_DEBUG, 0);
3024 break;
3025 }
3027 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
3028 break;
3030 if (gen_opc_ptr >= gen_opc_end)
3031 break;
3033 if (num_insns >= max_insns)
3034 break;
3036 if (singlestep) {
3037 break;
3038 }
3039 }
3040 if (ret != 1 && ret != 3) {
3041 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3042 }
3043 if (tb->cflags & CF_LAST_IO)
3044 gen_io_end();
3045 /* Generate the return instruction */
3046 tcg_gen_exit_tb(0);
3047 gen_icount_end(tb, num_insns);
3048 *gen_opc_ptr = INDEX_op_end;
3049 if (search_pc) {
3050 j = gen_opc_ptr - gen_opc_buf;
3051 lj++;
3052 while (lj <= j)
3053 gen_opc_instr_start[lj++] = 0;
3054 } else {
3055 tb->size = ctx.pc - pc_start;
3056 tb->icount = num_insns;
3057 }
3058 #ifdef DEBUG_DISAS
3059 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3060 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3061 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3062 qemu_log("\n");
3063 }
3064 #endif
3065 }
3067 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3068 {
3069 gen_intermediate_code_internal(env, tb, 0);
3070 }
3072 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3073 {
3074 gen_intermediate_code_internal(env, tb, 1);
3075 }
3077 struct cpu_def_t {
3078 const char *name;
3079 int implver, amask;
3080 };
3082 static const struct cpu_def_t cpu_defs[] = {
3083 { "ev4", IMPLVER_2106x, 0 },
3084 { "ev5", IMPLVER_21164, 0 },
3085 { "ev56", IMPLVER_21164, AMASK_BWX },
3086 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3087 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3088 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3089 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3090 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3091 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3092 { "21064", IMPLVER_2106x, 0 },
3093 { "21164", IMPLVER_21164, 0 },
3094 { "21164a", IMPLVER_21164, AMASK_BWX },
3095 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3096 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3097 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3098 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3099 };
3101 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3102 {
3103 CPUAlphaState *env;
3104 int implver, amask, i, max;
3106 env = qemu_mallocz(sizeof(CPUAlphaState));
3107 cpu_exec_init(env);
3108 alpha_translate_init();
3109 tlb_flush(env, 1);
3111 /* Default to ev67; no reason not to emulate insns by default. */
3112 implver = IMPLVER_21264;
3113 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3114 | AMASK_TRAP | AMASK_PREFETCH);
3116 max = ARRAY_SIZE(cpu_defs);
3117 for (i = 0; i < max; i++) {
3118 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3119 implver = cpu_defs[i].implver;
3120 amask = cpu_defs[i].amask;
3121 break;
3122 }
3123 }
3124 env->implver = implver;
3125 env->amask = amask;
3127 env->ps = 0x1F00;
3128 #if defined (CONFIG_USER_ONLY)
3129 env->ps |= 1 << 3;
3130 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3131 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3132 #else
3133 pal_init(env);
3134 #endif
3136 /* Initialize IPR */
3137 #if defined (CONFIG_USER_ONLY)
3138 env->ipr[IPR_EXC_ADDR] = 0;
3139 env->ipr[IPR_EXC_SUM] = 0;
3140 env->ipr[IPR_EXC_MASK] = 0;
3141 #else
3142 {
3143 uint64_t hwpcb;
3144 hwpcb = env->ipr[IPR_PCBB];
3145 env->ipr[IPR_ASN] = 0;
3146 env->ipr[IPR_ASTEN] = 0;
3147 env->ipr[IPR_ASTSR] = 0;
3148 env->ipr[IPR_DATFX] = 0;
3149 /* XXX: fix this */
3150 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3151 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3152 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3153 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3154 env->ipr[IPR_FEN] = 0;
3155 env->ipr[IPR_IPL] = 31;
3156 env->ipr[IPR_MCES] = 0;
3157 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3158 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3159 env->ipr[IPR_SISR] = 0;
3160 env->ipr[IPR_VIRBND] = -1ULL;
3161 }
3162 #endif
3164 qemu_init_vcpu(env);
3165 return env;
3166 }
3168 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3169 unsigned long searched_pc, int pc_pos, void *puc)
3170 {
3171 env->pc = gen_opc_pc[pc_pos];
3172 }