/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
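
/* Editor's note: the two load-locked helpers above simply record the
   locked address in cpu_lock before doing a normal load; the
   store-conditional helpers further down compare against it.  A
   minimal sketch of the guest-visible behaviour this emulates
   (illustrative, not taken from this file):

       ldq_l  t0, 0(a0)     cpu_lock = a0; t0 = *a0
       ...
       stq_c  t1, 0(a0)     if (cpu_lock == a0) { *a0 = t1; t1 = 1; }
                            else t1 = 0;  in both cases cpu_lock = -1

   Only the address is tracked, not intervening writes, so this is a
   single-CPU approximation of the architected lock_flag semantics.  */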
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
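
/* Editor's note: sketch of the TCG control flow emitted by the two
   store-conditional helpers above, for illustration only:

       if (cpu_lock != addr) goto l1;    compare locked address
       *addr = t0;  t0 = 1;  goto l2;    success: store, return 1
   l1: t0 = 0;                           failure: return 0
   l2: cpu_lock = -1;                    lock cleared either way

   Whatever the outcome, the lock is invalidated afterward, matching
   the Alpha rule that STx_C always clears the lock flag.  */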
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}
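
/* Editor's note: the branches above treat the raw IEEE bit pattern as
   a signed 64-bit integer, which orders correctly except for the two
   encodings of zero.  Worked example (illustrative), with
   mzero == 0x8000000000000000 being the pattern for -0.0:

     - EQ/NE: masking with (mzero - 1) folds -0.0 into +0.0 first;
     - GE:    -0.0 is negative as an integer, so an explicit second
              branch on src == mzero accepts it as >= 0.0;
     - LT:    -0.0 would wrongly test as < 0, so it is diverted to
              the fallthrough label before the signed comparison.  */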
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(tcg_invert_cond(cond), va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
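
/* Editor's note: these masks select the qualifier bits of the 11-bit
   floating-point function field (fn11 = insn<15:5>); the low 6 bits
   (fpfn) name the operation itself.  Illustrative decode, assuming the
   standard Alpha encoding: for ADDS/SUI, fn11 carries QUAL_S, QUAL_U
   and QUAL_I on top of the ADDS fpfn, so

       (fn11 & QUAL_RM_MASK) == QUAL_RM_N   round to nearest even
       (fn11 & QUAL_S) != 0                 software completion
       (fn11 & QUAL_I) != 0                 inexact detection enabled  */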
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
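
/* Editor's note: CVTQL stores a 32-bit value in the canonical Alpha
   longword-in-F-register form: bits <31:30> of the input land at bits
   <63:62> and bits <29:0> at bits <58:29>, leaving the exponent window
   clear.  Illustrative trace of the code above (only the low 32 bits
   of the input matter):

       tmp = in & 0xC0000000;    high two bits
       rc  = in & 0x3FFFFFFF;    low thirty bits
       tmp <<= 32;               -> bits <63:62>
       rc  <<= 29;               -> bits <58:29>
       rc  |= tmp;                                 */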
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(cvtlq)

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11) \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11) \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? Ought to expand these inline; simple masking operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
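
/* Editor's note: each set bit i of the literal keeps byte i of the
   source.  Worked examples:

       zapnot_mask(0x01) == 0x00000000000000ffull   low byte only
       zapnot_mask(0x0f) == 0x00000000ffffffffull   low longword
       zapnot_mask(0xff) == 0xffffffffffffffffull   whole quadword

   which is exactly why gen_zapnoti below can special-case those
   literals with ext8u/ext16u/ext32u/mov.  */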
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
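
/* Editor's note: the shift count above is (64 - 8*(rb & 7)) mod 64,
   i.e. the negation of the low-order shift masked to six bits.
   Illustrative check: for rb & 7 == 3 the count is 40, so EXTQH moves
   bytes <2:0> up into bytes <7:5>; for rb & 7 == 0 the count wraps to
   0 and the register passes through unshifted before the final
   zapnot.  */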
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
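
/* Editor's note: worked example of the split shift above.  For
   rb & 7 == 2 the desired right shift is 64 - 16 = 48; the code
   computes ~(2 * 8) & 63 == 47 and then shifts right once more,
   giving 48 in total.  For rb & 7 == 0 it computes ~0 & 63 == 63
   plus the extra shift of 1, a total of 64, which portably yields
   the required zero even on hosts where a 64-bit shift count is
   undefined behaviour.  */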
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
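
/* Editor's note: illustrative expansion for MSKBL with rb & 7 == 2:
   zapnot_mask(0x01) == 0xff is shifted left by 16 to cover byte 2,
   and the andc clears exactly that byte of ra:

       mask = 0x0000000000ff0000
       rc   = ra & ~mask

   The literal form folds the same computation into a single zapnoti
   call with the complemented, shifted byte mask.  */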
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
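
    /* Editor's note: worked decode example (illustrative).  For the
       operate-format instruction ADDQ $1,$2,$3 = 0x40220403:

           opc   = insn >> 26        = 0x10
           ra    = (insn >> 21) & 31 = 1
           rb    = (insn >> 16) & 31 = 2
           islit = (insn >> 12) & 1  = 0
           fn7   = (insn >> 5) & 127 = 0x20    ADDQ
           rc    = insn & 31         = 3

       The shift-left-then-arithmetic-shift-right idiom used for
       disp21/disp12 sign-extends the branch and memory-format
       displacement fields.  */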
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* These four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
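    /* HW_LD is a PALcode-only load.  Bits <15:12> of the instruction
       select the access type (physical or virtual, locked, alternate
       mode, with or without protection checks) and the low 12 bits hold
       the displacement. */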
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
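    /* Opcode 0x1C collects the later ISA extensions: BWX byte/word ops,
       CIX count instructions, MVI multimedia and FIX fp-integer moves.
       Each case checks ctx->amask first so that models lacking the
       extension fault with OPCDEC. */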
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
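        /* Note that for CTPOP/CTLZ/CTTZ of a literal operand the result
           is fully known at translation time, so the host-utils.h
           ctpop64/clz64/ctz64 routines fold it to a constant instead of
           emitting a runtime helper call. */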
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw(rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl(rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb(rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb(rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8(ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4(ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8(ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8(ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8(ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4(ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
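    /* HW_MTPR writes an internal processor register.  Like HW_REI it
       forces the TB to end (ret = 2), since changing an IPR can alter
       how subsequent code must be translated. */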
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv_i32 tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free_i32(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
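    /* HW_ST mirrors HW_LD, with the same access-type encoding in bits
       <15:12>.  When ra is $31 a zero-valued temporary is stored and
       freed afterwards, since register 31 always reads as zero. */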
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
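    /* The memory-format opcodes below all funnel through gen_load_mem and
       gen_store_mem.  The trailing integer flags (defined with those
       helpers earlier in this file) select, in order, the FP register
       file and quadword-alignment clearing; stores take one more flag
       that requests the store-conditional (lock-checking) path used by
       STL_C/STQ_C. */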
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
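    /* Branch-format opcodes.  disp21 counts instructions, so the target
       is ctx->pc + disp21 * 4, where ctx->pc has already been advanced
       past the branch; a disp21 of -1 therefore branches back to the
       branch instruction itself. */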
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
        gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        ret = 1;
        break;
    case 0x32: /* FBLT */
        gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        ret = 1;
        break;
    case 0x33: /* FBLE */
        gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
        gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        ret = 1;
        break;
    case 0x36: /* FBGE */
        gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        ret = 1;
        break;
    case 0x37: /* FBGT */
        gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
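
/* Translate one basic block of guest code into TCG ops.  translate_one
   returns 0 to keep going, 1 when cpu_pc has already been written (taken
   branches and jumps), 2 when the TB must end and cpu_pc still needs to
   be resynchronized (e.g. TRAPB, HW_MTPR, HW_REI), and 3 after an invalid
   opcode.  The loop below additionally stops at page boundaries,
   breakpoints, single-stepping, and opcode-buffer or icount limits. */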
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* If we reach a page boundary or are single stepping, stop
         * generation.
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4", IMPLVER_2106x, 0 },
    { "ev5", IMPLVER_21164, 0 },
    { "ev56", IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
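
/* cpu_alpha_init looks the requested model up in cpu_defs; unrecognized
   names silently fall back to the ev67 defaults set below, which enable
   every optional instruction group. */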
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#else
    pal_init(env);
#endif

    /* Initialize IPRs */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    {
        // uint64_t hwpcb;
        // hwpcb = env->ipr[IPR_PCBB];
        env->ipr[IPR_ASN] = 0;
        env->ipr[IPR_ASTEN] = 0;
        env->ipr[IPR_ASTSR] = 0;
        env->ipr[IPR_DATFX] = 0;
        /* XXX: fix this */
        // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
        // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
        // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
        // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
        env->ipr[IPR_FEN] = 0;
        env->ipr[IPR_IPL] = 31;
        env->ipr[IPR_MCES] = 0;
        env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
        // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
        env->ipr[IPR_SISR] = 0;
        env->ipr[IPR_VIRBND] = -1ULL;
    }
#endif

    qemu_init_vcpu(env);
    return env;
}
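
/* Restore the guest PC after an exception occurred inside a TB: pc_pos
   indexes the gen_opc_pc table that the search_pc translation pass
   filled in. */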
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}