/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;

#if !defined (CONFIG_USER_ONLY)
#endif

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
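/* Illustrative note (not in the original source): gen_qemu_ldl_l/ldq_l above
   record the guarded address in cpu_lock, and stl_c/stq_c only perform the
   store and write 1 into t0 while cpu_lock still equals the store address;
   otherwise t0 is set to 0.  Either way the lock is dropped by writing -1,
   so a guest sequence such as

       ldq_l   t1, 0(a0)
       addq    t1, 1, t1
       stq_c   t1, 0(a0)     # t1 == 1 on success, 0 on failure

   behaves like a (simplified, single-threaded) load-locked/store-conditional
   pair. */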
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr, zero;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    uint64_t mzero = 1ull << 63;
    TCGv tmp;
    int lab_false;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;
    }
}
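/* Illustrative note (not in the original source): mzero is the IEEE bit
   pattern of -0.0, i.e. 0x8000000000000000.  For EQ/NE the andi with
   mzero - 1 (0x7fffffffffffffff) clears the sign bit, so both +0.0
   (0x0000000000000000) and -0.0 compare equal to integer 0; the other
   cases need an extra branch on the raw mzero pattern instead. */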
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define FARITH2(name)                                           \
static inline void glue(gen_f, name)(int rb, int rc)            \
{                                                               \
    if (unlikely(rc == 31))                                     \
        return;                                                 \
    if (rb != 31)                                               \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);         \
    else {                                                      \
        TCGv tmp = tcg_const_i64(0);                            \
        gen_helper_ ## name (cpu_fir[rc], tmp);                 \
        tcg_temp_free(tmp);                                     \
    }                                                           \
}

#define FARITH3(name)                                                    \
static inline void glue(gen_f, name)(int ra, int rb, int rc)             \
{                                                                        \
    if (unlikely(rc == 31))                                              \
        return;                                                          \
    if (ra != 31) {                                                      \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                           \
            TCGv tmp = tcg_const_i64(0);                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);         \
            tcg_temp_free(tmp);                                          \
        }                                                                \
    } else {                                                             \
        TCGv tmp = tcg_const_i64(0);                                     \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);         \
        else                                                             \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                 \
        tcg_temp_free(tmp);                                              \
    }                                                                    \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
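/* Illustrative examples (not in the original source): each set bit in LIT
   keeps the corresponding byte, so
       zapnot_mask(0x01) == 0x00000000000000ffull
       zapnot_mask(0x0f) == 0x00000000ffffffffull
       zapnot_mask(0xff) == 0xffffffffffffffffull
   which is why the 0x01/0x03/0x0f/0xff literals below collapse into the
   ext8u/ext16u/ext32u/mov special cases. */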
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
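/* Illustrative note (not in the original source): with B & 7 == 2 the
   split-shift trick computes ~(2 * 8) & 63 == 47, and the variable shift
   by 47 followed by the constant shift by 1 gives the required total of
   48; with B & 7 == 0 it yields 63 + 1 == 64, which leaves the all-zero
   result that a single (undefined) 64-bit shift could not portably
   produce. */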
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                      \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,    \
                                    uint8_t lit)                          \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
    if (ra != 31) {                                                       \
        if (islit) {                                                      \
            TCGv tmp = tcg_const_i64(lit);                                \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);             \
            tcg_temp_free(tmp);                                           \
        } else                                                            \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);     \
    } else {                                                              \
        TCGv tmp1 = tcg_const_i64(0);                                     \
        if (islit) {                                                      \
            TCGv tmp2 = tcg_const_i64(lit);                               \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);                 \
            tcg_temp_free(tmp2);                                          \
        } else                                                            \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);           \
        tcg_temp_free(tmp1);                                              \
    }                                                                     \
}

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
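/* Illustrative note (not in the original source): a three-operand helper
   wrapper such as gen_cmpbge(ra, rb, rc, islit, lit), used further below,
   appears to be generated by ARITH3: it feeds cpu_ir[ra] (or constant 0
   for register 31) and either cpu_ir[rb] or the 8-bit literal to the
   corresponding gen_helper_* call and writes the result into cpu_ir[rc].
   MVIOP2 does the same for the two-operand MVI-style helpers. */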
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}
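/* Illustrative note (not in the original source): for CMPLT r1, r2, r3 this
   emits a branch to l1 when r1 < r2, writes 0 into cpu_ir[rc] on the
   fall-through path and 1 at l1, so the destination register ends up
   holding the 0/1 result expected by the CMPxx cases handled in
   translate_one below. */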
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;

    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
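    /* Illustrative note (not in the original source): the shift-left/
       shift-right pairs above sign-extend the branch and memory
       displacements, e.g. for the 21-bit field with bit 20 set,
       (int32_t)((insn & 0x001FFFFF) << 11) >> 11 propagates that bit
       through bits 31..21, so disp21 becomes a negative instruction
       offset once shifted left by 2 in the branch cases below. */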
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            break;
        }
#endif
        /* Invalid PAL call */
951 if (likely(ra
!= 31)) {
953 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
955 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
960 if (likely(ra
!= 31)) {
962 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
964 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
969 if (!(ctx
->amask
& AMASK_BWX
))
971 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
975 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
979 if (!(ctx
->amask
& AMASK_BWX
))
981 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
985 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0, 0);
989 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0, 0);
993 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1, 0);
999 if (likely(rc
!= 31)) {
1002 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1003 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1005 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1006 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1010 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1012 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1018 if (likely(rc
!= 31)) {
1020 TCGv tmp
= tcg_temp_new();
1021 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1023 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1025 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1026 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1030 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1032 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1038 if (likely(rc
!= 31)) {
1041 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1043 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1044 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1047 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1049 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1050 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1056 if (likely(rc
!= 31)) {
1058 TCGv tmp
= tcg_temp_new();
1059 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1061 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1063 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1064 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1068 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1070 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1071 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1078 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1082 if (likely(rc
!= 31)) {
1084 TCGv tmp
= tcg_temp_new();
1085 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1087 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1089 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1090 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1094 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1096 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1102 if (likely(rc
!= 31)) {
1104 TCGv tmp
= tcg_temp_new();
1105 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1107 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1109 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1110 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1114 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1116 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1117 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1124 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
1128 if (likely(rc
!= 31)) {
1131 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1133 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1136 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1138 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1144 if (likely(rc
!= 31)) {
1146 TCGv tmp
= tcg_temp_new();
1147 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1149 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1151 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1155 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1157 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1163 if (likely(rc
!= 31)) {
1166 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1168 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1171 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1173 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1179 if (likely(rc
!= 31)) {
1181 TCGv tmp
= tcg_temp_new();
1182 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1184 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1186 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1190 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1192 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1198 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
1202 if (likely(rc
!= 31)) {
1204 TCGv tmp
= tcg_temp_new();
1205 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1207 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1209 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1213 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1215 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1221 if (likely(rc
!= 31)) {
1223 TCGv tmp
= tcg_temp_new();
1224 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1226 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1228 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1232 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1234 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1240 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
1244 gen_addlv(ra
, rb
, rc
, islit
, lit
);
1248 gen_sublv(ra
, rb
, rc
, islit
, lit
);
1252 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
1256 gen_addqv(ra
, rb
, rc
, islit
, lit
);
1260 gen_subqv(ra
, rb
, rc
, islit
, lit
);
1264 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
1274 if (likely(rc
!= 31)) {
1276 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1278 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1280 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1285 if (likely(rc
!= 31)) {
1288 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1290 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1292 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1297 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
1301 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
1305 if (likely(rc
!= 31)) {
1308 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1310 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1313 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1315 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1321 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
1325 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
1329 if (likely(rc
!= 31)) {
1332 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1334 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1337 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
1339 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1345 if (likely(rc
!= 31)) {
1348 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1350 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1353 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1355 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1361 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
1365 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
1369 if (likely(rc
!= 31)) {
1372 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1374 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1377 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
1379 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1385 if (likely(rc
!= 31)) {
1387 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1389 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1390 switch (ctx
->env
->implver
) {
1392 /* EV4, EV45, LCA, LCA45 & EV5 */
1397 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rc
],
1398 ~(uint64_t)ctx
->amask
);
1405 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
1409 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
1414 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->env
->implver
);
1424 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1428 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1432 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1436 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1440 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1444 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1448 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1452 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1456 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1460 gen_zap(ra
, rb
, rc
, islit
, lit
);
1464 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
1468 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1472 if (likely(rc
!= 31)) {
1475 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1477 TCGv shift
= tcg_temp_new();
1478 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1479 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1480 tcg_temp_free(shift
);
1483 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1488 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1492 if (likely(rc
!= 31)) {
1495 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1497 TCGv shift
= tcg_temp_new();
1498 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1499 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1500 tcg_temp_free(shift
);
1503 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1508 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1512 if (likely(rc
!= 31)) {
1515 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1517 TCGv shift
= tcg_temp_new();
1518 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1519 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1520 tcg_temp_free(shift
);
1523 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1528 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1532 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1536 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1540 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1544 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1548 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1552 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1556 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1560 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1570 if (likely(rc
!= 31)) {
1572 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1575 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1577 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1578 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1584 if (likely(rc
!= 31)) {
1586 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1588 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1590 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1595 gen_umulh(ra
, rb
, rc
, islit
, lit
);
1599 gen_mullv(ra
, rb
, rc
, islit
, lit
);
1603 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
1610 switch (fpfn
) { /* f11 & 0x3F */
1613 if (!(ctx
->amask
& AMASK_FIX
))
1615 if (likely(rc
!= 31)) {
1617 TCGv_i32 tmp
= tcg_temp_new_i32();
1618 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
1619 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
1620 tcg_temp_free_i32(tmp
);
1622 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1627 if (!(ctx
->amask
& AMASK_FIX
))
1633 if (!(ctx
->amask
& AMASK_FIX
))
1639 if (!(ctx
->amask
& AMASK_FIX
))
1641 if (likely(rc
!= 31)) {
1643 TCGv_i32 tmp
= tcg_temp_new_i32();
1644 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
1645 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
1646 tcg_temp_free_i32(tmp
);
1648 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1653 if (!(ctx
->amask
& AMASK_FIX
))
1655 if (likely(rc
!= 31)) {
1657 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
1659 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1664 if (!(ctx
->amask
& AMASK_FIX
))
1670 if (!(ctx
->amask
& AMASK_FIX
))
1679 /* VAX floating point */
1680 /* XXX: rounding mode and trap are ignored (!) */
1681 switch (fpfn
) { /* f11 & 0x3F */
1684 gen_faddf(ra
, rb
, rc
);
1688 gen_fsubf(ra
, rb
, rc
);
1692 gen_fmulf(ra
, rb
, rc
);
1696 gen_fdivf(ra
, rb
, rc
);
1708 gen_faddg(ra
, rb
, rc
);
1712 gen_fsubg(ra
, rb
, rc
);
1716 gen_fmulg(ra
, rb
, rc
);
1720 gen_fdivg(ra
, rb
, rc
);
1724 gen_fcmpgeq(ra
, rb
, rc
);
1728 gen_fcmpglt(ra
, rb
, rc
);
1732 gen_fcmpgle(ra
, rb
, rc
);
1763 /* IEEE floating-point */
1764 /* XXX: rounding mode and traps are ignored (!) */
1765 switch (fpfn
) { /* f11 & 0x3F */
1768 gen_fadds(ra
, rb
, rc
);
1772 gen_fsubs(ra
, rb
, rc
);
1776 gen_fmuls(ra
, rb
, rc
);
1780 gen_fdivs(ra
, rb
, rc
);
1784 gen_faddt(ra
, rb
, rc
);
1788 gen_fsubt(ra
, rb
, rc
);
1792 gen_fmult(ra
, rb
, rc
);
1796 gen_fdivt(ra
, rb
, rc
);
1800 gen_fcmptun(ra
, rb
, rc
);
1804 gen_fcmpteq(ra
, rb
, rc
);
1808 gen_fcmptlt(ra
, rb
, rc
);
1812 gen_fcmptle(ra
, rb
, rc
);
1815 /* XXX: incorrect */
1816 if (fn11
== 0x2AC || fn11
== 0x6AC) {
1847 if (likely(rc
!= 31)) {
1851 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1853 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
1856 gen_fcpys(ra
, rb
, rc
);
1862 gen_fcpysn(ra
, rb
, rc
);
1866 gen_fcpyse(ra
, rb
, rc
);
1870 if (likely(ra
!= 31))
1871 gen_helper_store_fpcr(cpu_fir
[ra
]);
1873 TCGv tmp
= tcg_const_i64(0);
1874 gen_helper_store_fpcr(tmp
);
1880 if (likely(ra
!= 31))
1881 gen_helper_load_fpcr(cpu_fir
[ra
]);
1885 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
1889 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
1893 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
1897 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
1901 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
1905 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
1913 gen_fcvtqlv(rb
, rc
);
1917 gen_fcvtqlsv(rb
, rc
);
1924 switch ((uint16_t)disp16
) {
1927 /* No-op. Just exit from the current tb */
1932 /* No-op. Just exit from the current tb */
1954 gen_helper_load_pcc(cpu_ir
[ra
]);
1959 gen_helper_rc(cpu_ir
[ra
]);
1967 gen_helper_rs(cpu_ir
[ra
]);
1978 /* HW_MFPR (PALcode) */
1979 #if defined (CONFIG_USER_ONLY)
1985 TCGv tmp
= tcg_const_i32(insn
& 0xFF);
1986 gen_helper_mfpr(cpu_ir
[ra
], tmp
, cpu_ir
[ra
]);
1993 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
1995 tcg_gen_movi_i64(cpu_pc
, 0);
1997 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
1998 /* Those four jumps only differ by the branch prediction hint */
2016 /* HW_LD (PALcode) */
2017 #if defined (CONFIG_USER_ONLY)
2023 TCGv addr
= tcg_temp_new();
2025 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2027 tcg_gen_movi_i64(addr
, disp12
);
2028 switch ((insn
>> 12) & 0xF) {
2030 /* Longword physical access (hw_ldl/p) */
2031 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2034 /* Quadword physical access (hw_ldq/p) */
2035 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2038 /* Longword physical access with lock (hw_ldl_l/p) */
2039 gen_helper_ldl_l_raw(cpu_ir
[ra
], addr
);
2042 /* Quadword physical access with lock (hw_ldq_l/p) */
2043 gen_helper_ldq_l_raw(cpu_ir
[ra
], addr
);
2046 /* Longword virtual PTE fetch (hw_ldl/v) */
2047 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, 0);
2050 /* Quadword virtual PTE fetch (hw_ldq/v) */
2051 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, 0);
            /* Invalid */
            /* Invalid */
2060 /* Longword virtual access (hw_ldl) */
2061 gen_helper_st_virt_to_phys(addr
, addr
);
2062 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2065 /* Quadword virtual access (hw_ldq) */
2066 gen_helper_st_virt_to_phys(addr
, addr
);
2067 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2070 /* Longword virtual access with protection check (hw_ldl/w) */
2071 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, 0);
2074 /* Quadword virtual access with protection check (hw_ldq/w) */
2075 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, 0);
2078 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2079 gen_helper_set_alt_mode();
2080 gen_helper_st_virt_to_phys(addr
, addr
);
2081 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2082 gen_helper_restore_mode();
2085 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2086 gen_helper_set_alt_mode();
2087 gen_helper_st_virt_to_phys(addr
, addr
);
2088 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2089 gen_helper_restore_mode();
2092 /* Longword virtual access with alternate access mode and
2093 * protection checks (hw_ldl/wa)
2095 gen_helper_set_alt_mode();
2096 gen_helper_ldl_data(cpu_ir
[ra
], addr
);
2097 gen_helper_restore_mode();
2100 /* Quadword virtual access with alternate access mode and
2101 * protection checks (hw_ldq/wa)
2103 gen_helper_set_alt_mode();
2104 gen_helper_ldq_data(cpu_ir
[ra
], addr
);
2105 gen_helper_restore_mode();
2108 tcg_temp_free(addr
);
2116 if (!(ctx
->amask
& AMASK_BWX
))
2118 if (likely(rc
!= 31)) {
2120 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2122 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2127 if (!(ctx
->amask
& AMASK_BWX
))
2129 if (likely(rc
!= 31)) {
2131 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
2133 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2138 if (!(ctx
->amask
& AMASK_CIX
))
2140 if (likely(rc
!= 31)) {
2142 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
2144 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
2149 if (!(ctx
->amask
& AMASK_MVI
))
2151 gen_perr(ra
, rb
, rc
, islit
, lit
);
2155 if (!(ctx
->amask
& AMASK_CIX
))
2157 if (likely(rc
!= 31)) {
2159 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
2161 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
2166 if (!(ctx
->amask
& AMASK_CIX
))
2168 if (likely(rc
!= 31)) {
2170 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
2172 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
2177 if (!(ctx
->amask
& AMASK_MVI
))
2179 if (real_islit
|| ra
!= 31)
2181 gen_unpkbw (rb
, rc
);
2185 if (!(ctx
->amask
& AMASK_MVI
))
2187 if (real_islit
|| ra
!= 31)
2189 gen_unpkbl (rb
, rc
);
2193 if (!(ctx
->amask
& AMASK_MVI
))
2195 if (real_islit
|| ra
!= 31)
2201 if (!(ctx
->amask
& AMASK_MVI
))
2203 if (real_islit
|| ra
!= 31)
2209 if (!(ctx
->amask
& AMASK_MVI
))
2211 gen_minsb8 (ra
, rb
, rc
, islit
, lit
);
2215 if (!(ctx
->amask
& AMASK_MVI
))
2217 gen_minsw4 (ra
, rb
, rc
, islit
, lit
);
2221 if (!(ctx
->amask
& AMASK_MVI
))
2223 gen_minub8 (ra
, rb
, rc
, islit
, lit
);
2227 if (!(ctx
->amask
& AMASK_MVI
))
2229 gen_minuw4 (ra
, rb
, rc
, islit
, lit
);
2233 if (!(ctx
->amask
& AMASK_MVI
))
2235 gen_maxub8 (ra
, rb
, rc
, islit
, lit
);
2239 if (!(ctx
->amask
& AMASK_MVI
))
2241 gen_maxuw4 (ra
, rb
, rc
, islit
, lit
);
2245 if (!(ctx
->amask
& AMASK_MVI
))
2247 gen_maxsb8 (ra
, rb
, rc
, islit
, lit
);
2251 if (!(ctx
->amask
& AMASK_MVI
))
2253 gen_maxsw4 (ra
, rb
, rc
, islit
, lit
);
2257 if (!(ctx
->amask
& AMASK_FIX
))
2259 if (likely(rc
!= 31)) {
2261 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
2263 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2268 if (!(ctx
->amask
& AMASK_FIX
))
2271 TCGv_i32 tmp1
= tcg_temp_new_i32();
2273 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
2275 TCGv tmp2
= tcg_const_i64(0);
2276 gen_helper_s_to_memory(tmp1
, tmp2
);
2277 tcg_temp_free(tmp2
);
2279 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
2280 tcg_temp_free_i32(tmp1
);
2288 /* HW_MTPR (PALcode) */
2289 #if defined (CONFIG_USER_ONLY)
2295 TCGv tmp1
= tcg_const_i32(insn
& 0xFF);
2297 gen_helper_mtpr(tmp1
, cpu_ir
[ra
]);
2299 TCGv tmp2
= tcg_const_i64(0);
2300 gen_helper_mtpr(tmp1
, tmp2
);
2301 tcg_temp_free(tmp2
);
2303 tcg_temp_free(tmp1
);
2309 /* HW_REI (PALcode) */
2310 #if defined (CONFIG_USER_ONLY)
2317 gen_helper_hw_rei();
2322 tmp
= tcg_temp_new();
2323 tcg_gen_addi_i64(tmp
, cpu_ir
[rb
], (((int64_t)insn
<< 51) >> 51));
2325 tmp
= tcg_const_i64(((int64_t)insn
<< 51) >> 51);
2326 gen_helper_hw_ret(tmp
);
2333 /* HW_ST (PALcode) */
2334 #if defined (CONFIG_USER_ONLY)
2341 addr
= tcg_temp_new();
2343 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2345 tcg_gen_movi_i64(addr
, disp12
);
2349 val
= tcg_temp_new();
2350 tcg_gen_movi_i64(val
, 0);
2352 switch ((insn
>> 12) & 0xF) {
2354 /* Longword physical access */
2355 gen_helper_stl_raw(val
, addr
);
2358 /* Quadword physical access */
2359 gen_helper_stq_raw(val
, addr
);
2362 /* Longword physical access with lock */
2363 gen_helper_stl_c_raw(val
, val
, addr
);
2366 /* Quadword physical access with lock */
2367 gen_helper_stq_c_raw(val
, val
, addr
);
2370 /* Longword virtual access */
2371 gen_helper_st_virt_to_phys(addr
, addr
);
2372 gen_helper_stl_raw(val
, addr
);
2375 /* Quadword virtual access */
2376 gen_helper_st_virt_to_phys(addr
, addr
);
2377 gen_helper_stq_raw(val
, addr
);
2398 /* Longword virtual access with alternate access mode */
2399 gen_helper_set_alt_mode();
2400 gen_helper_st_virt_to_phys(addr
, addr
);
2401 gen_helper_stl_raw(val
, addr
);
2402 gen_helper_restore_mode();
2405 /* Quadword virtual access with alternate access mode */
2406 gen_helper_set_alt_mode();
2407 gen_helper_st_virt_to_phys(addr
, addr
);
            gen_helper_stq_raw(val, addr);
2409 gen_helper_restore_mode();
2420 tcg_temp_free(addr
);
2426 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2430 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2434 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2438 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2442 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0, 0);
2446 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0, 0);
2450 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0, 0);
2454 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0, 0);
2458 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2462 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2466 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2470 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2474 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0, 0);
2478 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0, 0);
2482 gen_store_mem(ctx
, &gen_qemu_stl_c
, ra
, rb
, disp16
, 0, 0, 1);
2486 gen_store_mem(ctx
, &gen_qemu_stq_c
, ra
, rb
, disp16
, 0, 0, 1);
2491 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2492 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
+ (int64_t)(disp21
<< 2));
2495 case 0x31: /* FBEQ */
2496 gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2499 case 0x32: /* FBLT */
2500 gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2503 case 0x33: /* FBLE */
2504 gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2510 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2511 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
+ (int64_t)(disp21
<< 2));
2514 case 0x35: /* FBNE */
2515 gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2518 case 0x36: /* FBGE */
2519 gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2522 case 0x37: /* FBGT */
2523 gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2528 gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2533 gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2538 gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2543 gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2548 gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2553 gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2558 gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2563 gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns = 0;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
           generation */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (gen_opc_ptr >= gen_opc_end)
            break;
        if (num_insns >= max_insns)
            break;
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
    }
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
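/* Illustrative note (not in the original source): the amask value chosen
   here is what the AMASK instruction handled in translate_one above
   reports to guest code.  Selecting "ev56", for example, clears only the
   AMASK_BWX bit from the guest's query mask, so a guest probing for the
   byte/word extension sees it as present while MVI, FIX and CIX still
   read back as unsupported. */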
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    alpha_translate_init();

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#else
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;
#endif

    qemu_init_vcpu(env);
    return env;
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}