/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
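/* Sizing note for cpu_reg_names above: "ir0".."ir9" need 4 bytes each
   (including the NUL), "ir10".."ir30" need 5, and the "fir" names are
   one byte longer -- matching the p += ... steps in the init loop below. */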
#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 1
#include "helper.h"
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();

    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();

    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
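/* Load-locked is emulated by remembering the locked address in cpu_lock
   (see gen_qemu_ldl_l/ldq_l above); the matching store-conditionals
   (gen_qemu_stl_c/stq_c below) store only while cpu_lock still equals
   the store address, write 1 (success) or 0 (failure) into t0, and then
   invalidate the lock by setting cpu_lock to -1. */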
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();

    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();

    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();

    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
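/* Branch displacements count instructions, hence the "disp << 2" above;
   ctx->pc has already been advanced past the branch, so disp == -1
   branches back to the branch instruction itself. */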
static inline void gen_fbcond(DisasContext *ctx, int opc, int ra, int32_t disp)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
#define FARITH2(name)                                                     \
static inline void glue(gen_f, name)(int rb, int rc)                      \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (rb != 31)                                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);                   \
    else {                                                                \
        TCGv tmp = tcg_const_i64(0);                                      \
        gen_helper_ ## name (cpu_fir[rc], tmp);                           \
        tcg_temp_free(tmp);                                               \
    }                                                                     \
}

#define FARITH3(name)                                                     \
static inline void glue(gen_f, name)(int ra, int rb, int rc)              \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (ra != 31) {                                                       \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]);  \
        else {                                                            \
            TCGv tmp = tcg_const_i64(0);                                  \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);          \
            tcg_temp_free(tmp);                                           \
        }                                                                 \
    } else {                                                              \
        TCGv tmp = tcg_const_i64(0);                                      \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);          \
        else                                                              \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                  \
        tcg_temp_free(tmp);                                               \
    }                                                                     \
}
#define FCMOV(name)                                                       \
static inline void glue(gen_f, name)(int ra, int rb, int rc)              \
{                                                                         \
    int l1;                                                               \
    TCGv tmp;                                                             \
                                                                          \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    l1 = gen_new_label();                                                 \
    if (ra != 31) {                                                       \
        tmp = tcg_temp_new();                                             \
        gen_helper_ ## name (tmp, cpu_fir[ra]);                           \
    } else {                                                              \
        tmp = tcg_const_i64(0);                                           \
        gen_helper_ ## name (tmp, tmp);                                   \
    }                                                                     \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);                         \
    if (rb != 31)                                                         \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);                        \
    else                                                                  \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                                 \
    gen_set_label(l1);                                                    \
    tcg_temp_free(tmp);                                                   \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
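/* Example: zapnot_mask(0x0f) == 0x00000000ffffffffull -- each set bit
   of the 8-bit literal keeps the corresponding byte of the operand. */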
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */
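        /* For example, rb<2:0> == 0 yields a variable shift of 63, and
           the extra constant shift of 1 makes 64 in total (mask fully
           cleared); rb<2:0> == 1 yields 55 + 1 = 56. */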
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                      \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,    \
                                    uint8_t lit)                          \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (ra != 31) {                                                       \
        if (islit) {                                                      \
            TCGv tmp = tcg_const_i64(lit);                                \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);             \
            tcg_temp_free(tmp);                                           \
        } else                                                            \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);     \
    } else {                                                              \
        TCGv tmp1 = tcg_const_i64(0);                                     \
        if (islit) {                                                      \
            TCGv tmp2 = tcg_const_i64(lit);                               \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);                 \
            tcg_temp_free(tmp2);                                          \
        } else                                                            \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);           \
        tcg_temp_free(tmp1);                                              \
    }                                                                     \
}
#define MVIOP2(name)                                                      \
static inline void glue(gen_, name)(int rb, int rc)                       \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
    if (unlikely(rb == 31))                                               \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                                  \
    else                                                                  \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);                     \
}
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;

    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
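    /* Illustrative decode (example, not from the original source): for
       insn == 0x23DEFFF0, i.e. "lda $30, -16($30)", this yields
       opc == 0x08, ra == rb == 30 and disp16 == -16. */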
    switch (opc) {
    /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
        }
#endif
        /* Invalid PAL call */
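        /* Dispatch summary: unprivileged PAL entries (0x80..0xBF) and
           privileged ones (< 0x40) each get their own exception vector;
           for instance, palcode 0x83 is the OSF/1 callsys entry point. */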
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }

        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }

        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);

        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);

        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);

        /* ADDL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit) {
                    tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }
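        /* All longword (*L) forms sign-extend bit 31 into the upper half:
           e.g. ADDL of 0x7fffffff and 1 leaves 0xffffffff80000000 in rc. */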
        /* S4ADDL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                if (islit)
                    tcg_gen_addi_i64(tmp, tmp, lit);
                else
                    tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* SUBL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
        }

        /* S4SUBL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                if (islit)
                    tcg_gen_subi_i64(tmp, tmp, lit);
                else
                    tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
        }

        /* CMPBGE */
        gen_cmpbge(ra, rb, rc, islit, lit);

        /* S8ADDL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                if (islit)
                    tcg_gen_addi_i64(tmp, tmp, lit);
                else
                    tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* S8SUBL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                if (islit)
                    tcg_gen_subi_i64(tmp, tmp, lit);
                else
                    tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
        }

        /* CMPULT */
        gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
        /* ADDQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* S4ADDQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                if (islit)
                    tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                else
                    tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* SUBQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* S4SUBQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                if (islit)
                    tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                else
                    tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* CMPEQ */
        gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);

        /* S8ADDQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                if (islit)
                    tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                else
                    tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* S8SUBQ */
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                if (islit)
                    tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                else
                    tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_temp_free(tmp);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* CMPULE */
        gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);

        /* ADDL/V */
        gen_addlv(ra, rb, rc, islit, lit);

        /* SUBL/V */
        gen_sublv(ra, rb, rc, islit, lit);

        /* CMPLT */
        gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);

        /* ADDQ/V */
        gen_addqv(ra, rb, rc, islit, lit);

        /* SUBQ/V */
        gen_subqv(ra, rb, rc, islit, lit);

        /* CMPLE */
        gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
        /* AND */
        if (likely(rc != 31)) {
            if (ra == 31)
                tcg_gen_movi_i64(cpu_ir[rc], 0);
            else if (islit)
                tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            else
                tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        }

        /* BIC */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                else
                    tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else
                tcg_gen_movi_i64(cpu_ir[rc], 0);
        }

        /* CMOVLBS */
        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);

        /* CMOVLBC */
        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);

        /* BIS */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* CMOVEQ */
        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);

        /* CMOVNE */
        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);

        /* ORNOT */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                else
                    tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                else
                    tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* XOR */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* CMOVLT */
        gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);

        /* CMOVGE */
        gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);

        /* EQV */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                else
                    tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            } else {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                else
                    tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
            }
        }

        /* AMASK */
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], lit);
            else
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            switch (ctx->env->implver) {
            case IMPLVER_2106x:
                /* EV4, EV45, LCA, LCA45 & EV5 */
                break;
            default:
                tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                 ~(uint64_t)ctx->amask);
                break;
            }
        }

        /* CMOVLE */
        gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);

        /* CMOVGT */
        gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);

        /* IMPLVER */
        if (rc != 31)
            tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
        /* MSKBL */
        gen_msk_l(ra, rb, rc, islit, lit, 0x01);
        /* EXTBL */
        gen_ext_l(ra, rb, rc, islit, lit, 0x01);
        /* INSBL */
        gen_ins_l(ra, rb, rc, islit, lit, 0x01);
        /* MSKWL */
        gen_msk_l(ra, rb, rc, islit, lit, 0x03);
        /* EXTWL */
        gen_ext_l(ra, rb, rc, islit, lit, 0x03);
        /* INSWL */
        gen_ins_l(ra, rb, rc, islit, lit, 0x03);
        /* MSKLL */
        gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
        /* EXTLL */
        gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
        /* INSLL */
        gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
        /* ZAP */
        gen_zap(ra, rb, rc, islit, lit);
        /* ZAPNOT */
        gen_zapnot(ra, rb, rc, islit, lit);
        /* MSKQL */
        gen_msk_l(ra, rb, rc, islit, lit, 0xff);

        /* SRL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                else {
                    TCGv shift = tcg_temp_new();
                    tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                    tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                    tcg_temp_free(shift);
                }
            } else
                tcg_gen_movi_i64(cpu_ir[rc], 0);
        }

        /* EXTQL */
        gen_ext_l(ra, rb, rc, islit, lit, 0xff);

        /* SLL */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                else {
                    TCGv shift = tcg_temp_new();
                    tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                    tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                    tcg_temp_free(shift);
                }
            } else
                tcg_gen_movi_i64(cpu_ir[rc], 0);
        }

        /* INSQL */
        gen_ins_l(ra, rb, rc, islit, lit, 0xff);

        /* SRA */
        if (likely(rc != 31)) {
            if (ra != 31) {
                if (islit)
                    tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                else {
                    TCGv shift = tcg_temp_new();
                    tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                    tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                    tcg_temp_free(shift);
                }
            } else
                tcg_gen_movi_i64(cpu_ir[rc], 0);
        }

        /* MSKWH */
        gen_msk_h(ra, rb, rc, islit, lit, 0x03);
        /* INSWH */
        gen_inswh(ra, rb, rc, islit, lit);
        /* EXTWH */
        gen_ext_h(ra, rb, rc, islit, lit, 0x03);
        /* MSKLH */
        gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
        /* INSLH */
        gen_inslh(ra, rb, rc, islit, lit);
        /* EXTLH */
        gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
        /* MSKQH */
        gen_msk_h(ra, rb, rc, islit, lit, 0xff);
        /* INSQH */
        gen_insqh(ra, rb, rc, islit, lit);
        /* EXTQH */
        gen_ext_h(ra, rb, rc, islit, lit, 0xff);
        /* MULL */
        if (likely(rc != 31)) {
            if (ra == 31)
                tcg_gen_movi_i64(cpu_ir[rc], 0);
            else {
                if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
            }
        }

        /* MULQ */
        if (likely(rc != 31)) {
            if (ra == 31)
                tcg_gen_movi_i64(cpu_ir[rc], 0);
            else if (islit)
                tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
            else
                tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        }

        /* UMULH */
        gen_umulh(ra, rb, rc, islit, lit);

        /* MULL/V */
        gen_mullv(ra, rb, rc, islit, lit);

        /* MULQ/V */
        gen_mulqv(ra, rb, rc, islit, lit);
        switch (fpfn) { /* f11 & 0x3F */
        /* ITOFS */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                gen_helper_memory_to_s(cpu_fir[rc], tmp);
                tcg_temp_free_i32(tmp);
            } else
                tcg_gen_movi_i64(cpu_fir[rc], 0);
        }

        /* SQRTF */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;

        /* SQRTS */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;

        /* ITOFF */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (ra != 31) {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                gen_helper_memory_to_f(cpu_fir[rc], tmp);
                tcg_temp_free_i32(tmp);
            } else
                tcg_gen_movi_i64(cpu_fir[rc], 0);
        }

        /* ITOFT */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (ra != 31)
                tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
            else
                tcg_gen_movi_i64(cpu_fir[rc], 0);
        }

        /* SQRTG */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;

        /* SQRTT */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        /* ADDF */
        gen_faddf(ra, rb, rc);
        /* SUBF */
        gen_fsubf(ra, rb, rc);
        /* MULF */
        gen_fmulf(ra, rb, rc);
        /* DIVF */
        gen_fdivf(ra, rb, rc);
        /* ADDG */
        gen_faddg(ra, rb, rc);
        /* SUBG */
        gen_fsubg(ra, rb, rc);
        /* MULG */
        gen_fmulg(ra, rb, rc);
        /* DIVG */
        gen_fdivg(ra, rb, rc);
        /* CMPGEQ */
        gen_fcmpgeq(ra, rb, rc);
        /* CMPGLT */
        gen_fcmpglt(ra, rb, rc);
        /* CMPGLE */
        gen_fcmpgle(ra, rb, rc);
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        /* ADDS */
        gen_fadds(ra, rb, rc);
        /* SUBS */
        gen_fsubs(ra, rb, rc);
        /* MULS */
        gen_fmuls(ra, rb, rc);
        /* DIVS */
        gen_fdivs(ra, rb, rc);
        /* ADDT */
        gen_faddt(ra, rb, rc);
        /* SUBT */
        gen_fsubt(ra, rb, rc);
        /* MULT */
        gen_fmult(ra, rb, rc);
        /* DIVT */
        gen_fdivt(ra, rb, rc);
        /* CMPTUN */
        gen_fcmptun(ra, rb, rc);
        /* CMPTEQ */
        gen_fcmpteq(ra, rb, rc);
        /* CMPTLT */
        gen_fcmptlt(ra, rb, rc);
        /* CMPTLE */
        gen_fcmptle(ra, rb, rc);

        /* XXX: incorrect */
        if (fn11 == 0x2AC || fn11 == 0x6AC) {
        /* CPYS */
        if (likely(rc != 31)) {
            if (ra == rb)
                /* FMOV */
                tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
            else
                gen_fcpys(ra, rb, rc);
        }

        /* CPYSN */
        gen_fcpysn(ra, rb, rc);

        /* CPYSE */
        gen_fcpyse(ra, rb, rc);

        /* MT_FPCR */
        if (likely(ra != 31))
            gen_helper_store_fpcr(cpu_fir[ra]);
        else {
            TCGv tmp = tcg_const_i64(0);
            gen_helper_store_fpcr(tmp);
            tcg_temp_free(tmp);
        }

        /* MF_FPCR */
        if (likely(ra != 31))
            gen_helper_load_fpcr(cpu_fir[ra]);

        /* FCMOVEQ */
        gen_fcmpfeq(ra, rb, rc);
        /* FCMOVNE */
        gen_fcmpfne(ra, rb, rc);
        /* FCMOVLT */
        gen_fcmpflt(ra, rb, rc);
        /* FCMOVGE */
        gen_fcmpfge(ra, rb, rc);
        /* FCMOVLE */
        gen_fcmpfle(ra, rb, rc);
        /* FCMOVGT */
        gen_fcmpfgt(ra, rb, rc);

        /* CVTQL/V */
        gen_fcvtqlv(rb, rc);
        /* CVTQL/SV */
        gen_fcvtqlsv(rb, rc);
        switch ((uint16_t)disp16) {
        /* TRAPB */
        /* No-op. Just exit from the current tb */

        /* EXCB */
        /* No-op. Just exit from the current tb */

        /* RPCC */
        if (ra != 31)
            gen_helper_load_pcc(cpu_ir[ra]);

        /* RC */
        if (ra != 31)
            gen_helper_rc(cpu_ir[ra]);

        /* RS */
        if (ra != 31)
            gen_helper_rs(cpu_ir[ra]);
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
#endif

        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
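        /* The variant (JMP, JSR, RET or JSR_COROUTINE) is chosen by
           insn<15:14>; since the hint only matters to real branch
           prediction hardware, all four translate identically here. */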
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
#endif
        /* SEXTB */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
            else
                tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
        }

        /* SEXTW */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
            else
                tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
        }

        /* CTPOP */
        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
            else
                gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
        }

        /* PERR */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_perr(ra, rb, rc, islit, lit);

        /* CTLZ */
        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
            else
                gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
        }

        /* CTTZ */
        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (islit)
                tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
            else
                gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
        }

        /* UNPKBW */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_unpkbw(rb, rc);

        /* UNPKBL */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_unpkbl(rb, rc);

        /* PKWB */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_pkwb(rb, rc);

        /* PKLB */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_pklb(rb, rc);

        /* MINSB8 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minsb8(ra, rb, rc, islit, lit);

        /* MINSW4 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minsw4(ra, rb, rc, islit, lit);

        /* MINUB8 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minub8(ra, rb, rc, islit, lit);

        /* MINUW4 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minuw4(ra, rb, rc, islit, lit);

        /* MAXUB8 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxub8(ra, rb, rc, islit, lit);

        /* MAXUW4 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxuw4(ra, rb, rc, islit, lit);

        /* MAXSB8 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxsb8(ra, rb, rc, islit, lit);

        /* MAXSW4 */
        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxsw4(ra, rb, rc, islit, lit);

        /* FTOIT */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
            if (ra != 31)
                tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
            else
                tcg_gen_movi_i64(cpu_ir[rc], 0);
        }

        /* FTOIS */
        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (rc != 31) {
            TCGv_i32 tmp1 = tcg_temp_new_i32();
            if (ra != 31)
                gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_s_to_memory(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
            tcg_temp_free_i32(tmp1);
        }
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
        }
#endif

        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            gen_helper_hw_rei();
        } else {
            TCGv tmp;
            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
#endif

        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
#endif
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));

    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp21);

        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));

    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);

        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns = 0;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (gen_opc_ptr >= gen_opc_end)
            break;
        if (num_insns >= max_insns)
            break;
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef ALPHA_DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) },
};
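/* The AMASK bits advertise ISA extensions to guest code: BWX byte/word
   memory ops, FIX square root and FP<->integer moves, CIX the count
   instructions (CTLZ/CTTZ/CTPOP), MVI multimedia, TRAP precise
   arithmetic traps, PREFETCH prefetch hints (assumed meanings). */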
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    alpha_translate_init();

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
#else
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;
#endif

    qemu_init_vcpu(env);
    return env;
}
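    /* Typical (hypothetical) use: cpu_alpha_init("ev67"); an unknown
       model string silently falls back to the ev67-like defaults set
       above, since the table lookup only overrides them on a match. */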
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}