/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
/* #define DO_SINGLE_STEP */
#define ALPHA_DEBUG_DISAS
/* #define DO_TB_FLUSH */

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];

#include "gen-icount.h"
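
/* A note on the buffer above: alpha_translate_init() writes the register
   names into it back to back.  "ir0".."ir9" take 4 bytes each including
   the NUL (10*4), "ir10".."ir30" take 5 (21*5), and the "fir%d" names
   take one byte more (10*5 + 21*6).  Each TCG global keeps a pointer into
   this buffer, so it must stay live for the whole run. */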
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
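
/* The helpers below convert between the in-register representation of
   VAX F/G and IEEE S floating-point values and their memory format.
   Loads go through an i64 temporary because tcg_gen_qemu_ld* produces
   the full load result before it is narrowed and expanded. */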
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
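
/* gen_load_mem factors the common Alpha load pattern: compute the
   effective address cpu_ir[rb] + disp16 (or just disp16 when rb is $31,
   which always reads as zero), optionally clear the low three address
   bits (the unaligned LDQ_U case), then load into an integer or FP
   register.  A load into $31 is an architectural no-op and is skipped
   before any address computation. */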
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
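
/* Load-locked/store-conditional is emulated through the cpu_lock global:
   ldl_l/ldq_l record the locked address, and the store-conditional code
   below performs the store (and writes 1 into the data register) only
   when the store address still matches cpu_lock, writing 0 otherwise and
   always invalidating the lock.  This models the single-CPU
   success/failure protocol only; it is not multiprocessor-safe. */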
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
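
/* Conditional branches are generated as two straight-line paths that
   each assign cpu_pc: the fall-through path stores ctx->pc (the address
   of the next instruction) and the taken path stores ctx->pc plus the
   sign-extended displacement scaled by 4.  The caller then ends the TB,
   so no goto_tb chaining is attempted here.  With mask set, only bit 0
   of ra is tested (the BLBC/BLBS forms). */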
static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
static inline void gen_fbcond(DisasContext *ctx, int opc, int ra,
                              int32_t disp)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    default:
        abort();
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
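
/* The FARITH2/FARITH3/FCMOV macros below stamp out one small wrapper per
   FP helper (gen_fsqrts, gen_fadds, gen_fcmpfeq, ...).  They follow the
   same register convention as the integer paths: $f31 as a source reads
   as zero, so a constant-zero temporary is substituted, and $f31 as a
   destination makes the operation a no-op. */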
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31))                                 \
        return;                                             \
                                                            \
    if (rb != 31)                                           \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    else {                                                  \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
#define FARITH3(name)                                                     \
static inline void glue(gen_f, name)(int ra, int rb, int rc)              \
{                                                                         \
    if (unlikely(rc == 31))                                               \
        return;                                                           \
                                                                          \
    if (ra != 31) {                                                       \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]);  \
        else {                                                            \
            TCGv tmp = tcg_const_i64(0);                                  \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);          \
            tcg_temp_free(tmp);                                           \
        }                                                                 \
    } else {                                                              \
        TCGv tmp = tcg_const_i64(0);                                      \
        if (rb != 31)                                                     \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);          \
        else                                                              \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                  \
        tcg_temp_free(tmp);                                               \
    }                                                                     \
}
#define FCMOV(name)                                                   \
static inline void glue(gen_f, name)(int ra, int rb, int rc)          \
{                                                                     \
    int l1;                                                           \
    TCGv tmp;                                                         \
                                                                      \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    l1 = gen_new_label();                                             \
    if (ra != 31) {                                                   \
        tmp = tcg_temp_new();                                         \
        gen_helper_ ## name (tmp, cpu_fir[ra]);                       \
    } else {                                                          \
        tmp = tcg_const_i64(0);                                       \
        gen_helper_ ## name (tmp, tmp);                               \
    }                                                                 \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);                     \
    /* On a true condition, FB (rb) is copied to FC.  */              \
    if (rb != 31)                                                     \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);                    \
    else                                                              \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                             \
    gen_set_label(l1);                                                \
    tcg_temp_free(tmp);                                               \
}
/* EXTWH, EXTLH, EXTQH */
static inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                             int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            if (lit != 0)
                tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], 64 - ((lit & 7) * 8));
            else
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
        } else {
            TCGv tmp1 = tcg_temp_new();

            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);

            tcg_temp_free(tmp1);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static inline void gen_ext_l(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
                             int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;

    if (ra != 31) {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        if (tcg_gen_ext_i64)
            tcg_gen_ext_i64(cpu_ir[rc], cpu_ir[rc]);
    } else
        tcg_gen_movi_i64(cpu_ir[rc], 0);
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
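
/* Comparisons materialize a 0/1 result with an explicit branch diamond:
   rc is set to 0 on the fall-through path and to 1 on the taken path. */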
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
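
/* translate_one decodes and translates a single instruction word.  All
   Alpha instruction formats share the 6-bit major opcode in bits 31..26;
   the extraction below pulls out every format's fields up front (ra/rb/rc,
   the 21/16/12-bit displacements, the 11/7/2-bit function codes) and lets
   each opcode case use the ones it needs.  The return value steers the
   main loop: 0 continues translation in the same TB, and nonzero values
   end the TB (dynamic jump, plain stop, or exception). */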
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n",
              opc, ra, rb, rc, disp16);
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
#if !defined (CONFIG_USER_ONLY)
        } else if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            else
                gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
#endif
        } else {
            /* Invalid PAL call */
            goto invalid_opc;
        }
        ret = 3;
        break;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_mskbl(ra, rb, rc, islit, lit);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(&tcg_gen_ext8u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x0B:
            /* INSBL */
            gen_insbl(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* MSKWL */
            gen_mskwl(ra, rb, rc, islit, lit);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x1B:
            /* INSWL */
            gen_inswl(ra, rb, rc, islit, lit);
            break;
        case 0x22:
            /* MSKLL */
            gen_mskll(ra, rb, rc, islit, lit);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x2B:
            /* INSLL */
            gen_insll(ra, rb, rc, islit, lit);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_mskql(ra, rb, rc, islit, lit);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(NULL, ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_insql(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_mskwh(ra, rb, rc, islit, lit);
            break;
        case 0x57:
            /* INSWH */
            gen_inswh(ra, rb, rc, islit, lit);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x62:
            /* MSKLH */
            gen_msklh(ra, rb, rc, islit, lit);
            break;
        case 0x67:
            /* INSLH */
            gen_inslh(ra, rb, rc, islit, lit);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
            break;
        case 0x72:
            /* MSKQH */
            gen_mskqh(ra, rb, rc, islit, lit);
            break;
        case 0x77:
            /* INSQH */
            gen_insqh(ra, rb, rc, islit, lit);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(NULL, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* f11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
            /* XXX: TODO */
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
            /* XXX: TODO */
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(rb, rc);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(rb, rc);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb)
                    /* FMOV */
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                else
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmpfeq(ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmpfne(ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmpflt(ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmpfge(ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmpfle(ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmpfgt(ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps (JMP, JSR, RET, JSR_COROUTINE) only differ by
           the branch prediction hint in bits 15..14, which is ignored. */
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            /* XXX: TODO */
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = 2;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
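    /* Branch-format instructions: the 21-bit displacement counts
       longwords from the updated PC, hence the "<< 2" when forming the
       target below; BR and BSR also write the return address into ra. */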
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        /* FP conditional branches use the 21-bit branch displacement.  */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    default:
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
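
/* Translate one basic block: loop over guest instructions, filling the
   gen_opc_* side tables when search_pc is set (so a host fault address
   can later be mapped back to a guest PC), and stop on a TB-ending
   instruction, a page boundary, an icount/buffer limit, or a
   breakpoint. */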
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
#if defined ALPHA_DEBUG_DISAS
    static int insn_count;
#endif
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if defined ALPHA_DEBUG_DISAS
        insn_count++;
        LOG_DISAS("pc " TARGET_FMT_lx " mem_idx %d\n",
                  ctx.pc, ctx.mem_idx);
#endif
        insn = ldl_code(ctx.pc);
#if defined ALPHA_DEBUG_DISAS
        LOG_DISAS("opcode %08x %d\n", insn, insn_count);
#endif
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (gen_opc_ptr >= gen_opc_end)
            break;
        if (num_insns >= max_insns)
            break;
#if defined (DO_SINGLE_STEP)
        break;
#endif
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
#if defined (DO_TB_FLUSH)
    gen_helper_tb_flush();
#endif
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#if defined ALPHA_DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);
    /* XXX: should not be hardcoded */
    env->implver = IMPLVER_2106x;
    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
#endif
    pal_init(env);
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    /* XXX: fix this */
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;

    qemu_init_vcpu(env);
    return env;
}
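
/* Called when unwinding after a fault in generated code: pc_pos indexes
   the gen_opc_pc table filled in by the search_pc pass above, restoring
   the guest PC that corresponds to the faulting host instruction. */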
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}