/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;

    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif
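/* Sized to hold the register names built in alpha_translate_init():
   "ir0".."ir9" and "fir0".."fir9" take 4 and 5 bytes each including the
   terminating NUL, while "ir10".."ir30" and "fir10".."fir30" take 5 and 6.  */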
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
    static int done_init = 0;
    int i;
    char *p;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
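/* In the TCG_COND_GE/TCG_COND_LT case above, src == -0.0
   (0x8000000000000000) is mapped to +0.0, while every other value
   passes through unchanged.  */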
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
580 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
584 fn11
&= QUAL_RM_MASK
;
585 if (fn11
== ctx
->tb_rm
) {
590 tmp
= tcg_temp_new_i32();
593 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
596 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
599 tcg_gen_movi_i32(tmp
, float_round_down
);
602 tcg_gen_ld8u_i32(tmp
, cpu_env
,
603 offsetof(CPUAlphaState
, fpcr_dyn_round
));
607 #if defined(CONFIG_SOFTFLOAT_INLINE)
608 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
609 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
610 sets the one field. */
611 tcg_gen_st8_i32(tmp
, cpu_env
,
612 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
614 gen_helper_setroundmode(tmp
);
617 tcg_temp_free_i32(tmp
);
620 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
625 if (fn11
== ctx
->tb_ftz
) {
630 tmp
= tcg_temp_new_i32();
632 /* Underflow is enabled, use the FPCR setting. */
633 tcg_gen_ld8u_i32(tmp
, cpu_env
,
634 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
636 /* Underflow is disabled, force flush-to-zero. */
637 tcg_gen_movi_i32(tmp
, 1);
640 #if defined(CONFIG_SOFTFLOAT_INLINE)
641 tcg_gen_st8_i32(tmp
, cpu_env
,
642 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
644 gen_helper_setflushzero(tmp
);
647 tcg_temp_free_i32(tmp
);
650 static TCGv
gen_ieee_input(int reg
, int fn11
, int is_cmp
)
654 val
= tcg_const_i64(0);
656 if ((fn11
& QUAL_S
) == 0) {
658 gen_helper_ieee_input_cmp(cpu_env
, cpu_fir
[reg
]);
660 gen_helper_ieee_input(cpu_env
, cpu_fir
[reg
]);
663 val
= tcg_temp_new();
664 tcg_gen_mov_i64(val
, cpu_fir
[reg
]);
669 static void gen_fp_exc_clear(void)
671 #if defined(CONFIG_SOFTFLOAT_INLINE)
672 TCGv_i32 zero
= tcg_const_i32(0);
673 tcg_gen_st8_i32(zero
, cpu_env
,
674 offsetof(CPUAlphaState
, fp_status
.float_exception_flags
));
675 tcg_temp_free_i32(zero
);
677 gen_helper_fp_exc_clear(cpu_env
);
681 static void gen_fp_exc_raise_ignore(int rc
, int fn11
, int ignore
)
683 /* ??? We ought to be able to do something with imprecise exceptions.
684 E.g. notice we're still in the trap shadow of something within the
685 TB and do not generate the code to signal the exception; end the TB
686 when an exception is forced to arrive, either by consumption of a
687 register value or TRAPB or EXCB. */
688 TCGv_i32 exc
= tcg_temp_new_i32();
691 #if defined(CONFIG_SOFTFLOAT_INLINE)
692 tcg_gen_ld8u_i32(exc
, cpu_env
,
693 offsetof(CPUAlphaState
, fp_status
.float_exception_flags
));
695 gen_helper_fp_exc_get(exc
, cpu_env
);
699 tcg_gen_andi_i32(exc
, exc
, ~ignore
);
702 /* ??? Pass in the regno of the destination so that the helper can
703 set EXC_MASK, which contains a bitmask of destination registers
704 that have caused arithmetic traps. A simple userspace emulation
705 does not require this. We do need it for a guest kernel's entArith,
706 or if we were to do something clever with imprecise exceptions. */
707 reg
= tcg_const_i32(rc
+ 32);
710 gen_helper_fp_exc_raise_s(cpu_env
, exc
, reg
);
712 gen_helper_fp_exc_raise(cpu_env
, exc
, reg
);
715 tcg_temp_free_i32(reg
);
716 tcg_temp_free_i32(exc
);
719 static inline void gen_fp_exc_raise(int rc
, int fn11
)
721 gen_fp_exc_raise_ignore(rc
, fn11
, fn11
& QUAL_I
? 0 : float_flag_inexact
);
724 static void gen_fcvtlq(int rb
, int rc
)
726 if (unlikely(rc
== 31)) {
729 if (unlikely(rb
== 31)) {
730 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
732 TCGv tmp
= tcg_temp_new();
734 /* The arithmetic right shift here, plus the sign-extended mask below
735 yields a sign-extended result without an explicit ext32s_i64. */
736 tcg_gen_sari_i64(tmp
, cpu_fir
[rb
], 32);
737 tcg_gen_shri_i64(cpu_fir
[rc
], cpu_fir
[rb
], 29);
738 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
739 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rc
], 0x3fffffff);
740 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
746 static void gen_fcvtql(int rb
, int rc
)
748 if (unlikely(rc
== 31)) {
751 if (unlikely(rb
== 31)) {
752 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
754 TCGv tmp
= tcg_temp_new();
756 tcg_gen_andi_i64(tmp
, cpu_fir
[rb
], 0xC0000000);
757 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rb
], 0x3FFFFFFF);
758 tcg_gen_shli_i64(tmp
, tmp
, 32);
759 tcg_gen_shli_i64(cpu_fir
[rc
], cpu_fir
[rc
], 29);
760 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
766 static void gen_fcvtql_v(DisasContext
*ctx
, int rb
, int rc
)
769 int lab
= gen_new_label();
770 TCGv tmp
= tcg_temp_new();
772 tcg_gen_ext32s_i64(tmp
, cpu_fir
[rb
]);
773 tcg_gen_brcond_i64(TCG_COND_EQ
, tmp
, cpu_fir
[rb
], lab
);
774 gen_excp(ctx
, EXCP_ARITH
, EXC_M_IOV
);
781 #define FARITH2(name) \
782 static inline void glue(gen_f, name)(int rb, int rc) \
784 if (unlikely(rc == 31)) { \
788 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
790 TCGv tmp = tcg_const_i64(0); \
791 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
792 tcg_temp_free(tmp); \
796 /* ??? VAX instruction qualifiers ignored. */
804 static void gen_ieee_arith2(DisasContext
*ctx
,
805 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
806 int rb
, int rc
, int fn11
)
810 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
812 if (unlikely(rc
== 31)) {
816 gen_qual_roundmode(ctx
, fn11
);
817 gen_qual_flushzero(ctx
, fn11
);
820 vb
= gen_ieee_input(rb
, fn11
, 0);
821 helper(cpu_fir
[rc
], cpu_env
, vb
);
824 gen_fp_exc_raise(rc
, fn11
);
827 #define IEEE_ARITH2(name) \
828 static inline void glue(gen_f, name)(DisasContext *ctx, \
829 int rb, int rc, int fn11) \
831 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
838 static void gen_fcvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
843 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
845 if (unlikely(rc
== 31)) {
849 /* No need to set flushzero, since we have an integer output. */
851 vb
= gen_ieee_input(rb
, fn11
, 0);
853 /* Almost all integer conversions use cropped rounding, and most
854 also do not have integer overflow enabled. Special case that. */
857 gen_helper_cvttq_c(cpu_fir
[rc
], cpu_env
, vb
);
859 case QUAL_V
| QUAL_RM_C
:
860 case QUAL_S
| QUAL_V
| QUAL_RM_C
:
861 ignore
= float_flag_inexact
;
863 case QUAL_S
| QUAL_V
| QUAL_I
| QUAL_RM_C
:
864 gen_helper_cvttq_svic(cpu_fir
[rc
], cpu_env
, vb
);
867 gen_qual_roundmode(ctx
, fn11
);
868 gen_helper_cvttq(cpu_fir
[rc
], cpu_env
, vb
);
869 ignore
|= (fn11
& QUAL_V
? 0 : float_flag_overflow
);
870 ignore
|= (fn11
& QUAL_I
? 0 : float_flag_inexact
);
875 gen_fp_exc_raise_ignore(rc
, fn11
, ignore
);
878 static void gen_ieee_intcvt(DisasContext
*ctx
,
879 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
880 int rb
, int rc
, int fn11
)
884 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
886 if (unlikely(rc
== 31)) {
890 gen_qual_roundmode(ctx
, fn11
);
893 vb
= tcg_const_i64(0);
898 /* The only exception that can be raised by integer conversion
899 is inexact. Thus we only need to worry about exceptions when
900 inexact handling is requested. */
903 helper(cpu_fir
[rc
], cpu_env
, vb
);
904 gen_fp_exc_raise(rc
, fn11
);
906 helper(cpu_fir
[rc
], cpu_env
, vb
);
914 #define IEEE_INTCVT(name) \
915 static inline void glue(gen_f, name)(DisasContext *ctx, \
916 int rb, int rc, int fn11) \
918 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
923 static void gen_cpys_internal(int ra
, int rb
, int rc
, int inv_a
, uint64_t mask
)
928 if (unlikely(rc
== 31)) {
932 vmask
= tcg_const_i64(mask
);
942 va
= tcg_temp_new_i64();
943 tcg_gen_mov_i64(va
, cpu_fir
[ra
]);
945 tcg_gen_andc_i64(va
, vmask
, va
);
947 tcg_gen_and_i64(va
, va
, vmask
);
955 vb
= tcg_temp_new_i64();
956 tcg_gen_andc_i64(vb
, cpu_fir
[rb
], vmask
);
959 switch (za
<< 1 | zb
) {
961 tcg_gen_or_i64(cpu_fir
[rc
], va
, vb
);
964 tcg_gen_mov_i64(cpu_fir
[rc
], va
);
967 tcg_gen_mov_i64(cpu_fir
[rc
], vb
);
970 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
974 tcg_temp_free(vmask
);
983 static inline void gen_fcpys(int ra
, int rb
, int rc
)
985 gen_cpys_internal(ra
, rb
, rc
, 0, 0x8000000000000000ULL
);
988 static inline void gen_fcpysn(int ra
, int rb
, int rc
)
990 gen_cpys_internal(ra
, rb
, rc
, 1, 0x8000000000000000ULL
);
993 static inline void gen_fcpyse(int ra
, int rb
, int rc
)
995 gen_cpys_internal(ra
, rb
, rc
, 0, 0xFFF0000000000000ULL
);
998 #define FARITH3(name) \
999 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1003 if (unlikely(rc == 31)) { \
1007 va = tcg_const_i64(0); \
1012 vb = tcg_const_i64(0); \
1017 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1020 tcg_temp_free(va); \
1023 tcg_temp_free(vb); \
1027 /* ??? VAX instruction qualifiers ignored. */
1040 static void gen_ieee_arith3(DisasContext
*ctx
,
1041 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
1042 int ra
, int rb
, int rc
, int fn11
)
1046 /* ??? This is wrong: the instruction is not a nop, it still may
1047 raise exceptions. */
1048 if (unlikely(rc
== 31)) {
1052 gen_qual_roundmode(ctx
, fn11
);
1053 gen_qual_flushzero(ctx
, fn11
);
1056 va
= gen_ieee_input(ra
, fn11
, 0);
1057 vb
= gen_ieee_input(rb
, fn11
, 0);
1058 helper(cpu_fir
[rc
], cpu_env
, va
, vb
);
1062 gen_fp_exc_raise(rc
, fn11
);
1065 #define IEEE_ARITH3(name) \
1066 static inline void glue(gen_f, name)(DisasContext *ctx, \
1067 int ra, int rb, int rc, int fn11) \
1069 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1080 static void gen_ieee_compare(DisasContext
*ctx
,
1081 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
1082 int ra
, int rb
, int rc
, int fn11
)
1086 /* ??? This is wrong: the instruction is not a nop, it still may
1087 raise exceptions. */
1088 if (unlikely(rc
== 31)) {
1094 va
= gen_ieee_input(ra
, fn11
, 1);
1095 vb
= gen_ieee_input(rb
, fn11
, 1);
1096 helper(cpu_fir
[rc
], cpu_env
, va
, vb
);
1100 gen_fp_exc_raise(rc
, fn11
);
1103 #define IEEE_CMP3(name) \
1104 static inline void glue(gen_f, name)(DisasContext *ctx, \
1105 int ra, int rb, int rc, int fn11) \
1107 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1114 static inline uint64_t zapnot_mask(uint8_t lit
)
1119 for (i
= 0; i
< 8; ++i
) {
1121 mask
|= 0xffull
<< (i
* 8);
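/* For example, lit == 0x0f selects the four low bytes and yields the
   mask 0x00000000ffffffff.  */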
1126 /* Implement zapnot with an immediate operand, which expands to some
1127 form of immediate AND. This is a basic building block in the
1128 definition of many of the other byte manipulation instructions. */
1129 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
1133 tcg_gen_movi_i64(dest
, 0);
1136 tcg_gen_ext8u_i64(dest
, src
);
1139 tcg_gen_ext16u_i64(dest
, src
);
1142 tcg_gen_ext32u_i64(dest
, src
);
1145 tcg_gen_mov_i64(dest
, src
);
1148 tcg_gen_andi_i64 (dest
, src
, zapnot_mask (lit
));
1153 static inline void gen_zapnot(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1155 if (unlikely(rc
== 31))
1157 else if (unlikely(ra
== 31))
1158 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1160 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1162 gen_helper_zapnot (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1165 static inline void gen_zap(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1167 if (unlikely(rc
== 31))
1169 else if (unlikely(ra
== 31))
1170 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1172 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1174 gen_helper_zap (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1178 /* EXTWH, EXTLH, EXTQH */
1179 static void gen_ext_h(int ra
, int rb
, int rc
, int islit
,
1180 uint8_t lit
, uint8_t byte_mask
)
1182 if (unlikely(rc
== 31))
1184 else if (unlikely(ra
== 31))
1185 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1188 lit
= (64 - (lit
& 7) * 8) & 0x3f;
1189 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1191 TCGv tmp1
= tcg_temp_new();
1192 tcg_gen_andi_i64(tmp1
, cpu_ir
[rb
], 7);
1193 tcg_gen_shli_i64(tmp1
, tmp1
, 3);
1194 tcg_gen_neg_i64(tmp1
, tmp1
);
1195 tcg_gen_andi_i64(tmp1
, tmp1
, 0x3f);
1196 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp1
);
1197 tcg_temp_free(tmp1
);
1199 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1203 /* EXTBL, EXTWL, EXTLL, EXTQL */
1204 static void gen_ext_l(int ra
, int rb
, int rc
, int islit
,
1205 uint8_t lit
, uint8_t byte_mask
)
1207 if (unlikely(rc
== 31))
1209 else if (unlikely(ra
== 31))
1210 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1213 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], (lit
& 7) * 8);
1215 TCGv tmp
= tcg_temp_new();
1216 tcg_gen_andi_i64(tmp
, cpu_ir
[rb
], 7);
1217 tcg_gen_shli_i64(tmp
, tmp
, 3);
1218 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp
);
1221 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1225 /* INSWH, INSLH, INSQH */
1226 static void gen_ins_h(int ra
, int rb
, int rc
, int islit
,
1227 uint8_t lit
, uint8_t byte_mask
)
1229 if (unlikely(rc
== 31))
1231 else if (unlikely(ra
== 31) || (islit
&& (lit
& 7) == 0))
1232 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1234 TCGv tmp
= tcg_temp_new();
1236 /* The instruction description has us left-shift the byte mask
1237 and extract bits <15:8> and apply that zap at the end. This
1238 is equivalent to simply performing the zap first and shifting
1240 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1243 /* Note that we have handled the lit==0 case above. */
1244 tcg_gen_shri_i64 (cpu_ir
[rc
], tmp
, 64 - (lit
& 7) * 8);
1246 TCGv shift
= tcg_temp_new();
1248 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1249 Do this portably by splitting the shift into two parts:
1250 shift_count-1 and 1. Arrange for the -1 by using
1251 ones-complement instead of twos-complement in the negation:
1252 ~((B & 7) * 8) & 63. */
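/* For example, (B & 7) == 3 gives ~(3 * 8) & 63 == 39; together with the
   final shift of 1 that is 40 == 64 - 24, as required.  (B & 7) == 0
   gives 63 + 1 == 64, leaving a zero result.  */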
1254 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1255 tcg_gen_shli_i64(shift
, shift
, 3);
1256 tcg_gen_not_i64(shift
, shift
);
1257 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1259 tcg_gen_shr_i64(cpu_ir
[rc
], tmp
, shift
);
1260 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[rc
], 1);
1261 tcg_temp_free(shift
);
1267 /* INSBL, INSWL, INSLL, INSQL */
1268 static void gen_ins_l(int ra
, int rb
, int rc
, int islit
,
1269 uint8_t lit
, uint8_t byte_mask
)
1271 if (unlikely(rc
== 31))
1273 else if (unlikely(ra
== 31))
1274 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1276 TCGv tmp
= tcg_temp_new();
1278 /* The instruction description has us left-shift the byte mask
1279 the same number of byte slots as the data and apply the zap
1280 at the end. This is equivalent to simply performing the zap
1281 first and shifting afterward. */
1282 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1285 tcg_gen_shli_i64(cpu_ir
[rc
], tmp
, (lit
& 7) * 8);
1287 TCGv shift
= tcg_temp_new();
1288 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1289 tcg_gen_shli_i64(shift
, shift
, 3);
1290 tcg_gen_shl_i64(cpu_ir
[rc
], tmp
, shift
);
1291 tcg_temp_free(shift
);
1297 /* MSKWH, MSKLH, MSKQH */
1298 static void gen_msk_h(int ra
, int rb
, int rc
, int islit
,
1299 uint8_t lit
, uint8_t byte_mask
)
1301 if (unlikely(rc
== 31))
1303 else if (unlikely(ra
== 31))
1304 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1306 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~((byte_mask
<< (lit
& 7)) >> 8));
1308 TCGv shift
= tcg_temp_new();
1309 TCGv mask
= tcg_temp_new();
1311 /* The instruction description is as above, where the byte_mask
1312 is shifted left, and then we extract bits <15:8>. This can be
1313 emulated with a right-shift on the expanded byte mask. This
1314 requires extra care because for an input <2:0> == 0 we need a
1315 shift of 64 bits in order to generate a zero. This is done by
1316 splitting the shift into two parts, the variable shift - 1
1317 followed by a constant 1 shift. The code we expand below is
1318 equivalent to ~((B & 7) * 8) & 63. */
1320 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1321 tcg_gen_shli_i64(shift
, shift
, 3);
1322 tcg_gen_not_i64(shift
, shift
);
1323 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1324 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1325 tcg_gen_shr_i64(mask
, mask
, shift
);
1326 tcg_gen_shri_i64(mask
, mask
, 1);
1328 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1330 tcg_temp_free(mask
);
1331 tcg_temp_free(shift
);
1335 /* MSKBL, MSKWL, MSKLL, MSKQL */
1336 static void gen_msk_l(int ra
, int rb
, int rc
, int islit
,
1337 uint8_t lit
, uint8_t byte_mask
)
1339 if (unlikely(rc
== 31))
1341 else if (unlikely(ra
== 31))
1342 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1344 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~(byte_mask
<< (lit
& 7)));
1346 TCGv shift
= tcg_temp_new();
1347 TCGv mask
= tcg_temp_new();
1349 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1350 tcg_gen_shli_i64(shift
, shift
, 3);
1351 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1352 tcg_gen_shl_i64(mask
, mask
, shift
);
1354 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1356 tcg_temp_free(mask
);
1357 tcg_temp_free(shift
);
1361 /* Code to call arith3 helpers */
1362 #define ARITH3(name) \
1363 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1366 if (unlikely(rc == 31)) \
1371 TCGv tmp = tcg_const_i64(lit); \
1372 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1373 tcg_temp_free(tmp); \
1375 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1377 TCGv tmp1 = tcg_const_i64(0); \
1379 TCGv tmp2 = tcg_const_i64(lit); \
1380 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1381 tcg_temp_free(tmp2); \
1383 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1384 tcg_temp_free(tmp1); \
1398 /* Code to call arith3 helpers */
1399 #define ARITH3_EX(name) \
1400 static inline void glue(gen_, name)(int ra, int rb, int rc, \
1401 int islit, uint8_t lit) \
1403 if (unlikely(rc == 31)) { \
1408 TCGv tmp = tcg_const_i64(lit); \
1409 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1411 tcg_temp_free(tmp); \
1413 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1414 cpu_ir[ra], cpu_ir[rb]); \
1417 TCGv tmp1 = tcg_const_i64(0); \
1419 TCGv tmp2 = tcg_const_i64(lit); \
1420 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
1421 tcg_temp_free(tmp2); \
1423 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
1425 tcg_temp_free(tmp1); \
1435 #define MVIOP2(name) \
1436 static inline void glue(gen_, name)(int rb, int rc) \
1438 if (unlikely(rc == 31)) \
1440 if (unlikely(rb == 31)) \
1441 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1443 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1450 static void gen_cmp(TCGCond cond
, int ra
, int rb
, int rc
,
1451 int islit
, uint8_t lit
)
1455 if (unlikely(rc
== 31)) {
1460 va
= tcg_const_i64(0);
1465 vb
= tcg_const_i64(lit
);
1470 tcg_gen_setcond_i64(cond
, cpu_ir
[rc
], va
, vb
);
1480 static void gen_rx(int ra
, int set
)
1485 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1488 tmp
= tcg_const_i32(set
);
1489 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1490 tcg_temp_free_i32(tmp
);
1493 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1495 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1496 to internal cpu registers. */
1498 /* Unprivileged PAL call */
1499 if (palcode
>= 0x80 && palcode
< 0xC0) {
1503 /* No-op inside QEMU. */
1507 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_unique
);
1511 tcg_gen_mov_i64(cpu_unique
, cpu_ir
[IR_A0
]);
1520 #ifndef CONFIG_USER_ONLY
1521 /* Privileged PAL code */
1522 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1526 /* No-op inside QEMU. */
1530 /* No-op inside QEMU. */
1534 tcg_gen_st_i64(cpu_ir
[IR_A0
], cpu_env
, offsetof(CPUAlphaState
, vptptr
));
1538 tcg_gen_mov_i64(cpu_sysval
, cpu_ir
[IR_A0
]);
1542 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_sysval
);
1549 /* Note that we already know we're in kernel mode, so we know
1550 that PS only contains the 3 IPL bits. */
1551 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUAlphaState
, ps
));
1553 /* But make sure to store only the 3 IPL bits from the user. */
1554 tmp
= tcg_temp_new();
1555 tcg_gen_andi_i64(tmp
, cpu_ir
[IR_A0
], PS_INT_MASK
);
1556 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1563 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUAlphaState
, ps
));
1567 tcg_gen_mov_i64(cpu_usp
, cpu_ir
[IR_A0
]);
1571 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_usp
);
1575 tcg_gen_ld32s_i64(cpu_ir
[IR_V0
], cpu_env
,
1576 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1586 return gen_invalid(ctx
);
1589 #ifdef CONFIG_USER_ONLY
1590 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1593 TCGv pc
= tcg_const_i64(ctx
->pc
);
1594 TCGv entry
= tcg_const_i64(palcode
& 0x80
1595 ? 0x2000 + (palcode
- 0x80) * 64
1596 : 0x1000 + palcode
* 64);
1598 gen_helper_call_pal(cpu_env
, pc
, entry
);
1600 tcg_temp_free(entry
);
1603 /* Since the destination is running in PALmode, we don't really
1604 need the page permissions check. We'll see the existence of
1605 the page when we create the TB, and we'll flush all TBs if
1606 we change the PAL base register. */
1607 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1609 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1610 return EXIT_GOTO_TB
;
1613 return EXIT_PC_UPDATED
;
1618 #ifndef CONFIG_USER_ONLY
1620 #define PR_BYTE 0x100000
1621 #define PR_LONG 0x200000
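/* cpu_pr_data() below ORs these flags into the returned env offset so that
   gen_mfpr/gen_mtpr know whether to access the processor register as a
   byte, a longword, or (by default) a quadword.  */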
1623 static int cpu_pr_data(int pr
)
1626 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1627 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1628 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1629 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1630 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1631 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1632 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1633 case 7: return offsetof(CPUAlphaState
, palbr
);
1634 case 8: return offsetof(CPUAlphaState
, ptbr
);
1635 case 9: return offsetof(CPUAlphaState
, vptptr
);
1636 case 10: return offsetof(CPUAlphaState
, unique
);
1637 case 11: return offsetof(CPUAlphaState
, sysval
);
1638 case 12: return offsetof(CPUAlphaState
, usp
);
1641 return offsetof(CPUAlphaState
, shadow
[pr
- 32]);
1643 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1646 return offsetof(CPUAlphaState
, alarm_expire
);
1651 static ExitStatus
gen_mfpr(int ra
, int regno
)
1653 int data
= cpu_pr_data(regno
);
1655 /* In our emulated PALcode, these processor registers have no
1656 side effects from reading. */
1661 /* Special help for VMTIME and WALLTIME. */
1662 if (regno
== 250 || regno
== 249) {
1663 void (*helper
)(TCGv
) = gen_helper_get_walltime
;
1665 helper
= gen_helper_get_vmtime
;
1671 return EXIT_PC_STALE
;
1678 /* The basic registers are data only, and unknown registers
1679 are read-zero, write-ignore. */
1681 tcg_gen_movi_i64(cpu_ir
[ra
], 0);
1682 } else if (data
& PR_BYTE
) {
1683 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_BYTE
);
1684 } else if (data
& PR_LONG
) {
1685 tcg_gen_ld32s_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_LONG
);
1687 tcg_gen_ld_i64(cpu_ir
[ra
], cpu_env
, data
);
1692 static ExitStatus
gen_mtpr(DisasContext
*ctx
, int rb
, int regno
)
1698 tmp
= tcg_const_i64(0);
1706 gen_helper_tbia(cpu_env
);
1711 gen_helper_tbis(cpu_env
, tmp
);
1716 tmp
= tcg_const_i64(1);
1717 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1718 offsetof(CPUState
, halted
));
1719 return gen_excp(ctx
, EXCP_HLT
, 0);
1723 gen_helper_halt(tmp
);
1724 return EXIT_PC_STALE
;
1728 gen_helper_set_alarm(cpu_env
, tmp
);
1733 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1734 /* Changing the PAL base register implies un-chaining all of the TBs
1735 that ended with a CALL_PAL. Since the base register usually only
1736 changes during boot, flushing everything works well. */
1737 gen_helper_tb_flush(cpu_env
);
1738 return EXIT_PC_STALE
;
1741 /* The basic registers are data only, and unknown registers
1742 are read-zero, write-ignore. */
1743 data
= cpu_pr_data(regno
);
1745 if (data
& PR_BYTE
) {
1746 tcg_gen_st8_i64(tmp
, cpu_env
, data
& ~PR_BYTE
);
1747 } else if (data
& PR_LONG
) {
1748 tcg_gen_st32_i64(tmp
, cpu_env
, data
& ~PR_LONG
);
1750 tcg_gen_st_i64(tmp
, cpu_env
, data
);
1762 #endif /* !USER_ONLY*/
1764 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1767 int32_t disp21
, disp16
;
1768 #ifndef CONFIG_USER_ONLY
1772 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, islit
, real_islit
;
1776 /* Decode all instruction fields */
1778 ra
= (insn
>> 21) & 0x1F;
1779 rb
= (insn
>> 16) & 0x1F;
1781 real_islit
= islit
= (insn
>> 12) & 1;
1782 if (rb
== 31 && !islit
) {
1786 lit
= (insn
>> 13) & 0xFF;
1787 palcode
= insn
& 0x03FFFFFF;
1788 disp21
= ((int32_t)((insn
& 0x001FFFFF) << 11)) >> 11;
1789 disp16
= (int16_t)(insn
& 0x0000FFFF);
1790 #ifndef CONFIG_USER_ONLY
1791 disp12
= (int32_t)((insn
& 0x00000FFF) << 20) >> 20;
1793 fn11
= (insn
>> 5) & 0x000007FF;
1795 fn7
= (insn
>> 5) & 0x0000007F;
1796 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1797 opc
, ra
, rb
, rc
, disp16
);
1803 ret
= gen_call_pal(ctx
, palcode
);
1828 if (likely(ra
!= 31)) {
1830 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
1832 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
1837 if (likely(ra
!= 31)) {
1839 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
1841 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
1846 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1847 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1853 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1857 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1858 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1864 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1868 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1872 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1878 if (likely(rc
!= 31)) {
1881 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1882 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1884 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1885 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1889 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1891 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1897 if (likely(rc
!= 31)) {
1899 TCGv tmp
= tcg_temp_new();
1900 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1902 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1904 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1905 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1909 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1911 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1917 if (likely(rc
!= 31)) {
1920 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1922 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1923 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1926 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1928 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1929 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1936 if (likely(rc
!= 31)) {
1938 TCGv tmp
= tcg_temp_new();
1939 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1941 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1943 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1944 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1948 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1950 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1951 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1958 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1962 if (likely(rc
!= 31)) {
1964 TCGv tmp
= tcg_temp_new();
1965 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1967 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1969 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1970 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1974 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1976 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1982 if (likely(rc
!= 31)) {
1984 TCGv tmp
= tcg_temp_new();
1985 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1987 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1989 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1990 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1994 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1996 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1997 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
2004 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
2008 if (likely(rc
!= 31)) {
2011 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2013 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2016 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2018 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2024 if (likely(rc
!= 31)) {
2026 TCGv tmp
= tcg_temp_new();
2027 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
2029 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
2031 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2035 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2037 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2043 if (likely(rc
!= 31)) {
2046 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2048 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2051 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2053 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2059 if (likely(rc
!= 31)) {
2061 TCGv tmp
= tcg_temp_new();
2062 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
2064 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2066 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2070 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2072 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2078 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
2082 if (likely(rc
!= 31)) {
2084 TCGv tmp
= tcg_temp_new();
2085 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
2087 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
2089 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2093 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2095 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2101 if (likely(rc
!= 31)) {
2103 TCGv tmp
= tcg_temp_new();
2104 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
2106 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2108 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2112 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2114 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2120 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
2124 gen_addlv(ra
, rb
, rc
, islit
, lit
);
2128 gen_sublv(ra
, rb
, rc
, islit
, lit
);
2132 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
2136 gen_addqv(ra
, rb
, rc
, islit
, lit
);
2140 gen_subqv(ra
, rb
, rc
, islit
, lit
);
2144 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
2154 if (likely(rc
!= 31)) {
2156 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2158 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2160 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2165 if (likely(rc
!= 31)) {
2168 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2170 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2172 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2177 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
2181 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
2185 if (likely(rc
!= 31)) {
2188 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2190 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2193 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2195 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2201 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
2205 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
2209 if (likely(rc
!= 31)) {
2212 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2214 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2217 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2219 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2225 if (likely(rc
!= 31)) {
2228 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2230 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2233 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2235 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2241 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
2245 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
2249 if (likely(rc
!= 31)) {
2252 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2254 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2257 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2259 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2265 if (likely(rc
!= 31)) {
2266 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
2269 tcg_gen_movi_i64(cpu_ir
[rc
], lit
& ~amask
);
2271 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rb
], ~amask
);
2277 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
2281 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
2286 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->implver
);
2297 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2301 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2305 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2309 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2313 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2317 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2321 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2325 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2329 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2333 gen_zap(ra
, rb
, rc
, islit
, lit
);
2337 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
2341 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2345 if (likely(rc
!= 31)) {
2348 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2350 TCGv shift
= tcg_temp_new();
2351 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2352 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2353 tcg_temp_free(shift
);
2356 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2361 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2365 if (likely(rc
!= 31)) {
2368 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2370 TCGv shift
= tcg_temp_new();
2371 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2372 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2373 tcg_temp_free(shift
);
2376 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2381 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2385 if (likely(rc
!= 31)) {
2388 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2390 TCGv shift
= tcg_temp_new();
2391 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2392 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2393 tcg_temp_free(shift
);
2396 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2401 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2405 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2409 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2413 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2417 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2421 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2425 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2429 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2433 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2443 if (likely(rc
!= 31)) {
2445 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2448 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2450 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2451 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
2457 if (likely(rc
!= 31)) {
2459 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2461 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2463 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2470 if (unlikely(rc
== 31)){
2474 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2477 low
= tcg_temp_new();
2479 tcg_gen_movi_tl(low
, lit
);
2480 tcg_gen_mulu2_i64(low
, cpu_ir
[rc
], cpu_ir
[ra
], low
);
2482 tcg_gen_mulu2_i64(low
, cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2489 gen_mullv(ra
, rb
, rc
, islit
, lit
);
2493 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
2500 switch (fpfn
) { /* fn11 & 0x3F */
2503 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2506 if (likely(rc
!= 31)) {
2508 TCGv_i32 tmp
= tcg_temp_new_i32();
2509 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2510 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
2511 tcg_temp_free_i32(tmp
);
2513 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2518 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2525 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2526 gen_fsqrts(ctx
, rb
, rc
, fn11
);
2532 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2535 if (likely(rc
!= 31)) {
2537 TCGv_i32 tmp
= tcg_temp_new_i32();
2538 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2539 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
2540 tcg_temp_free_i32(tmp
);
2542 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2547 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2550 if (likely(rc
!= 31)) {
2552 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
2554 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2559 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2566 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2567 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
2576 /* VAX floating point */
2577 /* XXX: rounding mode and trap are ignored (!) */
2578 switch (fpfn
) { /* fn11 & 0x3F */
2581 gen_faddf(ra
, rb
, rc
);
2585 gen_fsubf(ra
, rb
, rc
);
2589 gen_fmulf(ra
, rb
, rc
);
2593 gen_fdivf(ra
, rb
, rc
);
2605 gen_faddg(ra
, rb
, rc
);
2609 gen_fsubg(ra
, rb
, rc
);
2613 gen_fmulg(ra
, rb
, rc
);
2617 gen_fdivg(ra
, rb
, rc
);
2621 gen_fcmpgeq(ra
, rb
, rc
);
2625 gen_fcmpglt(ra
, rb
, rc
);
2629 gen_fcmpgle(ra
, rb
, rc
);
2660 /* IEEE floating-point */
2661 switch (fpfn
) { /* fn11 & 0x3F */
2664 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2668 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2672 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2676 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2680 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2684 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2688 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2692 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2696 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2700 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2704 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2708 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2711 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2713 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2716 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2721 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2725 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2729 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2742 if (likely(rc
!= 31)) {
2746 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2748 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
2751 gen_fcpys(ra
, rb
, rc
);
2757 gen_fcpysn(ra
, rb
, rc
);
2761 gen_fcpyse(ra
, rb
, rc
);
2765 if (likely(ra
!= 31))
2766 gen_helper_store_fpcr(cpu_env
, cpu_fir
[ra
]);
2768 TCGv tmp
= tcg_const_i64(0);
2769 gen_helper_store_fpcr(cpu_env
, tmp
);
2775 if (likely(ra
!= 31))
2776 gen_helper_load_fpcr(cpu_fir
[ra
], cpu_env
);
2780 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
2784 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
2788 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
2792 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
2796 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
2800 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
2810 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2811 /v doesn't do. The only thing I can think is that /sv is a
2812 valid instruction merely for completeness in the ISA. */
2813 gen_fcvtql_v(ctx
, rb
, rc
);
2820 switch ((uint16_t)disp16
) {
2850 gen_helper_load_pcc(cpu_ir
[ra
], cpu_env
);
2852 ret
= EXIT_PC_STALE
;
2854 gen_helper_load_pcc(cpu_ir
[ra
], cpu_env
);
2878 /* HW_MFPR (PALcode) */
2879 #ifndef CONFIG_USER_ONLY
2880 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2881 return gen_mfpr(ra
, insn
& 0xffff);
2886 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2887 prediction stack action, which of course we don't implement. */
2889 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
2891 tcg_gen_movi_i64(cpu_pc
, 0);
2894 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2896 ret
= EXIT_PC_UPDATED
;
2899 /* HW_LD (PALcode) */
2900 #ifndef CONFIG_USER_ONLY
2901 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2908 addr
= tcg_temp_new();
2910 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2912 tcg_gen_movi_i64(addr
, disp12
);
2913 switch ((insn
>> 12) & 0xF) {
2915 /* Longword physical access (hw_ldl/p) */
2916 gen_helper_ldl_phys(cpu_ir
[ra
], cpu_env
, addr
);
2919 /* Quadword physical access (hw_ldq/p) */
2920 gen_helper_ldq_phys(cpu_ir
[ra
], cpu_env
, addr
);
2923 /* Longword physical access with lock (hw_ldl_l/p) */
2924 gen_helper_ldl_l_phys(cpu_ir
[ra
], cpu_env
, addr
);
2927 /* Quadword physical access with lock (hw_ldq_l/p) */
2928 gen_helper_ldq_l_phys(cpu_ir
[ra
], cpu_env
, addr
);
2931 /* Longword virtual PTE fetch (hw_ldl/v) */
2934 /* Quadword virtual PTE fetch (hw_ldq/v) */
2938 /* Invalid */
2941 /* Invalid */
2944 /* Longword virtual access (hw_ldl) */
2947 /* Quadword virtual access (hw_ldq) */
2950 /* Longword virtual access with protection check (hw_ldl/w) */
2951 tcg_gen_qemu_ld_i64(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
, MO_LESL
);
2954 /* Quadword virtual access with protection check (hw_ldq/w) */
2955 tcg_gen_qemu_ld_i64(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2958 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2961 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2964 /* Longword virtual access with alternate access mode and
2965 protection checks (hw_ldl/wa) */
2966 tcg_gen_qemu_ld_i64(cpu_ir
[ra
], addr
, MMU_USER_IDX
, MO_LESL
);
2969 /* Quadword virtual access with alternate access mode and
2970 protection checks (hw_ldq/wa) */
2971 tcg_gen_qemu_ld_i64(cpu_ir
[ra
], addr
, MMU_USER_IDX
, MO_LEQ
);
2974 tcg_temp_free(addr
);
2983 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) == 0) {
2986 if (likely(rc
!= 31)) {
2988 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2990 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2995 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
2996 if (likely(rc
!= 31)) {
2998 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
3000 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
3008 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
3009 if (likely(rc
!= 31)) {
3011 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
3013 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
3021 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3022 gen_perr(ra
, rb
, rc
, islit
, lit
);
3028 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
3029 if (likely(rc
!= 31)) {
3031 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
3033 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
3041 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
3042 if (likely(rc
!= 31)) {
3044 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
3046 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
3054 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3055 if (real_islit
|| ra
!= 31) {
3064 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3065 if (real_islit
|| ra
!= 31) {
3074 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3075 if (real_islit
|| ra
!= 31) {
3084 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3085 if (real_islit
|| ra
!= 31) {
3094 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3095 gen_minsb8(ra
, rb
, rc
, islit
, lit
);
3101 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3102 gen_minsw4(ra
, rb
, rc
, islit
, lit
);
3108 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3109 gen_minub8(ra
, rb
, rc
, islit
, lit
);
3115 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3116 gen_minuw4(ra
, rb
, rc
, islit
, lit
);
3122 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3123 gen_maxub8(ra
, rb
, rc
, islit
, lit
);
3129 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3130 gen_maxuw4(ra
, rb
, rc
, islit
, lit
);
3136 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3137 gen_maxsb8(ra
, rb
, rc
, islit
, lit
);
3143 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3144 gen_maxsw4(ra
, rb
, rc
, islit
, lit
);
3150 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3153 if (likely(rc
!= 31)) {
3155 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
3157 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
3162 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3166 TCGv_i32 tmp1
= tcg_temp_new_i32();
3168 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
3170 TCGv tmp2
= tcg_const_i64(0);
3171 gen_helper_s_to_memory(tmp1
, tmp2
);
3172 tcg_temp_free(tmp2
);
3174 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
3175 tcg_temp_free_i32(tmp1
);
3183 /* HW_MTPR (PALcode) */
3184 #ifndef CONFIG_USER_ONLY
3185 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3186 return gen_mtpr(ctx
, rb
, insn
& 0xffff);
3191 /* HW_RET (PALcode) */
3192 #ifndef CONFIG_USER_ONLY
3193 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3195 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3196 address from EXC_ADDR. This turns out to be useful for our
3197 emulation PALcode, so continue to accept it. */
3198 TCGv tmp
= tcg_temp_new();
3199 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
3200 gen_helper_hw_ret(cpu_env
, tmp
);
3203 gen_helper_hw_ret(cpu_env
, cpu_ir
[rb
]);
3205 ret
= EXIT_PC_UPDATED
;
3211 /* HW_ST (PALcode) */
3212 #ifndef CONFIG_USER_ONLY
3213 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3215 addr
= tcg_temp_new();
3217 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
3219 tcg_gen_movi_i64(addr
, disp12
);
3223 val
= tcg_temp_new();
3224 tcg_gen_movi_i64(val
, 0);
3226 switch ((insn
>> 12) & 0xF) {
3228 /* Longword physical access */
3229 gen_helper_stl_phys(cpu_env
, addr
, val
);
3232 /* Quadword physical access */
3233 gen_helper_stq_phys(cpu_env
, addr
, val
);
3236 /* Longword physical access with lock */
3237 gen_helper_stl_c_phys(val
, cpu_env
, addr
, val
);
3240 /* Quadword physical access with lock */
3241 gen_helper_stq_c_phys(val
, cpu_env
, addr
, val
);
3244 /* Longword virtual access */
3247 /* Quadword virtual access */
3268 /* Longword virtual access with alternate access mode */
3271 /* Quadword virtual access with alternate access mode */
3282 tcg_temp_free(addr
);
3289 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
3293 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
3297 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
3301 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
3305 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
3309 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
3313 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
3317 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
3321 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
3325 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
3329 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
3333 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
3337 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
3341 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
3345 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
3349 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
3353 ret
= gen_bdirect(ctx
, ra
, disp21
);
3355 case 0x31: /* FBEQ */
3356 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
3358 case 0x32: /* FBLT */
3359 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
3361 case 0x33: /* FBLE */
3362 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
3366 ret
= gen_bdirect(ctx
, ra
, disp21
);
3368 case 0x35: /* FBNE */
3369 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
3371 case 0x36: /* FBGE */
3372 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
3374 case 0x37: /* FBGT */
3375 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
3379 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
3383 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
3387 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
3391 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
3395 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
3399 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
3403 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
3407 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
3410 ret
= gen_invalid(ctx
);
3417 static inline void gen_intermediate_code_internal(AlphaCPU
*cpu
,
3418 TranslationBlock
*tb
,
3421 CPUState
*cs
= CPU(cpu
);
3422 CPUAlphaState
*env
= &cpu
->env
;
3423 DisasContext ctx
, *ctxp
= &ctx
;
3424 target_ulong pc_start
;
3425 target_ulong pc_mask
;
3427 uint16_t *gen_opc_end
;
3435 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
3439 ctx
.mem_idx
= cpu_mmu_index(env
);
3440 ctx
.implver
= env
->implver
;
3441 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
3443 /* ??? Every TB begins with unset rounding mode, to be initialized on
3444 the first fp insn of the TB. Alternately we could define a proper
3445 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3446 to reset the FP_STATUS to that default at the end of any TB that
3447 changes the default. We could even (gasp) dynamically figure out
3448 what default would be most efficient given the running program. */
3450 /* Similarly for flush-to-zero. */
3454 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
3455 if (max_insns
== 0) {
3456 max_insns
= CF_COUNT_MASK
;
3459 if (in_superpage(&ctx
, pc_start
)) {
3460 pc_mask
= (1ULL << 41) - 1;
3462 pc_mask
= ~TARGET_PAGE_MASK
;
3467 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
3468 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
3469 if (bp
->pc
== ctx
.pc
) {
3470 gen_excp(&ctx
, EXCP_DEBUG
, 0);
3476 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
3480 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
3482 tcg_ctx
.gen_opc_pc
[lj
] = ctx
.pc
;
3483 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
3484 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
3486 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
3488 insn
= cpu_ldl_code(env
, ctx
.pc
);
3491 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
3492 tcg_gen_debug_insn_start(ctx
.pc
);
3496 ret
= translate_one(ctxp
, insn
);
3498 /* If we reach a page boundary, are single stepping,
3499 or exhaust instruction count, stop generation. */
3501 && ((ctx
.pc
& pc_mask
) == 0
3502 || tcg_ctx
.gen_opc_ptr
>= gen_opc_end
3503 || num_insns
>= max_insns
3505 || ctx
.singlestep_enabled
)) {
3506 ret
= EXIT_PC_STALE
;
3508 } while (ret
== NO_EXIT
);
3510 if (tb
->cflags
& CF_LAST_IO
) {
3519 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
3521 case EXIT_PC_UPDATED
:
3522 if (ctx
.singlestep_enabled
) {
3523 gen_excp_1(EXCP_DEBUG
, 0);
3532 gen_tb_end(tb
, num_insns
);
3533 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
3535 j
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
3538 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
3540 tb
->size
= ctx
.pc
- pc_start
;
3541 tb
->icount
= num_insns
;
3545 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3546 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3547 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 1);
3553 void gen_intermediate_code (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3555 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, false);
3558 void gen_intermediate_code_pc (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3560 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, true);
3563 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
, int pc_pos
)
3565 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];