/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;

    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
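
/* Illustrative note (not in the original source): the buffer above is sized
   to hold every NUL-terminated register name that alpha_translate_init()
   writes into it: "ir0".."ir9" are 10 names of 4 bytes each, "ir10".."ir30"
   are 21 names of 5 bytes, "fir0".."fir9" are 10 names of 5 bytes, and
   "fir10".."fir30" are 21 names of 6 bytes -- hence 10*4 + 21*5 + 10*5 + 21*6
   bytes in total.  */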
#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
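
/* Illustrative sketch (not from the original source): the lock_addr/lock_value
   globals above emulate Alpha load-locked/store-conditional pairs, such as the
   classic atomic-increment loop a guest might execute:

       retry:  ldl_l   t0, 0(a0)      # load-locked: records address and value
               addl    t0, 1, t0
               stl_c   t0, 0(a0)      # store-conditional: t0 := 1 on success
               beq     t0, retry      # retry if the conditional store failed

   gen_qemu_ldl_l()/gen_qemu_ldq_l() record the address and loaded value, and
   gen_store_conditional() replays the compare before permitting the store.  */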
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
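
/* Worked example (added for illustration): a BR/BSR displacement is in units
   of instructions and is relative to the updated PC.  With ctx->pc already
   advanced past the branch (say 0x1004 for a branch at 0x1000), disp = 3 gives
   dest = 0x1004 + (3 << 2) = 0x1010.  */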
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
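
/* Example (added for illustration): an IEEE operation with
   fn11 = QUAL_S | QUAL_I | QUAL_RM_N (0x400 | 0x200 | 0x080 = 0x680) requests
   software completion, inexact detection and round-to-nearest;
   fn11 & QUAL_RM_MASK extracts just the rounding-mode bits handled by
   gen_qual_roundmode() below.  */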
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
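
/* Worked example (added for illustration): the register format of a longword
   in an FP register keeps bits <31:30> of the value at bits <63:62> and bits
   <29:0> at <58:29>.  gen_fcvtlq() above therefore rebuilds the integer as
       ((x >> 32) & 0xc0000000) | ((x >> 29) & 0x3fffffff)
   with the arithmetic shift providing the sign extension; gen_fcvtql() below
   performs the inverse placement.  */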
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 3:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
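
/* Worked example (added for illustration): zapnot_mask(0x0f) keeps bytes 0..3
   and returns 0x00000000ffffffff, so ZAPNOT with literal 0x0f is exactly a
   32-bit zero extension -- which is why gen_zapnoti() special-cases
   0x01/0x03/0x0f/0xff into the cheaper ext8u/ext16u/ext32u/mov operations.  */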
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
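
/* Worked example (added for illustration): for INSQH with rb<2:0> = 0 the
   required right shift is 64, which a TCG shift cannot express directly.
   Using ~((B & 7) * 8) & 63 yields 63, and the following constant shift by 1
   completes the total of 64, so the result is correctly all zero without
   relying on an out-of-range shift count.  */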
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                          \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,        \
                                    uint8_t lit)                              \
{                                                                             \
    if (unlikely(rc == 31))                                                   \
        return;                                                               \
                                                                              \
    if (ra != 31) {                                                           \
        if (islit) {                                                          \
            TCGv tmp = tcg_const_i64(lit);                                    \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);                 \
            tcg_temp_free(tmp);                                               \
        } else                                                                \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);          \
    } else {                                                                  \
        TCGv tmp1 = tcg_const_i64(0);                                         \
        if (islit) {                                                          \
            TCGv tmp2 = tcg_const_i64(lit);                                   \
            gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);                      \
            tcg_temp_free(tmp2);                                              \
        } else                                                                \
            gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);                \
        tcg_temp_free(tmp1);                                                  \
    }                                                                         \
}

/* Code to call arith3 helpers that also take the cpu_env pointer */
#define ARITH3_EX(name)                                                       \
    static inline void glue(gen_, name)(int ra, int rb, int rc,               \
                                        int islit, uint8_t lit)               \
    {                                                                         \
        if (unlikely(rc == 31)) {                                             \
            return;                                                           \
        }                                                                     \
        if (ra != 31) {                                                       \
            if (islit) {                                                      \
                TCGv tmp = tcg_const_i64(lit);                                \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                      \
                                    cpu_ir[ra], tmp);                         \
                tcg_temp_free(tmp);                                           \
            } else {                                                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                      \
                                    cpu_ir[ra], cpu_ir[rb]);                  \
            }                                                                 \
        } else {                                                              \
            TCGv tmp1 = tcg_const_i64(0);                                     \
            if (islit) {                                                      \
                TCGv tmp2 = tcg_const_i64(lit);                               \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);         \
                tcg_temp_free(tmp2);                                          \
            } else {                                                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]);   \
            }                                                                 \
            tcg_temp_free(tmp1);                                              \
        }                                                                     \
    }

#define MVIOP2(name)                                                          \
static inline void glue(gen_, name)(int rb, int rc)                           \
{                                                                             \
    if (unlikely(rc == 31))                                                   \
        return;                                                               \
    if (unlikely(rb == 31))                                                   \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                                      \
    else                                                                      \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);                          \
}
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
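
    /* Worked example (added for illustration): the branch displacement
       occupies insn<20:0>.  Shifting it up by 11 bits and arithmetically
       shifting back down sign-extends it, e.g. insn<20:0> = 0x1FFFFF yields
       disp21 = -1; the 12-bit HW_LD/HW_ST displacement is sign-extended the
       same way with a 20-bit shift.  */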
    ret = NO_EXIT;
    switch (opc) {
    case 0x00: /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x08: /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09: /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A: /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B: /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C: /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D: /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E: /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F: /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00: /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02: /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09: /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B: /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F: /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12: /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B: /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D: /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20: /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22: /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29: /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B: /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D: /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32: /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B: /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D: /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40: /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49: /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D: /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60: /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69: /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D: /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00: /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08: /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14: /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16: /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20: /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24: /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26: /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28: /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40: /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44: /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46: /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48: /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61: /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                else
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
            }
            break;
        case 0x64: /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66: /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C: /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02: /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06: /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B: /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12: /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16: /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B: /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22: /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26: /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B: /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30: /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31: /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32: /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34: /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36: /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39: /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B: /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C: /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52: /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57: /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A: /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62: /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67: /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A: /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72: /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77: /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A: /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00: /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20: /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30: /* UMULH */
            {
                TCGv low;
                if (unlikely(rc == 31)){
                    break;
                }
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                    break;
                }
                low = tcg_temp_new();
                if (islit) {
                    tcg_gen_movi_tl(low, lit);
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
                } else {
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
                tcg_temp_free(low);
            }
            break;
        case 0x40: /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60: /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04: /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A: /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B: /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14: /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24: /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A: /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B: /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00: /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01: /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02: /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03: /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20: /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21: /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22: /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23: /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25: /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26: /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27: /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00: /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01: /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02: /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03: /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20: /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21: /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22: /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23: /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24: /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25: /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26: /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27: /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F: /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C: /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E: /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021: /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022: /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024: /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025: /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A: /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B: /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C: /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D: /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E: /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F: /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000: /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0: /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1: /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5: /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6: /* Invalid */
                goto invalid_opc;
            case 0x7: /* Invalid */
                goto invalid_opc;
            case 0x8: /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9: /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD: /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00: /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01: /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30: /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31: /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32: /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33: /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34: /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35: /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36: /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37: /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38: /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39: /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A: /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B: /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C: /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D: /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E: /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F: /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70: /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78: /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(cpu_env, tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0: /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1: /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2: /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
                break;
            case 0x3: /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
                break;
            case 0x4: /* Longword virtual access */
                goto invalid_opc;
            case 0x5: /* Quadword virtual access */
                goto invalid_opc;
            case 0xC: /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD: /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20: /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21: /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22: /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23: /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24: /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25: /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26: /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27: /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28: /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29: /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A: /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B: /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C: /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D: /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E: /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F: /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30: /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34: /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38: /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39: /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A: /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B: /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C: /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D: /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E: /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F: /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    default:
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}