/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
26 #include "host-utils.h"
28 #include "qemu-common.h"
34 #undef ALPHA_DEBUG_DISAS
35 #define CONFIG_SOFTFLOAT_INLINE
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 # define LOG_DISAS(...) do { } while (0)
43 typedef struct DisasContext DisasContext
;
45 struct TranslationBlock
*tb
;
50 /* Current rounding mode for this TB. */
52 /* Current flush-to-zero setting for this TB. */
56 /* Return values from translate_one, indicating the state of the TB.
57 Note that zero indicates that we are not exiting the TB. */
62 /* We have emitted one or more goto_tb. No fixup required. */
65 /* We are not using a goto_tb (for whatever reason), but have updated
66 the PC (for whatever reason), so there's no need to do it again on
70 /* We are exiting the TB, but have neither emitted a goto_tb, nor
71 updated the PC for the next instruction to be executed. */
74 /* We are ending the TB with a noreturn function call, e.g. longjmp.
75 No following code will be executed. */
79 /* global register indexes */
80 static TCGv_ptr cpu_env
;
81 static TCGv cpu_ir
[31];
82 static TCGv cpu_fir
[31];
84 static TCGv cpu_lock_addr
;
85 static TCGv cpu_lock_st_addr
;
86 static TCGv cpu_lock_value
;
87 static TCGv cpu_unique
;
88 #ifndef CONFIG_USER_ONLY
89 static TCGv cpu_sysval
;
94 static char cpu_reg_names
[10*4+21*5 + 10*5+21*6];
96 #include "gen-icount.h"
98 static void alpha_translate_init(void)
102 static int done_init
= 0;
107 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
110 for (i
= 0; i
< 31; i
++) {
111 sprintf(p
, "ir%d", i
);
112 cpu_ir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
113 offsetof(CPUState
, ir
[i
]), p
);
114 p
+= (i
< 10) ? 4 : 5;
116 sprintf(p
, "fir%d", i
);
117 cpu_fir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
118 offsetof(CPUState
, fir
[i
]), p
);
119 p
+= (i
< 10) ? 5 : 6;
122 cpu_pc
= tcg_global_mem_new_i64(TCG_AREG0
,
123 offsetof(CPUState
, pc
), "pc");
125 cpu_lock_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
126 offsetof(CPUState
, lock_addr
),
128 cpu_lock_st_addr
= tcg_global_mem_new_i64(TCG_AREG0
,
129 offsetof(CPUState
, lock_st_addr
),
131 cpu_lock_value
= tcg_global_mem_new_i64(TCG_AREG0
,
132 offsetof(CPUState
, lock_value
),
135 cpu_unique
= tcg_global_mem_new_i64(TCG_AREG0
,
136 offsetof(CPUState
, unique
), "unique");
137 #ifndef CONFIG_USER_ONLY
138 cpu_sysval
= tcg_global_mem_new_i64(TCG_AREG0
,
139 offsetof(CPUState
, sysval
), "sysval");
140 cpu_usp
= tcg_global_mem_new_i64(TCG_AREG0
,
141 offsetof(CPUState
, usp
), "usp");
144 /* register helpers */
151 static void gen_excp_1(int exception
, int error_code
)
155 tmp1
= tcg_const_i32(exception
);
156 tmp2
= tcg_const_i32(error_code
);
157 gen_helper_excp(tmp1
, tmp2
);
158 tcg_temp_free_i32(tmp2
);
159 tcg_temp_free_i32(tmp1
);
162 static ExitStatus
gen_excp(DisasContext
*ctx
, int exception
, int error_code
)
164 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
165 gen_excp_1(exception
, error_code
);
166 return EXIT_NORETURN
;
169 static inline ExitStatus
gen_invalid(DisasContext
*ctx
)
171 return gen_excp(ctx
, EXCP_OPCDEC
, 0);
174 static inline void gen_qemu_ldf(TCGv t0
, TCGv t1
, int flags
)
176 TCGv tmp
= tcg_temp_new();
177 TCGv_i32 tmp32
= tcg_temp_new_i32();
178 tcg_gen_qemu_ld32u(tmp
, t1
, flags
);
179 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
180 gen_helper_memory_to_f(t0
, tmp32
);
181 tcg_temp_free_i32(tmp32
);
185 static inline void gen_qemu_ldg(TCGv t0
, TCGv t1
, int flags
)
187 TCGv tmp
= tcg_temp_new();
188 tcg_gen_qemu_ld64(tmp
, t1
, flags
);
189 gen_helper_memory_to_g(t0
, tmp
);
193 static inline void gen_qemu_lds(TCGv t0
, TCGv t1
, int flags
)
195 TCGv tmp
= tcg_temp_new();
196 TCGv_i32 tmp32
= tcg_temp_new_i32();
197 tcg_gen_qemu_ld32u(tmp
, t1
, flags
);
198 tcg_gen_trunc_i64_i32(tmp32
, tmp
);
199 gen_helper_memory_to_s(t0
, tmp32
);
200 tcg_temp_free_i32(tmp32
);
204 static inline void gen_qemu_ldl_l(TCGv t0
, TCGv t1
, int flags
)
206 tcg_gen_qemu_ld32s(t0
, t1
, flags
);
207 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
208 tcg_gen_mov_i64(cpu_lock_value
, t0
);
211 static inline void gen_qemu_ldq_l(TCGv t0
, TCGv t1
, int flags
)
213 tcg_gen_qemu_ld64(t0
, t1
, flags
);
214 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
215 tcg_gen_mov_i64(cpu_lock_value
, t0
);
218 static inline void gen_load_mem(DisasContext
*ctx
,
219 void (*tcg_gen_qemu_load
)(TCGv t0
, TCGv t1
,
221 int ra
, int rb
, int32_t disp16
, int fp
,
226 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
227 prefetches, which we can treat as nops. No worries about
228 missed exceptions here. */
229 if (unlikely(ra
== 31)) {
233 addr
= tcg_temp_new();
235 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp16
);
237 tcg_gen_andi_i64(addr
, addr
, ~0x7);
243 tcg_gen_movi_i64(addr
, disp16
);
246 va
= (fp
? cpu_fir
[ra
] : cpu_ir
[ra
]);
247 tcg_gen_qemu_load(va
, addr
, ctx
->mem_idx
);
252 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
254 TCGv_i32 tmp32
= tcg_temp_new_i32();
255 TCGv tmp
= tcg_temp_new();
256 gen_helper_f_to_memory(tmp32
, t0
);
257 tcg_gen_extu_i32_i64(tmp
, tmp32
);
258 tcg_gen_qemu_st32(tmp
, t1
, flags
);
260 tcg_temp_free_i32(tmp32
);
263 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
265 TCGv tmp
= tcg_temp_new();
266 gen_helper_g_to_memory(tmp
, t0
);
267 tcg_gen_qemu_st64(tmp
, t1
, flags
);
271 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
273 TCGv_i32 tmp32
= tcg_temp_new_i32();
274 TCGv tmp
= tcg_temp_new();
275 gen_helper_s_to_memory(tmp32
, t0
);
276 tcg_gen_extu_i32_i64(tmp
, tmp32
);
277 tcg_gen_qemu_st32(tmp
, t1
, flags
);
279 tcg_temp_free_i32(tmp32
);
282 static inline void gen_store_mem(DisasContext
*ctx
,
283 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
285 int ra
, int rb
, int32_t disp16
, int fp
,
290 addr
= tcg_temp_new();
292 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp16
);
294 tcg_gen_andi_i64(addr
, addr
, ~0x7);
300 tcg_gen_movi_i64(addr
, disp16
);
304 va
= tcg_const_i64(0);
306 va
= (fp
? cpu_fir
[ra
] : cpu_ir
[ra
]);
308 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
316 static ExitStatus
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
317 int32_t disp16
, int quad
)
322 /* ??? Don't bother storing anything. The user can't tell
323 the difference, since the zero register always reads zero. */
327 #if defined(CONFIG_USER_ONLY)
328 addr
= cpu_lock_st_addr
;
330 addr
= tcg_temp_local_new();
334 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp16
);
336 tcg_gen_movi_i64(addr
, disp16
);
339 #if defined(CONFIG_USER_ONLY)
340 /* ??? This is handled via a complicated version of compare-and-swap
341 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
342 in TCG so that this isn't necessary. */
343 return gen_excp(ctx
, quad
? EXCP_STQ_C
: EXCP_STL_C
, ra
);
345 /* ??? In system mode we are never multi-threaded, so CAS can be
346 implemented via a non-atomic load-compare-store sequence. */
348 int lab_fail
, lab_done
;
351 lab_fail
= gen_new_label();
352 lab_done
= gen_new_label();
353 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
355 val
= tcg_temp_new();
357 tcg_gen_qemu_ld64(val
, addr
, ctx
->mem_idx
);
359 tcg_gen_qemu_ld32s(val
, addr
, ctx
->mem_idx
);
361 tcg_gen_brcond_i64(TCG_COND_NE
, val
, cpu_lock_value
, lab_fail
);
364 tcg_gen_qemu_st64(cpu_ir
[ra
], addr
, ctx
->mem_idx
);
366 tcg_gen_qemu_st32(cpu_ir
[ra
], addr
, ctx
->mem_idx
);
368 tcg_gen_movi_i64(cpu_ir
[ra
], 1);
369 tcg_gen_br(lab_done
);
371 gen_set_label(lab_fail
);
372 tcg_gen_movi_i64(cpu_ir
[ra
], 0);
374 gen_set_label(lab_done
);
375 tcg_gen_movi_i64(cpu_lock_addr
, -1);
383 static int use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
385 /* Check for the dest on the same page as the start of the TB. We
386 also want to suppress goto_tb in the case of single-steping and IO. */
387 return (((ctx
->tb
->pc
^ dest
) & TARGET_PAGE_MASK
) == 0
388 && !ctx
->env
->singlestep_enabled
389 && !(ctx
->tb
->cflags
& CF_LAST_IO
));
392 static ExitStatus
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
394 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
397 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
400 /* Notice branch-to-next; used to initialize RA with the PC. */
403 } else if (use_goto_tb(ctx
, dest
)) {
405 tcg_gen_movi_i64(cpu_pc
, dest
);
406 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
);
409 tcg_gen_movi_i64(cpu_pc
, dest
);
410 return EXIT_PC_UPDATED
;
414 static ExitStatus
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
415 TCGv cmp
, int32_t disp
)
417 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
418 int lab_true
= gen_new_label();
420 if (use_goto_tb(ctx
, dest
)) {
421 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
424 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
425 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
);
427 gen_set_label(lab_true
);
429 tcg_gen_movi_i64(cpu_pc
, dest
);
430 tcg_gen_exit_tb((tcg_target_long
)ctx
->tb
+ 1);
434 int lab_over
= gen_new_label();
436 /* ??? Consider using either
439 movcond pc, cond, 0, tmp, pc
446 The current diamond subgraph surely isn't efficient. */
448 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
449 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
450 tcg_gen_br(lab_over
);
451 gen_set_label(lab_true
);
452 tcg_gen_movi_i64(cpu_pc
, dest
);
453 gen_set_label(lab_over
);
455 return EXIT_PC_UPDATED
;
459 static ExitStatus
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
460 int32_t disp
, int mask
)
464 if (unlikely(ra
== 31)) {
465 cmp_tmp
= tcg_const_i64(0);
467 cmp_tmp
= tcg_temp_new();
469 tcg_gen_andi_i64(cmp_tmp
, cpu_ir
[ra
], 1);
471 tcg_gen_mov_i64(cmp_tmp
, cpu_ir
[ra
]);
475 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
478 /* Fold -0.0 for comparison with COND. */
480 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
482 uint64_t mzero
= 1ull << 63;
487 /* For <= or >, the -0.0 value directly compares the way we want. */
488 tcg_gen_mov_i64(dest
, src
);
493 /* For == or !=, we can simply mask off the sign bit and compare. */
494 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
499 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
500 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
501 tcg_gen_neg_i64(dest
, dest
);
502 tcg_gen_and_i64(dest
, dest
, src
);
510 static ExitStatus
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
515 if (unlikely(ra
== 31)) {
516 /* Very uncommon case, but easier to optimize it to an integer
517 comparison than continuing with the floating point comparison. */
518 return gen_bcond(ctx
, cond
, ra
, disp
, 0);
521 cmp_tmp
= tcg_temp_new();
522 gen_fold_mzero(cond
, cmp_tmp
, cpu_fir
[ra
]);
523 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
526 static void gen_cmov(TCGCond cond
, int ra
, int rb
, int rc
,
527 int islit
, uint8_t lit
, int mask
)
529 TCGCond inv_cond
= tcg_invert_cond(cond
);
532 if (unlikely(rc
== 31))
535 l1
= gen_new_label();
539 TCGv tmp
= tcg_temp_new();
540 tcg_gen_andi_i64(tmp
, cpu_ir
[ra
], 1);
541 tcg_gen_brcondi_i64(inv_cond
, tmp
, 0, l1
);
544 tcg_gen_brcondi_i64(inv_cond
, cpu_ir
[ra
], 0, l1
);
546 /* Very uncommon case - Do not bother to optimize. */
547 TCGv tmp
= tcg_const_i64(0);
548 tcg_gen_brcondi_i64(inv_cond
, tmp
, 0, l1
);
553 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
555 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
559 static void gen_fcmov(TCGCond cond
, int ra
, int rb
, int rc
)
564 if (unlikely(rc
== 31)) {
568 cmp_tmp
= tcg_temp_new();
569 if (unlikely(ra
== 31)) {
570 tcg_gen_movi_i64(cmp_tmp
, 0);
572 gen_fold_mzero(cond
, cmp_tmp
, cpu_fir
[ra
]);
575 l1
= gen_new_label();
576 tcg_gen_brcondi_i64(tcg_invert_cond(cond
), cmp_tmp
, 0, l1
);
577 tcg_temp_free(cmp_tmp
);
580 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[rb
]);
582 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
586 #define QUAL_RM_N 0x080 /* Round mode nearest even */
587 #define QUAL_RM_C 0x000 /* Round mode chopped */
588 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
589 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
590 #define QUAL_RM_MASK 0x0c0
592 #define QUAL_U 0x100 /* Underflow enable (fp output) */
593 #define QUAL_V 0x100 /* Overflow enable (int output) */
594 #define QUAL_S 0x400 /* Software completion enable */
595 #define QUAL_I 0x200 /* Inexact detection enable */
597 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
601 fn11
&= QUAL_RM_MASK
;
602 if (fn11
== ctx
->tb_rm
) {
607 tmp
= tcg_temp_new_i32();
610 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
613 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
616 tcg_gen_movi_i32(tmp
, float_round_down
);
619 tcg_gen_ld8u_i32(tmp
, cpu_env
, offsetof(CPUState
, fpcr_dyn_round
));
623 #if defined(CONFIG_SOFTFLOAT_INLINE)
624 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
625 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
626 sets the one field. */
627 tcg_gen_st8_i32(tmp
, cpu_env
,
628 offsetof(CPUState
, fp_status
.float_rounding_mode
));
630 gen_helper_setroundmode(tmp
);
633 tcg_temp_free_i32(tmp
);
636 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
641 if (fn11
== ctx
->tb_ftz
) {
646 tmp
= tcg_temp_new_i32();
648 /* Underflow is enabled, use the FPCR setting. */
649 tcg_gen_ld8u_i32(tmp
, cpu_env
, offsetof(CPUState
, fpcr_flush_to_zero
));
651 /* Underflow is disabled, force flush-to-zero. */
652 tcg_gen_movi_i32(tmp
, 1);
655 #if defined(CONFIG_SOFTFLOAT_INLINE)
656 tcg_gen_st8_i32(tmp
, cpu_env
,
657 offsetof(CPUState
, fp_status
.flush_to_zero
));
659 gen_helper_setflushzero(tmp
);
662 tcg_temp_free_i32(tmp
);
665 static TCGv
gen_ieee_input(int reg
, int fn11
, int is_cmp
)
667 TCGv val
= tcg_temp_new();
669 tcg_gen_movi_i64(val
, 0);
670 } else if (fn11
& QUAL_S
) {
671 gen_helper_ieee_input_s(val
, cpu_fir
[reg
]);
673 gen_helper_ieee_input_cmp(val
, cpu_fir
[reg
]);
675 gen_helper_ieee_input(val
, cpu_fir
[reg
]);
680 static void gen_fp_exc_clear(void)
682 #if defined(CONFIG_SOFTFLOAT_INLINE)
683 TCGv_i32 zero
= tcg_const_i32(0);
684 tcg_gen_st8_i32(zero
, cpu_env
,
685 offsetof(CPUState
, fp_status
.float_exception_flags
));
686 tcg_temp_free_i32(zero
);
688 gen_helper_fp_exc_clear();
692 static void gen_fp_exc_raise_ignore(int rc
, int fn11
, int ignore
)
694 /* ??? We ought to be able to do something with imprecise exceptions.
695 E.g. notice we're still in the trap shadow of something within the
696 TB and do not generate the code to signal the exception; end the TB
697 when an exception is forced to arrive, either by consumption of a
698 register value or TRAPB or EXCB. */
699 TCGv_i32 exc
= tcg_temp_new_i32();
702 #if defined(CONFIG_SOFTFLOAT_INLINE)
703 tcg_gen_ld8u_i32(exc
, cpu_env
,
704 offsetof(CPUState
, fp_status
.float_exception_flags
));
706 gen_helper_fp_exc_get(exc
);
710 tcg_gen_andi_i32(exc
, exc
, ~ignore
);
713 /* ??? Pass in the regno of the destination so that the helper can
714 set EXC_MASK, which contains a bitmask of destination registers
715 that have caused arithmetic traps. A simple userspace emulation
716 does not require this. We do need it for a guest kernel's entArith,
717 or if we were to do something clever with imprecise exceptions. */
718 reg
= tcg_const_i32(rc
+ 32);
721 gen_helper_fp_exc_raise_s(exc
, reg
);
723 gen_helper_fp_exc_raise(exc
, reg
);
726 tcg_temp_free_i32(reg
);
727 tcg_temp_free_i32(exc
);
730 static inline void gen_fp_exc_raise(int rc
, int fn11
)
732 gen_fp_exc_raise_ignore(rc
, fn11
, fn11
& QUAL_I
? 0 : float_flag_inexact
);
735 static void gen_fcvtlq(int rb
, int rc
)
737 if (unlikely(rc
== 31)) {
740 if (unlikely(rb
== 31)) {
741 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
743 TCGv tmp
= tcg_temp_new();
745 /* The arithmetic right shift here, plus the sign-extended mask below
746 yields a sign-extended result without an explicit ext32s_i64. */
747 tcg_gen_sari_i64(tmp
, cpu_fir
[rb
], 32);
748 tcg_gen_shri_i64(cpu_fir
[rc
], cpu_fir
[rb
], 29);
749 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
750 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rc
], 0x3fffffff);
751 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
757 static void gen_fcvtql(int rb
, int rc
)
759 if (unlikely(rc
== 31)) {
762 if (unlikely(rb
== 31)) {
763 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
765 TCGv tmp
= tcg_temp_new();
767 tcg_gen_andi_i64(tmp
, cpu_fir
[rb
], 0xC0000000);
768 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rb
], 0x3FFFFFFF);
769 tcg_gen_shli_i64(tmp
, tmp
, 32);
770 tcg_gen_shli_i64(cpu_fir
[rc
], cpu_fir
[rc
], 29);
771 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
777 static void gen_fcvtql_v(DisasContext
*ctx
, int rb
, int rc
)
780 int lab
= gen_new_label();
781 TCGv tmp
= tcg_temp_new();
783 tcg_gen_ext32s_i64(tmp
, cpu_fir
[rb
]);
784 tcg_gen_brcond_i64(TCG_COND_EQ
, tmp
, cpu_fir
[rb
], lab
);
785 gen_excp(ctx
, EXCP_ARITH
, EXC_M_IOV
);
792 #define FARITH2(name) \
793 static inline void glue(gen_f, name)(int rb, int rc) \
795 if (unlikely(rc == 31)) { \
799 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
801 TCGv tmp = tcg_const_i64(0); \
802 gen_helper_ ## name (cpu_fir[rc], tmp); \
803 tcg_temp_free(tmp); \
807 /* ??? VAX instruction qualifiers ignored. */
815 static void gen_ieee_arith2(DisasContext
*ctx
, void (*helper
)(TCGv
, TCGv
),
816 int rb
, int rc
, int fn11
)
820 /* ??? This is wrong: the instruction is not a nop, it still may
822 if (unlikely(rc
== 31)) {
826 gen_qual_roundmode(ctx
, fn11
);
827 gen_qual_flushzero(ctx
, fn11
);
830 vb
= gen_ieee_input(rb
, fn11
, 0);
831 helper(cpu_fir
[rc
], vb
);
834 gen_fp_exc_raise(rc
, fn11
);
837 #define IEEE_ARITH2(name) \
838 static inline void glue(gen_f, name)(DisasContext *ctx, \
839 int rb, int rc, int fn11) \
841 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
848 static void gen_fcvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
853 /* ??? This is wrong: the instruction is not a nop, it still may
855 if (unlikely(rc
== 31)) {
859 /* No need to set flushzero, since we have an integer output. */
861 vb
= gen_ieee_input(rb
, fn11
, 0);
863 /* Almost all integer conversions use cropped rounding, and most
864 also do not have integer overflow enabled. Special case that. */
867 gen_helper_cvttq_c(cpu_fir
[rc
], vb
);
869 case QUAL_V
| QUAL_RM_C
:
870 case QUAL_S
| QUAL_V
| QUAL_RM_C
:
871 ignore
= float_flag_inexact
;
873 case QUAL_S
| QUAL_V
| QUAL_I
| QUAL_RM_C
:
874 gen_helper_cvttq_svic(cpu_fir
[rc
], vb
);
877 gen_qual_roundmode(ctx
, fn11
);
878 gen_helper_cvttq(cpu_fir
[rc
], vb
);
879 ignore
|= (fn11
& QUAL_V
? 0 : float_flag_overflow
);
880 ignore
|= (fn11
& QUAL_I
? 0 : float_flag_inexact
);
885 gen_fp_exc_raise_ignore(rc
, fn11
, ignore
);
888 static void gen_ieee_intcvt(DisasContext
*ctx
, void (*helper
)(TCGv
, TCGv
),
889 int rb
, int rc
, int fn11
)
893 /* ??? This is wrong: the instruction is not a nop, it still may
895 if (unlikely(rc
== 31)) {
899 gen_qual_roundmode(ctx
, fn11
);
902 vb
= tcg_const_i64(0);
907 /* The only exception that can be raised by integer conversion
908 is inexact. Thus we only need to worry about exceptions when
909 inexact handling is requested. */
912 helper(cpu_fir
[rc
], vb
);
913 gen_fp_exc_raise(rc
, fn11
);
915 helper(cpu_fir
[rc
], vb
);
923 #define IEEE_INTCVT(name) \
924 static inline void glue(gen_f, name)(DisasContext *ctx, \
925 int rb, int rc, int fn11) \
927 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
932 static void gen_cpys_internal(int ra
, int rb
, int rc
, int inv_a
, uint64_t mask
)
937 if (unlikely(rc
== 31)) {
941 vmask
= tcg_const_i64(mask
);
951 va
= tcg_temp_new_i64();
952 tcg_gen_mov_i64(va
, cpu_fir
[ra
]);
954 tcg_gen_andc_i64(va
, vmask
, va
);
956 tcg_gen_and_i64(va
, va
, vmask
);
964 vb
= tcg_temp_new_i64();
965 tcg_gen_andc_i64(vb
, cpu_fir
[rb
], vmask
);
968 switch (za
<< 1 | zb
) {
970 tcg_gen_or_i64(cpu_fir
[rc
], va
, vb
);
973 tcg_gen_mov_i64(cpu_fir
[rc
], va
);
976 tcg_gen_mov_i64(cpu_fir
[rc
], vb
);
979 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
983 tcg_temp_free(vmask
);
992 static inline void gen_fcpys(int ra
, int rb
, int rc
)
994 gen_cpys_internal(ra
, rb
, rc
, 0, 0x8000000000000000ULL
);
997 static inline void gen_fcpysn(int ra
, int rb
, int rc
)
999 gen_cpys_internal(ra
, rb
, rc
, 1, 0x8000000000000000ULL
);
1002 static inline void gen_fcpyse(int ra
, int rb
, int rc
)
1004 gen_cpys_internal(ra
, rb
, rc
, 0, 0xFFF0000000000000ULL
);
1007 #define FARITH3(name) \
1008 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1012 if (unlikely(rc == 31)) { \
1016 va = tcg_const_i64(0); \
1021 vb = tcg_const_i64(0); \
1026 gen_helper_ ## name (cpu_fir[rc], va, vb); \
1029 tcg_temp_free(va); \
1032 tcg_temp_free(vb); \
1036 /* ??? VAX instruction qualifiers ignored. */
1049 static void gen_ieee_arith3(DisasContext
*ctx
,
1050 void (*helper
)(TCGv
, TCGv
, TCGv
),
1051 int ra
, int rb
, int rc
, int fn11
)
1055 /* ??? This is wrong: the instruction is not a nop, it still may
1056 raise exceptions. */
1057 if (unlikely(rc
== 31)) {
1061 gen_qual_roundmode(ctx
, fn11
);
1062 gen_qual_flushzero(ctx
, fn11
);
1065 va
= gen_ieee_input(ra
, fn11
, 0);
1066 vb
= gen_ieee_input(rb
, fn11
, 0);
1067 helper(cpu_fir
[rc
], va
, vb
);
1071 gen_fp_exc_raise(rc
, fn11
);
1074 #define IEEE_ARITH3(name) \
1075 static inline void glue(gen_f, name)(DisasContext *ctx, \
1076 int ra, int rb, int rc, int fn11) \
1078 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1089 static void gen_ieee_compare(DisasContext
*ctx
,
1090 void (*helper
)(TCGv
, TCGv
, TCGv
),
1091 int ra
, int rb
, int rc
, int fn11
)
1095 /* ??? This is wrong: the instruction is not a nop, it still may
1096 raise exceptions. */
1097 if (unlikely(rc
== 31)) {
1103 va
= gen_ieee_input(ra
, fn11
, 1);
1104 vb
= gen_ieee_input(rb
, fn11
, 1);
1105 helper(cpu_fir
[rc
], va
, vb
);
1109 gen_fp_exc_raise(rc
, fn11
);
1112 #define IEEE_CMP3(name) \
1113 static inline void glue(gen_f, name)(DisasContext *ctx, \
1114 int ra, int rb, int rc, int fn11) \
1116 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1123 static inline uint64_t zapnot_mask(uint8_t lit
)
1128 for (i
= 0; i
< 8; ++i
) {
1130 mask
|= 0xffull
<< (i
* 8);
1135 /* Implement zapnot with an immediate operand, which expands to some
1136 form of immediate AND. This is a basic building block in the
1137 definition of many of the other byte manipulation instructions. */
1138 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
1142 tcg_gen_movi_i64(dest
, 0);
1145 tcg_gen_ext8u_i64(dest
, src
);
1148 tcg_gen_ext16u_i64(dest
, src
);
1151 tcg_gen_ext32u_i64(dest
, src
);
1154 tcg_gen_mov_i64(dest
, src
);
1157 tcg_gen_andi_i64 (dest
, src
, zapnot_mask (lit
));
1162 static inline void gen_zapnot(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1164 if (unlikely(rc
== 31))
1166 else if (unlikely(ra
== 31))
1167 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1169 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1171 gen_helper_zapnot (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1174 static inline void gen_zap(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1176 if (unlikely(rc
== 31))
1178 else if (unlikely(ra
== 31))
1179 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1181 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1183 gen_helper_zap (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1187 /* EXTWH, EXTLH, EXTQH */
1188 static void gen_ext_h(int ra
, int rb
, int rc
, int islit
,
1189 uint8_t lit
, uint8_t byte_mask
)
1191 if (unlikely(rc
== 31))
1193 else if (unlikely(ra
== 31))
1194 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1197 lit
= (64 - (lit
& 7) * 8) & 0x3f;
1198 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1200 TCGv tmp1
= tcg_temp_new();
1201 tcg_gen_andi_i64(tmp1
, cpu_ir
[rb
], 7);
1202 tcg_gen_shli_i64(tmp1
, tmp1
, 3);
1203 tcg_gen_neg_i64(tmp1
, tmp1
);
1204 tcg_gen_andi_i64(tmp1
, tmp1
, 0x3f);
1205 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp1
);
1206 tcg_temp_free(tmp1
);
1208 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1212 /* EXTBL, EXTWL, EXTLL, EXTQL */
1213 static void gen_ext_l(int ra
, int rb
, int rc
, int islit
,
1214 uint8_t lit
, uint8_t byte_mask
)
1216 if (unlikely(rc
== 31))
1218 else if (unlikely(ra
== 31))
1219 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1222 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], (lit
& 7) * 8);
1224 TCGv tmp
= tcg_temp_new();
1225 tcg_gen_andi_i64(tmp
, cpu_ir
[rb
], 7);
1226 tcg_gen_shli_i64(tmp
, tmp
, 3);
1227 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp
);
1230 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1234 /* INSWH, INSLH, INSQH */
1235 static void gen_ins_h(int ra
, int rb
, int rc
, int islit
,
1236 uint8_t lit
, uint8_t byte_mask
)
1238 if (unlikely(rc
== 31))
1240 else if (unlikely(ra
== 31) || (islit
&& (lit
& 7) == 0))
1241 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1243 TCGv tmp
= tcg_temp_new();
1245 /* The instruction description has us left-shift the byte mask
1246 and extract bits <15:8> and apply that zap at the end. This
1247 is equivalent to simply performing the zap first and shifting
1249 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1252 /* Note that we have handled the lit==0 case above. */
1253 tcg_gen_shri_i64 (cpu_ir
[rc
], tmp
, 64 - (lit
& 7) * 8);
1255 TCGv shift
= tcg_temp_new();
1257 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1258 Do this portably by splitting the shift into two parts:
1259 shift_count-1 and 1. Arrange for the -1 by using
1260 ones-complement instead of twos-complement in the negation:
1261 ~((B & 7) * 8) & 63. */
1263 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1264 tcg_gen_shli_i64(shift
, shift
, 3);
1265 tcg_gen_not_i64(shift
, shift
);
1266 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1268 tcg_gen_shr_i64(cpu_ir
[rc
], tmp
, shift
);
1269 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[rc
], 1);
1270 tcg_temp_free(shift
);
1276 /* INSBL, INSWL, INSLL, INSQL */
1277 static void gen_ins_l(int ra
, int rb
, int rc
, int islit
,
1278 uint8_t lit
, uint8_t byte_mask
)
1280 if (unlikely(rc
== 31))
1282 else if (unlikely(ra
== 31))
1283 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1285 TCGv tmp
= tcg_temp_new();
1287 /* The instruction description has us left-shift the byte mask
1288 the same number of byte slots as the data and apply the zap
1289 at the end. This is equivalent to simply performing the zap
1290 first and shifting afterward. */
1291 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1294 tcg_gen_shli_i64(cpu_ir
[rc
], tmp
, (lit
& 7) * 8);
1296 TCGv shift
= tcg_temp_new();
1297 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1298 tcg_gen_shli_i64(shift
, shift
, 3);
1299 tcg_gen_shl_i64(cpu_ir
[rc
], tmp
, shift
);
1300 tcg_temp_free(shift
);
1306 /* MSKWH, MSKLH, MSKQH */
1307 static void gen_msk_h(int ra
, int rb
, int rc
, int islit
,
1308 uint8_t lit
, uint8_t byte_mask
)
1310 if (unlikely(rc
== 31))
1312 else if (unlikely(ra
== 31))
1313 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1315 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~((byte_mask
<< (lit
& 7)) >> 8));
1317 TCGv shift
= tcg_temp_new();
1318 TCGv mask
= tcg_temp_new();
1320 /* The instruction description is as above, where the byte_mask
1321 is shifted left, and then we extract bits <15:8>. This can be
1322 emulated with a right-shift on the expanded byte mask. This
1323 requires extra care because for an input <2:0> == 0 we need a
1324 shift of 64 bits in order to generate a zero. This is done by
1325 splitting the shift into two parts, the variable shift - 1
1326 followed by a constant 1 shift. The code we expand below is
1327 equivalent to ~((B & 7) * 8) & 63. */
1329 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1330 tcg_gen_shli_i64(shift
, shift
, 3);
1331 tcg_gen_not_i64(shift
, shift
);
1332 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1333 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1334 tcg_gen_shr_i64(mask
, mask
, shift
);
1335 tcg_gen_shri_i64(mask
, mask
, 1);
1337 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1339 tcg_temp_free(mask
);
1340 tcg_temp_free(shift
);
1344 /* MSKBL, MSKWL, MSKLL, MSKQL */
1345 static void gen_msk_l(int ra
, int rb
, int rc
, int islit
,
1346 uint8_t lit
, uint8_t byte_mask
)
1348 if (unlikely(rc
== 31))
1350 else if (unlikely(ra
== 31))
1351 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1353 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~(byte_mask
<< (lit
& 7)));
1355 TCGv shift
= tcg_temp_new();
1356 TCGv mask
= tcg_temp_new();
1358 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1359 tcg_gen_shli_i64(shift
, shift
, 3);
1360 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1361 tcg_gen_shl_i64(mask
, mask
, shift
);
1363 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1365 tcg_temp_free(mask
);
1366 tcg_temp_free(shift
);
1370 /* Code to call arith3 helpers */
1371 #define ARITH3(name) \
1372 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1375 if (unlikely(rc == 31)) \
1380 TCGv tmp = tcg_const_i64(lit); \
1381 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1382 tcg_temp_free(tmp); \
1384 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1386 TCGv tmp1 = tcg_const_i64(0); \
1388 TCGv tmp2 = tcg_const_i64(lit); \
1389 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1390 tcg_temp_free(tmp2); \
1392 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1393 tcg_temp_free(tmp1); \
1414 #define MVIOP2(name) \
1415 static inline void glue(gen_, name)(int rb, int rc) \
1417 if (unlikely(rc == 31)) \
1419 if (unlikely(rb == 31)) \
1420 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1422 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1429 static void gen_cmp(TCGCond cond
, int ra
, int rb
, int rc
,
1430 int islit
, uint8_t lit
)
1434 if (unlikely(rc
== 31)) {
1439 va
= tcg_const_i64(0);
1444 vb
= tcg_const_i64(lit
);
1449 tcg_gen_setcond_i64(cond
, cpu_ir
[rc
], va
, vb
);
1459 static void gen_rx(int ra
, int set
)
1464 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, offsetof(CPUState
, intr_flag
));
1467 tmp
= tcg_const_i32(set
);
1468 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUState
, intr_flag
));
1469 tcg_temp_free_i32(tmp
);
1472 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1474 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1475 to internal cpu registers. */
1477 /* Unprivileged PAL call */
1478 if (palcode
>= 0x80 && palcode
< 0xC0) {
1482 /* No-op inside QEMU. */
1486 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_unique
);
1490 tcg_gen_mov_i64(cpu_unique
, cpu_ir
[IR_A0
]);
1493 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0xbf);
1498 #ifndef CONFIG_USER_ONLY
1499 /* Privileged PAL code */
1500 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1504 /* No-op inside QEMU. */
1508 /* No-op inside QEMU. */
1512 tcg_gen_st_i64(cpu_ir
[IR_A0
], cpu_env
, offsetof(CPUState
, vptptr
));
1516 tcg_gen_mov_i64(cpu_sysval
, cpu_ir
[IR_A0
]);
1520 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_sysval
);
1527 /* Note that we already know we're in kernel mode, so we know
1528 that PS only contains the 3 IPL bits. */
1529 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUState
, ps
));
1531 /* But make sure and store only the 3 IPL bits from the user. */
1532 tmp
= tcg_temp_new();
1533 tcg_gen_andi_i64(tmp
, cpu_ir
[IR_A0
], PS_INT_MASK
);
1534 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUState
, ps
));
1541 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUState
, ps
));
1545 tcg_gen_mov_i64(cpu_usp
, cpu_ir
[IR_A0
]);
1549 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_usp
);
1553 tcg_gen_ld32s_i64(cpu_ir
[IR_V0
], cpu_env
,
1554 offsetof(CPUState
, cpu_index
));
1558 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0x3f);
1564 return gen_invalid(ctx
);
1567 #ifndef CONFIG_USER_ONLY
1569 #define PR_BYTE 0x100000
1570 #define PR_LONG 0x200000
1572 static int cpu_pr_data(int pr
)
1575 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1576 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1577 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1578 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1579 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1580 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1581 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1582 case 7: return offsetof(CPUAlphaState
, palbr
);
1583 case 8: return offsetof(CPUAlphaState
, ptbr
);
1584 case 9: return offsetof(CPUAlphaState
, vptptr
);
1585 case 10: return offsetof(CPUAlphaState
, unique
);
1586 case 11: return offsetof(CPUAlphaState
, sysval
);
1587 case 12: return offsetof(CPUAlphaState
, usp
);
1590 return offsetof(CPUAlphaState
, shadow
[pr
- 32]);
1592 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1597 static void gen_mfpr(int ra
, int regno
)
1599 int data
= cpu_pr_data(regno
);
1601 /* In our emulated PALcode, these processor registers have no
1602 side effects from reading. */
1607 /* The basic registers are data only, and unknown registers
1608 are read-zero, write-ignore. */
1610 tcg_gen_movi_i64(cpu_ir
[ra
], 0);
1611 } else if (data
& PR_BYTE
) {
1612 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_BYTE
);
1613 } else if (data
& PR_LONG
) {
1614 tcg_gen_ld32s_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_LONG
);
1616 tcg_gen_ld_i64(cpu_ir
[ra
], cpu_env
, data
);
1620 static void gen_mtpr(int rb
, int regno
)
1625 tmp
= tcg_const_i64(0);
1630 /* These two register numbers perform a TLB cache flush. Thankfully we
1631 can only do this inside PALmode, which means that the current basic
1632 block cannot be affected by the change in mappings. */
1636 } else if (regno
== 254) {
1638 gen_helper_tbis(tmp
);
1640 /* The basic registers are data only, and unknown registers
1641 are read-zero, write-ignore. */
1642 int data
= cpu_pr_data(regno
);
1644 if (data
& PR_BYTE
) {
1645 tcg_gen_st8_i64(tmp
, cpu_env
, data
& ~PR_BYTE
);
1646 } else if (data
& PR_LONG
) {
1647 tcg_gen_st32_i64(tmp
, cpu_env
, data
& ~PR_LONG
);
1649 tcg_gen_st_i64(tmp
, cpu_env
, data
);
1658 #endif /* !USER_ONLY*/
1660 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1663 int32_t disp21
, disp16
;
1664 #ifndef CONFIG_USER_ONLY
1668 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, islit
, real_islit
;
1672 /* Decode all instruction fields */
1674 ra
= (insn
>> 21) & 0x1F;
1675 rb
= (insn
>> 16) & 0x1F;
1677 real_islit
= islit
= (insn
>> 12) & 1;
1678 if (rb
== 31 && !islit
) {
1682 lit
= (insn
>> 13) & 0xFF;
1683 palcode
= insn
& 0x03FFFFFF;
1684 disp21
= ((int32_t)((insn
& 0x001FFFFF) << 11)) >> 11;
1685 disp16
= (int16_t)(insn
& 0x0000FFFF);
1686 #ifndef CONFIG_USER_ONLY
1687 disp12
= (int32_t)((insn
& 0x00000FFF) << 20) >> 20;
1689 fn11
= (insn
>> 5) & 0x000007FF;
1691 fn7
= (insn
>> 5) & 0x0000007F;
1692 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1693 opc
, ra
, rb
, rc
, disp16
);
1699 ret
= gen_call_pal(ctx
, palcode
);
1724 if (likely(ra
!= 31)) {
1726 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
1728 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
1733 if (likely(ra
!= 31)) {
1735 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
1737 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
1742 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1743 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1749 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1753 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1754 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1760 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1764 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1768 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1774 if (likely(rc
!= 31)) {
1777 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1778 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1780 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1781 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1785 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1787 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1793 if (likely(rc
!= 31)) {
1795 TCGv tmp
= tcg_temp_new();
1796 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1798 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1800 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1801 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1805 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1807 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1813 if (likely(rc
!= 31)) {
1816 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1818 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1819 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1822 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1824 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1825 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1831 if (likely(rc
!= 31)) {
1833 TCGv tmp
= tcg_temp_new();
1834 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1836 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1838 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1839 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1843 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1845 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1846 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1853 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1857 if (likely(rc
!= 31)) {
1859 TCGv tmp
= tcg_temp_new();
1860 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1862 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1864 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1865 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1869 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1871 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1877 if (likely(rc
!= 31)) {
1879 TCGv tmp
= tcg_temp_new();
1880 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1882 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1884 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1885 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1889 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1891 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1892 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1899 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
1903 if (likely(rc
!= 31)) {
1906 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1908 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1911 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1913 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1919 if (likely(rc
!= 31)) {
1921 TCGv tmp
= tcg_temp_new();
1922 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1924 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1926 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1930 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1932 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1938 if (likely(rc
!= 31)) {
1941 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1943 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1946 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1948 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1954 if (likely(rc
!= 31)) {
1956 TCGv tmp
= tcg_temp_new();
1957 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1959 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1961 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1965 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1967 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1973 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
1977 if (likely(rc
!= 31)) {
1979 TCGv tmp
= tcg_temp_new();
1980 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1982 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1984 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1988 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1990 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1996 if (likely(rc
!= 31)) {
1998 TCGv tmp
= tcg_temp_new();
1999 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
2001 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2003 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2007 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2009 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2015 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
2019 gen_addlv(ra
, rb
, rc
, islit
, lit
);
2023 gen_sublv(ra
, rb
, rc
, islit
, lit
);
2027 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
2031 gen_addqv(ra
, rb
, rc
, islit
, lit
);
2035 gen_subqv(ra
, rb
, rc
, islit
, lit
);
2039 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
2049 if (likely(rc
!= 31)) {
2051 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2053 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2055 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2060 if (likely(rc
!= 31)) {
2063 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2065 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2067 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2072 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
2076 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
2080 if (likely(rc
!= 31)) {
2083 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2085 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2088 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2090 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2096 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
2100 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
2104 if (likely(rc
!= 31)) {
2107 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2109 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2112 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2114 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2120 if (likely(rc
!= 31)) {
2123 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2125 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2128 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2130 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2136 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
2140 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
2144 if (likely(rc
!= 31)) {
2147 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2149 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2152 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2154 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2160 if (likely(rc
!= 31)) {
2161 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
2164 tcg_gen_movi_i64(cpu_ir
[rc
], lit
& ~amask
);
2166 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rb
], ~amask
);
2172 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
2176 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
2181 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->env
->implver
);
2191 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2195 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2199 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2203 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2207 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2211 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2215 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2219 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2223 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2227 gen_zap(ra
, rb
, rc
, islit
, lit
);
2231 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
2235 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2239 if (likely(rc
!= 31)) {
2242 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2244 TCGv shift
= tcg_temp_new();
2245 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2246 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2247 tcg_temp_free(shift
);
2250 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2255 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2259 if (likely(rc
!= 31)) {
2262 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2264 TCGv shift
= tcg_temp_new();
2265 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2266 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2267 tcg_temp_free(shift
);
2270 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2275 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2279 if (likely(rc
!= 31)) {
2282 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2284 TCGv shift
= tcg_temp_new();
2285 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2286 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2287 tcg_temp_free(shift
);
2290 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2295 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2299 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2303 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2307 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2311 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2315 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2319 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2323 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2327 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2337 if (likely(rc
!= 31)) {
2339 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2342 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2344 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2345 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
2351 if (likely(rc
!= 31)) {
2353 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2355 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2357 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2362 gen_umulh(ra
, rb
, rc
, islit
, lit
);
2366 gen_mullv(ra
, rb
, rc
, islit
, lit
);
2370 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
2377 switch (fpfn
) { /* fn11 & 0x3F */
2380 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2383 if (likely(rc
!= 31)) {
2385 TCGv_i32 tmp
= tcg_temp_new_i32();
2386 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2387 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
2388 tcg_temp_free_i32(tmp
);
2390 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2395 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2402 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2403 gen_fsqrts(ctx
, rb
, rc
, fn11
);
2409 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2412 if (likely(rc
!= 31)) {
2414 TCGv_i32 tmp
= tcg_temp_new_i32();
2415 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2416 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
2417 tcg_temp_free_i32(tmp
);
2419 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2424 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2427 if (likely(rc
!= 31)) {
2429 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
2431 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2436 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2443 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2444 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
2453 /* VAX floating point */
2454 /* XXX: rounding mode and trap are ignored (!) */
2455 switch (fpfn
) { /* fn11 & 0x3F */
2458 gen_faddf(ra
, rb
, rc
);
2462 gen_fsubf(ra
, rb
, rc
);
2466 gen_fmulf(ra
, rb
, rc
);
2470 gen_fdivf(ra
, rb
, rc
);
2482 gen_faddg(ra
, rb
, rc
);
2486 gen_fsubg(ra
, rb
, rc
);
2490 gen_fmulg(ra
, rb
, rc
);
2494 gen_fdivg(ra
, rb
, rc
);
2498 gen_fcmpgeq(ra
, rb
, rc
);
2502 gen_fcmpglt(ra
, rb
, rc
);
2506 gen_fcmpgle(ra
, rb
, rc
);
2537 /* IEEE floating-point */
2538 switch (fpfn
) { /* fn11 & 0x3F */
2541 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2545 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2549 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2553 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2557 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2561 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2565 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2569 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2573 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2577 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2581 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2585 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2588 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2590 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2593 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2598 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2602 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2606 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2619 if (likely(rc
!= 31)) {
2623 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2625 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
2628 gen_fcpys(ra
, rb
, rc
);
2634 gen_fcpysn(ra
, rb
, rc
);
2638 gen_fcpyse(ra
, rb
, rc
);
2642 if (likely(ra
!= 31))
2643 gen_helper_store_fpcr(cpu_fir
[ra
]);
2645 TCGv tmp
= tcg_const_i64(0);
2646 gen_helper_store_fpcr(tmp
);
2652 if (likely(ra
!= 31))
2653 gen_helper_load_fpcr(cpu_fir
[ra
]);
2657 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
2661 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
2665 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
2669 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
2673 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
2677 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
2687 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2688 /v doesn't do. The only thing I can think is that /sv is a
2689 valid instruction merely for completeness in the ISA. */
2690 gen_fcvtql_v(ctx
, rb
, rc
);
2697 switch ((uint16_t)disp16
) {
2725 gen_helper_load_pcc(cpu_ir
[ra
]);
2747 /* HW_MFPR (PALcode) */
2748 #ifndef CONFIG_USER_ONLY
2749 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2750 gen_mfpr(ra
, insn
& 0xffff);
2756 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2757 prediction stack action, which of course we don't implement. */
2759 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
2761 tcg_gen_movi_i64(cpu_pc
, 0);
2764 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2766 ret
= EXIT_PC_UPDATED
;
2769 /* HW_LD (PALcode) */
2770 #ifndef CONFIG_USER_ONLY
2771 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2778 addr
= tcg_temp_new();
2780 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2782 tcg_gen_movi_i64(addr
, disp12
);
2783 switch ((insn
>> 12) & 0xF) {
2785 /* Longword physical access (hw_ldl/p) */
2786 gen_helper_ldl_phys(cpu_ir
[ra
], addr
);
2789 /* Quadword physical access (hw_ldq/p) */
2790 gen_helper_ldq_phys(cpu_ir
[ra
], addr
);
2793 /* Longword physical access with lock (hw_ldl_l/p) */
2794 gen_helper_ldl_l_phys(cpu_ir
[ra
], addr
);
2797 /* Quadword physical access with lock (hw_ldq_l/p) */
2798 gen_helper_ldq_l_phys(cpu_ir
[ra
], addr
);
2801 /* Longword virtual PTE fetch (hw_ldl/v) */
2804 /* Quadword virtual PTE fetch (hw_ldq/v) */
2808 /* Incpu_ir[ra]id */
2811 /* Incpu_ir[ra]id */
2814 /* Longword virtual access (hw_ldl) */
2817 /* Quadword virtual access (hw_ldq) */
2820 /* Longword virtual access with protection check (hw_ldl/w) */
2821 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2824 /* Quadword virtual access with protection check (hw_ldq/w) */
2825 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2828 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2831 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2834 /* Longword virtual access with alternate access mode and
2835 protection checks (hw_ldl/wa) */
2836 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2839 /* Quadword virtual access with alternate access mode and
2840 protection checks (hw_ldq/wa) */
2841 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2844 tcg_temp_free(addr
);
2853 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) == 0) {
2856 if (likely(rc
!= 31)) {
2858 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2860 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2865 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
2866 if (likely(rc
!= 31)) {
2868 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
2870 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2878 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2879 if (likely(rc
!= 31)) {
2881 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
2883 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
2891 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2892 gen_perr(ra
, rb
, rc
, islit
, lit
);
2898 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2899 if (likely(rc
!= 31)) {
2901 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
2903 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
2911 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2912 if (likely(rc
!= 31)) {
2914 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
2916 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
2924 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2925 if (real_islit
|| ra
!= 31) {
2934 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2935 if (real_islit
|| ra
!= 31) {
2944 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2945 if (real_islit
|| ra
!= 31) {
2954 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2955 if (real_islit
|| ra
!= 31) {
2964 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2965 gen_minsb8(ra
, rb
, rc
, islit
, lit
);
2971 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2972 gen_minsw4(ra
, rb
, rc
, islit
, lit
);
2978 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2979 gen_minub8(ra
, rb
, rc
, islit
, lit
);
2985 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2986 gen_minuw4(ra
, rb
, rc
, islit
, lit
);
2992 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2993 gen_maxub8(ra
, rb
, rc
, islit
, lit
);
2999 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3000 gen_maxuw4(ra
, rb
, rc
, islit
, lit
);
3006 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3007 gen_maxsb8(ra
, rb
, rc
, islit
, lit
);
3013 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3014 gen_maxsw4(ra
, rb
, rc
, islit
, lit
);
3020 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3023 if (likely(rc
!= 31)) {
3025 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
3027 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
3032 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3036 TCGv_i32 tmp1
= tcg_temp_new_i32();
3038 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
3040 TCGv tmp2
= tcg_const_i64(0);
3041 gen_helper_s_to_memory(tmp1
, tmp2
);
3042 tcg_temp_free(tmp2
);
3044 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
3045 tcg_temp_free_i32(tmp1
);
3053 /* HW_MTPR (PALcode) */
3054 #ifndef CONFIG_USER_ONLY
3055 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3056 gen_mtpr(rb
, insn
& 0xffff);
3062 /* HW_RET (PALcode) */
3063 #ifndef CONFIG_USER_ONLY
3064 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3066 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3067 address from EXC_ADDR. This turns out to be useful for our
3068 emulation PALcode, so continue to accept it. */
3069 TCGv tmp
= tcg_temp_new();
3070 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUState
, exc_addr
));
3071 gen_helper_hw_ret(tmp
);
3074 gen_helper_hw_ret(cpu_ir
[rb
]);
3076 ret
= EXIT_PC_UPDATED
;
3082 /* HW_ST (PALcode) */
3083 #ifndef CONFIG_USER_ONLY
3084 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3086 addr
= tcg_temp_new();
3088 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
3090 tcg_gen_movi_i64(addr
, disp12
);
3094 val
= tcg_temp_new();
3095 tcg_gen_movi_i64(val
, 0);
3097 switch ((insn
>> 12) & 0xF) {
3099 /* Longword physical access */
3100 gen_helper_stl_phys(addr
, val
);
3103 /* Quadword physical access */
3104 gen_helper_stq_phys(addr
, val
);
3107 /* Longword physical access with lock */
3108 gen_helper_stl_c_phys(val
, addr
, val
);
3111 /* Quadword physical access with lock */
3112 gen_helper_stq_c_phys(val
, addr
, val
);
3115 /* Longword virtual access */
3118 /* Quadword virtual access */
3139 /* Longword virtual access with alternate access mode */
3142 /* Quadword virtual access with alternate access mode */
3153 tcg_temp_free(addr
);
3160 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
3164 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
3168 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
3172 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
3176 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
3180 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
3184 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
3188 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
3192 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
3196 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
3200 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
3204 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
3208 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
3212 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
3216 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
3220 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
3224 ret
= gen_bdirect(ctx
, ra
, disp21
);
3226 case 0x31: /* FBEQ */
3227 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
3229 case 0x32: /* FBLT */
3230 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
3232 case 0x33: /* FBLE */
3233 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
3237 ret
= gen_bdirect(ctx
, ra
, disp21
);
3239 case 0x35: /* FBNE */
3240 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
3242 case 0x36: /* FBGE */
3243 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
3245 case 0x37: /* FBGT */
3246 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
3250 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
3254 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
3258 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
3262 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
3266 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
3270 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
3274 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
3278 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
3281 ret
= gen_invalid(ctx
);
3288 static inline void gen_intermediate_code_internal(CPUState
*env
,
3289 TranslationBlock
*tb
,
3292 DisasContext ctx
, *ctxp
= &ctx
;
3293 target_ulong pc_start
;
3295 uint16_t *gen_opc_end
;
3303 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3308 ctx
.mem_idx
= cpu_mmu_index(env
);
3310 /* ??? Every TB begins with unset rounding mode, to be initialized on
3311 the first fp insn of the TB. Alternately we could define a proper
3312 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3313 to reset the FP_STATUS to that default at the end of any TB that
3314 changes the default. We could even (gasp) dynamiclly figure out
3315 what default would be most efficient given the running program. */
3317 /* Similarly for flush-to-zero. */
3321 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
3323 max_insns
= CF_COUNT_MASK
;
3327 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
3328 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
3329 if (bp
->pc
== ctx
.pc
) {
3330 gen_excp(&ctx
, EXCP_DEBUG
, 0);
3336 j
= gen_opc_ptr
- gen_opc_buf
;
3340 gen_opc_instr_start
[lj
++] = 0;
3342 gen_opc_pc
[lj
] = ctx
.pc
;
3343 gen_opc_instr_start
[lj
] = 1;
3344 gen_opc_icount
[lj
] = num_insns
;
3346 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
3348 insn
= ldl_code(ctx
.pc
);
3351 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
3352 tcg_gen_debug_insn_start(ctx
.pc
);
3356 ret
= translate_one(ctxp
, insn
);
3358 /* If we reach a page boundary, are single stepping,
3359 or exhaust instruction count, stop generation. */
3361 && ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0
3362 || gen_opc_ptr
>= gen_opc_end
3363 || num_insns
>= max_insns
3365 || env
->singlestep_enabled
)) {
3366 ret
= EXIT_PC_STALE
;
3368 } while (ret
== NO_EXIT
);
3370 if (tb
->cflags
& CF_LAST_IO
) {
3379 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
3381 case EXIT_PC_UPDATED
:
3382 if (env
->singlestep_enabled
) {
3383 gen_excp_1(EXCP_DEBUG
, 0);
3392 gen_icount_end(tb
, num_insns
);
3393 *gen_opc_ptr
= INDEX_op_end
;
3395 j
= gen_opc_ptr
- gen_opc_buf
;
3398 gen_opc_instr_start
[lj
++] = 0;
3400 tb
->size
= ctx
.pc
- pc_start
;
3401 tb
->icount
= num_insns
;
3405 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3406 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3407 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 1);
3413 void gen_intermediate_code (CPUState
*env
, struct TranslationBlock
*tb
)
3415 gen_intermediate_code_internal(env
, tb
, 0);
3418 void gen_intermediate_code_pc (CPUState
*env
, struct TranslationBlock
*tb
)
3420 gen_intermediate_code_internal(env
, tb
, 1);
3428 static const struct cpu_def_t cpu_defs
[] = {
3429 { "ev4", IMPLVER_2106x
, 0 },
3430 { "ev5", IMPLVER_21164
, 0 },
3431 { "ev56", IMPLVER_21164
, AMASK_BWX
},
3432 { "pca56", IMPLVER_21164
, AMASK_BWX
| AMASK_MVI
},
3433 { "ev6", IMPLVER_21264
, AMASK_BWX
| AMASK_FIX
| AMASK_MVI
| AMASK_TRAP
},
3434 { "ev67", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3435 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), },
3436 { "ev68", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3437 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), },
3438 { "21064", IMPLVER_2106x
, 0 },
3439 { "21164", IMPLVER_21164
, 0 },
3440 { "21164a", IMPLVER_21164
, AMASK_BWX
},
3441 { "21164pc", IMPLVER_21164
, AMASK_BWX
| AMASK_MVI
},
3442 { "21264", IMPLVER_21264
, AMASK_BWX
| AMASK_FIX
| AMASK_MVI
| AMASK_TRAP
},
3443 { "21264a", IMPLVER_21264
, (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
3444 | AMASK_MVI
| AMASK_TRAP
| AMASK_PREFETCH
), }
3447 CPUAlphaState
* cpu_alpha_init (const char *cpu_model
)
3450 int implver
, amask
, i
, max
;
3452 env
= qemu_mallocz(sizeof(CPUAlphaState
));
3454 alpha_translate_init();
3457 /* Default to ev67; no reason not to emulate insns by default. */
3458 implver
= IMPLVER_21264
;
3459 amask
= (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
| AMASK_MVI
3460 | AMASK_TRAP
| AMASK_PREFETCH
);
3462 max
= ARRAY_SIZE(cpu_defs
);
3463 for (i
= 0; i
< max
; i
++) {
3464 if (strcmp (cpu_model
, cpu_defs
[i
].name
) == 0) {
3465 implver
= cpu_defs
[i
].implver
;
3466 amask
= cpu_defs
[i
].amask
;
3470 env
->implver
= implver
;
3473 #if defined (CONFIG_USER_ONLY)
3474 env
->ps
= PS_USER_MODE
;
3475 cpu_alpha_store_fpcr(env
, (FPCR_INVD
| FPCR_DZED
| FPCR_OVFD
3476 | FPCR_UNFD
| FPCR_INED
| FPCR_DNOD
));
3478 env
->lock_addr
= -1;
3481 qemu_init_vcpu(env
);
3485 void restore_state_to_opc(CPUState
*env
, TranslationBlock
*tb
, int pc_pos
)
3487 env
->pc
= gen_opc_pc
[pc_pos
];