/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
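
/* Note: the load-locked helpers above record the locked address in
   cpu_lock; the matching store-conditional helpers (gen_qemu_stl_c and
   gen_qemu_stq_c below) compare the store address against it to decide
   whether the conditional store succeeds.  */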
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
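
/* Store-conditional result convention: t0 is set to 1 when cpu_lock
   matched the store address and the store was performed, 0 otherwise;
   cpu_lock is reset to -1 in both cases so that a second conditional
   store without an intervening load-locked fails.  */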
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}
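
/* The MASK argument selects the BLBC/BLBS form: when non-zero, only bit 0
   of RA is tested against zero instead of the full register.  */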
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;
    }
}
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
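
/* CMOVxx is emitted as a conditional branch around a register move: the
   inverted condition skips the move, so no dedicated TCG conditional-move
   operation is needed.  */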
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(tcg_invert_cond(cond), va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
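
/* These qualifier bits are decoded from the 11-bit function field (fn11)
   of floating-point operate instructions.  gen_qual_roundmode and
   gen_qual_flushzero below update fp_status lazily, only when the
   requested qualifier differs from what the current TB has already
   established (tb_rm / tb_ftz).  */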
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
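
/* The /V variant above raises an integer-overflow arithmetic trap when
   the source does not fit in 32 bits, i.e. when its sign-extended low
   half differs from the full 64-bit value; otherwise it falls through to
   the plain CVTQL conversion.  */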
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11) \
{                                                              \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);     \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)   \
{                                                               \
    TCGv va, vb;                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    va = (ra == 31 ? tcg_const_i64(0) : cpu_fir[ra]);           \
    vb = (rb == 31 ? tcg_const_i64(0) : cpu_fir[rb]);           \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
}

/* ??? Ought to expand these inline; simple masking operations.  */
/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                      \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                      \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);        \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
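
/* Example of the expansion above: a byte mask of 0x0f keeps bytes 0-3,
   i.e. zapnot_mask(0x0f) == 0x00000000ffffffffull, which gen_zapnoti
   emits as a plain 32-bit zero-extension rather than a full AND.  */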
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;

    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
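
    /* Note on the displacement decodes above: disp21 and disp12 are
       sign-extended by shifting the raw field up to the sign bit of a
       32-bit value and arithmetically shifting it back down, e.g. a raw
       21-bit field of 0x1FFFFF decodes to -1.  The branch cases below
       then form the target as ctx->pc + (int64_t)(disp << 2).  */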
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
        } else if (palcode == 0x9F) {
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
        }
#endif
        /* Invalid PAL call */

        if (likely(ra != 31)) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
                tcg_gen_movi_i64(cpu_ir[ra], disp16);

        if (likely(ra != 31)) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);

        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        if (likely(rc != 31)) {
                tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                tcg_gen_addi_i64(tmp, tmp, lit);
                tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                tcg_gen_subi_i64(tmp, tmp, lit);
                tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        gen_cmpbge(ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                tcg_gen_addi_i64(tmp, tmp, lit);
                tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                tcg_gen_subi_i64(tmp, tmp, lit);
                tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
                tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                TCGv tmp = tcg_temp_new();
                tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], -lit);
                tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);

        gen_addlv(ra, rb, rc, islit, lit);

        gen_sublv(ra, rb, rc, islit, lit);

        gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);

        gen_addqv(ra, rb, rc, islit, lit);

        gen_subqv(ra, rb, rc, islit, lit);

        gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], 0);
                tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

        if (likely(rc != 31)) {
                tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);

        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);

        if (likely(rc != 31)) {
                tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);

        if (likely(rc != 31)) {
                tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);

        if (likely(rc != 31)) {
                tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], lit);
                tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);

        gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);

        tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
        gen_msk_l(ra, rb, rc, islit, lit, 0x01);

        gen_ext_l(ra, rb, rc, islit, lit, 0x01);

        gen_ins_l(ra, rb, rc, islit, lit, 0x01);

        gen_msk_l(ra, rb, rc, islit, lit, 0x03);

        gen_ext_l(ra, rb, rc, islit, lit, 0x03);

        gen_ins_l(ra, rb, rc, islit, lit, 0x03);

        gen_msk_l(ra, rb, rc, islit, lit, 0x0f);

        gen_ext_l(ra, rb, rc, islit, lit, 0x0f);

        gen_ins_l(ra, rb, rc, islit, lit, 0x0f);

        gen_zap(ra, rb, rc, islit, lit);

        gen_zapnot(ra, rb, rc, islit, lit);

        gen_msk_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
                tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                TCGv shift = tcg_temp_new();
                tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                tcg_temp_free(shift);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_ext_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
                tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                TCGv shift = tcg_temp_new();
                tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                tcg_temp_free(shift);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_ins_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
                tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                TCGv shift = tcg_temp_new();
                tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                tcg_temp_free(shift);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_msk_h(ra, rb, rc, islit, lit, 0x03);

        gen_ins_h(ra, rb, rc, islit, lit, 0x03);

        gen_ext_h(ra, rb, rc, islit, lit, 0x03);

        gen_msk_h(ra, rb, rc, islit, lit, 0x0f);

        gen_ins_h(ra, rb, rc, islit, lit, 0x0f);

        gen_ext_h(ra, rb, rc, islit, lit, 0x0f);

        gen_msk_h(ra, rb, rc, islit, lit, 0xff);

        gen_ins_h(ra, rb, rc, islit, lit, 0xff);

        gen_ext_h(ra, rb, rc, islit, lit, 0xff);
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], 0);
                tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], 0);
                tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

        gen_umulh(ra, rb, rc, islit, lit);

        gen_mullv(ra, rb, rc, islit, lit);

        gen_mulqv(ra, rb, rc, islit, lit);
        switch (fpfn) { /* fn11 & 0x3F */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                    tcg_gen_movi_i64(cpu_fir[rc], 0);

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                    tcg_gen_movi_i64(cpu_fir[rc], 0);

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                    tcg_gen_movi_i64(cpu_fir[rc], 0);

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;

            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
            gen_faddf(ra, rb, rc);
            gen_fsubf(ra, rb, rc);
            gen_fmulf(ra, rb, rc);
            gen_fdivf(ra, rb, rc);
            gen_faddg(ra, rb, rc);
            gen_fsubg(ra, rb, rc);
            gen_fmulg(ra, rb, rc);
            gen_fdivg(ra, rb, rc);
            gen_fcmpgeq(ra, rb, rc);
            gen_fcmpglt(ra, rb, rc);
            gen_fcmpgle(ra, rb, rc);

        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
            gen_fadds(ctx, ra, rb, rc, fn11);
            gen_fsubs(ctx, ra, rb, rc, fn11);
            gen_fmuls(ctx, ra, rb, rc, fn11);
            gen_fdivs(ctx, ra, rb, rc, fn11);
            gen_faddt(ctx, ra, rb, rc, fn11);
            gen_fsubt(ctx, ra, rb, rc, fn11);
            gen_fmult(ctx, ra, rb, rc, fn11);
            gen_fdivt(ctx, ra, rb, rc, fn11);
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            gen_fcvttq(ctx, rb, rc, fn11);
            gen_fcvtqs(ctx, rb, rc, fn11);
            gen_fcvtqt(ctx, rb, rc, fn11);
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_fir[rc], 0);
                tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);

        gen_fcpys(ra, rb, rc);

        gen_fcpysn(ra, rb, rc);

        gen_fcpyse(ra, rb, rc);

        if (likely(ra != 31))
            gen_helper_store_fpcr(cpu_fir[ra]);
            TCGv tmp = tcg_const_i64(0);
            gen_helper_store_fpcr(tmp);

        if (likely(ra != 31))
            gen_helper_load_fpcr(cpu_fir[ra]);

        gen_fcmov(TCG_COND_EQ, ra, rb, rc);

        gen_fcmov(TCG_COND_NE, ra, rb, rc);

        gen_fcmov(TCG_COND_LT, ra, rb, rc);

        gen_fcmov(TCG_COND_GE, ra, rb, rc);

        gen_fcmov(TCG_COND_LE, ra, rb, rc);

        gen_fcmov(TCG_COND_GT, ra, rb, rc);

        /* ??? I'm pretty sure there's nothing that /sv needs to do that
           /v doesn't do.  The only thing I can think is that /sv is a
           valid instruction merely for completeness in the ISA.  */
        gen_fcvtql_v(ctx, rb, rc);
        switch ((uint16_t)disp16) {
            /* No-op.  Just exit from the current tb */

            /* No-op.  Just exit from the current tb */

            gen_helper_load_pcc(cpu_ir[ra]);

            gen_helper_rc(cpu_ir[ra]);

            gen_helper_rs(cpu_ir[ra]);

        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
        }
#endif

        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);

                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);

                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);

                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);

                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);

                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);

                /* Invalid */

                /* Invalid */

                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);

                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);

                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);

                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);

                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();

                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();

                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa) */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();

                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa) */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
            }
            tcg_temp_free(addr);
        }
#endif
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_perr(ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);

        if (!(ctx->amask & AMASK_CIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_unpkbw (rb, rc);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;
        gen_unpkbl (rb, rc);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        if (real_islit || ra != 31)
            goto invalid_opc;

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minsb8 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minsw4 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minub8 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_minuw4 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxub8 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxuw4 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxsb8 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_MVI))
            goto invalid_opc;
        gen_maxsw4 (ra, rb, rc, islit, lit);

        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        if (likely(rc != 31)) {
                tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        if (!(ctx->amask & AMASK_FIX))
            goto invalid_opc;
        {
            TCGv_i32 tmp1 = tcg_temp_new_i32();
                gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_s_to_memory(tmp1, tmp2);
                tcg_temp_free(tmp2);
            tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
            tcg_temp_free_i32(tmp1);
        }
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            gen_helper_mtpr(tmp1, cpu_ir[ra]);
            TCGv tmp2 = tcg_const_i64(0);
            gen_helper_mtpr(tmp1, tmp2);
            tcg_temp_free(tmp2);
            tcg_temp_free(tmp1);
        }
#endif

        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        gen_helper_hw_rei();

        tmp = tcg_temp_new();
        tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
        tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
        gen_helper_hw_ret(tmp);
#endif

        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);

            val = tcg_temp_new();
            tcg_gen_movi_i64(val, 0);

            switch ((insn >> 12) & 0xF) {
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);

                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);

                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);

                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);

                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);

                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);

                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();

                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
            }
            tcg_temp_free(addr);
        }
#endif
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);

        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);

        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);

        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);

        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        break;
    case 0x31: /* FBEQ */
        gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;

        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        break;
    case 0x35: /* FBNE */
        gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;

        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);

        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);

        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);

        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);

        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);

        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);

        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);

        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* if we reach a page boundary or are single stepping, stop
           generation */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;
    }

    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();

    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
    }
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
* cpu_alpha_init (const char *cpu_model
)
3104 int implver
, amask
, i
, max
;
3106 env
= qemu_mallocz(sizeof(CPUAlphaState
));
3108 alpha_translate_init();
3111 /* Default to ev67; no reason not to emulate insns by default. */
3112 implver
= IMPLVER_21264
;
3113 amask
= (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
| AMASK_MVI
3114 | AMASK_TRAP
| AMASK_PREFETCH
);
3116 max
= ARRAY_SIZE(cpu_defs
);
3117 for (i
= 0; i
< max
; i
++) {
3118 if (strcmp (cpu_model
, cpu_defs
[i
].name
) == 0) {
3119 implver
= cpu_defs
[i
].implver
;
3120 amask
= cpu_defs
[i
].amask
;
3124 env
->implver
= implver
;
3128 #if defined (CONFIG_USER_ONLY)
3130 cpu_alpha_store_fpcr(env
, (FPCR_INVD
| FPCR_DZED
| FPCR_OVFD
3131 | FPCR_UNFD
| FPCR_INED
| FPCR_DNOD
));
3136 /* Initialize IPR */
3137 #if defined (CONFIG_USER_ONLY)
3138 env
->ipr
[IPR_EXC_ADDR
] = 0;
3139 env
->ipr
[IPR_EXC_SUM
] = 0;
3140 env
->ipr
[IPR_EXC_MASK
] = 0;
3144 hwpcb
= env
->ipr
[IPR_PCBB
];
3145 env
->ipr
[IPR_ASN
] = 0;
3146 env
->ipr
[IPR_ASTEN
] = 0;
3147 env
->ipr
[IPR_ASTSR
] = 0;
3148 env
->ipr
[IPR_DATFX
] = 0;
3150 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3151 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3152 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3153 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3154 env
->ipr
[IPR_FEN
] = 0;
3155 env
->ipr
[IPR_IPL
] = 31;
3156 env
->ipr
[IPR_MCES
] = 0;
3157 env
->ipr
[IPR_PERFMON
] = 0; /* Implementation specific */
3158 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3159 env
->ipr
[IPR_SISR
] = 0;
3160 env
->ipr
[IPR_VIRBND
] = -1ULL;
3164 qemu_init_vcpu(env
);
3168 void gen_pc_load(CPUState
*env
, TranslationBlock
*tb
,
3169 unsigned long searched_pc
, int pc_pos
, void *puc
)
3171 env
->pc
= gen_opc_pc
[pc_pos
];