/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;

        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}
static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }

    gen_bcond_pcload(ctx, disp, lab_true);
}
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0. */
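/* (For reference: -0.0 is the IEEE sign bit alone, i.e. 1ull << 63; that is
   the "mzero" constant used below, and it is why each TCGCond needs its own
   handling when comparing a floating-point register against zero.) */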
static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        /* ??? Assume that the temporary is reclaimed at the branch. */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination. */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru. */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison. */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch. */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
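
/* Illustrative note: the low bits of fn11 name the operation itself (the
   code below switches on fpfn = fn11 & 0x3F), while the bits above encode
   the qualifiers; e.g. a /SUI qualifier combination sets
   QUAL_S | QUAL_U | QUAL_I = 0x700 in fn11. */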
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
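
/* In other words, unless the /I qualifier was present the inexact flag is
   masked off before raising, so e.g. a plain ADDS that merely rounds its
   result does not trap, while ADDS/SUI would.  (Illustrative example.) */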
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (rb != 31)                                       \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    else {                                              \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored. */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11) \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output. */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that. */
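    /* (For reference, with the QUAL_* values defined above the combinations
       special-cased below are QUAL_RM_C alone = 0x000, QUAL_V | QUAL_RM_C =
       0x100, QUAL_S | QUAL_V | QUAL_RM_C = 0x500 and
       QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C = 0x700.) */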
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31)
        vb = tcg_const_i64(0);
    else
        vb = cpu_fir[rb];

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                     int rb, int rc, int fn11) \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)   \
{                                                               \
    TCGv va, vb;                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    va = (ra == 31) ? tcg_const_i64(0) : cpu_fir[ra];           \
    vb = (rb == 31) ? tcg_const_i64(0) : cpu_fir[rb];           \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
}

/* ??? Ought to expand these inline; simple masking operations. */

/* ??? VAX instruction qualifiers ignored. */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                     int ra, int rb, int rc, int fn11) \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
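
/* Worked example: zapnot_mask(0x0f) keeps bytes 0..3 and therefore evaluates
   to 0x00000000ffffffffull, which is why the 0x0f case in gen_zapnoti below
   can be emitted as a plain 32-bit zero-extension. */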
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above. */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63. */
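            /* Worked example: for (B & 7) == 3 the required shift is
               64 - 24 = 40; the code computes ~(3 * 8) & 63 = 39, shifts
               right by 39 and then by one more.  For (B & 7) == 0 this
               gives 63 + 1 = 64, leaving the zero we need. */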
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
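    /* (Orientation note, matching the masks used below: bits <31:26> hold
       the opcode, <25:21> Ra, <20:16> Rb, <4:0> Rc, <15:13> SBZ, bit <12>
       the literal flag and <20:13> the 8-bit literal when that flag is set;
       <15:5>, <11:5> and <6:5> are the fn11/fn7/fn2 function fields, and
       the memory/branch formats reuse the low 16 or 21 bits as signed
       displacements.) */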
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
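
    /* The shift-left/arithmetic-shift-right pairs above are just manual sign
       extension of the 21- and 12-bit displacement fields; e.g. a disp21
       field of 0x100000 (sign bit set) comes back as 0xFFF00000, i.e.
       -1048576, after the << 11 / >> 11 round trip on an int32_t. */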
1230 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1231 opc
, ra
, rb
, rc
, disp16
);
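        /* (Note: the unprivileged PALcode entries 0x9E and 0x9F handled in
           the user-only block below are rduniq and wruniq, which read and
           write the per-thread "unique" value emulated in cpu_uniq.) */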
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
        } else if (palcode == 0x9F) {
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
        }
#endif
        /* Invalid PAL call */
1287 if (likely(ra
!= 31)) {
1289 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
1291 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
1296 if (likely(ra
!= 31)) {
1298 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
1300 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
1305 if (!(ctx
->amask
& AMASK_BWX
))
1307 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1311 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1315 if (!(ctx
->amask
& AMASK_BWX
))
1317 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1321 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0, 0);
1325 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0, 0);
1329 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1, 0);
1335 if (likely(rc
!= 31)) {
1338 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1339 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1341 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1342 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1346 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1348 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1354 if (likely(rc
!= 31)) {
1356 TCGv tmp
= tcg_temp_new();
1357 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1359 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1361 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1362 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1366 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1368 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1374 if (likely(rc
!= 31)) {
1377 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1379 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1380 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1383 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1385 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1386 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1392 if (likely(rc
!= 31)) {
1394 TCGv tmp
= tcg_temp_new();
1395 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1397 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1399 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1400 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1404 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1406 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1407 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1414 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1418 if (likely(rc
!= 31)) {
1420 TCGv tmp
= tcg_temp_new();
1421 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1423 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1425 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1426 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1430 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1432 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1438 if (likely(rc
!= 31)) {
1440 TCGv tmp
= tcg_temp_new();
1441 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1443 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1445 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1446 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1450 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1452 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1453 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1460 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
1464 if (likely(rc
!= 31)) {
1467 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1469 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1472 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1474 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1480 if (likely(rc
!= 31)) {
1482 TCGv tmp
= tcg_temp_new();
1483 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1485 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1487 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1491 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1493 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1499 if (likely(rc
!= 31)) {
1502 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1504 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1507 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1509 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1515 if (likely(rc
!= 31)) {
1517 TCGv tmp
= tcg_temp_new();
1518 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1520 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1522 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1526 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1528 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1534 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
1538 if (likely(rc
!= 31)) {
1540 TCGv tmp
= tcg_temp_new();
1541 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1543 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1545 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1549 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1551 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1557 if (likely(rc
!= 31)) {
1559 TCGv tmp
= tcg_temp_new();
1560 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1562 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
1564 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
1568 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1570 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1576 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
1580 gen_addlv(ra
, rb
, rc
, islit
, lit
);
1584 gen_sublv(ra
, rb
, rc
, islit
, lit
);
1588 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
1592 gen_addqv(ra
, rb
, rc
, islit
, lit
);
1596 gen_subqv(ra
, rb
, rc
, islit
, lit
);
1600 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
1610 if (likely(rc
!= 31)) {
1612 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1614 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1616 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1621 if (likely(rc
!= 31)) {
1624 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1626 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1628 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1633 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
1637 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
1641 if (likely(rc
!= 31)) {
1644 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1646 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1649 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1651 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1657 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
1661 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
1665 if (likely(rc
!= 31)) {
1668 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1670 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1673 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
1675 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1681 if (likely(rc
!= 31)) {
1684 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1686 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1689 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1691 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1697 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
1701 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
1705 if (likely(rc
!= 31)) {
1708 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1710 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1713 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
1715 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1721 if (likely(rc
!= 31)) {
1723 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1725 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1726 switch (ctx
->env
->implver
) {
1728 /* EV4, EV45, LCA, LCA45 & EV5 */
1733 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rc
],
1734 ~(uint64_t)ctx
->amask
);
1741 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
1745 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
1750 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->env
->implver
);
1760 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1764 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1768 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
1772 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1776 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1780 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
1784 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1788 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1792 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
1796 gen_zap(ra
, rb
, rc
, islit
, lit
);
1800 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
1804 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1808 if (likely(rc
!= 31)) {
1811 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1813 TCGv shift
= tcg_temp_new();
1814 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1815 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1816 tcg_temp_free(shift
);
1819 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1824 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1828 if (likely(rc
!= 31)) {
1831 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1833 TCGv shift
= tcg_temp_new();
1834 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1835 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1836 tcg_temp_free(shift
);
1839 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1844 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
1848 if (likely(rc
!= 31)) {
1851 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
1853 TCGv shift
= tcg_temp_new();
1854 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
1855 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
1856 tcg_temp_free(shift
);
1859 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1864 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1868 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1872 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
1876 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1880 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1884 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
1888 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1892 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1896 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
1906 if (likely(rc
!= 31)) {
1908 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1911 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1913 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1914 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1920 if (likely(rc
!= 31)) {
1922 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1924 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1926 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1931 gen_umulh(ra
, rb
, rc
, islit
, lit
);
1935 gen_mullv(ra
, rb
, rc
, islit
, lit
);
1939 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
1946 switch (fpfn
) { /* fn11 & 0x3F */
1949 if (!(ctx
->amask
& AMASK_FIX
))
1951 if (likely(rc
!= 31)) {
1953 TCGv_i32 tmp
= tcg_temp_new_i32();
1954 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
1955 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
1956 tcg_temp_free_i32(tmp
);
1958 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1963 if (!(ctx
->amask
& AMASK_FIX
))
1969 if (!(ctx
->amask
& AMASK_FIX
))
1971 gen_fsqrts(ctx
, rb
, rc
, fn11
);
1975 if (!(ctx
->amask
& AMASK_FIX
))
1977 if (likely(rc
!= 31)) {
1979 TCGv_i32 tmp
= tcg_temp_new_i32();
1980 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
1981 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
1982 tcg_temp_free_i32(tmp
);
1984 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
1989 if (!(ctx
->amask
& AMASK_FIX
))
1991 if (likely(rc
!= 31)) {
1993 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
1995 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2000 if (!(ctx
->amask
& AMASK_FIX
))
2006 if (!(ctx
->amask
& AMASK_FIX
))
2008 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
2015 /* VAX floating point */
2016 /* XXX: rounding mode and trap are ignored (!) */
2017 switch (fpfn
) { /* fn11 & 0x3F */
2020 gen_faddf(ra
, rb
, rc
);
2024 gen_fsubf(ra
, rb
, rc
);
2028 gen_fmulf(ra
, rb
, rc
);
2032 gen_fdivf(ra
, rb
, rc
);
2044 gen_faddg(ra
, rb
, rc
);
2048 gen_fsubg(ra
, rb
, rc
);
2052 gen_fmulg(ra
, rb
, rc
);
2056 gen_fdivg(ra
, rb
, rc
);
2060 gen_fcmpgeq(ra
, rb
, rc
);
2064 gen_fcmpglt(ra
, rb
, rc
);
2068 gen_fcmpgle(ra
, rb
, rc
);
2099 /* IEEE floating-point */
2100 switch (fpfn
) { /* fn11 & 0x3F */
2103 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2107 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2111 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2115 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2119 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2123 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2127 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2131 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2135 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2139 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2143 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2147 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2150 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2152 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2155 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2160 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2164 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2168 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2181 if (likely(rc
!= 31)) {
2185 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2187 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
2190 gen_fcpys(ra
, rb
, rc
);
2196 gen_fcpysn(ra
, rb
, rc
);
2200 gen_fcpyse(ra
, rb
, rc
);
2204 if (likely(ra
!= 31))
2205 gen_helper_store_fpcr(cpu_fir
[ra
]);
2207 TCGv tmp
= tcg_const_i64(0);
2208 gen_helper_store_fpcr(tmp
);
2214 if (likely(ra
!= 31))
2215 gen_helper_load_fpcr(cpu_fir
[ra
]);
2219 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
2223 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
2227 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
2231 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
2235 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
2239 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
2247 gen_fcvtql_v(rb
, rc
);
2251 gen_fcvtql_sv(rb
, rc
);
2258 switch ((uint16_t)disp16
) {
2261 /* No-op. Just exit from the current tb */
2266 /* No-op. Just exit from the current tb */
2288 gen_helper_load_pcc(cpu_ir
[ra
]);
2293 gen_helper_rc(cpu_ir
[ra
]);
2301 gen_helper_rs(cpu_ir
[ra
]);
2312 /* HW_MFPR (PALcode) */
2313 #if defined (CONFIG_USER_ONLY)
2319 TCGv tmp
= tcg_const_i32(insn
& 0xFF);
2320 gen_helper_mfpr(cpu_ir
[ra
], tmp
, cpu_ir
[ra
]);
2327 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
2329 tcg_gen_movi_i64(cpu_pc
, 0);
2331 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2332 /* Those four jumps only differ by the branch prediction hint */
2350 /* HW_LD (PALcode) */
2351 #if defined (CONFIG_USER_ONLY)
2357 TCGv addr
= tcg_temp_new();
2359 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2361 tcg_gen_movi_i64(addr
, disp12
);
2362 switch ((insn
>> 12) & 0xF) {
2364 /* Longword physical access (hw_ldl/p) */
2365 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2368 /* Quadword physical access (hw_ldq/p) */
2369 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2372 /* Longword physical access with lock (hw_ldl_l/p) */
2373 gen_helper_ldl_l_raw(cpu_ir
[ra
], addr
);
2376 /* Quadword physical access with lock (hw_ldq_l/p) */
2377 gen_helper_ldq_l_raw(cpu_ir
[ra
], addr
);
2380 /* Longword virtual PTE fetch (hw_ldl/v) */
2381 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, 0);
2384 /* Quadword virtual PTE fetch (hw_ldq/v) */
2385 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, 0);
2388 /* Incpu_ir[ra]id */
2391 /* Incpu_ir[ra]id */
2394 /* Longword virtual access (hw_ldl) */
2395 gen_helper_st_virt_to_phys(addr
, addr
);
2396 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2399 /* Quadword virtual access (hw_ldq) */
2400 gen_helper_st_virt_to_phys(addr
, addr
);
2401 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2404 /* Longword virtual access with protection check (hw_ldl/w) */
2405 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, 0);
2408 /* Quadword virtual access with protection check (hw_ldq/w) */
2409 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, 0);
2412 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2413 gen_helper_set_alt_mode();
2414 gen_helper_st_virt_to_phys(addr
, addr
);
2415 gen_helper_ldl_raw(cpu_ir
[ra
], addr
);
2416 gen_helper_restore_mode();
2419 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2420 gen_helper_set_alt_mode();
2421 gen_helper_st_virt_to_phys(addr
, addr
);
2422 gen_helper_ldq_raw(cpu_ir
[ra
], addr
);
2423 gen_helper_restore_mode();
2426 /* Longword virtual access with alternate access mode and
2427 * protection checks (hw_ldl/wa)
2429 gen_helper_set_alt_mode();
2430 gen_helper_ldl_data(cpu_ir
[ra
], addr
);
2431 gen_helper_restore_mode();
2434 /* Quadword virtual access with alternate access mode and
2435 * protection checks (hw_ldq/wa)
2437 gen_helper_set_alt_mode();
2438 gen_helper_ldq_data(cpu_ir
[ra
], addr
);
2439 gen_helper_restore_mode();
2442 tcg_temp_free(addr
);
2450 if (!(ctx
->amask
& AMASK_BWX
))
2452 if (likely(rc
!= 31)) {
2454 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2456 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2461 if (!(ctx
->amask
& AMASK_BWX
))
2463 if (likely(rc
!= 31)) {
2465 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
2467 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2472 if (!(ctx
->amask
& AMASK_CIX
))
2474 if (likely(rc
!= 31)) {
2476 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
2478 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
2483 if (!(ctx
->amask
& AMASK_MVI
))
2485 gen_perr(ra
, rb
, rc
, islit
, lit
);
2489 if (!(ctx
->amask
& AMASK_CIX
))
2491 if (likely(rc
!= 31)) {
2493 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
2495 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
2500 if (!(ctx
->amask
& AMASK_CIX
))
2502 if (likely(rc
!= 31)) {
2504 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
2506 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
2511 if (!(ctx
->amask
& AMASK_MVI
))
2513 if (real_islit
|| ra
!= 31)
2515 gen_unpkbw (rb
, rc
);
2519 if (!(ctx
->amask
& AMASK_MVI
))
2521 if (real_islit
|| ra
!= 31)
2523 gen_unpkbl (rb
, rc
);
2527 if (!(ctx
->amask
& AMASK_MVI
))
2529 if (real_islit
|| ra
!= 31)
2535 if (!(ctx
->amask
& AMASK_MVI
))
2537 if (real_islit
|| ra
!= 31)
2543 if (!(ctx
->amask
& AMASK_MVI
))
2545 gen_minsb8 (ra
, rb
, rc
, islit
, lit
);
2549 if (!(ctx
->amask
& AMASK_MVI
))
2551 gen_minsw4 (ra
, rb
, rc
, islit
, lit
);
2555 if (!(ctx
->amask
& AMASK_MVI
))
2557 gen_minub8 (ra
, rb
, rc
, islit
, lit
);
2561 if (!(ctx
->amask
& AMASK_MVI
))
2563 gen_minuw4 (ra
, rb
, rc
, islit
, lit
);
2567 if (!(ctx
->amask
& AMASK_MVI
))
2569 gen_maxub8 (ra
, rb
, rc
, islit
, lit
);
2573 if (!(ctx
->amask
& AMASK_MVI
))
2575 gen_maxuw4 (ra
, rb
, rc
, islit
, lit
);
2579 if (!(ctx
->amask
& AMASK_MVI
))
2581 gen_maxsb8 (ra
, rb
, rc
, islit
, lit
);
2585 if (!(ctx
->amask
& AMASK_MVI
))
2587 gen_maxsw4 (ra
, rb
, rc
, islit
, lit
);
2591 if (!(ctx
->amask
& AMASK_FIX
))
2593 if (likely(rc
!= 31)) {
2595 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
2597 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2602 if (!(ctx
->amask
& AMASK_FIX
))
2605 TCGv_i32 tmp1
= tcg_temp_new_i32();
2607 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
2609 TCGv tmp2
= tcg_const_i64(0);
2610 gen_helper_s_to_memory(tmp1
, tmp2
);
2611 tcg_temp_free(tmp2
);
2613 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
2614 tcg_temp_free_i32(tmp1
);
2622 /* HW_MTPR (PALcode) */
2623 #if defined (CONFIG_USER_ONLY)
2629 TCGv tmp1
= tcg_const_i32(insn
& 0xFF);
2631 gen_helper_mtpr(tmp1
, cpu_ir
[ra
]);
2633 TCGv tmp2
= tcg_const_i64(0);
2634 gen_helper_mtpr(tmp1
, tmp2
);
2635 tcg_temp_free(tmp2
);
2637 tcg_temp_free(tmp1
);
2643 /* HW_REI (PALcode) */
2644 #if defined (CONFIG_USER_ONLY)
2651 gen_helper_hw_rei();
2656 tmp
= tcg_temp_new();
2657 tcg_gen_addi_i64(tmp
, cpu_ir
[rb
], (((int64_t)insn
<< 51) >> 51));
2659 tmp
= tcg_const_i64(((int64_t)insn
<< 51) >> 51);
2660 gen_helper_hw_ret(tmp
);
2667 /* HW_ST (PALcode) */
2668 #if defined (CONFIG_USER_ONLY)
2675 addr
= tcg_temp_new();
2677 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2679 tcg_gen_movi_i64(addr
, disp12
);
2683 val
= tcg_temp_new();
2684 tcg_gen_movi_i64(val
, 0);
2686 switch ((insn
>> 12) & 0xF) {
2688 /* Longword physical access */
2689 gen_helper_stl_raw(val
, addr
);
2692 /* Quadword physical access */
2693 gen_helper_stq_raw(val
, addr
);
2696 /* Longword physical access with lock */
2697 gen_helper_stl_c_raw(val
, val
, addr
);
2700 /* Quadword physical access with lock */
2701 gen_helper_stq_c_raw(val
, val
, addr
);
2704 /* Longword virtual access */
2705 gen_helper_st_virt_to_phys(addr
, addr
);
2706 gen_helper_stl_raw(val
, addr
);
2709 /* Quadword virtual access */
2710 gen_helper_st_virt_to_phys(addr
, addr
);
2711 gen_helper_stq_raw(val
, addr
);
2732 /* Longword virtual access with alternate access mode */
2733 gen_helper_set_alt_mode();
2734 gen_helper_st_virt_to_phys(addr
, addr
);
2735 gen_helper_stl_raw(val
, addr
);
2736 gen_helper_restore_mode();
2739 /* Quadword virtual access with alternate access mode */
2740 gen_helper_set_alt_mode();
2741 gen_helper_st_virt_to_phys(addr
, addr
);
2742 gen_helper_stl_raw(val
, addr
);
2743 gen_helper_restore_mode();
2754 tcg_temp_free(addr
);
2760 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2764 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2768 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2772 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2776 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0, 0);
2780 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0, 0);
2784 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0, 0);
2788 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0, 0);
2792 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2796 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2800 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2804 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2808 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0, 0);
2812 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0, 0);
2816 gen_store_mem(ctx
, &gen_qemu_stl_c
, ra
, rb
, disp16
, 0, 0, 1);
2820 gen_store_mem(ctx
, &gen_qemu_stq_c
, ra
, rb
, disp16
, 0, 0, 1);
2825 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2826 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
+ (int64_t)(disp21
<< 2));
2829 case 0x31: /* FBEQ */
2830 gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2833 case 0x32: /* FBLT */
2834 gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2837 case 0x33: /* FBLE */
2838 gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2844 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2845 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
+ (int64_t)(disp21
<< 2));
2848 case 0x35: /* FBNE */
2849 gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2852 case 0x36: /* FBGE */
2853 gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2856 case 0x37: /* FBGT */
2857 gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2862 gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2867 gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2872 gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2877 gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2882 gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2887 gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2892 gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2897 gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2909 static inline void gen_intermediate_code_internal(CPUState
*env
,
2910 TranslationBlock
*tb
,
2913 DisasContext ctx
, *ctxp
= &ctx
;
2914 target_ulong pc_start
;
2916 uint16_t *gen_opc_end
;
2924 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2926 ctx
.amask
= env
->amask
;
2928 #if defined (CONFIG_USER_ONLY)
2931 ctx
.mem_idx
= ((env
->ps
>> 3) & 3);
2932 ctx
.pal_mode
= env
->ipr
[IPR_EXC_ADDR
] & 1;
2935 /* ??? Every TB begins with unset rounding mode, to be initialized on
2936 the first fp insn of the TB. Alternately we could define a proper
2937 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2938 to reset the FP_STATUS to that default at the end of any TB that
2939 changes the default. We could even (gasp) dynamiclly figure out
2940 what default would be most efficient given the running program. */
2942 /* Similarly for flush-to-zero. */
2946 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2948 max_insns
= CF_COUNT_MASK
;
2951 for (ret
= 0; ret
== 0;) {
2952 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
2953 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
2954 if (bp
->pc
== ctx
.pc
) {
2955 gen_excp(&ctx
, EXCP_DEBUG
, 0);
2961 j
= gen_opc_ptr
- gen_opc_buf
;
2965 gen_opc_instr_start
[lj
++] = 0;
2967 gen_opc_pc
[lj
] = ctx
.pc
;
2968 gen_opc_instr_start
[lj
] = 1;
2969 gen_opc_icount
[lj
] = num_insns
;
2971 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
2973 insn
= ldl_code(ctx
.pc
);
2976 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
2977 tcg_gen_debug_insn_start(ctx
.pc
);
2981 ret
= translate_one(ctxp
, insn
);
2984 /* if we reach a page boundary or are single stepping, stop
2987 if (env
->singlestep_enabled
) {
2988 gen_excp(&ctx
, EXCP_DEBUG
, 0);
2992 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
2995 if (gen_opc_ptr
>= gen_opc_end
)
2998 if (num_insns
>= max_insns
)
3005 if (ret
!= 1 && ret
!= 3) {
3006 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
3008 if (tb
->cflags
& CF_LAST_IO
)
3010 /* Generate the return instruction */
3012 gen_icount_end(tb
, num_insns
);
3013 *gen_opc_ptr
= INDEX_op_end
;
3015 j
= gen_opc_ptr
- gen_opc_buf
;
3018 gen_opc_instr_start
[lj
++] = 0;
3020 tb
->size
= ctx
.pc
- pc_start
;
3021 tb
->icount
= num_insns
;
3024 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3025 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3026 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 1);
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
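
/* (Background note: the AMASK bits advertise which optional instruction-set
   extensions a model implements: BWX the byte/word memory operations, FIX
   the floating-point extensions such as SQRT and FP/integer register moves,
   CIX the count instructions (CTLZ/CTTZ/CTPOP), and MVI the multimedia
   instructions.  translate_one checks ctx->amask before accepting the
   corresponding opcodes.) */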
3066 CPUAlphaState
* cpu_alpha_init (const char *cpu_model
)
3069 int implver
, amask
, i
, max
;
3071 env
= qemu_mallocz(sizeof(CPUAlphaState
));
3073 alpha_translate_init();
3076 /* Default to ev67; no reason not to emulate insns by default. */
3077 implver
= IMPLVER_21264
;
3078 amask
= (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
| AMASK_MVI
3079 | AMASK_TRAP
| AMASK_PREFETCH
);
3081 max
= ARRAY_SIZE(cpu_defs
);
3082 for (i
= 0; i
< max
; i
++) {
3083 if (strcmp (cpu_model
, cpu_defs
[i
].name
) == 0) {
3084 implver
= cpu_defs
[i
].implver
;
3085 amask
= cpu_defs
[i
].amask
;
3089 env
->implver
= implver
;
3093 #if defined (CONFIG_USER_ONLY)
3095 cpu_alpha_store_fpcr(env
, (FPCR_INVD
| FPCR_DZED
| FPCR_OVFD
3096 | FPCR_UNFD
| FPCR_INED
| FPCR_DNOD
));
3101 /* Initialize IPR */
3102 #if defined (CONFIG_USER_ONLY)
3103 env
->ipr
[IPR_EXC_ADDR
] = 0;
3104 env
->ipr
[IPR_EXC_SUM
] = 0;
3105 env
->ipr
[IPR_EXC_MASK
] = 0;
3109 hwpcb
= env
->ipr
[IPR_PCBB
];
3110 env
->ipr
[IPR_ASN
] = 0;
3111 env
->ipr
[IPR_ASTEN
] = 0;
3112 env
->ipr
[IPR_ASTSR
] = 0;
3113 env
->ipr
[IPR_DATFX
] = 0;
3115 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3116 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3117 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3118 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3119 env
->ipr
[IPR_FEN
] = 0;
3120 env
->ipr
[IPR_IPL
] = 31;
3121 env
->ipr
[IPR_MCES
] = 0;
3122 env
->ipr
[IPR_PERFMON
] = 0; /* Implementation specific */
3123 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3124 env
->ipr
[IPR_SISR
] = 0;
3125 env
->ipr
[IPR_VIRBND
] = -1ULL;
3129 qemu_init_vcpu(env
);
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}