/*
 *  UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif
/* These instructions trap after executing, so defer them until after the
   conditional executions state has been updated.  */
#define DISAS_SYSCALL 5

static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
#include "exec/gen-icount.h"
static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}
/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)
#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
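/*
 * Example: with insn == 0x0819c005 the field macros above extract
 *     UCOP_REG_M = (0x0819c005 >>  0) & 0x1f = 5
 *     UCOP_REG_N = (0x0819c005 >> 19) & 0x1f = 3
 *     UCOP_REG_D = (0x0819c005 >> 14) & 0x1f = 7
 * i.e. each macro shifts its field down to bit 0 and masks off the
 * rest, while UCOP_SET(i) tests a single bit of the instruction word.
 */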
#define ILLEGAL         cpu_abort(CPU(cpu),                             \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)
#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 0
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
                                      TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */
    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */
    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */
    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */
};
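/*
 * Example: MOV is UCOP_OPCODES == 0x0d, so with the S bit set
 * table_logic_cc[0x0d] == 1 and do_datap() updates only N and Z via
 * gen_logic_CC(); arithmetic entries such as add (0x04) are 0 and
 * update the flags through the *_cc helpers instead.
 */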
/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}
/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}
static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(25)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
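/*
 * Example: the 32 single-precision views are overlaid on 16 doubles,
 * so ucf64_reg_offset(4) addresses the lower word of ucf64.regs[2]
 * while ucf64_reg_offset(5) addresses the upper word of the same
 * double.
 */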
#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    int offset;
    TCGv tmp, addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (UCOP_SET_W) {
        if (!UCOP_SET_P) {
            offset = UCOP_IMM10 << 2;
            if (!UCOP_SET_U) {
                offset = -offset;
            }
            if (offset != 0) {
                tcg_gen_addi_i32(addr, addr, offset);
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int i;
    int j, n, freg;
    TCGv tmp, addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
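/*
 * When source and destination share a page, tcg_gen_exit_tb() is
 * passed the TB pointer with the jump-slot index n in its low bits,
 * which lets the execution loop patch this TB to chain directly to
 * its successor; exiting with 0 skips chaining, so the next TB is
 * found by a hash-table lookup instead.
 */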
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* 32 bit mult accumulate */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp, tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
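/*
 * Note on mmu_idx in do_ldst_ir(): a nonzero index selects the
 * user-mode MMU view, either because translated code itself runs in
 * user mode or because the !P && W encoding requests a user-privilege
 * access from kernel mode (by analogy with ARM's LDRT/STRT forms).
 */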
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, mmu_idx);
            if (reg == 31) {
                gen_bx(s, tmp);
            } else if (user) {
                tmp2 = tcg_const_i32(reg);
                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                tcg_temp_free_i32(tmp2);
                dead_tmp(tmp);
            } else if (reg == UCOP_REG_N) {
                loaded_var = tmp;
                loaded_base = 1;
            } else {
                store_reg(s, reg, tmp);
            }
        } else { /* store */
            if (reg == 31) {
                /* special case: r31 = PC + 4 */
                val = (long)s->pc;
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, val);
            } else if (user) {
                tmp = new_tmp();
                tmp2 = tcg_const_i32(reg);
                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, reg);
            }
            gen_st32(tmp, addr, mmu_idx);
        }
        j++;
        /* no need to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }

    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
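/*
 * Worked example: s->pc already points 4 bytes past the branch when
 * do_branch() runs, and the shift pair above sign-extends the low 24
 * bits of insn. A displacement field of 0xfffffe (-2) therefore
 * branches to (insn address + 4) + (-2 << 2), i.e. 4 bytes before the
 * branch instruction itself.
 */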
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(s->pc);
    }

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S/L bit (bit 24)
     * D    : bit 8
     * E    : bit 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;
    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;
    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
        TranslationBlock *tb, bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUUniCore32State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, true);
}
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif
void uc32_cpu_dump_state(CPUState *cs, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
}
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
}