/*
 * UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_SYSCALL DISAS_TARGET_3
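/*
 * These values are read back at the end of gen_intermediate_code(), where
 * dc->is_jmp selects how the translation block is terminated: DISAS_NEXT
 * chains to the next TB with gen_goto_tb(), DISAS_JUMP/DISAS_UPDATE exit
 * to the TB-lookup loop, DISAS_TB_JUMP emits nothing further, and
 * DISAS_SYSCALL raises the privilege exception.
 */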
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                        offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free_i32(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        /* normally we need not recompute, since we updated PC */
        uint32_t addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
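/*
 * Usage sketch (illustrative only, not part of the original decode tables):
 * the UCOP_* macros above expand against a local variable named "insn", so
 * they may only be used inside decoder functions that have the fetched
 * 32-bit word in scope, e.g.
 *
 *     uint32_t insn = cpu_ldl_code(env, s->pc);
 *     int rd = UCOP_REG_D;    // destination register field, bits [18:14]
 *     int rn = UCOP_REG_N;    // first operand register field, bits [23:19]
 *
 * The names "rd" and "rn" here are hypothetical helpers for illustration.
 */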
#define ILLEGAL cpu_abort(CPU(cpu),                             \
                "Illegal UniCore32 instruction %x at line %d!", \
                insn, __LINE__)
#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      nn: UCOP_REG_N (must be 0)
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      nn: UCOP_REG_N (must be 1)
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}

#define gen_set_CF(var) \
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
                                      TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and  */   1, /* xor  */   0, /* sub  */   0, /* rsb  */
    0, /* add  */   0, /* adc  */   0, /* sbc  */   0, /* rsc  */
    1, /* andl */   1, /* xorl */   0, /* cmp  */   0, /* cmn  */
    1, /* orr  */   1, /* mov  */   1, /* bic  */   1, /* mvn  */
};
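/*
 * The table above is indexed by UCOP_OPCODES.  A non-zero entry marks a
 * logical operation, i.e. one that, when the S bit is set, updates only
 * the N and Z flags through gen_logic_CC() rather than using one of the
 * flag-computing helpers.
 */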
/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shifted register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
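/*
 * As the offsets above suggest, the 32 UniCore-F64 single-precision views
 * are banked on top of the 16 double-precision registers in
 * CPUUniCore32State: single register n lives in double register n >> 1,
 * with odd-numbered singles in the upper word and even-numbered singles
 * in the lower word.
 */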
/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (UCOP_SET_W) {
        if (!UCOP_SET_P) {
            offset = UCOP_IMM10 << 2;
            if (!UCOP_SET_U) {
                offset = -offset;
            }
            if (offset != 0) {
                tcg_gen_addi_i32(addr, addr, offset);
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) { /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else { /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* compare and move condition to PSR */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(s->tb, n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
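/*
 * Direct block chaining via tcg_gen_goto_tb() is only safe when the branch
 * target lies on the same guest page as the current TB, which is what the
 * TARGET_PAGE_MASK comparison in use_goto_tb() checks; otherwise the
 * generated code falls back to tcg_gen_exit_tb(NULL, 0) and the next TB is
 * found through the normal lookup path.
 */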
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }
        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor.  */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    unsigned int val;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               the next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register operand */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        tmp = NULL;
    }

    switch (UCOP_OPCODES) {
    case 0x00: /* and */
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01: /* xor */
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02: /* sub */
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03: /* rsb */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04: /* add */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05: /* adc */
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06: /* sbc */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07: /* rsc */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08: /* andl */
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09: /* xorl */
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a: /* cmp */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b: /* cmn */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c: /* orr */
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d: /* mov */
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e: /* bic */
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f: /* mvn */
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit multiply */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit multiply */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* count leading zeros */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            /* count leading ones */
            tcg_gen_not_i32(tmp, tmp);
        }
        tcg_gen_clzi_i32(tmp, tmp, 32);
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    ILLEGAL;
}
/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }

    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }

    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }

    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    loaded_var = NULL;
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        }
        /* post increment: nothing to do */
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (!UCOP_SET(i)) {
            continue;
        }
        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, mmu_idx);
            if (reg == 31) {
                gen_bx(s, tmp);
            } else if (user) {
                tmp2 = tcg_const_i32(reg);
                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                tcg_temp_free_i32(tmp2);
                dead_tmp(tmp);
            } else if (reg == UCOP_REG_N) {
                loaded_var = tmp;
                loaded_base = 1;
            } else {
                store_reg(s, reg, tmp);
            }
        } else { /* store */
            if (reg == 31) {
                /* special case: r31 = PC + 4 */
                val = (long)s->pc;
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, val);
            } else if (user) {
                tmp = new_tmp();
                tmp2 = tcg_const_i32(reg);
                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, reg);
            }
            gen_st32(tmp, addr, mmu_idx);
        }
        j++;
        /* no need to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
            /* pre increment: nothing to do */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           the next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }

    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */

    gen_jmp(s, val);
}
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }
        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;
    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;
    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
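/*
 * Summary of the decoder above, derived from the switch on insn >> 29:
 * the top three bits of the instruction word select the class -- 0/1 for
 * multiply, miscellaneous and data-processing forms, 2/3 for single
 * load/store, 4 for load/store multiple, 5 for branch and link, 6/7 for
 * coprocessor and syscall encodings -- while the UCOP_* field macros pick
 * the remaining operands out of the same word.
 */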
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUUniCore32State *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t page_start;
    int num_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc->pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc - page_start < TARGET_PAGE_SIZE &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        qemu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                     i * 2, (int)s0.i, s0.s,
                     i * 2 + 1, (int)s1.i, s1.s);
        qemu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                     i, (uint64_t)d0.f64, d0.d);
    }
    qemu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, f, flags) do { } while (0)
#endif
void uc32_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    qemu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                 psr,
                 psr & (1 << 31) ? 'N' : '-',
                 psr & (1 << 30) ? 'Z' : '-',
                 psr & (1 << 29) ? 'C' : '-',
                 psr & (1 << 28) ? 'V' : '-',
                 cpu_mode_names[psr & 0xf]);

    if (flags & CPU_DUMP_FPU) {
        cpu_dump_state_ucf64(env, f, flags);
    }
}
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->regs[31] = data[0];
}