/*
 *  UniCore32 translation
 *
 *  Copyright (C) 2010-2012 Guan Xuetao
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation, or (at your option) any
 *  later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE    DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP   DISAS_TARGET_2 /* only pc was modified statically */
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_SYSCALL   DISAS_TARGET_3
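
/*
 * How these values are consumed (see the end of gen_intermediate_code()):
 * DISAS_NEXT falls off the end of the TB and chains to the next one with
 * gen_goto_tb(); DISAS_JUMP and DISAS_UPDATE exit with tcg_gen_exit_tb(NULL, 0)
 * so the next TB is found through the TB hash table; DISAS_TB_JUMP means the
 * jump has already been emitted; DISAS_SYSCALL raises UC32_EXCP_PRIV.
 */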
static TCGv_i32 cpu_R[32];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}

static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define UCOP_REG_M              (((insn) >> 0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >> 9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >> 9) & 0x1f)
#define UCOP_SH_OP              (((insn) >> 6) & 0x03)
#define UCOP_SH_IM              (((insn) >> 9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >> 0) & 0x1ff)
#define UCOP_IMM10              (((insn) >> 0) & 0x3ff)
#define UCOP_IMM14              (((insn) >> 0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >> 6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >> 6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
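
/*
 * Illustrative decode using the field macros above (the value is made up for
 * illustration and is not taken from the UniCore32 manual): for
 * insn = 0x0820c007,
 *   UCOP_OPCODES = (insn >> 25) & 0x0f = 0x4,
 *   UCOP_REG_N   = (insn >> 19) & 0x1f = 4,
 *   UCOP_REG_D   = (insn >> 14) & 0x1f = 3,
 *   UCOP_REG_M   =  insn        & 0x1f = 7,
 * and UCOP_SET(29) is clear, so do_datap() would treat it as a
 * register-operand data-processing instruction.
 */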
#define ILLEGAL cpu_abort(env_cpu(env),                                 \
                          "Illegal UniCore32 instruction %x at line %d!", \
                          insn, __LINE__)
187 #ifndef CONFIG_USER_ONLY
188 static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
189 uint32_t insn)
191 TCGv tmp, tmp2, tmp3;
192 if ((insn & 0xfe000000) == 0xe0000000) {
193 tmp2 = new_tmp();
194 tmp3 = new_tmp();
195 tcg_gen_movi_i32(tmp2, UCOP_REG_N);
196 tcg_gen_movi_i32(tmp3, UCOP_IMM10);
197 if (UCOP_SET_L) {
198 tmp = new_tmp();
199 gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
200 store_reg(s, UCOP_REG_D, tmp);
201 } else {
202 tmp = load_reg(s, UCOP_REG_D);
203 gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
204 dead_tmp(tmp);
206 dead_tmp(tmp2);
207 dead_tmp(tmp3);
208 return;
210 ILLEGAL;
213 static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
214 uint32_t insn)
216 TCGv tmp;
218 if ((insn & 0xff003fff) == 0xe1000400) {
220 * movc rd, pp.nn, #imm9
221 * rd: UCOP_REG_D
222 * nn: UCOP_REG_N (must be 0)
223 * imm9: 0
225 if (UCOP_REG_N == 0) {
226 tmp = new_tmp();
227 tcg_gen_movi_i32(tmp, 0);
228 store_reg(s, UCOP_REG_D, tmp);
229 return;
230 } else {
231 ILLEGAL;
234 if ((insn & 0xff003fff) == 0xe0000401) {
236 * movc pp.nn, rn, #imm9
237 * rn: UCOP_REG_D
238 * nn: UCOP_REG_N (must be 1)
239 * imm9: 1
241 if (UCOP_REG_N == 1) {
242 tmp = load_reg(s, UCOP_REG_D);
243 gen_helper_cp1_putc(tmp);
244 dead_tmp(tmp);
245 return;
246 } else {
247 ILLEGAL;
250 ILLEGAL;
252 #endif
254 static inline void gen_set_asr(TCGv var, uint32_t mask)
256 TCGv tmp_mask = tcg_const_i32(mask);
257 gen_helper_asr_write(cpu_env, var, tmp_mask);
258 tcg_temp_free_i32(tmp_mask);
260 /* Set NZCV flags from the high 4 bits of var. */
261 #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
263 static void gen_exception(int excp)
265 TCGv tmp = new_tmp();
266 tcg_gen_movi_i32(tmp, excp);
267 gen_helper_exception(cpu_env, tmp);
268 dead_tmp(tmp);
271 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
273 /* Set CF to the top bit of var. */
274 static void gen_set_CF_bit31(TCGv var)
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 31);
278 gen_set_CF(tmp);
279 dead_tmp(tmp);
282 /* Set N and Z flags from var. */
283 static inline void gen_logic_CC(TCGv var)
285 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
286 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
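
/*
 * Note on flag representation: the translator keeps the flags in separate
 * CPUUniCore32State fields rather than packed into the ASR.  NF holds a value
 * whose sign bit is N, ZF is zero exactly when Z is set, CF holds 0 or 1, and
 * the sign bit of VF is V (see gen_test_cc() below).  gen_set_asr() and
 * gen_helper_asr_read() convert between this split form and the architectural
 * ASR, whose NZCV bits are read back as bits [31:28]
 * (see uc32_cpu_dump_state()).
 */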
289 /* dest = T0 + T1 + CF. */
290 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
292 TCGv tmp;
293 tcg_gen_add_i32(dest, t0, t1);
294 tmp = load_cpu_field(CF);
295 tcg_gen_add_i32(dest, dest, tmp);
296 dead_tmp(tmp);
299 /* dest = T0 - T1 + CF - 1. */
300 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
302 TCGv tmp;
303 tcg_gen_sub_i32(dest, t0, t1);
304 tmp = load_cpu_field(CF);
305 tcg_gen_add_i32(dest, dest, tmp);
306 tcg_gen_subi_i32(dest, dest, 1);
307 dead_tmp(tmp);
310 static void shifter_out_im(TCGv var, int shift)
312 TCGv tmp = new_tmp();
313 if (shift == 0) {
314 tcg_gen_andi_i32(tmp, var, 1);
315 } else {
316 tcg_gen_shri_i32(tmp, var, shift);
317 if (shift != 31) {
318 tcg_gen_andi_i32(tmp, tmp, 1);
321 gen_set_CF(tmp);
322 dead_tmp(tmp);
325 /* Shift by immediate. Includes special handling for shift == 0. */
326 static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
327 int flags)
329 switch (shiftop) {
330 case 0: /* LSL */
331 if (shift != 0) {
332 if (flags) {
333 shifter_out_im(var, 32 - shift);
335 tcg_gen_shli_i32(var, var, shift);
337 break;
338 case 1: /* LSR */
339 if (shift == 0) {
340 if (flags) {
341 tcg_gen_shri_i32(var, var, 31);
342 gen_set_CF(var);
344 tcg_gen_movi_i32(var, 0);
345 } else {
346 if (flags) {
347 shifter_out_im(var, shift - 1);
349 tcg_gen_shri_i32(var, var, shift);
351 break;
352 case 2: /* ASR */
353 if (shift == 0) {
354 shift = 32;
356 if (flags) {
357 shifter_out_im(var, shift - 1);
359 if (shift == 32) {
360 shift = 31;
362 tcg_gen_sari_i32(var, var, shift);
363 break;
364 case 3: /* ROR/RRX */
365 if (shift != 0) {
366 if (flags) {
367 shifter_out_im(var, shift - 1);
369 tcg_gen_rotri_i32(var, var, shift); break;
370 } else {
371 TCGv tmp = load_cpu_field(CF);
372 if (flags) {
373 shifter_out_im(var, 0);
375 tcg_gen_shri_i32(var, var, 1);
376 tcg_gen_shli_i32(tmp, tmp, 31);
377 tcg_gen_or_i32(var, var, tmp);
378 dead_tmp(tmp);
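
    /*
     * Note: in the ROR case above, an immediate rotate amount of 0 encodes
     * RRX, a rotate right by one bit through the carry flag; hence the else
     * branch shifts the value right by one and inserts the old CF as the new
     * bit 31.
     */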
383 static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
384 TCGv shift, int flags)
386 if (flags) {
387 switch (shiftop) {
388 case 0:
389 gen_helper_shl_cc(var, cpu_env, var, shift);
390 break;
391 case 1:
392 gen_helper_shr_cc(var, cpu_env, var, shift);
393 break;
394 case 2:
395 gen_helper_sar_cc(var, cpu_env, var, shift);
396 break;
397 case 3:
398 gen_helper_ror_cc(var, cpu_env, var, shift);
399 break;
401 } else {
402 switch (shiftop) {
403 case 0:
404 gen_helper_shl(var, var, shift);
405 break;
406 case 1:
407 gen_helper_shr(var, var, shift);
408 break;
409 case 2:
410 gen_helper_sar(var, var, shift);
411 break;
412 case 3:
413 tcg_gen_andi_i32(shift, shift, 0x1f);
414 tcg_gen_rotr_i32(var, var, shift);
415 break;
418 dead_tmp(shift);
421 static void gen_test_cc(int cc, TCGLabel *label)
423 TCGv tmp;
424 TCGv tmp2;
425 TCGLabel *inv;
427 switch (cc) {
428 case 0: /* eq: Z */
429 tmp = load_cpu_field(ZF);
430 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
431 break;
432 case 1: /* ne: !Z */
433 tmp = load_cpu_field(ZF);
434 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
435 break;
436 case 2: /* cs: C */
437 tmp = load_cpu_field(CF);
438 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
439 break;
440 case 3: /* cc: !C */
441 tmp = load_cpu_field(CF);
442 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
443 break;
444 case 4: /* mi: N */
445 tmp = load_cpu_field(NF);
446 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
447 break;
448 case 5: /* pl: !N */
449 tmp = load_cpu_field(NF);
450 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
451 break;
452 case 6: /* vs: V */
453 tmp = load_cpu_field(VF);
454 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
455 break;
456 case 7: /* vc: !V */
457 tmp = load_cpu_field(VF);
458 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
459 break;
460 case 8: /* hi: C && !Z */
461 inv = gen_new_label();
462 tmp = load_cpu_field(CF);
463 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
464 dead_tmp(tmp);
465 tmp = load_cpu_field(ZF);
466 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
467 gen_set_label(inv);
468 break;
469 case 9: /* ls: !C || Z */
470 tmp = load_cpu_field(CF);
471 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
472 dead_tmp(tmp);
473 tmp = load_cpu_field(ZF);
474 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
475 break;
476 case 10: /* ge: N == V -> N ^ V == 0 */
477 tmp = load_cpu_field(VF);
478 tmp2 = load_cpu_field(NF);
479 tcg_gen_xor_i32(tmp, tmp, tmp2);
480 dead_tmp(tmp2);
481 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
482 break;
483 case 11: /* lt: N != V -> N ^ V != 0 */
484 tmp = load_cpu_field(VF);
485 tmp2 = load_cpu_field(NF);
486 tcg_gen_xor_i32(tmp, tmp, tmp2);
487 dead_tmp(tmp2);
488 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
489 break;
490 case 12: /* gt: !Z && N == V */
491 inv = gen_new_label();
492 tmp = load_cpu_field(ZF);
493 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
494 dead_tmp(tmp);
495 tmp = load_cpu_field(VF);
496 tmp2 = load_cpu_field(NF);
497 tcg_gen_xor_i32(tmp, tmp, tmp2);
498 dead_tmp(tmp2);
499 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
500 gen_set_label(inv);
501 break;
502 case 13: /* le: Z || N != V */
503 tmp = load_cpu_field(ZF);
504 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
505 dead_tmp(tmp);
506 tmp = load_cpu_field(VF);
507 tmp2 = load_cpu_field(NF);
508 tcg_gen_xor_i32(tmp, tmp, tmp2);
509 dead_tmp(tmp2);
510 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
511 break;
512 default:
513 fprintf(stderr, "Bad condition code 0x%x\n", cc);
514 abort();
516 dead_tmp(tmp);
static const uint8_t table_logic_cc[16] = {
    1, /* and */  1, /* xor */  0, /* sub */  0, /* rsb */
    0, /* add */  0, /* adc */  0, /* sbc */  0, /* rsc */
    1, /* andl */ 1, /* xorl */ 0, /* cmp */  0, /* cmn */
    1, /* orr */  1, /* mov */  1, /* bic */  1, /* mvn */
};
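
/*
 * Nonzero entries mark the purely logical opcodes: when their S bit is set
 * they update only N and Z through gen_logic_CC(), whereas the arithmetic
 * opcodes compute full flags with the gen_helper_*_cc() helpers.  do_datap()
 * uses this as "logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24)".
 */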
526 /* Set PC state from an immediate address. */
527 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
529 s->is_jmp = DISAS_UPDATE;
530 tcg_gen_movi_i32(cpu_R[31], addr & ~3);
533 /* Set PC state from var. var is marked as dead. */
534 static inline void gen_bx(DisasContext *s, TCGv var)
536 s->is_jmp = DISAS_UPDATE;
537 tcg_gen_andi_i32(cpu_R[31], var, ~3);
538 dead_tmp(var);
541 static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
543 store_reg(s, reg, var);
546 static inline TCGv gen_ld8s(TCGv addr, int index)
548 TCGv tmp = new_tmp();
549 tcg_gen_qemu_ld8s(tmp, addr, index);
550 return tmp;
553 static inline TCGv gen_ld8u(TCGv addr, int index)
555 TCGv tmp = new_tmp();
556 tcg_gen_qemu_ld8u(tmp, addr, index);
557 return tmp;
560 static inline TCGv gen_ld16s(TCGv addr, int index)
562 TCGv tmp = new_tmp();
563 tcg_gen_qemu_ld16s(tmp, addr, index);
564 return tmp;
567 static inline TCGv gen_ld16u(TCGv addr, int index)
569 TCGv tmp = new_tmp();
570 tcg_gen_qemu_ld16u(tmp, addr, index);
571 return tmp;
574 static inline TCGv gen_ld32(TCGv addr, int index)
576 TCGv tmp = new_tmp();
577 tcg_gen_qemu_ld32u(tmp, addr, index);
578 return tmp;
581 static inline void gen_st8(TCGv val, TCGv addr, int index)
583 tcg_gen_qemu_st8(val, addr, index);
584 dead_tmp(val);
587 static inline void gen_st16(TCGv val, TCGv addr, int index)
589 tcg_gen_qemu_st16(val, addr, index);
590 dead_tmp(val);
593 static inline void gen_st32(TCGv val, TCGv addr, int index)
595 tcg_gen_qemu_st32(val, addr, index);
596 dead_tmp(val);
599 static inline void gen_set_pc_im(uint32_t val)
601 tcg_gen_movi_i32(cpu_R[31], val);
604 /* Force a TB lookup after an instruction that changes the CPU state. */
605 static inline void gen_lookup_tb(DisasContext *s)
607 tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
608 s->is_jmp = DISAS_UPDATE;
611 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
612 TCGv var)
614 int val;
615 TCGv offset;
617 if (UCOP_SET(29)) {
618 /* immediate */
619 val = UCOP_IMM14;
620 if (!UCOP_SET_U) {
621 val = -val;
623 if (val != 0) {
624 tcg_gen_addi_i32(var, var, val);
626 } else {
627 /* shift/register */
628 offset = load_reg(s, UCOP_REG_M);
629 gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
630 if (!UCOP_SET_U) {
631 tcg_gen_sub_i32(var, var, offset);
632 } else {
633 tcg_gen_add_i32(var, var, offset);
635 dead_tmp(offset);
639 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
640 TCGv var)
642 int val;
643 TCGv offset;
645 if (UCOP_SET(26)) {
646 /* immediate */
647 val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
648 if (!UCOP_SET_U) {
649 val = -val;
651 if (val != 0) {
652 tcg_gen_addi_i32(var, var, val);
654 } else {
655 /* register */
656 offset = load_reg(s, UCOP_REG_M);
657 if (!UCOP_SET_U) {
658 tcg_gen_sub_i32(var, var, offset);
659 } else {
660 tcg_gen_add_i32(var, var, offset);
662 dead_tmp(offset);
666 static inline long ucf64_reg_offset(int reg)
668 if (reg & 1) {
669 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
670 + offsetof(CPU_DoubleU, l.upper);
671 } else {
672 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
673 + offsetof(CPU_DoubleU, l.lower);
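
/*
 * Thus single-precision UCF64 register 2n lives in the low 32 bits and
 * register 2n+1 in the high 32 bits of the 64-bit ucf64.regs[n] slot.
 */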
677 #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
678 #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
680 /* UniCore-F64 single load/store I_offset */
681 static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
683 int offset;
684 TCGv tmp;
685 TCGv addr;
687 addr = load_reg(s, UCOP_REG_N);
688 if (!UCOP_SET_P && !UCOP_SET_W) {
689 ILLEGAL;
692 if (UCOP_SET_P) {
693 offset = UCOP_IMM10 << 2;
694 if (!UCOP_SET_U) {
695 offset = -offset;
697 if (offset != 0) {
698 tcg_gen_addi_i32(addr, addr, offset);
702 if (UCOP_SET_L) { /* load */
703 tmp = gen_ld32(addr, IS_USER(s));
704 ucf64_gen_st32(tmp, UCOP_REG_D);
705 } else { /* store */
706 tmp = ucf64_gen_ld32(UCOP_REG_D);
707 gen_st32(tmp, addr, IS_USER(s));
710 if (!UCOP_SET_P) {
711 offset = UCOP_IMM10 << 2;
712 if (!UCOP_SET_U) {
713 offset = -offset;
715 if (offset != 0) {
716 tcg_gen_addi_i32(addr, addr, offset);
719 if (UCOP_SET_W) {
720 store_reg(s, UCOP_REG_N, addr);
721 } else {
722 dead_tmp(addr);
726 /* UniCore-F64 load/store multiple words */
727 static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
729 unsigned int i;
730 int j, n, freg;
731 TCGv tmp;
732 TCGv addr;
734 if (UCOP_REG_D != 0) {
735 ILLEGAL;
737 if (UCOP_REG_N == 31) {
738 ILLEGAL;
740 if ((insn << 24) == 0) {
741 ILLEGAL;
744 addr = load_reg(s, UCOP_REG_N);
746 n = 0;
747 for (i = 0; i < 8; i++) {
748 if (UCOP_SET(i)) {
749 n++;
753 if (UCOP_SET_U) {
754 if (UCOP_SET_P) { /* pre increment */
755 tcg_gen_addi_i32(addr, addr, 4);
756 } /* unnecessary to do anything when post increment */
757 } else {
758 if (UCOP_SET_P) { /* pre decrement */
759 tcg_gen_addi_i32(addr, addr, -(n * 4));
760 } else { /* post decrement */
761 if (n != 1) {
762 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
767 freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
769 for (i = 0, j = 0; i < 8; i++, freg++) {
770 if (!UCOP_SET(i)) {
771 continue;
774 if (UCOP_SET_L) { /* load */
775 tmp = gen_ld32(addr, IS_USER(s));
776 ucf64_gen_st32(tmp, freg);
777 } else { /* store */
778 tmp = ucf64_gen_ld32(freg);
779 gen_st32(tmp, addr, IS_USER(s));
782 j++;
783 /* unnecessary to add after the last transfer */
784 if (j != n) {
785 tcg_gen_addi_i32(addr, addr, 4);
789 if (UCOP_SET_W) { /* write back */
790 if (UCOP_SET_U) {
791 if (!UCOP_SET_P) { /* post increment */
792 tcg_gen_addi_i32(addr, addr, 4);
793 } /* unnecessary to do anything when pre increment */
794 } else {
795 if (UCOP_SET_P) {
796 /* pre decrement */
797 if (n != 1) {
798 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
800 } else {
801 /* post decrement */
802 tcg_gen_addi_i32(addr, addr, -(n * 4));
805 store_reg(s, UCOP_REG_N, addr);
806 } else {
807 dead_tmp(addr);
811 /* UniCore-F64 mrc/mcr */
812 static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
814 TCGv tmp;
816 if ((insn & 0xfe0003ff) == 0xe2000000) {
817 /* control register */
818 if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
819 ILLEGAL;
821 if (UCOP_SET(24)) {
822 /* CFF */
823 tmp = new_tmp();
824 gen_helper_ucf64_get_fpscr(tmp, cpu_env);
825 store_reg(s, UCOP_REG_D, tmp);
826 } else {
827 /* CTF */
828 tmp = load_reg(s, UCOP_REG_D);
829 gen_helper_ucf64_set_fpscr(cpu_env, tmp);
830 dead_tmp(tmp);
831 gen_lookup_tb(s);
833 return;
835 if ((insn & 0xfe0003ff) == 0xe0000000) {
836 /* general register */
837 if (UCOP_REG_D == 31) {
838 ILLEGAL;
840 if (UCOP_SET(24)) { /* MFF */
841 tmp = ucf64_gen_ld32(UCOP_REG_N);
842 store_reg(s, UCOP_REG_D, tmp);
843 } else { /* MTF */
844 tmp = load_reg(s, UCOP_REG_D);
845 ucf64_gen_st32(tmp, UCOP_REG_N);
847 return;
849 if ((insn & 0xfb000000) == 0xe9000000) {
850 /* MFFC */
851 if (UCOP_REG_D != 31) {
852 ILLEGAL;
854 if (UCOP_UCF64_COND & 0x8) {
855 ILLEGAL;
858 tmp = new_tmp();
859 tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
860 if (UCOP_SET(26)) {
861 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
862 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
863 gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
864 } else {
865 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
866 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
867 gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
869 dead_tmp(tmp);
870 return;
872 ILLEGAL;
875 /* UniCore-F64 convert instructions */
876 static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
878 if (UCOP_UCF64_FMT == 3) {
879 ILLEGAL;
881 if (UCOP_REG_N != 0) {
882 ILLEGAL;
884 switch (UCOP_UCF64_FUNC) {
885 case 0: /* cvt.s */
886 switch (UCOP_UCF64_FMT) {
887 case 1 /* d */:
888 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
889 gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
890 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
891 break;
892 case 2 /* w */:
893 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
894 gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
895 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
896 break;
897 default /* s */:
898 ILLEGAL;
899 break;
901 break;
902 case 1: /* cvt.d */
903 switch (UCOP_UCF64_FMT) {
904 case 0 /* s */:
905 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
906 gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
907 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
908 break;
909 case 2 /* w */:
910 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
911 gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
912 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
913 break;
914 default /* d */:
915 ILLEGAL;
916 break;
918 break;
919 case 4: /* cvt.w */
920 switch (UCOP_UCF64_FMT) {
921 case 0 /* s */:
922 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
923 gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
924 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
925 break;
926 case 1 /* d */:
927 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
928 gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
929 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
930 break;
931 default /* w */:
932 ILLEGAL;
933 break;
935 break;
936 default:
937 ILLEGAL;
941 /* UniCore-F64 compare instructions */
942 static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
944 if (UCOP_SET(25)) {
945 ILLEGAL;
947 if (UCOP_REG_D != 0) {
948 ILLEGAL;
951 ILLEGAL; /* TODO */
952 if (UCOP_SET(24)) {
953 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
954 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
955 /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
956 } else {
957 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
958 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
959 /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
963 #define gen_helper_ucf64_movs(x, y) do { } while (0)
964 #define gen_helper_ucf64_movd(x, y) do { } while (0)
966 #define UCF64_OP1(name) do { \
967 if (UCOP_REG_N != 0) { \
968 ILLEGAL; \
970 switch (UCOP_UCF64_FMT) { \
971 case 0 /* s */: \
972 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
973 ucf64_reg_offset(UCOP_REG_M)); \
974 gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
975 tcg_gen_st_i32(cpu_F0s, cpu_env, \
976 ucf64_reg_offset(UCOP_REG_D)); \
977 break; \
978 case 1 /* d */: \
979 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
980 ucf64_reg_offset(UCOP_REG_M)); \
981 gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
982 tcg_gen_st_i64(cpu_F0d, cpu_env, \
983 ucf64_reg_offset(UCOP_REG_D)); \
984 break; \
985 case 2 /* w */: \
986 ILLEGAL; \
987 break; \
989 } while (0)
991 #define UCF64_OP2(name) do { \
992 switch (UCOP_UCF64_FMT) { \
993 case 0 /* s */: \
994 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
995 ucf64_reg_offset(UCOP_REG_N)); \
996 tcg_gen_ld_i32(cpu_F1s, cpu_env, \
997 ucf64_reg_offset(UCOP_REG_M)); \
998 gen_helper_ucf64_##name##s(cpu_F0s, \
999 cpu_F0s, cpu_F1s, cpu_env); \
1000 tcg_gen_st_i32(cpu_F0s, cpu_env, \
1001 ucf64_reg_offset(UCOP_REG_D)); \
1002 break; \
1003 case 1 /* d */: \
1004 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
1005 ucf64_reg_offset(UCOP_REG_N)); \
1006 tcg_gen_ld_i64(cpu_F1d, cpu_env, \
1007 ucf64_reg_offset(UCOP_REG_M)); \
1008 gen_helper_ucf64_##name##d(cpu_F0d, \
1009 cpu_F0d, cpu_F1d, cpu_env); \
1010 tcg_gen_st_i64(cpu_F0d, cpu_env, \
1011 ucf64_reg_offset(UCOP_REG_D)); \
1012 break; \
1013 case 2 /* w */: \
1014 ILLEGAL; \
1015 break; \
1017 } while (0)
1019 /* UniCore-F64 data processing */
1020 static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1022 if (UCOP_UCF64_FMT == 3) {
1023 ILLEGAL;
1025 switch (UCOP_UCF64_FUNC) {
1026 case 0: /* add */
1027 UCF64_OP2(add);
1028 break;
1029 case 1: /* sub */
1030 UCF64_OP2(sub);
1031 break;
1032 case 2: /* mul */
1033 UCF64_OP2(mul);
1034 break;
1035 case 4: /* div */
1036 UCF64_OP2(div);
1037 break;
1038 case 5: /* abs */
1039 UCF64_OP1(abs);
1040 break;
1041 case 6: /* mov */
1042 UCF64_OP1(mov);
1043 break;
1044 case 7: /* neg */
1045 UCF64_OP1(neg);
1046 break;
1047 default:
1048 ILLEGAL;
1052 /* Disassemble an F64 instruction */
1053 static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1055 if (!UCOP_SET(29)) {
1056 if (UCOP_SET(26)) {
1057 do_ucf64_ldst_m(env, s, insn);
1058 } else {
1059 do_ucf64_ldst_i(env, s, insn);
1061 } else {
1062 if (UCOP_SET(5)) {
1063 switch ((insn >> 26) & 0x3) {
1064 case 0:
1065 do_ucf64_datap(env, s, insn);
1066 break;
1067 case 1:
1068 ILLEGAL;
1069 break;
1070 case 2:
1071 do_ucf64_fcvt(env, s, insn);
1072 break;
1073 case 3:
1074 do_ucf64_fcmp(env, s, insn);
1075 break;
1077 } else {
1078 do_ucf64_trans(env, s, insn);
1083 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1085 #ifndef CONFIG_USER_ONLY
1086 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1087 #else
1088 return true;
1089 #endif
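
/*
 * Direct block chaining with goto_tb is only used when the destination lies
 * on the same guest page as the translation block itself (in system mode),
 * since the link would otherwise survive a remapping of the destination page.
 * gen_goto_tb() below falls back to setting the PC and doing a full lookup
 * via tcg_gen_exit_tb(NULL, 0) when this check fails.
 */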
1092 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1094 if (use_goto_tb(s, dest)) {
1095 tcg_gen_goto_tb(n);
1096 gen_set_pc_im(dest);
1097 tcg_gen_exit_tb(s->tb, n);
1098 } else {
1099 gen_set_pc_im(dest);
1100 tcg_gen_exit_tb(NULL, 0);
1104 static inline void gen_jmp(DisasContext *s, uint32_t dest)
1106 if (unlikely(s->singlestep_enabled)) {
1107 /* An indirect jump so that we still trigger the debug exception. */
1108 gen_bx_im(s, dest);
1109 } else {
1110 gen_goto_tb(s, 0, dest);
1111 s->is_jmp = DISAS_TB_JUMP;
1115 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
1116 static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
1118 TCGv tmp;
1119 if (bsr) {
1120 /* ??? This is also undefined in system mode. */
1121 if (IS_USER(s)) {
1122 return 1;
1125 tmp = load_cpu_field(bsr);
1126 tcg_gen_andi_i32(tmp, tmp, ~mask);
1127 tcg_gen_andi_i32(t0, t0, mask);
1128 tcg_gen_or_i32(tmp, tmp, t0);
1129 store_cpu_field(tmp, bsr);
1130 } else {
1131 gen_set_asr(t0, mask);
1133 dead_tmp(t0);
1134 gen_lookup_tb(s);
1135 return 0;
1138 /* Generate an old-style exception return. Marks pc as dead. */
1139 static void gen_exception_return(DisasContext *s, TCGv pc)
1141 TCGv tmp;
1142 store_reg(s, 31, pc);
1143 tmp = load_cpu_field(bsr);
1144 gen_set_asr(tmp, 0xffffffff);
1145 dead_tmp(tmp);
1146 s->is_jmp = DISAS_UPDATE;
1149 static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
1150 uint32_t insn)
1152 switch (UCOP_CPNUM) {
1153 #ifndef CONFIG_USER_ONLY
1154 case 0:
1155 disas_cp0_insn(env, s, insn);
1156 break;
1157 case 1:
1158 disas_ocd_insn(env, s, insn);
1159 break;
1160 #endif
1161 case 2:
1162 disas_ucf64_insn(env, s, insn);
1163 break;
1164 default:
1165 /* Unknown coprocessor. */
1166 cpu_abort(env_cpu(env), "Unknown coprocessor!");
1170 /* data processing instructions */
1171 static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1173 TCGv tmp;
1174 TCGv tmp2;
1175 int logic_cc;
1177 if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
1178 if (UCOP_SET(23)) { /* CMOV instructions */
1179 if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
1180 ILLEGAL;
1182 /* if not always execute, we generate a conditional jump to
1183 next instruction */
1184 s->condlabel = gen_new_label();
1185 gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
1186 s->condjmp = 1;
1190 logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
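
    /*
     * logic_cc is 1 only when this opcode is one of the logical ones in
     * table_logic_cc *and* the S bit is set: UCOP_SET_S is insn & (1 << 24),
     * so shifting it right by 24 yields 0 or 1.
     */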
1192 if (UCOP_SET(29)) {
1193 unsigned int val;
1194 /* immediate operand */
1195 val = UCOP_IMM_9;
1196 if (UCOP_SH_IM) {
1197 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1199 tmp2 = new_tmp();
1200 tcg_gen_movi_i32(tmp2, val);
1201 if (logic_cc && UCOP_SH_IM) {
1202 gen_set_CF_bit31(tmp2);
1204 } else {
1205 /* register */
1206 tmp2 = load_reg(s, UCOP_REG_M);
1207 if (UCOP_SET(5)) {
1208 tmp = load_reg(s, UCOP_REG_S);
1209 gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
1210 } else {
1211 gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
1215 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1216 tmp = load_reg(s, UCOP_REG_N);
1217 } else {
1218 tmp = NULL;
1221 switch (UCOP_OPCODES) {
1222 case 0x00:
1223 tcg_gen_and_i32(tmp, tmp, tmp2);
1224 if (logic_cc) {
1225 gen_logic_CC(tmp);
1227 store_reg_bx(s, UCOP_REG_D, tmp);
1228 break;
1229 case 0x01:
1230 tcg_gen_xor_i32(tmp, tmp, tmp2);
1231 if (logic_cc) {
1232 gen_logic_CC(tmp);
1234 store_reg_bx(s, UCOP_REG_D, tmp);
1235 break;
1236 case 0x02:
1237 if (UCOP_SET_S && UCOP_REG_D == 31) {
1238 /* SUBS r31, ... is used for exception return. */
1239 if (IS_USER(s)) {
1240 ILLEGAL;
1242 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1243 gen_exception_return(s, tmp);
1244 } else {
1245 if (UCOP_SET_S) {
1246 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1247 } else {
1248 tcg_gen_sub_i32(tmp, tmp, tmp2);
1250 store_reg_bx(s, UCOP_REG_D, tmp);
1252 break;
1253 case 0x03:
1254 if (UCOP_SET_S) {
1255 gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
1256 } else {
1257 tcg_gen_sub_i32(tmp, tmp2, tmp);
1259 store_reg_bx(s, UCOP_REG_D, tmp);
1260 break;
1261 case 0x04:
1262 if (UCOP_SET_S) {
1263 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1264 } else {
1265 tcg_gen_add_i32(tmp, tmp, tmp2);
1267 store_reg_bx(s, UCOP_REG_D, tmp);
1268 break;
1269 case 0x05:
1270 if (UCOP_SET_S) {
1271 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
1272 } else {
1273 gen_add_carry(tmp, tmp, tmp2);
1275 store_reg_bx(s, UCOP_REG_D, tmp);
1276 break;
1277 case 0x06:
1278 if (UCOP_SET_S) {
1279 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
1280 } else {
1281 gen_sub_carry(tmp, tmp, tmp2);
1283 store_reg_bx(s, UCOP_REG_D, tmp);
1284 break;
1285 case 0x07:
1286 if (UCOP_SET_S) {
1287 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
1288 } else {
1289 gen_sub_carry(tmp, tmp2, tmp);
1291 store_reg_bx(s, UCOP_REG_D, tmp);
1292 break;
1293 case 0x08:
1294 if (UCOP_SET_S) {
1295 tcg_gen_and_i32(tmp, tmp, tmp2);
1296 gen_logic_CC(tmp);
1298 dead_tmp(tmp);
1299 break;
1300 case 0x09:
1301 if (UCOP_SET_S) {
1302 tcg_gen_xor_i32(tmp, tmp, tmp2);
1303 gen_logic_CC(tmp);
1305 dead_tmp(tmp);
1306 break;
1307 case 0x0a:
1308 if (UCOP_SET_S) {
1309 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1311 dead_tmp(tmp);
1312 break;
1313 case 0x0b:
1314 if (UCOP_SET_S) {
1315 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1317 dead_tmp(tmp);
1318 break;
1319 case 0x0c:
1320 tcg_gen_or_i32(tmp, tmp, tmp2);
1321 if (logic_cc) {
1322 gen_logic_CC(tmp);
1324 store_reg_bx(s, UCOP_REG_D, tmp);
1325 break;
1326 case 0x0d:
1327 if (logic_cc && UCOP_REG_D == 31) {
1328 /* MOVS r31, ... is used for exception return. */
1329 if (IS_USER(s)) {
1330 ILLEGAL;
1332 gen_exception_return(s, tmp2);
1333 } else {
1334 if (logic_cc) {
1335 gen_logic_CC(tmp2);
1337 store_reg_bx(s, UCOP_REG_D, tmp2);
1339 break;
1340 case 0x0e:
1341 tcg_gen_andc_i32(tmp, tmp, tmp2);
1342 if (logic_cc) {
1343 gen_logic_CC(tmp);
1345 store_reg_bx(s, UCOP_REG_D, tmp);
1346 break;
1347 default:
1348 case 0x0f:
1349 tcg_gen_not_i32(tmp2, tmp2);
1350 if (logic_cc) {
1351 gen_logic_CC(tmp2);
1353 store_reg_bx(s, UCOP_REG_D, tmp2);
1354 break;
1356 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1357 dead_tmp(tmp2);
1361 /* multiply */
1362 static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1364 TCGv tmp, tmp2, tmp3, tmp4;
1366 if (UCOP_SET(27)) {
1367 /* 64 bit mul */
1368 tmp = load_reg(s, UCOP_REG_M);
1369 tmp2 = load_reg(s, UCOP_REG_N);
1370 if (UCOP_SET(26)) {
1371 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
1372 } else {
1373 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
1375 if (UCOP_SET(25)) { /* mult accumulate */
1376 tmp3 = load_reg(s, UCOP_REG_LO);
1377 tmp4 = load_reg(s, UCOP_REG_HI);
1378 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
1379 dead_tmp(tmp3);
1380 dead_tmp(tmp4);
1382 store_reg(s, UCOP_REG_LO, tmp);
1383 store_reg(s, UCOP_REG_HI, tmp2);
1384 } else {
1385 /* 32 bit mul */
1386 tmp = load_reg(s, UCOP_REG_M);
1387 tmp2 = load_reg(s, UCOP_REG_N);
1388 tcg_gen_mul_i32(tmp, tmp, tmp2);
1389 dead_tmp(tmp2);
1390 if (UCOP_SET(25)) {
1391 /* Add */
1392 tmp2 = load_reg(s, UCOP_REG_S);
1393 tcg_gen_add_i32(tmp, tmp, tmp2);
1394 dead_tmp(tmp2);
1396 if (UCOP_SET_S) {
1397 gen_logic_CC(tmp);
1399 store_reg(s, UCOP_REG_D, tmp);
1403 /* miscellaneous instructions */
1404 static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1406 unsigned int val;
1407 TCGv tmp;
1409 if ((insn & 0xffffffe0) == 0x10ffc120) {
1410 /* Trivial implementation equivalent to bx. */
1411 tmp = load_reg(s, UCOP_REG_M);
1412 gen_bx(s, tmp);
1413 return;
1416 if ((insn & 0xfbffc000) == 0x30ffc000) {
1417 /* PSR = immediate */
1418 val = UCOP_IMM_9;
1419 if (UCOP_SH_IM) {
1420 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1422 tmp = new_tmp();
1423 tcg_gen_movi_i32(tmp, val);
1424 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1425 ILLEGAL;
1427 return;
1430 if ((insn & 0xfbffffe0) == 0x12ffc020) {
1431 /* PSR.flag = reg */
1432 tmp = load_reg(s, UCOP_REG_M);
1433 if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
1434 ILLEGAL;
1436 return;
1439 if ((insn & 0xfbffffe0) == 0x10ffc020) {
1440 /* PSR = reg */
1441 tmp = load_reg(s, UCOP_REG_M);
1442 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1443 ILLEGAL;
1445 return;
1448 if ((insn & 0xfbf83fff) == 0x10f80000) {
1449 /* reg = PSR */
1450 if (UCOP_SET_B) {
1451 if (IS_USER(s)) {
1452 ILLEGAL;
1454 tmp = load_cpu_field(bsr);
1455 } else {
1456 tmp = new_tmp();
1457 gen_helper_asr_read(tmp, cpu_env);
1459 store_reg(s, UCOP_REG_D, tmp);
1460 return;
1463 if ((insn & 0xfbf83fe0) == 0x12f80120) {
1464 /* clz */
1465 tmp = load_reg(s, UCOP_REG_M);
1466 if (UCOP_SET(26)) {
1467 /* clo */
1468 tcg_gen_not_i32(tmp, tmp);
1470 tcg_gen_clzi_i32(tmp, tmp, 32);
1471 store_reg(s, UCOP_REG_D, tmp);
1472 return;
1475 /* otherwise */
1476 ILLEGAL;
1479 /* load/store I_offset and R_offset */
1480 static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1482 unsigned int mmu_idx;
1483 TCGv tmp;
1484 TCGv tmp2;
1486 tmp2 = load_reg(s, UCOP_REG_N);
1487 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
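
    /*
     * mmu_idx selects the translation regime for the access: 1 when we are
     * translating user-mode code, and also (it appears) for the post-indexed
     * form with writeback (!P && W), which performs the memory access with
     * user privileges even from kernel mode.
     */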
1489 /* immediate */
1490 if (UCOP_SET_P) {
1491 gen_add_data_offset(s, insn, tmp2);
1494 if (UCOP_SET_L) {
1495 /* load */
1496 if (UCOP_SET_B) {
1497 tmp = gen_ld8u(tmp2, mmu_idx);
1498 } else {
1499 tmp = gen_ld32(tmp2, mmu_idx);
1501 } else {
1502 /* store */
1503 tmp = load_reg(s, UCOP_REG_D);
1504 if (UCOP_SET_B) {
1505 gen_st8(tmp, tmp2, mmu_idx);
1506 } else {
1507 gen_st32(tmp, tmp2, mmu_idx);
1510 if (!UCOP_SET_P) {
1511 gen_add_data_offset(s, insn, tmp2);
1512 store_reg(s, UCOP_REG_N, tmp2);
1513 } else if (UCOP_SET_W) {
1514 store_reg(s, UCOP_REG_N, tmp2);
1515 } else {
1516 dead_tmp(tmp2);
1518 if (UCOP_SET_L) {
1519 /* Complete the load. */
1520 if (UCOP_REG_D == 31) {
1521 gen_bx(s, tmp);
1522 } else {
1523 store_reg(s, UCOP_REG_D, tmp);
1528 /* SWP instruction */
1529 static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1531 TCGv addr;
1532 TCGv tmp;
1533 TCGv tmp2;
1535 if ((insn & 0xff003fe0) != 0x40000120) {
1536 ILLEGAL;
1539 /* ??? This is not really atomic. However we know
1540 we never have multiple CPUs running in parallel,
1541 so it is good enough. */
1542 addr = load_reg(s, UCOP_REG_N);
1543 tmp = load_reg(s, UCOP_REG_M);
1544 if (UCOP_SET_B) {
1545 tmp2 = gen_ld8u(addr, IS_USER(s));
1546 gen_st8(tmp, addr, IS_USER(s));
1547 } else {
1548 tmp2 = gen_ld32(addr, IS_USER(s));
1549 gen_st32(tmp, addr, IS_USER(s));
1551 dead_tmp(addr);
1552 store_reg(s, UCOP_REG_D, tmp2);
1555 /* load/store hw/sb */
1556 static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1558 TCGv addr;
1559 TCGv tmp;
1561 if (UCOP_SH_OP == 0) {
1562 do_swap(env, s, insn);
1563 return;
1566 addr = load_reg(s, UCOP_REG_N);
1567 if (UCOP_SET_P) {
1568 gen_add_datah_offset(s, insn, addr);
1571 if (UCOP_SET_L) { /* load */
1572 switch (UCOP_SH_OP) {
1573 case 1:
1574 tmp = gen_ld16u(addr, IS_USER(s));
1575 break;
1576 case 2:
1577 tmp = gen_ld8s(addr, IS_USER(s));
1578 break;
1579 default: /* see do_swap */
1580 case 3:
1581 tmp = gen_ld16s(addr, IS_USER(s));
1582 break;
1584 } else { /* store */
1585 if (UCOP_SH_OP != 1) {
1586 ILLEGAL;
1588 tmp = load_reg(s, UCOP_REG_D);
1589 gen_st16(tmp, addr, IS_USER(s));
1591 /* Perform base writeback before the loaded value to
1592 ensure correct behavior with overlapping index registers. */
1593 if (!UCOP_SET_P) {
1594 gen_add_datah_offset(s, insn, addr);
1595 store_reg(s, UCOP_REG_N, addr);
1596 } else if (UCOP_SET_W) {
1597 store_reg(s, UCOP_REG_N, addr);
1598 } else {
1599 dead_tmp(addr);
1601 if (UCOP_SET_L) {
1602 /* Complete the load. */
1603 store_reg(s, UCOP_REG_D, tmp);
1607 /* load/store multiple words */
1608 static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1610 unsigned int val, i, mmu_idx;
1611 int j, n, reg, user, loaded_base;
1612 TCGv tmp;
1613 TCGv tmp2;
1614 TCGv addr;
1615 TCGv loaded_var;
1617 if (UCOP_SET(7)) {
1618 ILLEGAL;
1620 /* XXX: store correct base if write back */
1621 user = 0;
1622 if (UCOP_SET_B) { /* S bit in instruction table */
1623 if (IS_USER(s)) {
1624 ILLEGAL; /* only usable in supervisor mode */
1626 if (UCOP_SET(18) == 0) { /* pc reg */
1627 user = 1;
1631 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
1632 addr = load_reg(s, UCOP_REG_N);
1634 /* compute total size */
1635 loaded_base = 0;
1636 loaded_var = NULL;
1637 n = 0;
1638 for (i = 0; i < 6; i++) {
1639 if (UCOP_SET(i)) {
1640 n++;
1643 for (i = 9; i < 19; i++) {
1644 if (UCOP_SET(i)) {
1645 n++;
1648 /* XXX: test invalid n == 0 case ? */
1649 if (UCOP_SET_U) {
1650 if (UCOP_SET_P) {
1651 /* pre increment */
1652 tcg_gen_addi_i32(addr, addr, 4);
1653 } else {
1654 /* post increment */
1656 } else {
1657 if (UCOP_SET_P) {
1658 /* pre decrement */
1659 tcg_gen_addi_i32(addr, addr, -(n * 4));
1660 } else {
1661 /* post decrement */
1662 if (n != 1) {
1663 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1668 j = 0;
1669 reg = UCOP_SET(6) ? 16 : 0;
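
    /*
     * The 16-bit register list occupies bits [5:0] and [18:9] of the
     * instruction (bits 6-8 encode other fields, which is why the loop below
     * skips them); bit 6 selects whether the list names r0-r15 or r16-r31.
     */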
1670 for (i = 0; i < 19; i++, reg++) {
1671 if (i == 6) {
1672 i = i + 3;
1674 if (UCOP_SET(i)) {
1675 if (UCOP_SET_L) { /* load */
1676 tmp = gen_ld32(addr, mmu_idx);
1677 if (reg == 31) {
1678 gen_bx(s, tmp);
1679 } else if (user) {
1680 tmp2 = tcg_const_i32(reg);
1681 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
1682 tcg_temp_free_i32(tmp2);
1683 dead_tmp(tmp);
1684 } else if (reg == UCOP_REG_N) {
1685 loaded_var = tmp;
1686 loaded_base = 1;
1687 } else {
1688 store_reg(s, reg, tmp);
1690 } else { /* store */
1691 if (reg == 31) {
1692 /* special case: r31 = PC + 4 */
1693 val = (long)s->pc;
1694 tmp = new_tmp();
1695 tcg_gen_movi_i32(tmp, val);
1696 } else if (user) {
1697 tmp = new_tmp();
1698 tmp2 = tcg_const_i32(reg);
1699 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
1700 tcg_temp_free_i32(tmp2);
1701 } else {
1702 tmp = load_reg(s, reg);
1704 gen_st32(tmp, addr, mmu_idx);
1706 j++;
1707 /* no need to add after the last transfer */
1708 if (j != n) {
1709 tcg_gen_addi_i32(addr, addr, 4);
1713 if (UCOP_SET_W) { /* write back */
1714 if (UCOP_SET_U) {
1715 if (UCOP_SET_P) {
1716 /* pre increment */
1717 } else {
1718 /* post increment */
1719 tcg_gen_addi_i32(addr, addr, 4);
1721 } else {
1722 if (UCOP_SET_P) {
1723 /* pre decrement */
1724 if (n != 1) {
1725 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1727 } else {
1728 /* post decrement */
1729 tcg_gen_addi_i32(addr, addr, -(n * 4));
1732 store_reg(s, UCOP_REG_N, addr);
1733 } else {
1734 dead_tmp(addr);
1736 if (loaded_base) {
1737 store_reg(s, UCOP_REG_N, loaded_var);
1739 if (UCOP_SET_B && !user) {
1740 /* Restore ASR from BSR. */
1741 tmp = load_cpu_field(bsr);
1742 gen_set_asr(tmp, 0xffffffff);
1743 dead_tmp(tmp);
1744 s->is_jmp = DISAS_UPDATE;
1748 /* branch (and link) */
1749 static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1751 unsigned int val;
1752 int32_t offset;
1753 TCGv tmp;
1755 if (UCOP_COND == 0xf) {
1756 ILLEGAL;
1759 if (UCOP_COND != 0xe) {
1760 /* if not always execute, we generate a conditional jump to
1761 next instruction */
1762 s->condlabel = gen_new_label();
1763 gen_test_cc(UCOP_COND ^ 1, s->condlabel);
1764 s->condjmp = 1;
1767 val = (int32_t)s->pc;
1768 if (UCOP_SET_L) {
1769 tmp = new_tmp();
1770 tcg_gen_movi_i32(tmp, val);
1771 store_reg(s, 30, tmp);
1773 offset = (((int32_t)insn << 8) >> 8);
1774 val += (offset << 2); /* unicore is pc+4 */
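
    /*
     * The branch displacement is the low 24 bits of the instruction,
     * sign-extended and scaled by 4, relative to the updated PC (the branch
     * address + 4).  For example (value made up for illustration), a
     * displacement field of 0xfffffe is -2, so the target is pc + 4 - 8,
     * i.e. one word before the branch itself.
     */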
1775 gen_jmp(s, val);
1778 static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
1780 unsigned int insn;
1782 insn = cpu_ldl_code(env, s->pc);
1783 s->pc += 4;
    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
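
    /*
     * In other words, bits [31:29] select the major class dispatched by the
     * switch below, and bits 8 and 5 further qualify it.  For example, a word
     * with (insn >> 29) == 0x5 is handled by do_branch(), and one with
     * (insn >> 29) == 0x6 by disas_coproc_insn().
     */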
1793 switch (insn >> 29) {
1794 case 0x0:
1795 if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
1796 do_mult(env, s, insn);
1797 break;
1800 if (UCOP_SET(8)) {
1801 do_misc(env, s, insn);
1802 break;
1804 case 0x1:
1805 if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
1806 do_misc(env, s, insn);
1807 break;
1809 do_datap(env, s, insn);
1810 break;
1812 case 0x2:
1813 if (UCOP_SET(8) && UCOP_SET(5)) {
1814 do_ldst_hwsb(env, s, insn);
1815 break;
1817 if (UCOP_SET(8) || UCOP_SET(5)) {
1818 ILLEGAL;
1820 case 0x3:
1821 do_ldst_ir(env, s, insn);
1822 break;
1824 case 0x4:
1825 if (UCOP_SET(8)) {
1826 ILLEGAL; /* extended instructions */
1828 do_ldst_m(env, s, insn);
1829 break;
1830 case 0x5:
1831 do_branch(env, s, insn);
1832 break;
1833 case 0x6:
1834 /* Coprocessor. */
1835 disas_coproc_insn(env, s, insn);
1836 break;
1837 case 0x7:
1838 if (!UCOP_SET(28)) {
1839 disas_coproc_insn(env, s, insn);
1840 break;
1842 if ((insn & 0xff000000) == 0xff000000) { /* syscall */
1843 gen_set_pc_im(s->pc);
1844 s->is_jmp = DISAS_SYSCALL;
1845 break;
1847 ILLEGAL;
1851 /* generate intermediate code for basic block 'tb'. */
1852 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1854 CPUUniCore32State *env = cs->env_ptr;
1855 DisasContext dc1, *dc = &dc1;
1856 target_ulong pc_start;
1857 uint32_t page_start;
1858 int num_insns;
1860 /* generate intermediate code */
1861 num_temps = 0;
1863 pc_start = tb->pc;
1865 dc->tb = tb;
1867 dc->is_jmp = DISAS_NEXT;
1868 dc->pc = pc_start;
1869 dc->singlestep_enabled = cs->singlestep_enabled;
1870 dc->condjmp = 0;
1871 cpu_F0s = tcg_temp_new_i32();
1872 cpu_F1s = tcg_temp_new_i32();
1873 cpu_F0d = tcg_temp_new_i64();
1874 cpu_F1d = tcg_temp_new_i64();
1875 page_start = pc_start & TARGET_PAGE_MASK;
1876 num_insns = 0;
1878 #ifndef CONFIG_USER_ONLY
1879 if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
1880 dc->user = 1;
1881 } else {
1882 dc->user = 0;
1884 #endif
1886 gen_tb_start(tb);
1887 do {
1888 tcg_gen_insn_start(dc->pc);
1889 num_insns++;
1891 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1892 gen_set_pc_im(dc->pc);
1893 gen_exception(EXCP_DEBUG);
1894 dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
1899 dc->pc += 4;
1900 goto done_generating;
1903 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1904 gen_io_start();
1907 disas_uc32_insn(env, dc);
1909 if (num_temps) {
1910 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
1911 num_temps = 0;
1914 if (dc->condjmp && !dc->is_jmp) {
1915 gen_set_label(dc->condlabel);
1916 dc->condjmp = 0;
1918 /* Translation stops when a conditional branch is encountered.
1919 * Otherwise the subsequent code could get translated several times.
1920 * Also stop translation when a page boundary is reached. This
1921 * ensures prefetch aborts occur at the right place. */
1922 } while (!dc->is_jmp && !tcg_op_buf_full() &&
1923 !cs->singlestep_enabled &&
1924 !singlestep &&
1925 dc->pc - page_start < TARGET_PAGE_SIZE &&
1926 num_insns < max_insns);
1928 if (tb_cflags(tb) & CF_LAST_IO) {
1929 if (dc->condjmp) {
1930 /* FIXME: This can theoretically happen with self-modifying
1931 code. */
1932 cpu_abort(cs, "IO on conditional branch instruction");
1936 /* At this stage dc->condjmp will only be set when the skipped
1937 instruction was a conditional branch or trap, and the PC has
1938 already been written. */
1939 if (unlikely(cs->singlestep_enabled)) {
1940 /* Make sure the pc is updated, and raise a debug exception. */
1941 if (dc->condjmp) {
1942 if (dc->is_jmp == DISAS_SYSCALL) {
1943 gen_exception(UC32_EXCP_PRIV);
1944 } else {
1945 gen_exception(EXCP_DEBUG);
1947 gen_set_label(dc->condlabel);
1949 if (dc->condjmp || !dc->is_jmp) {
1950 gen_set_pc_im(dc->pc);
1951 dc->condjmp = 0;
1953 if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
1954 gen_exception(UC32_EXCP_PRIV);
1955 } else {
1956 gen_exception(EXCP_DEBUG);
1958 } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
1967 switch (dc->is_jmp) {
1968 case DISAS_NEXT:
1969 gen_goto_tb(dc, 1, dc->pc);
1970 break;
1971 default:
1972 case DISAS_JUMP:
1973 case DISAS_UPDATE:
1974 /* indicate that the hash table must be used to find the next TB */
1975 tcg_gen_exit_tb(NULL, 0);
1976 break;
1977 case DISAS_TB_JUMP:
1978 /* nothing more to generate */
1979 break;
1980 case DISAS_SYSCALL:
1981 gen_exception(UC32_EXCP_PRIV);
1982 break;
1984 if (dc->condjmp) {
1985 gen_set_label(dc->condlabel);
1986 gen_goto_tb(dc, 1, dc->pc);
1987 dc->condjmp = 0;
1991 done_generating:
1992 gen_tb_end(tb, num_insns);
1994 #ifdef DEBUG_DISAS
1995 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1996 && qemu_log_in_addr_range(pc_start)) {
1997 FILE *logfile = qemu_log_lock();
1998 qemu_log("----------------\n");
1999 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2000 log_target_disas(cs, pc_start, dc->pc - pc_start);
2001 qemu_log("\n");
2002 qemu_log_unlock(logfile);
2004 #endif
2005 tb->size = dc->pc - pc_start;
2006 tb->icount = num_insns;
2009 static const char *cpu_mode_names[16] = {
2010 "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
2011 "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
2014 #undef UCF64_DUMP_STATE
2015 #ifdef UCF64_DUMP_STATE
2016 static void cpu_dump_state_ucf64(CPUUniCore32State *env, int flags)
2018 int i;
2019 union {
2020 uint32_t i;
2021 float s;
2022 } s0, s1;
2023 CPU_DoubleU d;
2024 /* ??? This assumes float64 and double have the same layout.
2025 Oh well, it's only debug dumps. */
2026 union {
2027 float64 f64;
2028 double d;
2029 } d0;
2031 for (i = 0; i < 16; i++) {
2032 d.d = env->ucf64.regs[i];
2033 s0.i = d.l.lower;
2034 s1.i = d.l.upper;
2035 d0.f64 = d.d;
2036 qemu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
2037 i * 2, (int)s0.i, s0.s,
2038 i * 2 + 1, (int)s1.i, s1.s);
2039 qemu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
2040 i, (uint64_t)d0.f64, d0.d);
2042 qemu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
2044 #else
2045 #define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
2046 #endif
2048 void uc32_cpu_dump_state(CPUState *cs, FILE *f, int flags)
2050 UniCore32CPU *cpu = UNICORE32_CPU(cs);
2051 CPUUniCore32State *env = &cpu->env;
2052 int i;
2053 uint32_t psr;
2055 for (i = 0; i < 32; i++) {
2056 qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2057 if ((i % 4) == 3) {
2058 qemu_fprintf(f, "\n");
2059 } else {
2060 qemu_fprintf(f, " ");
2063 psr = cpu_asr_read(env);
2064 qemu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
2065 psr,
2066 psr & (1 << 31) ? 'N' : '-',
2067 psr & (1 << 30) ? 'Z' : '-',
2068 psr & (1 << 29) ? 'C' : '-',
2069 psr & (1 << 28) ? 'V' : '-',
2070 cpu_mode_names[psr & 0xf]);
2072 if (flags & CPU_DUMP_FPU) {
2073 cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
2077 void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
2078 target_ulong *data)
2080 env->regs[31] = data[0];