/*
 * UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_SYSCALL 5
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"
static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
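
/* Register 31 doubles as the program counter, hence the final "pc"
 * entry above; store_reg() and load_reg_var() below special-case
 * reg == 31 for the same reason.
 */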
/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
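
/* Reading aid for the extractors above (bit positions follow directly
 * from the shift/mask values; fields overlap, and which one applies
 * depends on the instruction class):
 *   [31:29] major class (see disas_uc32_insn)
 *   [28:25] OPCODES/COND, overlapping the P/U/B/W and L/S flag bits
 *   [23:19] REG_N          [22:19] CMOV_COND
 *   [18:14] REG_D/REG_LO   [13:9]  REG_S/REG_HI/SH_IM
 *   [13:10] CPNUM          [7:6]   SH_OP
 *   [13:0] IMM14, [9:0] IMM10, [8:0] IMM_9, [4:0] REG_M
 */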
#define ILLEGAL cpu_abort(env,                                          \
                "Illegal UniCore32 instruction %x at line %d!",         \
                insn, __LINE__)

#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
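
/* Note: CF stores "not borrow" after a subtraction (the ARM-style
 * convention), so subtract-with-carry is T0 - T1 - !CF, which is the
 * T0 - T1 + CF - 1 computed above.
 */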
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
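
/* With a register-specified amount the shift count is only known at run
 * time, so these go through helpers even when no flags are wanted,
 * presumably because TCG shift ops leave amounts >= 32 undefined; only
 * the rotate can be open-coded after masking the amount to 0x1f.
 */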
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */
    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */
    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */
    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */
};
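
/* A 1 marks a logical opcode: with the S bit set it takes N and Z from
 * the result via gen_logic_CC (C comes from the shifter, if any) rather
 * than calling a flag-computing arithmetic helper.  do_datap() indexes
 * this table with UCOP_OPCODES and masks it with the S bit.
 */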
/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
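
/* Each 64-bit ucf64.regs[] slot backs a register pair: ucf64_reg_offset
 * maps even single-precision register numbers to the low word and odd
 * ones to the high word of the containing CPU_DoubleU, so s0/s1 share
 * d0's storage, s2/s3 share d1's, and so on.
 */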
/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
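
/* UCF64_OP1/UCF64_OP2 factor out the shared load-operate-store shape of
 * the unary and binary F64 operations: select the ...s or ...d helper
 * from the FMT field, stage operands in the fixed cpu_F0/cpu_F1
 * temporaries, and write the result back to REG_D.  The empty
 * movs/movd stubs above let the mov case reuse UCF64_OP1 even though no
 * helper call is needed there.
 */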
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x) {
        tcg_gen_sari_i32(t0, t0, 16);
    } else {
        gen_sxth(t0);
    }
    if (y) {
        tcg_gen_sari_i32(t1, t1, 16);
    } else {
        gen_sxth(t1);
    }
    tcg_gen_mul_i32(t0, t0, t1);
}
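
/* gen_mulxy multiplies two signed 16-bit halves: x and y select the
 * high (arithmetically shifted) or sign-extended low half of each
 * operand.  It appears to have no caller in this file and was likely
 * carried over from the ARM translator this code is modeled on.
 */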
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor.  */
        cpu_abort(env, "Unknown coprocessor!");
    }
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
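
/* Opcodes 0x08-0x0b are the test/compare forms (andl/xorl/cmp/cmn in
 * table_logic_cc's comments): they exist only for their flag effects,
 * so the result temporary is always released with dead_tmp() instead of
 * being written back to REG_D.
 */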
/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers.  */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
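
/* Worked example (an illustrative encoding, not taken from a manual):
 * for insn = 0xbc000002 the top three bits select class 0x5, UCOP_COND
 * is 0xe (always) and UCOP_SET_L is clear, so this is a plain "b".  The
 * shift pair above sign-extends the low 24 bits, giving offset = 2;
 * since s->pc has already been advanced past the branch, the target is
 * branch_address + 4 + (2 << 2) = branch_address + 12.
 */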
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(s->pc);
    }

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
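
/* Dispatch summary: classes 0x0/0x1 cover multiply, miscellaneous and
 * data-processing encodings (note the deliberate fall-through from
 * case 0x0 into 0x1, and from 0x2 into 0x3 for the load/store forms);
 * 0x4 is load/store multiple, 0x5 is branch, and 0x6/0x7 are
 * coprocessor or syscall encodings.
 */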
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
        TranslationBlock *tb, bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUUniCore32State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, true);
}
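
/* The two entry points differ only in the search_pc flag: the plain
 * variant translates a TB for execution, while the _pc variant also
 * fills the gen_opc_pc[] mapping that restore_state_to_opc() at the end
 * of this file uses to recover the guest PC after an exception inside
 * the TB.
 */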
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif
void uc32_cpu_dump_state(CPUState *cs, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
}

void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
}