/*
 * UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_SYSCALL 5
static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
/* initialize TCG globals. */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                        offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}

static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normal case: s->pc has already been advanced past this insn */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
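
/*
 * Decode sketch (added for illustration, not part of the original file):
 * for a word built as insn = (3 << 19) | (5 << 14) | (7 << 0), the
 * extractors above give UCOP_REG_N == 3, UCOP_REG_D == 5 and
 * UCOP_REG_M == 7; each macro isolates one 5-bit register field.
 * Note that UCOP_SET_L and UCOP_SET_S alias the same bit (24); which
 * meaning applies depends on the instruction class being decoded.
 */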
#define ILLEGAL         cpu_abort(CPU(cpu),                             \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)

#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}

static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}

#define gen_set_CF(var) \
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
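
/*
 * Added note: in the ROR case of gen_uc32_shift_im() below, a shift
 * amount of 0 encodes RRX, a rotate right by one through the carry:
 * the generated ops compute var = (CF << 31) | (var >> 1), after
 * optionally latching the old bit 0 into CF when flags are requested.
 */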
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */
    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */
    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */
    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */
};
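
/*
 * Usage note (added): table_logic_cc[UCOP_OPCODES] is nonzero exactly
 * for the logical operations; together with the S bit it selects
 * gen_logic_CC(), which updates only N and Z. The arithmetic opcodes
 * instead compute the full NZCV set in their *_cc helpers.
 */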
/* Set PC state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
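
/*
 * Example (added for illustration): the odd 32-bit register 3 lives in
 * the upper half of the 64-bit register ucf64.regs[1], so
 * ucf64_reg_offset(3) yields offsetof(CPUUniCore32State, ucf64.regs[1])
 * plus offsetof(CPU_DoubleU, l.upper).
 */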
#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))

/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}

#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)
#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
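
/*
 * Added note: gen_goto_tb() below follows the usual QEMU TCG pattern for
 * block chaining. A direct jump whose target lies in the same guest page
 * as the current TB may be chained with goto_tb/exit_tb(tb + n); a
 * cross-page target must exit with 0 so the main loop looks the
 * destination up again under the correct page mapping.
 */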
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception. */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx. */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
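    /*
     * Added note (assumption, by analogy with the ARM translator this
     * code derives from): forcing mmu_idx to 1 for post-indexed (!P)
     * accesses with the W bit set models "translated" user-mode
     * loads/stores issued from privileged code.
     */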
    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load. */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic. However we know
       we never have multiple CPUs running in parallel,
       so it is good enough. */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load. */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
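    /*
     * Worked example (added): bits [23:0] hold a signed word offset;
     * ((int32_t)insn << 8) >> 8 sign-extends it. For a branch at 0x100
     * with an offset field of -1, val starts at 0x104 (s->pc is already
     * past the insn) and the target is 0x104 + (-1 * 4) = 0x100.
     */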
    gen_jmp(s, val);
}
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
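    /*
     * Dispatch sketch (added for illustration): for insn = 0xb8000000,
     * insn >> 29 is 0x5, so the word is handled by do_branch(); its
     * condition field UCOP_COND is 0xc, "gt" in gen_test_cc() terms.
     */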
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor. */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif
void uc32_cpu_dump_state(CPUState *cs, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
}
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->regs[31] = data[0];
}