/*
 *  UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_SYSCALL 5
static TCGv_env cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
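
/* Register 31 doubles as the program counter: regnames[] labels it "pc",
   and load_reg_var()/store_reg() below special-case it accordingly. */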

/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}
static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* s->pc has already been advanced past this instruction, so
           r31 reads as the address of the next instruction */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
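
/*
 * The UCOP_* helpers above extract named fields from the 32-bit instruction
 * word held in each decoder's local variable "insn".  As an illustrative
 * (hypothetical) example, a data-processing word with bits [28:25] == 0x4
 * decodes as UCOP_OPCODES == 0x04 (the "add" case in do_datap()), with
 * UCOP_REG_N (bits [23:19]) and UCOP_REG_D (bits [18:14]) selecting the
 * source and destination registers and UCOP_SET_S (bit 24) requesting a
 * condition-flag update.
 */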

#define ILLEGAL         cpu_abort(CPU(cpu),                             \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)

#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif

static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}

static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */
    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */
    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */
    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */
};

/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))

/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}

/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}

/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}

#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}

/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}

static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
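
/*
 * gen_goto_tb() only chains directly to the destination TB when the target
 * stays on the same guest page as the current TB (see use_goto_tb() above);
 * otherwise it falls back to tcg_gen_exit_tb(0), which returns to the main
 * loop so that the next TB is looked up by the new PC.
 */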

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return.  Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor.  */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}

/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}

/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
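
/*
 * The load/store decoders below read the addressing-mode flags the same way:
 * UCOP_SET_P selects pre-indexed addressing (the offset is applied before the
 * access) versus post-indexed, UCOP_SET_U selects adding versus subtracting
 * the offset, UCOP_SET_W requests base-register write-back, UCOP_SET_L picks
 * load versus store, and in do_ldst_ir() UCOP_SET_B selects a byte rather
 * than a word access.
 */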

/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }

    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}

/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}

/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}

/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}

static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}

static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags)      do { } while (0)
#endif

void uc32_cpu_dump_state(CPUState *cs, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
}

void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->regs[31] = data[0];
}