/*
 * UniCore32 translation
 *
 * Copyright (C) 2010-2012 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    int user;
#endif
} DisasContext;

#ifndef CONFIG_USER_ONLY
#define IS_USER(s) (s->user)
#else
#define IS_USER(s) 1
#endif

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE    DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP   DISAS_TARGET_2 /* only pc was modified statically */
/* These instructions trap after executing, so defer them until after the
   conditional executions state has been updated.  */
#define DISAS_SYSCALL   DISAS_TARGET_3

static TCGv_i32 cpu_R[32];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
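
/* Note: regs[31] is the program counter ("pc" above); register number 31 is
 * therefore special-cased in load_reg_var() (it reads the decoded pc) and in
 * store_reg() (a write to it forces a jump).
 */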

/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
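/* Note: UCOP_SET_L and UCOP_SET_S both test bit 24; the load/store decoders
 * read it as the L (load) flag, while the data-processing decoder reads it
 * as the S (set condition codes) flag.
 */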

#define ILLEGAL         cpu_abort(CPU(cpu),                             \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)

#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}

static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *      rd: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 0)
         *      imm9: 0
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *      rn: UCOP_REG_D
         *      nn: UCOP_REG_N (must be 1)
         *      imm9: 1
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
#endif

static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    dead_tmp(tmp);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, cpu_env, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, cpu_env, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, cpu_env, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, cpu_env, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}

static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
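
/* For each data-processing opcode, a nonzero entry below means the S bit only
 * updates N and Z via gen_logic_CC(); the arithmetic opcodes (zero entries)
 * use the flag-setting helpers (add_cc, sub_cc, ...) instead.  See do_datap().
 */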
static const uint8_t table_logic_cc[16] = {
    1, /* and */        1, /* xor */        0, /* sub */        0, /* rsb */
    0, /* add */        0, /* adc */        0, /* sbc */        0, /* rsc */
    1, /* andl */       1, /* xorl */       0, /* cmp */        0, /* cmn */
    1, /* orr */        1, /* mov */        1, /* bic */        1, /* mvn */
};

/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
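/* ucf64_reg_offset() maps single-precision register numbers onto the
 * lower/upper 32-bit halves of the underlying double registers (odd numbers
 * select the upper word), so the two helpers above can load and store single
 * words directly from env.
 */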

/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}

/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}

/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}

#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}

/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}

static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(s->tb, n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor.  */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}

/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        tmp = NULL;
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}

/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            /* clo */
            tcg_gen_not_i32(tmp, tmp);
        }
        tcg_gen_clzi_i32(tmp, tmp, 32);
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}

/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}

/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}

/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    loaded_var = NULL;
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}

/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}

static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* otherwise data processing: fall through */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* otherwise word/byte load/store: fall through */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUUniCore32State *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t page_start;
    int num_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc - page_start < TARGET_PAGE_SIZE &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}

static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        qemu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                     i * 2, (int)s0.i, s0.s,
                     i * 2 + 1, (int)s1.i, s1.s);
        qemu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                     i, (uint64_t)d0.f64, d0.d);
    }
    qemu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags)      do { } while (0)
#endif

void uc32_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    UniCore32CPU *cpu = UNICORE32_CPU(cs);
    CPUUniCore32State *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    qemu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                 psr,
                 psr & (1 << 31) ? 'N' : '-',
                 psr & (1 << 30) ? 'Z' : '-',
                 psr & (1 << 29) ? 'C' : '-',
                 psr & (1 << 28) ? 'V' : '-',
                 cpu_mode_names[psr & 0xf]);

    if (flags & CPU_DUMP_FPU) {
        cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
    }
}

void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->regs[31] = data[0];
}