target-unicore32/translate.c
/*
 *  UniCore32 translation
 *
 *  Copyright (C) 2010-2012 Guan Xuetao
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation, or (at your option) any
 *  later version. See the COPYING file in the top-level directory.
 */
11 #include "qemu/osdep.h"
13 #include "cpu.h"
14 #include "disas/disas.h"
15 #include "tcg-op.h"
16 #include "qemu/log.h"
17 #include "exec/cpu_ldst.h"
19 #include "exec/helper-proto.h"
20 #include "exec/helper-gen.h"
22 #include "trace-tcg.h"
23 #include "exec/log.h"
26 /* internal defines */
27 typedef struct DisasContext {
28 target_ulong pc;
29 int is_jmp;
30 /* Nonzero if this instruction has been conditionally skipped. */
31 int condjmp;
32 /* The label that will be jumped to when the instruction is skipped. */
33 TCGLabel *condlabel;
34 struct TranslationBlock *tb;
35 int singlestep_enabled;
36 #ifndef CONFIG_USER_ONLY
37 int user;
38 #endif
39 } DisasContext;
41 #ifndef CONFIG_USER_ONLY
42 #define IS_USER(s) (s->user)
43 #else
44 #define IS_USER(s) 1
45 #endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
49 #define DISAS_SYSCALL 5
51 static TCGv_env cpu_env;
52 static TCGv_i32 cpu_R[32];
54 /* FIXME: These should be removed. */
55 static TCGv cpu_F0s, cpu_F1s;
56 static TCGv_i64 cpu_F0d, cpu_F1d;
58 #include "exec/gen-icount.h"
60 static const char *regnames[] = {
61 "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
62 "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
63 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
64 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
66 /* initialize TCG globals. */
67 void uc32_translate_init(void)
69 int i;
71 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
73 for (i = 0; i < 32; i++) {
74 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
75 offsetof(CPUUniCore32State, regs[i]), regnames[i]);
79 static int num_temps;
81 /* Allocate a temporary variable. */
82 static TCGv_i32 new_tmp(void)
84 num_temps++;
85 return tcg_temp_new_i32();
88 /* Release a temporary variable. */
89 static void dead_tmp(TCGv tmp)
91 tcg_temp_free(tmp);
92 num_temps--;
95 static inline TCGv load_cpu_offset(int offset)
97 TCGv tmp = new_tmp();
98 tcg_gen_ld_i32(tmp, cpu_env, offset);
99 return tmp;
102 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
104 static inline void store_cpu_offset(TCGv var, int offset)
106 tcg_gen_st_i32(var, cpu_env, offset);
107 dead_tmp(var);
110 #define store_cpu_field(var, name) \
111 store_cpu_offset(var, offsetof(CPUUniCore32State, name))
113 /* Set a variable to the value of a CPU register. */
114 static void load_reg_var(DisasContext *s, TCGv var, int reg)
116 if (reg == 31) {
117 uint32_t addr;
/* normally, since we have already updated the PC */
119 addr = (long)s->pc;
120 tcg_gen_movi_i32(var, addr);
121 } else {
122 tcg_gen_mov_i32(var, cpu_R[reg]);
126 /* Create a new temporary and set it to the value of a CPU register. */
127 static inline TCGv load_reg(DisasContext *s, int reg)
129 TCGv tmp = new_tmp();
130 load_reg_var(s, tmp, reg);
131 return tmp;
134 /* Set a CPU register. The source must be a temporary and will be
135 marked as dead. */
136 static void store_reg(DisasContext *s, int reg, TCGv var)
138 if (reg == 31) {
139 tcg_gen_andi_i32(var, var, ~3);
140 s->is_jmp = DISAS_JUMP;
142 tcg_gen_mov_i32(cpu_R[reg], var);
143 dead_tmp(var);
146 /* Value extensions. */
147 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
148 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
149 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
150 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
152 #define UCOP_REG_M (((insn) >> 0) & 0x1f)
153 #define UCOP_REG_N (((insn) >> 19) & 0x1f)
154 #define UCOP_REG_D (((insn) >> 14) & 0x1f)
155 #define UCOP_REG_S (((insn) >> 9) & 0x1f)
156 #define UCOP_REG_LO (((insn) >> 14) & 0x1f)
157 #define UCOP_REG_HI (((insn) >> 9) & 0x1f)
158 #define UCOP_SH_OP (((insn) >> 6) & 0x03)
159 #define UCOP_SH_IM (((insn) >> 9) & 0x1f)
160 #define UCOP_OPCODES (((insn) >> 25) & 0x0f)
161 #define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
162 #define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
163 #define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
164 #define UCOP_COND (((insn) >> 25) & 0x0f)
165 #define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
166 #define UCOP_CPNUM (((insn) >> 10) & 0x0f)
167 #define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
168 #define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
169 #define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
171 #define UCOP_SET(i) ((insn) & (1 << (i)))
172 #define UCOP_SET_P UCOP_SET(28)
173 #define UCOP_SET_U UCOP_SET(27)
174 #define UCOP_SET_B UCOP_SET(26)
175 #define UCOP_SET_W UCOP_SET(25)
176 #define UCOP_SET_L UCOP_SET(24)
177 #define UCOP_SET_S UCOP_SET(24)
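/*
 * Added note (not in the original source): these macros slice fixed bit
 * fields out of the 32-bit instruction word.  For example, UCOP_REG_N is
 * bits [23:19], UCOP_REG_D bits [18:14], UCOP_REG_S bits [13:9] and
 * UCOP_REG_M bits [4:0], while UCOP_SET(i) tests a single control bit,
 * such as UCOP_SET_L (bit 24) for the load/store direction.
 */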
179 #define ILLEGAL cpu_abort(CPU(cpu), \
180 "Illegal UniCore32 instruction %x at line %d!", \
181 insn, __LINE__)
183 #ifndef CONFIG_USER_ONLY
184 static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
185 uint32_t insn)
187 UniCore32CPU *cpu = uc32_env_get_cpu(env);
188 TCGv tmp, tmp2, tmp3;
189 if ((insn & 0xfe000000) == 0xe0000000) {
190 tmp2 = new_tmp();
191 tmp3 = new_tmp();
192 tcg_gen_movi_i32(tmp2, UCOP_REG_N);
193 tcg_gen_movi_i32(tmp3, UCOP_IMM10);
194 if (UCOP_SET_L) {
195 tmp = new_tmp();
196 gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
197 store_reg(s, UCOP_REG_D, tmp);
198 } else {
199 tmp = load_reg(s, UCOP_REG_D);
200 gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
201 dead_tmp(tmp);
203 dead_tmp(tmp2);
204 dead_tmp(tmp3);
205 return;
207 ILLEGAL;
210 static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
211 uint32_t insn)
213 UniCore32CPU *cpu = uc32_env_get_cpu(env);
214 TCGv tmp;
216 if ((insn & 0xff003fff) == 0xe1000400) {
/*
 * movc rd, pp.nn, #imm9
 *      rd: UCOP_REG_D
 *      nn: UCOP_REG_N (must be 0)
 *      imm9: 0
 */
223 if (UCOP_REG_N == 0) {
224 tmp = new_tmp();
225 tcg_gen_movi_i32(tmp, 0);
226 store_reg(s, UCOP_REG_D, tmp);
227 return;
228 } else {
229 ILLEGAL;
232 if ((insn & 0xff003fff) == 0xe0000401) {
/*
 * movc pp.nn, rn, #imm9
 *      rn: UCOP_REG_D
 *      nn: UCOP_REG_N (must be 1)
 *      imm9: 1
 */
239 if (UCOP_REG_N == 1) {
240 tmp = load_reg(s, UCOP_REG_D);
241 gen_helper_cp1_putc(tmp);
242 dead_tmp(tmp);
243 return;
244 } else {
245 ILLEGAL;
248 ILLEGAL;
250 #endif
252 static inline void gen_set_asr(TCGv var, uint32_t mask)
254 TCGv tmp_mask = tcg_const_i32(mask);
255 gen_helper_asr_write(cpu_env, var, tmp_mask);
256 tcg_temp_free_i32(tmp_mask);
258 /* Set NZCV flags from the high 4 bits of var. */
259 #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
261 static void gen_exception(int excp)
263 TCGv tmp = new_tmp();
264 tcg_gen_movi_i32(tmp, excp);
265 gen_helper_exception(cpu_env, tmp);
266 dead_tmp(tmp);
269 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
271 /* Set CF to the top bit of var. */
272 static void gen_set_CF_bit31(TCGv var)
274 TCGv tmp = new_tmp();
275 tcg_gen_shri_i32(tmp, var, 31);
276 gen_set_CF(tmp);
277 dead_tmp(tmp);
280 /* Set N and Z flags from var. */
281 static inline void gen_logic_CC(TCGv var)
283 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
284 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
287 /* dest = T0 + T1 + CF. */
288 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
290 TCGv tmp;
291 tcg_gen_add_i32(dest, t0, t1);
292 tmp = load_cpu_field(CF);
293 tcg_gen_add_i32(dest, dest, tmp);
294 dead_tmp(tmp);
297 /* dest = T0 - T1 + CF - 1. */
298 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
300 TCGv tmp;
301 tcg_gen_sub_i32(dest, t0, t1);
302 tmp = load_cpu_field(CF);
303 tcg_gen_add_i32(dest, dest, tmp);
304 tcg_gen_subi_i32(dest, dest, 1);
305 dead_tmp(tmp);
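/* Added note: with CF == 1 (i.e. no borrow pending, the usual
   carry-as-not-borrow convention) this reduces to a plain t0 - t1. */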
308 static void shifter_out_im(TCGv var, int shift)
310 TCGv tmp = new_tmp();
311 if (shift == 0) {
312 tcg_gen_andi_i32(tmp, var, 1);
313 } else {
314 tcg_gen_shri_i32(tmp, var, shift);
315 if (shift != 31) {
316 tcg_gen_andi_i32(tmp, tmp, 1);
319 gen_set_CF(tmp);
320 dead_tmp(tmp);
323 /* Shift by immediate. Includes special handling for shift == 0. */
324 static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
325 int flags)
327 switch (shiftop) {
328 case 0: /* LSL */
329 if (shift != 0) {
330 if (flags) {
331 shifter_out_im(var, 32 - shift);
333 tcg_gen_shli_i32(var, var, shift);
335 break;
336 case 1: /* LSR */
337 if (shift == 0) {
338 if (flags) {
339 tcg_gen_shri_i32(var, var, 31);
340 gen_set_CF(var);
342 tcg_gen_movi_i32(var, 0);
343 } else {
344 if (flags) {
345 shifter_out_im(var, shift - 1);
347 tcg_gen_shri_i32(var, var, shift);
349 break;
350 case 2: /* ASR */
351 if (shift == 0) {
352 shift = 32;
354 if (flags) {
355 shifter_out_im(var, shift - 1);
357 if (shift == 32) {
358 shift = 31;
360 tcg_gen_sari_i32(var, var, shift);
361 break;
362 case 3: /* ROR/RRX */
363 if (shift != 0) {
364 if (flags) {
365 shifter_out_im(var, shift - 1);
367 tcg_gen_rotri_i32(var, var, shift); break;
368 } else {
369 TCGv tmp = load_cpu_field(CF);
370 if (flags) {
371 shifter_out_im(var, 0);
373 tcg_gen_shri_i32(var, var, 1);
374 tcg_gen_shli_i32(tmp, tmp, 31);
375 tcg_gen_or_i32(var, var, tmp);
376 dead_tmp(tmp);
381 static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
382 TCGv shift, int flags)
384 if (flags) {
385 switch (shiftop) {
386 case 0:
387 gen_helper_shl_cc(var, cpu_env, var, shift);
388 break;
389 case 1:
390 gen_helper_shr_cc(var, cpu_env, var, shift);
391 break;
392 case 2:
393 gen_helper_sar_cc(var, cpu_env, var, shift);
394 break;
395 case 3:
396 gen_helper_ror_cc(var, cpu_env, var, shift);
397 break;
399 } else {
400 switch (shiftop) {
401 case 0:
402 gen_helper_shl(var, var, shift);
403 break;
404 case 1:
405 gen_helper_shr(var, var, shift);
406 break;
407 case 2:
408 gen_helper_sar(var, var, shift);
409 break;
410 case 3:
411 tcg_gen_andi_i32(shift, shift, 0x1f);
412 tcg_gen_rotr_i32(var, var, shift);
413 break;
416 dead_tmp(shift);
419 static void gen_test_cc(int cc, TCGLabel *label)
421 TCGv tmp;
422 TCGv tmp2;
423 TCGLabel *inv;
425 switch (cc) {
426 case 0: /* eq: Z */
427 tmp = load_cpu_field(ZF);
428 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
429 break;
430 case 1: /* ne: !Z */
431 tmp = load_cpu_field(ZF);
432 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
433 break;
434 case 2: /* cs: C */
435 tmp = load_cpu_field(CF);
436 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
437 break;
438 case 3: /* cc: !C */
439 tmp = load_cpu_field(CF);
440 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
441 break;
442 case 4: /* mi: N */
443 tmp = load_cpu_field(NF);
444 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
445 break;
446 case 5: /* pl: !N */
447 tmp = load_cpu_field(NF);
448 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
449 break;
450 case 6: /* vs: V */
451 tmp = load_cpu_field(VF);
452 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
453 break;
454 case 7: /* vc: !V */
455 tmp = load_cpu_field(VF);
456 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
457 break;
458 case 8: /* hi: C && !Z */
459 inv = gen_new_label();
460 tmp = load_cpu_field(CF);
461 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
462 dead_tmp(tmp);
463 tmp = load_cpu_field(ZF);
464 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
465 gen_set_label(inv);
466 break;
467 case 9: /* ls: !C || Z */
468 tmp = load_cpu_field(CF);
469 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
470 dead_tmp(tmp);
471 tmp = load_cpu_field(ZF);
472 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
473 break;
474 case 10: /* ge: N == V -> N ^ V == 0 */
475 tmp = load_cpu_field(VF);
476 tmp2 = load_cpu_field(NF);
477 tcg_gen_xor_i32(tmp, tmp, tmp2);
478 dead_tmp(tmp2);
479 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
480 break;
481 case 11: /* lt: N != V -> N ^ V != 0 */
482 tmp = load_cpu_field(VF);
483 tmp2 = load_cpu_field(NF);
484 tcg_gen_xor_i32(tmp, tmp, tmp2);
485 dead_tmp(tmp2);
486 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
487 break;
488 case 12: /* gt: !Z && N == V */
489 inv = gen_new_label();
490 tmp = load_cpu_field(ZF);
491 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
492 dead_tmp(tmp);
493 tmp = load_cpu_field(VF);
494 tmp2 = load_cpu_field(NF);
495 tcg_gen_xor_i32(tmp, tmp, tmp2);
496 dead_tmp(tmp2);
497 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
498 gen_set_label(inv);
499 break;
500 case 13: /* le: Z || N != V */
501 tmp = load_cpu_field(ZF);
502 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
503 dead_tmp(tmp);
504 tmp = load_cpu_field(VF);
505 tmp2 = load_cpu_field(NF);
506 tcg_gen_xor_i32(tmp, tmp, tmp2);
507 dead_tmp(tmp2);
508 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
509 break;
510 default:
511 fprintf(stderr, "Bad condition code 0x%x\n", cc);
512 abort();
514 dead_tmp(tmp);
static const uint8_t table_logic_cc[16] = {
    1, /* and */   1, /* xor */   0, /* sub */   0, /* rsb */
    0, /* add */   0, /* adc */   0, /* sbc */   0, /* rsc */
    1, /* andl */  1, /* xorl */  0, /* cmp */   0, /* cmn */
    1, /* orr */   1, /* mov */   1, /* bic */   1, /* mvn */
};
524 /* Set PC state from an immediate address. */
525 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
527 s->is_jmp = DISAS_UPDATE;
528 tcg_gen_movi_i32(cpu_R[31], addr & ~3);
531 /* Set PC state from var. var is marked as dead. */
532 static inline void gen_bx(DisasContext *s, TCGv var)
534 s->is_jmp = DISAS_UPDATE;
535 tcg_gen_andi_i32(cpu_R[31], var, ~3);
536 dead_tmp(var);
539 static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
541 store_reg(s, reg, var);
544 static inline TCGv gen_ld8s(TCGv addr, int index)
546 TCGv tmp = new_tmp();
547 tcg_gen_qemu_ld8s(tmp, addr, index);
548 return tmp;
551 static inline TCGv gen_ld8u(TCGv addr, int index)
553 TCGv tmp = new_tmp();
554 tcg_gen_qemu_ld8u(tmp, addr, index);
555 return tmp;
558 static inline TCGv gen_ld16s(TCGv addr, int index)
560 TCGv tmp = new_tmp();
561 tcg_gen_qemu_ld16s(tmp, addr, index);
562 return tmp;
565 static inline TCGv gen_ld16u(TCGv addr, int index)
567 TCGv tmp = new_tmp();
568 tcg_gen_qemu_ld16u(tmp, addr, index);
569 return tmp;
572 static inline TCGv gen_ld32(TCGv addr, int index)
574 TCGv tmp = new_tmp();
575 tcg_gen_qemu_ld32u(tmp, addr, index);
576 return tmp;
579 static inline void gen_st8(TCGv val, TCGv addr, int index)
581 tcg_gen_qemu_st8(val, addr, index);
582 dead_tmp(val);
585 static inline void gen_st16(TCGv val, TCGv addr, int index)
587 tcg_gen_qemu_st16(val, addr, index);
588 dead_tmp(val);
591 static inline void gen_st32(TCGv val, TCGv addr, int index)
593 tcg_gen_qemu_st32(val, addr, index);
594 dead_tmp(val);
597 static inline void gen_set_pc_im(uint32_t val)
599 tcg_gen_movi_i32(cpu_R[31], val);
602 /* Force a TB lookup after an instruction that changes the CPU state. */
603 static inline void gen_lookup_tb(DisasContext *s)
605 tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
606 s->is_jmp = DISAS_UPDATE;
609 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
610 TCGv var)
612 int val;
613 TCGv offset;
615 if (UCOP_SET(29)) {
616 /* immediate */
617 val = UCOP_IMM14;
618 if (!UCOP_SET_U) {
619 val = -val;
621 if (val != 0) {
622 tcg_gen_addi_i32(var, var, val);
624 } else {
625 /* shift/register */
626 offset = load_reg(s, UCOP_REG_M);
627 gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
628 if (!UCOP_SET_U) {
629 tcg_gen_sub_i32(var, var, offset);
630 } else {
631 tcg_gen_add_i32(var, var, offset);
633 dead_tmp(offset);
637 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
638 TCGv var)
640 int val;
641 TCGv offset;
643 if (UCOP_SET(26)) {
644 /* immediate */
645 val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
646 if (!UCOP_SET_U) {
647 val = -val;
649 if (val != 0) {
650 tcg_gen_addi_i32(var, var, val);
652 } else {
653 /* register */
654 offset = load_reg(s, UCOP_REG_M);
655 if (!UCOP_SET_U) {
656 tcg_gen_sub_i32(var, var, offset);
657 } else {
658 tcg_gen_add_i32(var, var, offset);
660 dead_tmp(offset);
664 static inline long ucf64_reg_offset(int reg)
666 if (reg & 1) {
667 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
668 + offsetof(CPU_DoubleU, l.upper);
669 } else {
670 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
671 + offsetof(CPU_DoubleU, l.lower);
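/* Added example: single-precision register 5 maps to the upper 32 bits of
 * ucf64.regs[2] and register 4 to its lower 32 bits, so two consecutive
 * 32-bit registers share one 64-bit slot. */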
675 #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
676 #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
678 /* UniCore-F64 single load/store I_offset */
679 static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
681 UniCore32CPU *cpu = uc32_env_get_cpu(env);
682 int offset;
683 TCGv tmp;
684 TCGv addr;
686 addr = load_reg(s, UCOP_REG_N);
687 if (!UCOP_SET_P && !UCOP_SET_W) {
688 ILLEGAL;
691 if (UCOP_SET_P) {
692 offset = UCOP_IMM10 << 2;
693 if (!UCOP_SET_U) {
694 offset = -offset;
696 if (offset != 0) {
697 tcg_gen_addi_i32(addr, addr, offset);
701 if (UCOP_SET_L) { /* load */
702 tmp = gen_ld32(addr, IS_USER(s));
703 ucf64_gen_st32(tmp, UCOP_REG_D);
704 } else { /* store */
705 tmp = ucf64_gen_ld32(UCOP_REG_D);
706 gen_st32(tmp, addr, IS_USER(s));
709 if (!UCOP_SET_P) {
710 offset = UCOP_IMM10 << 2;
711 if (!UCOP_SET_U) {
712 offset = -offset;
714 if (offset != 0) {
715 tcg_gen_addi_i32(addr, addr, offset);
718 if (UCOP_SET_W) {
719 store_reg(s, UCOP_REG_N, addr);
720 } else {
721 dead_tmp(addr);
725 /* UniCore-F64 load/store multiple words */
726 static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
728 UniCore32CPU *cpu = uc32_env_get_cpu(env);
729 unsigned int i;
730 int j, n, freg;
731 TCGv tmp;
732 TCGv addr;
734 if (UCOP_REG_D != 0) {
735 ILLEGAL;
737 if (UCOP_REG_N == 31) {
738 ILLEGAL;
740 if ((insn << 24) == 0) {
741 ILLEGAL;
744 addr = load_reg(s, UCOP_REG_N);
746 n = 0;
747 for (i = 0; i < 8; i++) {
748 if (UCOP_SET(i)) {
749 n++;
753 if (UCOP_SET_U) {
754 if (UCOP_SET_P) { /* pre increment */
755 tcg_gen_addi_i32(addr, addr, 4);
756 } /* unnecessary to do anything when post increment */
757 } else {
758 if (UCOP_SET_P) { /* pre decrement */
759 tcg_gen_addi_i32(addr, addr, -(n * 4));
760 } else { /* post decrement */
761 if (n != 1) {
762 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
767 freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
769 for (i = 0, j = 0; i < 8; i++, freg++) {
770 if (!UCOP_SET(i)) {
771 continue;
774 if (UCOP_SET_L) { /* load */
775 tmp = gen_ld32(addr, IS_USER(s));
776 ucf64_gen_st32(tmp, freg);
777 } else { /* store */
778 tmp = ucf64_gen_ld32(freg);
779 gen_st32(tmp, addr, IS_USER(s));
782 j++;
783 /* unnecessary to add after the last transfer */
784 if (j != n) {
785 tcg_gen_addi_i32(addr, addr, 4);
789 if (UCOP_SET_W) { /* write back */
790 if (UCOP_SET_U) {
791 if (!UCOP_SET_P) { /* post increment */
792 tcg_gen_addi_i32(addr, addr, 4);
793 } /* unnecessary to do anything when pre increment */
794 } else {
795 if (UCOP_SET_P) {
796 /* pre decrement */
797 if (n != 1) {
798 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
800 } else {
801 /* post decrement */
802 tcg_gen_addi_i32(addr, addr, -(n * 4));
805 store_reg(s, UCOP_REG_N, addr);
806 } else {
807 dead_tmp(addr);
811 /* UniCore-F64 mrc/mcr */
812 static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
814 UniCore32CPU *cpu = uc32_env_get_cpu(env);
815 TCGv tmp;
817 if ((insn & 0xfe0003ff) == 0xe2000000) {
818 /* control register */
819 if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
820 ILLEGAL;
822 if (UCOP_SET(24)) {
823 /* CFF */
824 tmp = new_tmp();
825 gen_helper_ucf64_get_fpscr(tmp, cpu_env);
826 store_reg(s, UCOP_REG_D, tmp);
827 } else {
828 /* CTF */
829 tmp = load_reg(s, UCOP_REG_D);
830 gen_helper_ucf64_set_fpscr(cpu_env, tmp);
831 dead_tmp(tmp);
832 gen_lookup_tb(s);
834 return;
836 if ((insn & 0xfe0003ff) == 0xe0000000) {
837 /* general register */
838 if (UCOP_REG_D == 31) {
839 ILLEGAL;
841 if (UCOP_SET(24)) { /* MFF */
842 tmp = ucf64_gen_ld32(UCOP_REG_N);
843 store_reg(s, UCOP_REG_D, tmp);
844 } else { /* MTF */
845 tmp = load_reg(s, UCOP_REG_D);
846 ucf64_gen_st32(tmp, UCOP_REG_N);
848 return;
850 if ((insn & 0xfb000000) == 0xe9000000) {
851 /* MFFC */
852 if (UCOP_REG_D != 31) {
853 ILLEGAL;
855 if (UCOP_UCF64_COND & 0x8) {
856 ILLEGAL;
859 tmp = new_tmp();
860 tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
861 if (UCOP_SET(26)) {
862 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
863 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
864 gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
865 } else {
866 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
867 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
868 gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
870 dead_tmp(tmp);
871 return;
873 ILLEGAL;
876 /* UniCore-F64 convert instructions */
877 static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
879 UniCore32CPU *cpu = uc32_env_get_cpu(env);
881 if (UCOP_UCF64_FMT == 3) {
882 ILLEGAL;
884 if (UCOP_REG_N != 0) {
885 ILLEGAL;
887 switch (UCOP_UCF64_FUNC) {
888 case 0: /* cvt.s */
889 switch (UCOP_UCF64_FMT) {
890 case 1 /* d */:
891 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
892 gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
893 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
894 break;
895 case 2 /* w */:
896 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
897 gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
898 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
899 break;
900 default /* s */:
901 ILLEGAL;
902 break;
904 break;
905 case 1: /* cvt.d */
906 switch (UCOP_UCF64_FMT) {
907 case 0 /* s */:
908 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
909 gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
910 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
911 break;
912 case 2 /* w */:
913 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
914 gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
915 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
916 break;
917 default /* d */:
918 ILLEGAL;
919 break;
921 break;
922 case 4: /* cvt.w */
923 switch (UCOP_UCF64_FMT) {
924 case 0 /* s */:
925 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
926 gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
927 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
928 break;
929 case 1 /* d */:
930 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
931 gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
932 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
933 break;
934 default /* w */:
935 ILLEGAL;
936 break;
938 break;
939 default:
940 ILLEGAL;
944 /* UniCore-F64 compare instructions */
945 static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
947 UniCore32CPU *cpu = uc32_env_get_cpu(env);
949 if (UCOP_SET(25)) {
950 ILLEGAL;
952 if (UCOP_REG_D != 0) {
953 ILLEGAL;
956 ILLEGAL; /* TODO */
957 if (UCOP_SET(24)) {
958 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
959 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
960 /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
961 } else {
962 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
963 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
964 /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
968 #define gen_helper_ucf64_movs(x, y) do { } while (0)
969 #define gen_helper_ucf64_movd(x, y) do { } while (0)
971 #define UCF64_OP1(name) do { \
972 if (UCOP_REG_N != 0) { \
973 ILLEGAL; \
975 switch (UCOP_UCF64_FMT) { \
976 case 0 /* s */: \
977 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
978 ucf64_reg_offset(UCOP_REG_M)); \
979 gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
980 tcg_gen_st_i32(cpu_F0s, cpu_env, \
981 ucf64_reg_offset(UCOP_REG_D)); \
982 break; \
983 case 1 /* d */: \
984 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
985 ucf64_reg_offset(UCOP_REG_M)); \
986 gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
987 tcg_gen_st_i64(cpu_F0d, cpu_env, \
988 ucf64_reg_offset(UCOP_REG_D)); \
989 break; \
990 case 2 /* w */: \
991 ILLEGAL; \
992 break; \
994 } while (0)
996 #define UCF64_OP2(name) do { \
997 switch (UCOP_UCF64_FMT) { \
998 case 0 /* s */: \
999 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
1000 ucf64_reg_offset(UCOP_REG_N)); \
1001 tcg_gen_ld_i32(cpu_F1s, cpu_env, \
1002 ucf64_reg_offset(UCOP_REG_M)); \
1003 gen_helper_ucf64_##name##s(cpu_F0s, \
1004 cpu_F0s, cpu_F1s, cpu_env); \
1005 tcg_gen_st_i32(cpu_F0s, cpu_env, \
1006 ucf64_reg_offset(UCOP_REG_D)); \
1007 break; \
1008 case 1 /* d */: \
1009 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
1010 ucf64_reg_offset(UCOP_REG_N)); \
1011 tcg_gen_ld_i64(cpu_F1d, cpu_env, \
1012 ucf64_reg_offset(UCOP_REG_M)); \
1013 gen_helper_ucf64_##name##d(cpu_F0d, \
1014 cpu_F0d, cpu_F1d, cpu_env); \
1015 tcg_gen_st_i64(cpu_F0d, cpu_env, \
1016 ucf64_reg_offset(UCOP_REG_D)); \
1017 break; \
1018 case 2 /* w */: \
1019 ILLEGAL; \
1020 break; \
1022 } while (0)
1024 /* UniCore-F64 data processing */
1025 static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1027 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1029 if (UCOP_UCF64_FMT == 3) {
1030 ILLEGAL;
1032 switch (UCOP_UCF64_FUNC) {
1033 case 0: /* add */
1034 UCF64_OP2(add);
1035 break;
1036 case 1: /* sub */
1037 UCF64_OP2(sub);
1038 break;
1039 case 2: /* mul */
1040 UCF64_OP2(mul);
1041 break;
1042 case 4: /* div */
1043 UCF64_OP2(div);
1044 break;
1045 case 5: /* abs */
1046 UCF64_OP1(abs);
1047 break;
1048 case 6: /* mov */
1049 UCF64_OP1(mov);
1050 break;
1051 case 7: /* neg */
1052 UCF64_OP1(neg);
1053 break;
1054 default:
1055 ILLEGAL;
1059 /* Disassemble an F64 instruction */
1060 static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1062 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1064 if (!UCOP_SET(29)) {
1065 if (UCOP_SET(26)) {
1066 do_ucf64_ldst_m(env, s, insn);
1067 } else {
1068 do_ucf64_ldst_i(env, s, insn);
1070 } else {
1071 if (UCOP_SET(5)) {
1072 switch ((insn >> 26) & 0x3) {
1073 case 0:
1074 do_ucf64_datap(env, s, insn);
1075 break;
1076 case 1:
1077 ILLEGAL;
1078 break;
1079 case 2:
1080 do_ucf64_fcvt(env, s, insn);
1081 break;
1082 case 3:
1083 do_ucf64_fcmp(env, s, insn);
1084 break;
1086 } else {
1087 do_ucf64_trans(env, s, insn);
1092 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1094 #ifndef CONFIG_USER_ONLY
1095 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1096 #else
1097 return true;
1098 #endif
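/* Added note: direct chaining is only attempted when the destination lies on
 * the same guest page as the TB being translated; cross-page targets fall
 * back to gen_set_pc_im() plus a plain tcg_gen_exit_tb(0) in gen_goto_tb()
 * below. */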
1101 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1103 if (use_goto_tb(s, dest)) {
1104 tcg_gen_goto_tb(n);
1105 gen_set_pc_im(dest);
1106 tcg_gen_exit_tb((uintptr_t)s->tb + n);
1107 } else {
1108 gen_set_pc_im(dest);
1109 tcg_gen_exit_tb(0);
1113 static inline void gen_jmp(DisasContext *s, uint32_t dest)
1115 if (unlikely(s->singlestep_enabled)) {
1116 /* An indirect jump so that we still trigger the debug exception. */
1117 gen_bx_im(s, dest);
1118 } else {
1119 gen_goto_tb(s, 0, dest);
1120 s->is_jmp = DISAS_TB_JUMP;
1124 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
1125 static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
1127 TCGv tmp;
1128 if (bsr) {
1129 /* ??? This is also undefined in system mode. */
1130 if (IS_USER(s)) {
1131 return 1;
1134 tmp = load_cpu_field(bsr);
1135 tcg_gen_andi_i32(tmp, tmp, ~mask);
1136 tcg_gen_andi_i32(t0, t0, mask);
1137 tcg_gen_or_i32(tmp, tmp, t0);
1138 store_cpu_field(tmp, bsr);
1139 } else {
1140 gen_set_asr(t0, mask);
1142 dead_tmp(t0);
1143 gen_lookup_tb(s);
1144 return 0;
1147 /* Generate an old-style exception return. Marks pc as dead. */
1148 static void gen_exception_return(DisasContext *s, TCGv pc)
1150 TCGv tmp;
1151 store_reg(s, 31, pc);
1152 tmp = load_cpu_field(bsr);
1153 gen_set_asr(tmp, 0xffffffff);
1154 dead_tmp(tmp);
1155 s->is_jmp = DISAS_UPDATE;
1158 static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
1159 uint32_t insn)
1161 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1163 switch (UCOP_CPNUM) {
1164 #ifndef CONFIG_USER_ONLY
1165 case 0:
1166 disas_cp0_insn(env, s, insn);
1167 break;
1168 case 1:
1169 disas_ocd_insn(env, s, insn);
1170 break;
1171 #endif
1172 case 2:
1173 disas_ucf64_insn(env, s, insn);
1174 break;
1175 default:
1176 /* Unknown coprocessor. */
1177 cpu_abort(CPU(cpu), "Unknown coprocessor!");
1181 /* data processing instructions */
1182 static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1184 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1185 TCGv tmp;
1186 TCGv tmp2;
1187 int logic_cc;
1189 if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
1190 if (UCOP_SET(23)) { /* CMOV instructions */
1191 if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
1192 ILLEGAL;
1194 /* if not always execute, we generate a conditional jump to
1195 next instruction */
1196 s->condlabel = gen_new_label();
1197 gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
1198 s->condjmp = 1;
1202 logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
1204 if (UCOP_SET(29)) {
1205 unsigned int val;
1206 /* immediate operand */
1207 val = UCOP_IMM_9;
1208 if (UCOP_SH_IM) {
1209 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1211 tmp2 = new_tmp();
1212 tcg_gen_movi_i32(tmp2, val);
1213 if (logic_cc && UCOP_SH_IM) {
1214 gen_set_CF_bit31(tmp2);
1216 } else {
1217 /* register */
1218 tmp2 = load_reg(s, UCOP_REG_M);
1219 if (UCOP_SET(5)) {
1220 tmp = load_reg(s, UCOP_REG_S);
1221 gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
1222 } else {
1223 gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
1227 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1228 tmp = load_reg(s, UCOP_REG_N);
1229 } else {
1230 TCGV_UNUSED(tmp);
1233 switch (UCOP_OPCODES) {
1234 case 0x00:
1235 tcg_gen_and_i32(tmp, tmp, tmp2);
1236 if (logic_cc) {
1237 gen_logic_CC(tmp);
1239 store_reg_bx(s, UCOP_REG_D, tmp);
1240 break;
1241 case 0x01:
1242 tcg_gen_xor_i32(tmp, tmp, tmp2);
1243 if (logic_cc) {
1244 gen_logic_CC(tmp);
1246 store_reg_bx(s, UCOP_REG_D, tmp);
1247 break;
1248 case 0x02:
1249 if (UCOP_SET_S && UCOP_REG_D == 31) {
1250 /* SUBS r31, ... is used for exception return. */
1251 if (IS_USER(s)) {
1252 ILLEGAL;
1254 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1255 gen_exception_return(s, tmp);
1256 } else {
1257 if (UCOP_SET_S) {
1258 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1259 } else {
1260 tcg_gen_sub_i32(tmp, tmp, tmp2);
1262 store_reg_bx(s, UCOP_REG_D, tmp);
1264 break;
1265 case 0x03:
1266 if (UCOP_SET_S) {
1267 gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
1268 } else {
1269 tcg_gen_sub_i32(tmp, tmp2, tmp);
1271 store_reg_bx(s, UCOP_REG_D, tmp);
1272 break;
1273 case 0x04:
1274 if (UCOP_SET_S) {
1275 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1276 } else {
1277 tcg_gen_add_i32(tmp, tmp, tmp2);
1279 store_reg_bx(s, UCOP_REG_D, tmp);
1280 break;
1281 case 0x05:
1282 if (UCOP_SET_S) {
1283 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
1284 } else {
1285 gen_add_carry(tmp, tmp, tmp2);
1287 store_reg_bx(s, UCOP_REG_D, tmp);
1288 break;
1289 case 0x06:
1290 if (UCOP_SET_S) {
1291 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
1292 } else {
1293 gen_sub_carry(tmp, tmp, tmp2);
1295 store_reg_bx(s, UCOP_REG_D, tmp);
1296 break;
1297 case 0x07:
1298 if (UCOP_SET_S) {
1299 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
1300 } else {
1301 gen_sub_carry(tmp, tmp2, tmp);
1303 store_reg_bx(s, UCOP_REG_D, tmp);
1304 break;
1305 case 0x08:
1306 if (UCOP_SET_S) {
1307 tcg_gen_and_i32(tmp, tmp, tmp2);
1308 gen_logic_CC(tmp);
1310 dead_tmp(tmp);
1311 break;
1312 case 0x09:
1313 if (UCOP_SET_S) {
1314 tcg_gen_xor_i32(tmp, tmp, tmp2);
1315 gen_logic_CC(tmp);
1317 dead_tmp(tmp);
1318 break;
1319 case 0x0a:
1320 if (UCOP_SET_S) {
1321 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1323 dead_tmp(tmp);
1324 break;
1325 case 0x0b:
1326 if (UCOP_SET_S) {
1327 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1329 dead_tmp(tmp);
1330 break;
1331 case 0x0c:
1332 tcg_gen_or_i32(tmp, tmp, tmp2);
1333 if (logic_cc) {
1334 gen_logic_CC(tmp);
1336 store_reg_bx(s, UCOP_REG_D, tmp);
1337 break;
1338 case 0x0d:
1339 if (logic_cc && UCOP_REG_D == 31) {
1340 /* MOVS r31, ... is used for exception return. */
1341 if (IS_USER(s)) {
1342 ILLEGAL;
1344 gen_exception_return(s, tmp2);
1345 } else {
1346 if (logic_cc) {
1347 gen_logic_CC(tmp2);
1349 store_reg_bx(s, UCOP_REG_D, tmp2);
1351 break;
1352 case 0x0e:
1353 tcg_gen_andc_i32(tmp, tmp, tmp2);
1354 if (logic_cc) {
1355 gen_logic_CC(tmp);
1357 store_reg_bx(s, UCOP_REG_D, tmp);
1358 break;
1359 default:
1360 case 0x0f:
1361 tcg_gen_not_i32(tmp2, tmp2);
1362 if (logic_cc) {
1363 gen_logic_CC(tmp2);
1365 store_reg_bx(s, UCOP_REG_D, tmp2);
1366 break;
1368 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1369 dead_tmp(tmp2);
1373 /* multiply */
1374 static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1376 TCGv tmp, tmp2, tmp3, tmp4;
1378 if (UCOP_SET(27)) {
1379 /* 64 bit mul */
1380 tmp = load_reg(s, UCOP_REG_M);
1381 tmp2 = load_reg(s, UCOP_REG_N);
1382 if (UCOP_SET(26)) {
1383 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
1384 } else {
1385 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
1387 if (UCOP_SET(25)) { /* mult accumulate */
1388 tmp3 = load_reg(s, UCOP_REG_LO);
1389 tmp4 = load_reg(s, UCOP_REG_HI);
1390 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
1391 dead_tmp(tmp3);
1392 dead_tmp(tmp4);
1394 store_reg(s, UCOP_REG_LO, tmp);
1395 store_reg(s, UCOP_REG_HI, tmp2);
1396 } else {
1397 /* 32 bit mul */
1398 tmp = load_reg(s, UCOP_REG_M);
1399 tmp2 = load_reg(s, UCOP_REG_N);
1400 tcg_gen_mul_i32(tmp, tmp, tmp2);
1401 dead_tmp(tmp2);
1402 if (UCOP_SET(25)) {
1403 /* Add */
1404 tmp2 = load_reg(s, UCOP_REG_S);
1405 tcg_gen_add_i32(tmp, tmp, tmp2);
1406 dead_tmp(tmp2);
1408 if (UCOP_SET_S) {
1409 gen_logic_CC(tmp);
1411 store_reg(s, UCOP_REG_D, tmp);
1415 /* miscellaneous instructions */
1416 static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1418 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1419 unsigned int val;
1420 TCGv tmp;
1422 if ((insn & 0xffffffe0) == 0x10ffc120) {
1423 /* Trivial implementation equivalent to bx. */
1424 tmp = load_reg(s, UCOP_REG_M);
1425 gen_bx(s, tmp);
1426 return;
1429 if ((insn & 0xfbffc000) == 0x30ffc000) {
1430 /* PSR = immediate */
1431 val = UCOP_IMM_9;
1432 if (UCOP_SH_IM) {
1433 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1435 tmp = new_tmp();
1436 tcg_gen_movi_i32(tmp, val);
1437 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1438 ILLEGAL;
1440 return;
1443 if ((insn & 0xfbffffe0) == 0x12ffc020) {
1444 /* PSR.flag = reg */
1445 tmp = load_reg(s, UCOP_REG_M);
1446 if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
1447 ILLEGAL;
1449 return;
1452 if ((insn & 0xfbffffe0) == 0x10ffc020) {
1453 /* PSR = reg */
1454 tmp = load_reg(s, UCOP_REG_M);
1455 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1456 ILLEGAL;
1458 return;
1461 if ((insn & 0xfbf83fff) == 0x10f80000) {
1462 /* reg = PSR */
1463 if (UCOP_SET_B) {
1464 if (IS_USER(s)) {
1465 ILLEGAL;
1467 tmp = load_cpu_field(bsr);
1468 } else {
1469 tmp = new_tmp();
1470 gen_helper_asr_read(tmp, cpu_env);
1472 store_reg(s, UCOP_REG_D, tmp);
1473 return;
1476 if ((insn & 0xfbf83fe0) == 0x12f80120) {
1477 /* clz */
1478 tmp = load_reg(s, UCOP_REG_M);
1479 if (UCOP_SET(26)) {
1480 gen_helper_clo(tmp, tmp);
1481 } else {
1482 gen_helper_clz(tmp, tmp);
1484 store_reg(s, UCOP_REG_D, tmp);
1485 return;
1488 /* otherwise */
1489 ILLEGAL;
1492 /* load/store I_offset and R_offset */
1493 static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1495 unsigned int mmu_idx;
1496 TCGv tmp;
1497 TCGv tmp2;
1499 tmp2 = load_reg(s, UCOP_REG_N);
1500 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
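/* Added note: mmu_idx evaluates to 1 rather than 0 either when we are already
 * in user mode or for the post-indexed form with writeback, the latter
 * presumably being the unprivileged-access variant of the load/store. */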
1502 /* immediate */
1503 if (UCOP_SET_P) {
1504 gen_add_data_offset(s, insn, tmp2);
1507 if (UCOP_SET_L) {
1508 /* load */
1509 if (UCOP_SET_B) {
1510 tmp = gen_ld8u(tmp2, mmu_idx);
1511 } else {
1512 tmp = gen_ld32(tmp2, mmu_idx);
1514 } else {
1515 /* store */
1516 tmp = load_reg(s, UCOP_REG_D);
1517 if (UCOP_SET_B) {
1518 gen_st8(tmp, tmp2, mmu_idx);
1519 } else {
1520 gen_st32(tmp, tmp2, mmu_idx);
1523 if (!UCOP_SET_P) {
1524 gen_add_data_offset(s, insn, tmp2);
1525 store_reg(s, UCOP_REG_N, tmp2);
1526 } else if (UCOP_SET_W) {
1527 store_reg(s, UCOP_REG_N, tmp2);
1528 } else {
1529 dead_tmp(tmp2);
1531 if (UCOP_SET_L) {
1532 /* Complete the load. */
1533 if (UCOP_REG_D == 31) {
1534 gen_bx(s, tmp);
1535 } else {
1536 store_reg(s, UCOP_REG_D, tmp);
1541 /* SWP instruction */
1542 static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1544 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1545 TCGv addr;
1546 TCGv tmp;
1547 TCGv tmp2;
1549 if ((insn & 0xff003fe0) != 0x40000120) {
1550 ILLEGAL;
1553 /* ??? This is not really atomic. However we know
1554 we never have multiple CPUs running in parallel,
1555 so it is good enough. */
1556 addr = load_reg(s, UCOP_REG_N);
1557 tmp = load_reg(s, UCOP_REG_M);
1558 if (UCOP_SET_B) {
1559 tmp2 = gen_ld8u(addr, IS_USER(s));
1560 gen_st8(tmp, addr, IS_USER(s));
1561 } else {
1562 tmp2 = gen_ld32(addr, IS_USER(s));
1563 gen_st32(tmp, addr, IS_USER(s));
1565 dead_tmp(addr);
1566 store_reg(s, UCOP_REG_D, tmp2);
1569 /* load/store hw/sb */
1570 static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1572 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1573 TCGv addr;
1574 TCGv tmp;
1576 if (UCOP_SH_OP == 0) {
1577 do_swap(env, s, insn);
1578 return;
1581 addr = load_reg(s, UCOP_REG_N);
1582 if (UCOP_SET_P) {
1583 gen_add_datah_offset(s, insn, addr);
1586 if (UCOP_SET_L) { /* load */
1587 switch (UCOP_SH_OP) {
1588 case 1:
1589 tmp = gen_ld16u(addr, IS_USER(s));
1590 break;
1591 case 2:
1592 tmp = gen_ld8s(addr, IS_USER(s));
1593 break;
1594 default: /* see do_swap */
1595 case 3:
1596 tmp = gen_ld16s(addr, IS_USER(s));
1597 break;
1599 } else { /* store */
1600 if (UCOP_SH_OP != 1) {
1601 ILLEGAL;
1603 tmp = load_reg(s, UCOP_REG_D);
1604 gen_st16(tmp, addr, IS_USER(s));
1606 /* Perform base writeback before the loaded value to
1607 ensure correct behavior with overlapping index registers. */
1608 if (!UCOP_SET_P) {
1609 gen_add_datah_offset(s, insn, addr);
1610 store_reg(s, UCOP_REG_N, addr);
1611 } else if (UCOP_SET_W) {
1612 store_reg(s, UCOP_REG_N, addr);
1613 } else {
1614 dead_tmp(addr);
1616 if (UCOP_SET_L) {
1617 /* Complete the load. */
1618 store_reg(s, UCOP_REG_D, tmp);
1622 /* load/store multiple words */
1623 static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1625 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1626 unsigned int val, i, mmu_idx;
1627 int j, n, reg, user, loaded_base;
1628 TCGv tmp;
1629 TCGv tmp2;
1630 TCGv addr;
1631 TCGv loaded_var;
1633 if (UCOP_SET(7)) {
1634 ILLEGAL;
1636 /* XXX: store correct base if write back */
1637 user = 0;
1638 if (UCOP_SET_B) { /* S bit in instruction table */
1639 if (IS_USER(s)) {
1640 ILLEGAL; /* only usable in supervisor mode */
1642 if (UCOP_SET(18) == 0) { /* pc reg */
1643 user = 1;
1647 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
1648 addr = load_reg(s, UCOP_REG_N);
1650 /* compute total size */
1651 loaded_base = 0;
1652 TCGV_UNUSED(loaded_var);
1653 n = 0;
1654 for (i = 0; i < 6; i++) {
1655 if (UCOP_SET(i)) {
1656 n++;
1659 for (i = 9; i < 19; i++) {
1660 if (UCOP_SET(i)) {
1661 n++;
1664 /* XXX: test invalid n == 0 case ? */
1665 if (UCOP_SET_U) {
1666 if (UCOP_SET_P) {
1667 /* pre increment */
1668 tcg_gen_addi_i32(addr, addr, 4);
1669 } else {
1670 /* post increment */
1672 } else {
1673 if (UCOP_SET_P) {
1674 /* pre decrement */
1675 tcg_gen_addi_i32(addr, addr, -(n * 4));
1676 } else {
1677 /* post decrement */
1678 if (n != 1) {
1679 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1684 j = 0;
1685 reg = UCOP_SET(6) ? 16 : 0;
1686 for (i = 0; i < 19; i++, reg++) {
1687 if (i == 6) {
1688 i = i + 3;
1690 if (UCOP_SET(i)) {
1691 if (UCOP_SET_L) { /* load */
1692 tmp = gen_ld32(addr, mmu_idx);
1693 if (reg == 31) {
1694 gen_bx(s, tmp);
1695 } else if (user) {
1696 tmp2 = tcg_const_i32(reg);
1697 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
1698 tcg_temp_free_i32(tmp2);
1699 dead_tmp(tmp);
1700 } else if (reg == UCOP_REG_N) {
1701 loaded_var = tmp;
1702 loaded_base = 1;
1703 } else {
1704 store_reg(s, reg, tmp);
1706 } else { /* store */
1707 if (reg == 31) {
1708 /* special case: r31 = PC + 4 */
1709 val = (long)s->pc;
1710 tmp = new_tmp();
1711 tcg_gen_movi_i32(tmp, val);
1712 } else if (user) {
1713 tmp = new_tmp();
1714 tmp2 = tcg_const_i32(reg);
1715 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
1716 tcg_temp_free_i32(tmp2);
1717 } else {
1718 tmp = load_reg(s, reg);
1720 gen_st32(tmp, addr, mmu_idx);
1722 j++;
1723 /* no need to add after the last transfer */
1724 if (j != n) {
1725 tcg_gen_addi_i32(addr, addr, 4);
1729 if (UCOP_SET_W) { /* write back */
1730 if (UCOP_SET_U) {
1731 if (UCOP_SET_P) {
1732 /* pre increment */
1733 } else {
1734 /* post increment */
1735 tcg_gen_addi_i32(addr, addr, 4);
1737 } else {
1738 if (UCOP_SET_P) {
1739 /* pre decrement */
1740 if (n != 1) {
1741 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1743 } else {
1744 /* post decrement */
1745 tcg_gen_addi_i32(addr, addr, -(n * 4));
1748 store_reg(s, UCOP_REG_N, addr);
1749 } else {
1750 dead_tmp(addr);
1752 if (loaded_base) {
1753 store_reg(s, UCOP_REG_N, loaded_var);
1755 if (UCOP_SET_B && !user) {
1756 /* Restore ASR from BSR. */
1757 tmp = load_cpu_field(bsr);
1758 gen_set_asr(tmp, 0xffffffff);
1759 dead_tmp(tmp);
1760 s->is_jmp = DISAS_UPDATE;
1764 /* branch (and link) */
1765 static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1767 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1768 unsigned int val;
1769 int32_t offset;
1770 TCGv tmp;
1772 if (UCOP_COND == 0xf) {
1773 ILLEGAL;
1776 if (UCOP_COND != 0xe) {
1777 /* if not always execute, we generate a conditional jump to
1778 next instruction */
1779 s->condlabel = gen_new_label();
1780 gen_test_cc(UCOP_COND ^ 1, s->condlabel);
1781 s->condjmp = 1;
1784 val = (int32_t)s->pc;
1785 if (UCOP_SET_L) {
1786 tmp = new_tmp();
1787 tcg_gen_movi_i32(tmp, val);
1788 store_reg(s, 30, tmp);
1790 offset = (((int32_t)insn << 8) >> 8);
1791 val += (offset << 2); /* unicore is pc+4 */
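/* Added example: the low 24 bits are a signed word offset; the shift pair
 * sign-extends them, so a field of 0xfffffe (-2 words) gives a byte offset
 * of -8 relative to pc + 4. */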
1792 gen_jmp(s, val);
1795 static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
1797 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1798 unsigned int insn;
1800 insn = cpu_ldl_code(env, s->pc);
1801 s->pc += 4;
/* UniCore instructions class:
 * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
 * AAA  : see switch case
 * BBBB : opcodes or cond or PUBW
 * C    : S OR L
 * D    : 8
 * E    : 5
 */
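/* Added example: the top three bits select the class handled below, e.g.
 * (insn >> 29) == 0x5 is the branch group dispatched to do_branch(). */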
1811 switch (insn >> 29) {
1812 case 0x0:
1813 if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
1814 do_mult(env, s, insn);
1815 break;
1818 if (UCOP_SET(8)) {
1819 do_misc(env, s, insn);
1820 break;
1822 case 0x1:
1823 if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
1824 do_misc(env, s, insn);
1825 break;
1827 do_datap(env, s, insn);
1828 break;
1830 case 0x2:
1831 if (UCOP_SET(8) && UCOP_SET(5)) {
1832 do_ldst_hwsb(env, s, insn);
1833 break;
1835 if (UCOP_SET(8) || UCOP_SET(5)) {
1836 ILLEGAL;
1838 case 0x3:
1839 do_ldst_ir(env, s, insn);
1840 break;
1842 case 0x4:
1843 if (UCOP_SET(8)) {
1844 ILLEGAL; /* extended instructions */
1846 do_ldst_m(env, s, insn);
1847 break;
1848 case 0x5:
1849 do_branch(env, s, insn);
1850 break;
1851 case 0x6:
1852 /* Coprocessor. */
1853 disas_coproc_insn(env, s, insn);
1854 break;
1855 case 0x7:
1856 if (!UCOP_SET(28)) {
1857 disas_coproc_insn(env, s, insn);
1858 break;
1860 if ((insn & 0xff000000) == 0xff000000) { /* syscall */
1861 gen_set_pc_im(s->pc);
1862 s->is_jmp = DISAS_SYSCALL;
1863 break;
1865 ILLEGAL;
1869 /* generate intermediate code for basic block 'tb'. */
1870 void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
1872 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1873 CPUState *cs = CPU(cpu);
1874 DisasContext dc1, *dc = &dc1;
1875 target_ulong pc_start;
1876 uint32_t next_page_start;
1877 int num_insns;
1878 int max_insns;
1880 /* generate intermediate code */
1881 num_temps = 0;
1883 pc_start = tb->pc;
1885 dc->tb = tb;
1887 dc->is_jmp = DISAS_NEXT;
1888 dc->pc = pc_start;
1889 dc->singlestep_enabled = cs->singlestep_enabled;
1890 dc->condjmp = 0;
1891 cpu_F0s = tcg_temp_new_i32();
1892 cpu_F1s = tcg_temp_new_i32();
1893 cpu_F0d = tcg_temp_new_i64();
1894 cpu_F1d = tcg_temp_new_i64();
1895 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1896 num_insns = 0;
1897 max_insns = tb->cflags & CF_COUNT_MASK;
1898 if (max_insns == 0) {
1899 max_insns = CF_COUNT_MASK;
1901 if (max_insns > TCG_MAX_INSNS) {
1902 max_insns = TCG_MAX_INSNS;
1905 #ifndef CONFIG_USER_ONLY
1906 if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
1907 dc->user = 1;
1908 } else {
1909 dc->user = 0;
1911 #endif
1913 gen_tb_start(tb);
1914 do {
1915 tcg_gen_insn_start(dc->pc);
1916 num_insns++;
1918 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1919 gen_set_pc_im(dc->pc);
1920 gen_exception(EXCP_DEBUG);
1921 dc->is_jmp = DISAS_JUMP;
1922 /* The address covered by the breakpoint must be included in
1923 [tb->pc, tb->pc + tb->size) in order to for it to be
1924 properly cleared -- thus we increment the PC here so that
1925 the logic setting tb->size below does the right thing. */
1926 dc->pc += 4;
1927 goto done_generating;
1930 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1931 gen_io_start();
1934 disas_uc32_insn(env, dc);
1936 if (num_temps) {
1937 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
1938 num_temps = 0;
1941 if (dc->condjmp && !dc->is_jmp) {
1942 gen_set_label(dc->condlabel);
1943 dc->condjmp = 0;
1945 /* Translation stops when a conditional branch is encountered.
1946 * Otherwise the subsequent code could get translated several times.
1947 * Also stop translation when a page boundary is reached. This
1948 * ensures prefetch aborts occur at the right place. */
1949 } while (!dc->is_jmp && !tcg_op_buf_full() &&
1950 !cs->singlestep_enabled &&
1951 !singlestep &&
1952 dc->pc < next_page_start &&
1953 num_insns < max_insns);
1955 if (tb->cflags & CF_LAST_IO) {
1956 if (dc->condjmp) {
1957 /* FIXME: This can theoretically happen with self-modifying
1958 code. */
1959 cpu_abort(cs, "IO on conditional branch instruction");
1961 gen_io_end();
1964 /* At this stage dc->condjmp will only be set when the skipped
1965 instruction was a conditional branch or trap, and the PC has
1966 already been written. */
1967 if (unlikely(cs->singlestep_enabled)) {
1968 /* Make sure the pc is updated, and raise a debug exception. */
1969 if (dc->condjmp) {
1970 if (dc->is_jmp == DISAS_SYSCALL) {
1971 gen_exception(UC32_EXCP_PRIV);
1972 } else {
1973 gen_exception(EXCP_DEBUG);
1975 gen_set_label(dc->condlabel);
1977 if (dc->condjmp || !dc->is_jmp) {
1978 gen_set_pc_im(dc->pc);
1979 dc->condjmp = 0;
1981 if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
1982 gen_exception(UC32_EXCP_PRIV);
1983 } else {
1984 gen_exception(EXCP_DEBUG);
1986 } else {
/* While branches must always occur at the end of an IT block,
   there are a few other things that can cause us to terminate
   the TB in the middle of an IT block:
   - Exception generating instructions (bkpt, swi, undefined).
   - Page boundaries.
   - Hardware watchpoints.
   Hardware breakpoints have already been handled and skip this code.
 */
1995 switch (dc->is_jmp) {
1996 case DISAS_NEXT:
1997 gen_goto_tb(dc, 1, dc->pc);
1998 break;
1999 default:
2000 case DISAS_JUMP:
2001 case DISAS_UPDATE:
2002 /* indicate that the hash table must be used to find the next TB */
2003 tcg_gen_exit_tb(0);
2004 break;
2005 case DISAS_TB_JUMP:
2006 /* nothing more to generate */
2007 break;
2008 case DISAS_SYSCALL:
2009 gen_exception(UC32_EXCP_PRIV);
2010 break;
2012 if (dc->condjmp) {
2013 gen_set_label(dc->condlabel);
2014 gen_goto_tb(dc, 1, dc->pc);
2015 dc->condjmp = 0;
2019 done_generating:
2020 gen_tb_end(tb, num_insns);
2022 #ifdef DEBUG_DISAS
2023 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2024 qemu_log("----------------\n");
2025 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2026 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
2027 qemu_log("\n");
2029 #endif
2030 tb->size = dc->pc - pc_start;
2031 tb->icount = num_insns;
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
2039 #undef UCF64_DUMP_STATE
2040 #ifdef UCF64_DUMP_STATE
2041 static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
2042 fprintf_function cpu_fprintf, int flags)
2044 int i;
2045 union {
2046 uint32_t i;
2047 float s;
2048 } s0, s1;
2049 CPU_DoubleU d;
2050 /* ??? This assumes float64 and double have the same layout.
2051 Oh well, it's only debug dumps. */
2052 union {
2053 float64 f64;
2054 double d;
2055 } d0;
2057 for (i = 0; i < 16; i++) {
2058 d.d = env->ucf64.regs[i];
2059 s0.i = d.l.lower;
2060 s1.i = d.l.upper;
2061 d0.f64 = d.d;
2062 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
2063 i * 2, (int)s0.i, s0.s,
2064 i * 2 + 1, (int)s1.i, s1.s);
2065 cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
2066 i, (uint64_t)d0.f64, d0.d);
2068 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
2070 #else
2071 #define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
2072 #endif
2074 void uc32_cpu_dump_state(CPUState *cs, FILE *f,
2075 fprintf_function cpu_fprintf, int flags)
2077 UniCore32CPU *cpu = UNICORE32_CPU(cs);
2078 CPUUniCore32State *env = &cpu->env;
2079 int i;
2080 uint32_t psr;
2082 for (i = 0; i < 32; i++) {
2083 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2084 if ((i % 4) == 3) {
2085 cpu_fprintf(f, "\n");
2086 } else {
2087 cpu_fprintf(f, " ");
2090 psr = cpu_asr_read(env);
2091 cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
2092 psr,
2093 psr & (1 << 31) ? 'N' : '-',
2094 psr & (1 << 30) ? 'Z' : '-',
2095 psr & (1 << 29) ? 'C' : '-',
2096 psr & (1 << 28) ? 'V' : '-',
2097 cpu_mode_names[psr & 0xf]);
2099 cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
2102 void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
2103 target_ulong *data)
2105 env->regs[31] = data[0];