qemu/ar7.git: target-unicore32/translate.c
1 /*
2 * UniCore32 translation
3 *
4 * Copyright (C) 2010-2012 Guan Xuetao
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation, or (at your option) any
9 * later version. See the COPYING file in the top-level directory.
10 */
11 #include "qemu/osdep.h"
13 #include "cpu.h"
14 #include "disas/disas.h"
15 #include "exec/exec-all.h"
16 #include "tcg-op.h"
17 #include "qemu/log.h"
18 #include "exec/cpu_ldst.h"
20 #include "exec/helper-proto.h"
21 #include "exec/helper-gen.h"
23 #include "trace-tcg.h"
24 #include "exec/log.h"
27 /* internal defines */
28 typedef struct DisasContext {
29 target_ulong pc;
30 int is_jmp;
31 /* Nonzero if this instruction has been conditionally skipped. */
32 int condjmp;
33 /* The label that will be jumped to when the instruction is skipped. */
34 TCGLabel *condlabel;
35 struct TranslationBlock *tb;
36 int singlestep_enabled;
37 #ifndef CONFIG_USER_ONLY
38 int user;
39 #endif
40 } DisasContext;
42 #ifndef CONFIG_USER_ONLY
43 #define IS_USER(s) (s->user)
44 #else
45 #define IS_USER(s) 1
46 #endif
48 /* These instructions trap after executing, so defer them until after the
49 conditional execution state has been updated. */
50 #define DISAS_SYSCALL 5
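/*
 * Editorial note: DISAS_NEXT, DISAS_JUMP, DISAS_UPDATE and DISAS_TB_JUMP used
 * later in this file come from the generic translator headers of this tree;
 * DISAS_SYSCALL is a target-private addition that gen_intermediate_code()
 * turns into a UC32_EXCP_PRIV exception at the end of the block.
 */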
52 static TCGv_env cpu_env;
53 static TCGv_i32 cpu_R[32];
55 /* FIXME: These should be removed. */
56 static TCGv cpu_F0s, cpu_F1s;
57 static TCGv_i64 cpu_F0d, cpu_F1d;
59 #include "exec/gen-icount.h"
61 static const char *regnames[] = {
62 "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
63 "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
64 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
65 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
67 /* initialize TCG globals. */
68 void uc32_translate_init(void)
70 int i;
72 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
73 tcg_ctx.tcg_env = cpu_env;
75 for (i = 0; i < 32; i++) {
76 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
77 offsetof(CPUUniCore32State, regs[i]), regnames[i]);
81 static int num_temps;
83 /* Allocate a temporary variable. */
84 static TCGv_i32 new_tmp(void)
86 num_temps++;
87 return tcg_temp_new_i32();
90 /* Release a temporary variable. */
91 static void dead_tmp(TCGv tmp)
93 tcg_temp_free(tmp);
94 num_temps--;
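/*
 * Editorial note: num_temps is purely a leak-detection counter.  It is
 * checked and reset after every translated instruction in
 * gen_intermediate_code(), which reports "Internal resource leak" when
 * new_tmp()/dead_tmp() calls do not balance.
 */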
97 static inline TCGv load_cpu_offset(int offset)
99 TCGv tmp = new_tmp();
100 tcg_gen_ld_i32(tmp, cpu_env, offset);
101 return tmp;
104 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
106 static inline void store_cpu_offset(TCGv var, int offset)
108 tcg_gen_st_i32(var, cpu_env, offset);
109 dead_tmp(var);
112 #define store_cpu_field(var, name) \
113 store_cpu_offset(var, offsetof(CPUUniCore32State, name))
115 /* Set a variable to the value of a CPU register. */
116 static void load_reg_var(DisasContext *s, TCGv var, int reg)
118 if (reg == 31) {
119 uint32_t addr;
120 /* normally, since we updated PC */
121 addr = (long)s->pc;
122 tcg_gen_movi_i32(var, addr);
123 } else {
124 tcg_gen_mov_i32(var, cpu_R[reg]);
128 /* Create a new temporary and set it to the value of a CPU register. */
129 static inline TCGv load_reg(DisasContext *s, int reg)
131 TCGv tmp = new_tmp();
132 load_reg_var(s, tmp, reg);
133 return tmp;
136 /* Set a CPU register. The source must be a temporary and will be
137 marked as dead. */
138 static void store_reg(DisasContext *s, int reg, TCGv var)
140 if (reg == 31) {
141 tcg_gen_andi_i32(var, var, ~3);
142 s->is_jmp = DISAS_JUMP;
144 tcg_gen_mov_i32(cpu_R[reg], var);
145 dead_tmp(var);
148 /* Value extensions. */
149 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
150 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
151 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
152 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
154 #define UCOP_REG_M (((insn) >> 0) & 0x1f)
155 #define UCOP_REG_N (((insn) >> 19) & 0x1f)
156 #define UCOP_REG_D (((insn) >> 14) & 0x1f)
157 #define UCOP_REG_S (((insn) >> 9) & 0x1f)
158 #define UCOP_REG_LO (((insn) >> 14) & 0x1f)
159 #define UCOP_REG_HI (((insn) >> 9) & 0x1f)
160 #define UCOP_SH_OP (((insn) >> 6) & 0x03)
161 #define UCOP_SH_IM (((insn) >> 9) & 0x1f)
162 #define UCOP_OPCODES (((insn) >> 25) & 0x0f)
163 #define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
164 #define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
165 #define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
166 #define UCOP_COND (((insn) >> 25) & 0x0f)
167 #define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
168 #define UCOP_CPNUM (((insn) >> 10) & 0x0f)
169 #define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
170 #define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
171 #define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
173 #define UCOP_SET(i) ((insn) & (1 << (i)))
174 #define UCOP_SET_P UCOP_SET(28)
175 #define UCOP_SET_U UCOP_SET(27)
176 #define UCOP_SET_B UCOP_SET(26)
177 #define UCOP_SET_W UCOP_SET(25)
178 #define UCOP_SET_L UCOP_SET(24)
179 #define UCOP_SET_S UCOP_SET(24)
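/*
 * Editorial summary (read off the macros above): field positions within the
 * 32-bit instruction word.
 *   UCOP_OPCODES / UCOP_COND      insn[28:25]    UCOP_CMOV_COND        insn[22:19]
 *   UCOP_REG_N                    insn[23:19]    UCOP_REG_D / REG_LO   insn[18:14]
 *   UCOP_REG_S / REG_HI / SH_IM   insn[13:9]     UCOP_SH_OP            insn[7:6]
 *   UCOP_REG_M                    insn[4:0]      UCOP_CPNUM            insn[13:10]
 *   UCOP_IMM_9 insn[8:0]   UCOP_IMM10 insn[9:0]   UCOP_IMM14 insn[13:0]
 *   UCOP_UCF64_FMT insn[25:24]    UCOP_UCF64_FUNC / UCF64_COND insn[9:6]
 *   P/U/B/W are bits 28/27/26/25; L and S share bit 24.
 *
 * The helper below is a hypothetical sketch added for this annotated listing
 * only (it is compiled out); it merely shows the accessors applied to a
 * data-processing word.
 */
#if 0
static void ucop_print_dp_fields(uint32_t insn)
{
    fprintf(stderr, "op=%u S=%d Rn=%u Rd=%u Rm=%u sh_op=%u sh_imm=%u\n",
            (unsigned)UCOP_OPCODES, UCOP_SET_S ? 1 : 0, (unsigned)UCOP_REG_N,
            (unsigned)UCOP_REG_D, (unsigned)UCOP_REG_M,
            (unsigned)UCOP_SH_OP, (unsigned)UCOP_SH_IM);
}
#endif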
181 #define ILLEGAL cpu_abort(CPU(cpu), \
182 "Illegal UniCore32 instruction %x at line %d!", \
183 insn, __LINE__)
185 #ifndef CONFIG_USER_ONLY
186 static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
187 uint32_t insn)
189 UniCore32CPU *cpu = uc32_env_get_cpu(env);
190 TCGv tmp, tmp2, tmp3;
191 if ((insn & 0xfe000000) == 0xe0000000) {
192 tmp2 = new_tmp();
193 tmp3 = new_tmp();
194 tcg_gen_movi_i32(tmp2, UCOP_REG_N);
195 tcg_gen_movi_i32(tmp3, UCOP_IMM10);
196 if (UCOP_SET_L) {
197 tmp = new_tmp();
198 gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
199 store_reg(s, UCOP_REG_D, tmp);
200 } else {
201 tmp = load_reg(s, UCOP_REG_D);
202 gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
203 dead_tmp(tmp);
205 dead_tmp(tmp2);
206 dead_tmp(tmp3);
207 return;
209 ILLEGAL;
212 static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
213 uint32_t insn)
215 UniCore32CPU *cpu = uc32_env_get_cpu(env);
216 TCGv tmp;
218 if ((insn & 0xff003fff) == 0xe1000400) {
219 /*
220 * movc rd, pp.nn, #imm9
221 *      rd: UCOP_REG_D
222 *      nn: UCOP_REG_N (must be 0)
223 *      imm9: 0
224 */
225 if (UCOP_REG_N == 0) {
226 tmp = new_tmp();
227 tcg_gen_movi_i32(tmp, 0);
228 store_reg(s, UCOP_REG_D, tmp);
229 return;
230 } else {
231 ILLEGAL;
234 if ((insn & 0xff003fff) == 0xe0000401) {
235 /*
236 * movc pp.nn, rn, #imm9
237 *      rn: UCOP_REG_D
238 *      nn: UCOP_REG_N (must be 1)
239 *      imm9: 1
240 */
241 if (UCOP_REG_N == 1) {
242 tmp = load_reg(s, UCOP_REG_D);
243 gen_helper_cp1_putc(tmp);
244 dead_tmp(tmp);
245 return;
246 } else {
247 ILLEGAL;
250 ILLEGAL;
252 #endif
254 static inline void gen_set_asr(TCGv var, uint32_t mask)
256 TCGv tmp_mask = tcg_const_i32(mask);
257 gen_helper_asr_write(cpu_env, var, tmp_mask);
258 tcg_temp_free_i32(tmp_mask);
260 /* Set NZCV flags from the high 4 bits of var. */
261 #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
263 static void gen_exception(int excp)
265 TCGv tmp = new_tmp();
266 tcg_gen_movi_i32(tmp, excp);
267 gen_helper_exception(cpu_env, tmp);
268 dead_tmp(tmp);
271 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
273 /* Set CF to the top bit of var. */
274 static void gen_set_CF_bit31(TCGv var)
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 31);
278 gen_set_CF(tmp);
279 dead_tmp(tmp);
282 /* Set N and Z flags from var. */
283 static inline void gen_logic_CC(TCGv var)
285 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
286 tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
289 /* dest = T0 + T1 + CF. */
290 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
292 TCGv tmp;
293 tcg_gen_add_i32(dest, t0, t1);
294 tmp = load_cpu_field(CF);
295 tcg_gen_add_i32(dest, dest, tmp);
296 dead_tmp(tmp);
299 /* dest = T0 - T1 + CF - 1. */
300 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
302 TCGv tmp;
303 tcg_gen_sub_i32(dest, t0, t1);
304 tmp = load_cpu_field(CF);
305 tcg_gen_add_i32(dest, dest, tmp);
306 tcg_gen_subi_i32(dest, dest, 1);
307 dead_tmp(tmp);
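/*
 * Editorial note: the carry flag acts as "not borrow" for subtraction, so a
 * subtract-with-carry is t0 - t1 - (1 - CF), which is exactly the
 * "+ CF - 1" sequence generated above.
 */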
310 static void shifter_out_im(TCGv var, int shift)
312 TCGv tmp = new_tmp();
313 if (shift == 0) {
314 tcg_gen_andi_i32(tmp, var, 1);
315 } else {
316 tcg_gen_shri_i32(tmp, var, shift);
317 if (shift != 31) {
318 tcg_gen_andi_i32(tmp, tmp, 1);
321 gen_set_CF(tmp);
322 dead_tmp(tmp);
325 /* Shift by immediate. Includes special handling for shift == 0. */
326 static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
327 int flags)
329 switch (shiftop) {
330 case 0: /* LSL */
331 if (shift != 0) {
332 if (flags) {
333 shifter_out_im(var, 32 - shift);
335 tcg_gen_shli_i32(var, var, shift);
337 break;
338 case 1: /* LSR */
339 if (shift == 0) {
340 if (flags) {
341 tcg_gen_shri_i32(var, var, 31);
342 gen_set_CF(var);
344 tcg_gen_movi_i32(var, 0);
345 } else {
346 if (flags) {
347 shifter_out_im(var, shift - 1);
349 tcg_gen_shri_i32(var, var, shift);
351 break;
352 case 2: /* ASR */
353 if (shift == 0) {
354 shift = 32;
356 if (flags) {
357 shifter_out_im(var, shift - 1);
359 if (shift == 32) {
360 shift = 31;
362 tcg_gen_sari_i32(var, var, shift);
363 break;
364 case 3: /* ROR/RRX */
365 if (shift != 0) {
366 if (flags) {
367 shifter_out_im(var, shift - 1);
369 tcg_gen_rotri_i32(var, var, shift); break;
370 } else {
371 TCGv tmp = load_cpu_field(CF);
372 if (flags) {
373 shifter_out_im(var, 0);
375 tcg_gen_shri_i32(var, var, 1);
376 tcg_gen_shli_i32(tmp, tmp, 31);
377 tcg_gen_or_i32(var, var, tmp);
378 dead_tmp(tmp);
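/*
 * Editorial note: as the cases above show, an immediate shift amount of 0 is
 * re-purposed: LSR #0 acts as a shift by 32 (result 0, C = old bit 31),
 * ASR #0 acts as a shift by 32 (result is the sign bit replicated), and
 * ROR #0 is rotate-right-with-extend through the carry flag (the RRX path).
 */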
383 static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
384 TCGv shift, int flags)
386 if (flags) {
387 switch (shiftop) {
388 case 0:
389 gen_helper_shl_cc(var, cpu_env, var, shift);
390 break;
391 case 1:
392 gen_helper_shr_cc(var, cpu_env, var, shift);
393 break;
394 case 2:
395 gen_helper_sar_cc(var, cpu_env, var, shift);
396 break;
397 case 3:
398 gen_helper_ror_cc(var, cpu_env, var, shift);
399 break;
401 } else {
402 switch (shiftop) {
403 case 0:
404 gen_helper_shl(var, var, shift);
405 break;
406 case 1:
407 gen_helper_shr(var, var, shift);
408 break;
409 case 2:
410 gen_helper_sar(var, var, shift);
411 break;
412 case 3:
413 tcg_gen_andi_i32(shift, shift, 0x1f);
414 tcg_gen_rotr_i32(var, var, shift);
415 break;
418 dead_tmp(shift);
421 static void gen_test_cc(int cc, TCGLabel *label)
423 TCGv tmp;
424 TCGv tmp2;
425 TCGLabel *inv;
427 switch (cc) {
428 case 0: /* eq: Z */
429 tmp = load_cpu_field(ZF);
430 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
431 break;
432 case 1: /* ne: !Z */
433 tmp = load_cpu_field(ZF);
434 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
435 break;
436 case 2: /* cs: C */
437 tmp = load_cpu_field(CF);
438 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
439 break;
440 case 3: /* cc: !C */
441 tmp = load_cpu_field(CF);
442 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
443 break;
444 case 4: /* mi: N */
445 tmp = load_cpu_field(NF);
446 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
447 break;
448 case 5: /* pl: !N */
449 tmp = load_cpu_field(NF);
450 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
451 break;
452 case 6: /* vs: V */
453 tmp = load_cpu_field(VF);
454 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
455 break;
456 case 7: /* vc: !V */
457 tmp = load_cpu_field(VF);
458 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
459 break;
460 case 8: /* hi: C && !Z */
461 inv = gen_new_label();
462 tmp = load_cpu_field(CF);
463 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
464 dead_tmp(tmp);
465 tmp = load_cpu_field(ZF);
466 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
467 gen_set_label(inv);
468 break;
469 case 9: /* ls: !C || Z */
470 tmp = load_cpu_field(CF);
471 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
472 dead_tmp(tmp);
473 tmp = load_cpu_field(ZF);
474 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
475 break;
476 case 10: /* ge: N == V -> N ^ V == 0 */
477 tmp = load_cpu_field(VF);
478 tmp2 = load_cpu_field(NF);
479 tcg_gen_xor_i32(tmp, tmp, tmp2);
480 dead_tmp(tmp2);
481 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
482 break;
483 case 11: /* lt: N != V -> N ^ V != 0 */
484 tmp = load_cpu_field(VF);
485 tmp2 = load_cpu_field(NF);
486 tcg_gen_xor_i32(tmp, tmp, tmp2);
487 dead_tmp(tmp2);
488 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
489 break;
490 case 12: /* gt: !Z && N == V */
491 inv = gen_new_label();
492 tmp = load_cpu_field(ZF);
493 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
494 dead_tmp(tmp);
495 tmp = load_cpu_field(VF);
496 tmp2 = load_cpu_field(NF);
497 tcg_gen_xor_i32(tmp, tmp, tmp2);
498 dead_tmp(tmp2);
499 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
500 gen_set_label(inv);
501 break;
502 case 13: /* le: Z || N != V */
503 tmp = load_cpu_field(ZF);
504 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
505 dead_tmp(tmp);
506 tmp = load_cpu_field(VF);
507 tmp2 = load_cpu_field(NF);
508 tcg_gen_xor_i32(tmp, tmp, tmp2);
509 dead_tmp(tmp2);
510 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
511 break;
512 default:
513 fprintf(stderr, "Bad condition code 0x%x\n", cc);
514 abort();
516 dead_tmp(tmp);
519 static const uint8_t table_logic_cc[16] = {
520 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
521 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
522 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
523 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
524 };
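/*
 * Editorial note: this table is indexed by UCOP_OPCODES and marks the
 * data-processing operations whose N/Z flags come straight from the result;
 * do_datap() ANDs the entry with the instruction's S bit (UCOP_SET_S >> 24)
 * to decide whether gen_logic_CC() is emitted.
 */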
526 /* Set PC state from an immediate address. */
527 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
529 s->is_jmp = DISAS_UPDATE;
530 tcg_gen_movi_i32(cpu_R[31], addr & ~3);
533 /* Set PC state from var. var is marked as dead. */
534 static inline void gen_bx(DisasContext *s, TCGv var)
536 s->is_jmp = DISAS_UPDATE;
537 tcg_gen_andi_i32(cpu_R[31], var, ~3);
538 dead_tmp(var);
541 static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
543 store_reg(s, reg, var);
546 static inline TCGv gen_ld8s(TCGv addr, int index)
548 TCGv tmp = new_tmp();
549 tcg_gen_qemu_ld8s(tmp, addr, index);
550 return tmp;
553 static inline TCGv gen_ld8u(TCGv addr, int index)
555 TCGv tmp = new_tmp();
556 tcg_gen_qemu_ld8u(tmp, addr, index);
557 return tmp;
560 static inline TCGv gen_ld16s(TCGv addr, int index)
562 TCGv tmp = new_tmp();
563 tcg_gen_qemu_ld16s(tmp, addr, index);
564 return tmp;
567 static inline TCGv gen_ld16u(TCGv addr, int index)
569 TCGv tmp = new_tmp();
570 tcg_gen_qemu_ld16u(tmp, addr, index);
571 return tmp;
574 static inline TCGv gen_ld32(TCGv addr, int index)
576 TCGv tmp = new_tmp();
577 tcg_gen_qemu_ld32u(tmp, addr, index);
578 return tmp;
581 static inline void gen_st8(TCGv val, TCGv addr, int index)
583 tcg_gen_qemu_st8(val, addr, index);
584 dead_tmp(val);
587 static inline void gen_st16(TCGv val, TCGv addr, int index)
589 tcg_gen_qemu_st16(val, addr, index);
590 dead_tmp(val);
593 static inline void gen_st32(TCGv val, TCGv addr, int index)
595 tcg_gen_qemu_st32(val, addr, index);
596 dead_tmp(val);
599 static inline void gen_set_pc_im(uint32_t val)
601 tcg_gen_movi_i32(cpu_R[31], val);
604 /* Force a TB lookup after an instruction that changes the CPU state. */
605 static inline void gen_lookup_tb(DisasContext *s)
607 tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
608 s->is_jmp = DISAS_UPDATE;
611 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
612 TCGv var)
614 int val;
615 TCGv offset;
617 if (UCOP_SET(29)) {
618 /* immediate */
619 val = UCOP_IMM14;
620 if (!UCOP_SET_U) {
621 val = -val;
623 if (val != 0) {
624 tcg_gen_addi_i32(var, var, val);
626 } else {
627 /* shift/register */
628 offset = load_reg(s, UCOP_REG_M);
629 gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
630 if (!UCOP_SET_U) {
631 tcg_gen_sub_i32(var, var, offset);
632 } else {
633 tcg_gen_add_i32(var, var, offset);
635 dead_tmp(offset);
639 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
640 TCGv var)
642 int val;
643 TCGv offset;
645 if (UCOP_SET(26)) {
646 /* immediate */
647 val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
648 if (!UCOP_SET_U) {
649 val = -val;
651 if (val != 0) {
652 tcg_gen_addi_i32(var, var, val);
654 } else {
655 /* register */
656 offset = load_reg(s, UCOP_REG_M);
657 if (!UCOP_SET_U) {
658 tcg_gen_sub_i32(var, var, offset);
659 } else {
660 tcg_gen_add_i32(var, var, offset);
662 dead_tmp(offset);
666 static inline long ucf64_reg_offset(int reg)
668 if (reg & 1) {
669 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
670 + offsetof(CPU_DoubleU, l.upper);
671 } else {
672 return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
673 + offsetof(CPU_DoubleU, l.lower);
677 #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
678 #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
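/*
 * Editorial note: the UniCore-F64 bank is held as 16 64-bit values in
 * ucf64.regs[]; ucf64_reg_offset() maps the 32 single-precision register
 * numbers onto the lower/upper halves of those doubles, which is what the
 * ucf64_gen_ld32/ucf64_gen_st32 helpers above rely on.
 */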
680 /* UniCore-F64 single load/store I_offset */
681 static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
683 UniCore32CPU *cpu = uc32_env_get_cpu(env);
684 int offset;
685 TCGv tmp;
686 TCGv addr;
688 addr = load_reg(s, UCOP_REG_N);
689 if (!UCOP_SET_P && !UCOP_SET_W) {
690 ILLEGAL;
693 if (UCOP_SET_P) {
694 offset = UCOP_IMM10 << 2;
695 if (!UCOP_SET_U) {
696 offset = -offset;
698 if (offset != 0) {
699 tcg_gen_addi_i32(addr, addr, offset);
703 if (UCOP_SET_L) { /* load */
704 tmp = gen_ld32(addr, IS_USER(s));
705 ucf64_gen_st32(tmp, UCOP_REG_D);
706 } else { /* store */
707 tmp = ucf64_gen_ld32(UCOP_REG_D);
708 gen_st32(tmp, addr, IS_USER(s));
711 if (!UCOP_SET_P) {
712 offset = UCOP_IMM10 << 2;
713 if (!UCOP_SET_U) {
714 offset = -offset;
716 if (offset != 0) {
717 tcg_gen_addi_i32(addr, addr, offset);
720 if (UCOP_SET_W) {
721 store_reg(s, UCOP_REG_N, addr);
722 } else {
723 dead_tmp(addr);
727 /* UniCore-F64 load/store multiple words */
728 static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
730 UniCore32CPU *cpu = uc32_env_get_cpu(env);
731 unsigned int i;
732 int j, n, freg;
733 TCGv tmp;
734 TCGv addr;
736 if (UCOP_REG_D != 0) {
737 ILLEGAL;
739 if (UCOP_REG_N == 31) {
740 ILLEGAL;
742 if ((insn << 24) == 0) {
743 ILLEGAL;
746 addr = load_reg(s, UCOP_REG_N);
748 n = 0;
749 for (i = 0; i < 8; i++) {
750 if (UCOP_SET(i)) {
751 n++;
755 if (UCOP_SET_U) {
756 if (UCOP_SET_P) { /* pre increment */
757 tcg_gen_addi_i32(addr, addr, 4);
758 } /* unnecessary to do anything when post increment */
759 } else {
760 if (UCOP_SET_P) { /* pre decrement */
761 tcg_gen_addi_i32(addr, addr, -(n * 4));
762 } else { /* post decrement */
763 if (n != 1) {
764 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
769 freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
771 for (i = 0, j = 0; i < 8; i++, freg++) {
772 if (!UCOP_SET(i)) {
773 continue;
776 if (UCOP_SET_L) { /* load */
777 tmp = gen_ld32(addr, IS_USER(s));
778 ucf64_gen_st32(tmp, freg);
779 } else { /* store */
780 tmp = ucf64_gen_ld32(freg);
781 gen_st32(tmp, addr, IS_USER(s));
784 j++;
785 /* unnecessary to add after the last transfer */
786 if (j != n) {
787 tcg_gen_addi_i32(addr, addr, 4);
791 if (UCOP_SET_W) { /* write back */
792 if (UCOP_SET_U) {
793 if (!UCOP_SET_P) { /* post increment */
794 tcg_gen_addi_i32(addr, addr, 4);
795 } /* unnecessary to do anything when pre increment */
796 } else {
797 if (UCOP_SET_P) {
798 /* pre decrement */
799 if (n != 1) {
800 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
802 } else {
803 /* post decrement */
804 tcg_gen_addi_i32(addr, addr, -(n * 4));
807 store_reg(s, UCOP_REG_N, addr);
808 } else {
809 dead_tmp(addr);
813 /* UniCore-F64 mrc/mcr */
814 static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
816 UniCore32CPU *cpu = uc32_env_get_cpu(env);
817 TCGv tmp;
819 if ((insn & 0xfe0003ff) == 0xe2000000) {
820 /* control register */
821 if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
822 ILLEGAL;
824 if (UCOP_SET(24)) {
825 /* CFF */
826 tmp = new_tmp();
827 gen_helper_ucf64_get_fpscr(tmp, cpu_env);
828 store_reg(s, UCOP_REG_D, tmp);
829 } else {
830 /* CTF */
831 tmp = load_reg(s, UCOP_REG_D);
832 gen_helper_ucf64_set_fpscr(cpu_env, tmp);
833 dead_tmp(tmp);
834 gen_lookup_tb(s);
836 return;
838 if ((insn & 0xfe0003ff) == 0xe0000000) {
839 /* general register */
840 if (UCOP_REG_D == 31) {
841 ILLEGAL;
843 if (UCOP_SET(24)) { /* MFF */
844 tmp = ucf64_gen_ld32(UCOP_REG_N);
845 store_reg(s, UCOP_REG_D, tmp);
846 } else { /* MTF */
847 tmp = load_reg(s, UCOP_REG_D);
848 ucf64_gen_st32(tmp, UCOP_REG_N);
850 return;
852 if ((insn & 0xfb000000) == 0xe9000000) {
853 /* MFFC */
854 if (UCOP_REG_D != 31) {
855 ILLEGAL;
857 if (UCOP_UCF64_COND & 0x8) {
858 ILLEGAL;
861 tmp = new_tmp();
862 tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
863 if (UCOP_SET(26)) {
864 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
865 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
866 gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
867 } else {
868 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
869 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
870 gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
872 dead_tmp(tmp);
873 return;
875 ILLEGAL;
878 /* UniCore-F64 convert instructions */
879 static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
881 UniCore32CPU *cpu = uc32_env_get_cpu(env);
883 if (UCOP_UCF64_FMT == 3) {
884 ILLEGAL;
886 if (UCOP_REG_N != 0) {
887 ILLEGAL;
889 switch (UCOP_UCF64_FUNC) {
890 case 0: /* cvt.s */
891 switch (UCOP_UCF64_FMT) {
892 case 1 /* d */:
893 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
894 gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
895 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
896 break;
897 case 2 /* w */:
898 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
899 gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
900 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
901 break;
902 default /* s */:
903 ILLEGAL;
904 break;
906 break;
907 case 1: /* cvt.d */
908 switch (UCOP_UCF64_FMT) {
909 case 0 /* s */:
910 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
911 gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
912 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
913 break;
914 case 2 /* w */:
915 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
916 gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
917 tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
918 break;
919 default /* d */:
920 ILLEGAL;
921 break;
923 break;
924 case 4: /* cvt.w */
925 switch (UCOP_UCF64_FMT) {
926 case 0 /* s */:
927 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
928 gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
929 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
930 break;
931 case 1 /* d */:
932 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
933 gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
934 tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
935 break;
936 default /* w */:
937 ILLEGAL;
938 break;
940 break;
941 default:
942 ILLEGAL;
946 /* UniCore-F64 compare instructions */
947 static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
949 UniCore32CPU *cpu = uc32_env_get_cpu(env);
951 if (UCOP_SET(25)) {
952 ILLEGAL;
954 if (UCOP_REG_D != 0) {
955 ILLEGAL;
958 ILLEGAL; /* TODO */
959 if (UCOP_SET(24)) {
960 tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
961 tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
962 /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
963 } else {
964 tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
965 tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
966 /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
970 #define gen_helper_ucf64_movs(x, y) do { } while (0)
971 #define gen_helper_ucf64_movd(x, y) do { } while (0)
973 #define UCF64_OP1(name) do { \
974 if (UCOP_REG_N != 0) { \
975 ILLEGAL; \
977 switch (UCOP_UCF64_FMT) { \
978 case 0 /* s */: \
979 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
980 ucf64_reg_offset(UCOP_REG_M)); \
981 gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
982 tcg_gen_st_i32(cpu_F0s, cpu_env, \
983 ucf64_reg_offset(UCOP_REG_D)); \
984 break; \
985 case 1 /* d */: \
986 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
987 ucf64_reg_offset(UCOP_REG_M)); \
988 gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
989 tcg_gen_st_i64(cpu_F0d, cpu_env, \
990 ucf64_reg_offset(UCOP_REG_D)); \
991 break; \
992 case 2 /* w */: \
993 ILLEGAL; \
994 break; \
996 } while (0)
998 #define UCF64_OP2(name) do { \
999 switch (UCOP_UCF64_FMT) { \
1000 case 0 /* s */: \
1001 tcg_gen_ld_i32(cpu_F0s, cpu_env, \
1002 ucf64_reg_offset(UCOP_REG_N)); \
1003 tcg_gen_ld_i32(cpu_F1s, cpu_env, \
1004 ucf64_reg_offset(UCOP_REG_M)); \
1005 gen_helper_ucf64_##name##s(cpu_F0s, \
1006 cpu_F0s, cpu_F1s, cpu_env); \
1007 tcg_gen_st_i32(cpu_F0s, cpu_env, \
1008 ucf64_reg_offset(UCOP_REG_D)); \
1009 break; \
1010 case 1 /* d */: \
1011 tcg_gen_ld_i64(cpu_F0d, cpu_env, \
1012 ucf64_reg_offset(UCOP_REG_N)); \
1013 tcg_gen_ld_i64(cpu_F1d, cpu_env, \
1014 ucf64_reg_offset(UCOP_REG_M)); \
1015 gen_helper_ucf64_##name##d(cpu_F0d, \
1016 cpu_F0d, cpu_F1d, cpu_env); \
1017 tcg_gen_st_i64(cpu_F0d, cpu_env, \
1018 ucf64_reg_offset(UCOP_REG_D)); \
1019 break; \
1020 case 2 /* w */: \
1021 ILLEGAL; \
1022 break; \
1024 } while (0)
1026 /* UniCore-F64 data processing */
1027 static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1029 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1031 if (UCOP_UCF64_FMT == 3) {
1032 ILLEGAL;
1034 switch (UCOP_UCF64_FUNC) {
1035 case 0: /* add */
1036 UCF64_OP2(add);
1037 break;
1038 case 1: /* sub */
1039 UCF64_OP2(sub);
1040 break;
1041 case 2: /* mul */
1042 UCF64_OP2(mul);
1043 break;
1044 case 4: /* div */
1045 UCF64_OP2(div);
1046 break;
1047 case 5: /* abs */
1048 UCF64_OP1(abs);
1049 break;
1050 case 6: /* mov */
1051 UCF64_OP1(mov);
1052 break;
1053 case 7: /* neg */
1054 UCF64_OP1(neg);
1055 break;
1056 default:
1057 ILLEGAL;
1061 /* Disassemble an F64 instruction */
1062 static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1064 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1066 if (!UCOP_SET(29)) {
1067 if (UCOP_SET(26)) {
1068 do_ucf64_ldst_m(env, s, insn);
1069 } else {
1070 do_ucf64_ldst_i(env, s, insn);
1072 } else {
1073 if (UCOP_SET(5)) {
1074 switch ((insn >> 26) & 0x3) {
1075 case 0:
1076 do_ucf64_datap(env, s, insn);
1077 break;
1078 case 1:
1079 ILLEGAL;
1080 break;
1081 case 2:
1082 do_ucf64_fcvt(env, s, insn);
1083 break;
1084 case 3:
1085 do_ucf64_fcmp(env, s, insn);
1086 break;
1088 } else {
1089 do_ucf64_trans(env, s, insn);
1094 static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1096 #ifndef CONFIG_USER_ONLY
1097 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1098 #else
1099 return true;
1100 #endif
1103 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1105 if (use_goto_tb(s, dest)) {
1106 tcg_gen_goto_tb(n);
1107 gen_set_pc_im(dest);
1108 tcg_gen_exit_tb((uintptr_t)s->tb + n);
1109 } else {
1110 gen_set_pc_im(dest);
1111 tcg_gen_exit_tb(0);
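/*
 * Editorial note: when the destination stays on the same guest page as the
 * current TB (see use_goto_tb() above), goto_tb is emitted so the two TBs can
 * later be chained directly; otherwise the code just sets the PC and exits
 * with tcg_gen_exit_tb(0), forcing the main loop to look up the next TB.
 */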
1115 static inline void gen_jmp(DisasContext *s, uint32_t dest)
1117 if (unlikely(s->singlestep_enabled)) {
1118 /* An indirect jump so that we still trigger the debug exception. */
1119 gen_bx_im(s, dest);
1120 } else {
1121 gen_goto_tb(s, 0, dest);
1122 s->is_jmp = DISAS_TB_JUMP;
1126 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
1127 static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
1129 TCGv tmp;
1130 if (bsr) {
1131 /* ??? This is also undefined in system mode. */
1132 if (IS_USER(s)) {
1133 return 1;
1136 tmp = load_cpu_field(bsr);
1137 tcg_gen_andi_i32(tmp, tmp, ~mask);
1138 tcg_gen_andi_i32(t0, t0, mask);
1139 tcg_gen_or_i32(tmp, tmp, t0);
1140 store_cpu_field(tmp, bsr);
1141 } else {
1142 gen_set_asr(t0, mask);
1144 dead_tmp(t0);
1145 gen_lookup_tb(s);
1146 return 0;
1149 /* Generate an old-style exception return. Marks pc as dead. */
1150 static void gen_exception_return(DisasContext *s, TCGv pc)
1152 TCGv tmp;
1153 store_reg(s, 31, pc);
1154 tmp = load_cpu_field(bsr);
1155 gen_set_asr(tmp, 0xffffffff);
1156 dead_tmp(tmp);
1157 s->is_jmp = DISAS_UPDATE;
1160 static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
1161 uint32_t insn)
1163 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1165 switch (UCOP_CPNUM) {
1166 #ifndef CONFIG_USER_ONLY
1167 case 0:
1168 disas_cp0_insn(env, s, insn);
1169 break;
1170 case 1:
1171 disas_ocd_insn(env, s, insn);
1172 break;
1173 #endif
1174 case 2:
1175 disas_ucf64_insn(env, s, insn);
1176 break;
1177 default:
1178 /* Unknown coprocessor. */
1179 cpu_abort(CPU(cpu), "Unknown coprocessor!");
1183 /* data processing instructions */
1184 static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1186 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1187 TCGv tmp;
1188 TCGv tmp2;
1189 int logic_cc;
1191 if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
1192 if (UCOP_SET(23)) { /* CMOV instructions */
1193 if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
1194 ILLEGAL;
1196 /* if the instruction is not always executed, generate a conditional
1197 jump to the next instruction */
1198 s->condlabel = gen_new_label();
1199 gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
1200 s->condjmp = 1;
1204 logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
1206 if (UCOP_SET(29)) {
1207 unsigned int val;
1208 /* immediate operand */
1209 val = UCOP_IMM_9;
1210 if (UCOP_SH_IM) {
1211 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1213 tmp2 = new_tmp();
1214 tcg_gen_movi_i32(tmp2, val);
1215 if (logic_cc && UCOP_SH_IM) {
1216 gen_set_CF_bit31(tmp2);
1218 } else {
1219 /* register */
1220 tmp2 = load_reg(s, UCOP_REG_M);
1221 if (UCOP_SET(5)) {
1222 tmp = load_reg(s, UCOP_REG_S);
1223 gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
1224 } else {
1225 gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
1229 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1230 tmp = load_reg(s, UCOP_REG_N);
1231 } else {
1232 TCGV_UNUSED(tmp);
1235 switch (UCOP_OPCODES) {
1236 case 0x00:
1237 tcg_gen_and_i32(tmp, tmp, tmp2);
1238 if (logic_cc) {
1239 gen_logic_CC(tmp);
1241 store_reg_bx(s, UCOP_REG_D, tmp);
1242 break;
1243 case 0x01:
1244 tcg_gen_xor_i32(tmp, tmp, tmp2);
1245 if (logic_cc) {
1246 gen_logic_CC(tmp);
1248 store_reg_bx(s, UCOP_REG_D, tmp);
1249 break;
1250 case 0x02:
1251 if (UCOP_SET_S && UCOP_REG_D == 31) {
1252 /* SUBS r31, ... is used for exception return. */
1253 if (IS_USER(s)) {
1254 ILLEGAL;
1256 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1257 gen_exception_return(s, tmp);
1258 } else {
1259 if (UCOP_SET_S) {
1260 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1261 } else {
1262 tcg_gen_sub_i32(tmp, tmp, tmp2);
1264 store_reg_bx(s, UCOP_REG_D, tmp);
1266 break;
1267 case 0x03:
1268 if (UCOP_SET_S) {
1269 gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
1270 } else {
1271 tcg_gen_sub_i32(tmp, tmp2, tmp);
1273 store_reg_bx(s, UCOP_REG_D, tmp);
1274 break;
1275 case 0x04:
1276 if (UCOP_SET_S) {
1277 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1278 } else {
1279 tcg_gen_add_i32(tmp, tmp, tmp2);
1281 store_reg_bx(s, UCOP_REG_D, tmp);
1282 break;
1283 case 0x05:
1284 if (UCOP_SET_S) {
1285 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
1286 } else {
1287 gen_add_carry(tmp, tmp, tmp2);
1289 store_reg_bx(s, UCOP_REG_D, tmp);
1290 break;
1291 case 0x06:
1292 if (UCOP_SET_S) {
1293 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
1294 } else {
1295 gen_sub_carry(tmp, tmp, tmp2);
1297 store_reg_bx(s, UCOP_REG_D, tmp);
1298 break;
1299 case 0x07:
1300 if (UCOP_SET_S) {
1301 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
1302 } else {
1303 gen_sub_carry(tmp, tmp2, tmp);
1305 store_reg_bx(s, UCOP_REG_D, tmp);
1306 break;
1307 case 0x08:
1308 if (UCOP_SET_S) {
1309 tcg_gen_and_i32(tmp, tmp, tmp2);
1310 gen_logic_CC(tmp);
1312 dead_tmp(tmp);
1313 break;
1314 case 0x09:
1315 if (UCOP_SET_S) {
1316 tcg_gen_xor_i32(tmp, tmp, tmp2);
1317 gen_logic_CC(tmp);
1319 dead_tmp(tmp);
1320 break;
1321 case 0x0a:
1322 if (UCOP_SET_S) {
1323 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
1325 dead_tmp(tmp);
1326 break;
1327 case 0x0b:
1328 if (UCOP_SET_S) {
1329 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
1331 dead_tmp(tmp);
1332 break;
1333 case 0x0c:
1334 tcg_gen_or_i32(tmp, tmp, tmp2);
1335 if (logic_cc) {
1336 gen_logic_CC(tmp);
1338 store_reg_bx(s, UCOP_REG_D, tmp);
1339 break;
1340 case 0x0d:
1341 if (logic_cc && UCOP_REG_D == 31) {
1342 /* MOVS r31, ... is used for exception return. */
1343 if (IS_USER(s)) {
1344 ILLEGAL;
1346 gen_exception_return(s, tmp2);
1347 } else {
1348 if (logic_cc) {
1349 gen_logic_CC(tmp2);
1351 store_reg_bx(s, UCOP_REG_D, tmp2);
1353 break;
1354 case 0x0e:
1355 tcg_gen_andc_i32(tmp, tmp, tmp2);
1356 if (logic_cc) {
1357 gen_logic_CC(tmp);
1359 store_reg_bx(s, UCOP_REG_D, tmp);
1360 break;
1361 default:
1362 case 0x0f:
1363 tcg_gen_not_i32(tmp2, tmp2);
1364 if (logic_cc) {
1365 gen_logic_CC(tmp2);
1367 store_reg_bx(s, UCOP_REG_D, tmp2);
1368 break;
1370 if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
1371 dead_tmp(tmp2);
1375 /* multiply */
1376 static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1378 TCGv tmp, tmp2, tmp3, tmp4;
1380 if (UCOP_SET(27)) {
1381 /* 64 bit mul */
1382 tmp = load_reg(s, UCOP_REG_M);
1383 tmp2 = load_reg(s, UCOP_REG_N);
1384 if (UCOP_SET(26)) {
1385 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
1386 } else {
1387 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
1389 if (UCOP_SET(25)) { /* mult accumulate */
1390 tmp3 = load_reg(s, UCOP_REG_LO);
1391 tmp4 = load_reg(s, UCOP_REG_HI);
1392 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
1393 dead_tmp(tmp3);
1394 dead_tmp(tmp4);
1396 store_reg(s, UCOP_REG_LO, tmp);
1397 store_reg(s, UCOP_REG_HI, tmp2);
1398 } else {
1399 /* 32 bit mul */
1400 tmp = load_reg(s, UCOP_REG_M);
1401 tmp2 = load_reg(s, UCOP_REG_N);
1402 tcg_gen_mul_i32(tmp, tmp, tmp2);
1403 dead_tmp(tmp2);
1404 if (UCOP_SET(25)) {
1405 /* Add */
1406 tmp2 = load_reg(s, UCOP_REG_S);
1407 tcg_gen_add_i32(tmp, tmp, tmp2);
1408 dead_tmp(tmp2);
1410 if (UCOP_SET_S) {
1411 gen_logic_CC(tmp);
1413 store_reg(s, UCOP_REG_D, tmp);
1417 /* miscellaneous instructions */
1418 static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1420 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1421 unsigned int val;
1422 TCGv tmp;
1424 if ((insn & 0xffffffe0) == 0x10ffc120) {
1425 /* Trivial implementation equivalent to bx. */
1426 tmp = load_reg(s, UCOP_REG_M);
1427 gen_bx(s, tmp);
1428 return;
1431 if ((insn & 0xfbffc000) == 0x30ffc000) {
1432 /* PSR = immediate */
1433 val = UCOP_IMM_9;
1434 if (UCOP_SH_IM) {
1435 val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
1437 tmp = new_tmp();
1438 tcg_gen_movi_i32(tmp, val);
1439 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1440 ILLEGAL;
1442 return;
1445 if ((insn & 0xfbffffe0) == 0x12ffc020) {
1446 /* PSR.flag = reg */
1447 tmp = load_reg(s, UCOP_REG_M);
1448 if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
1449 ILLEGAL;
1451 return;
1454 if ((insn & 0xfbffffe0) == 0x10ffc020) {
1455 /* PSR = reg */
1456 tmp = load_reg(s, UCOP_REG_M);
1457 if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
1458 ILLEGAL;
1460 return;
1463 if ((insn & 0xfbf83fff) == 0x10f80000) {
1464 /* reg = PSR */
1465 if (UCOP_SET_B) {
1466 if (IS_USER(s)) {
1467 ILLEGAL;
1469 tmp = load_cpu_field(bsr);
1470 } else {
1471 tmp = new_tmp();
1472 gen_helper_asr_read(tmp, cpu_env);
1474 store_reg(s, UCOP_REG_D, tmp);
1475 return;
1478 if ((insn & 0xfbf83fe0) == 0x12f80120) {
1479 /* clz */
1480 tmp = load_reg(s, UCOP_REG_M);
1481 if (UCOP_SET(26)) {
1482 gen_helper_clo(tmp, tmp);
1483 } else {
1484 gen_helper_clz(tmp, tmp);
1486 store_reg(s, UCOP_REG_D, tmp);
1487 return;
1490 /* otherwise */
1491 ILLEGAL;
1494 /* load/store I_offset and R_offset */
1495 static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1497 unsigned int mmu_idx;
1498 TCGv tmp;
1499 TCGv tmp2;
1501 tmp2 = load_reg(s, UCOP_REG_N);
1502 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
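/* Editorial note (interpretation): besides genuine user mode, post-indexed
 * forms with the W bit set are given the user MMU index by the expression
 * above, i.e. an ARM-style "translate as user" access for the data transfer. */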
1504 /* immediate */
1505 if (UCOP_SET_P) {
1506 gen_add_data_offset(s, insn, tmp2);
1509 if (UCOP_SET_L) {
1510 /* load */
1511 if (UCOP_SET_B) {
1512 tmp = gen_ld8u(tmp2, mmu_idx);
1513 } else {
1514 tmp = gen_ld32(tmp2, mmu_idx);
1516 } else {
1517 /* store */
1518 tmp = load_reg(s, UCOP_REG_D);
1519 if (UCOP_SET_B) {
1520 gen_st8(tmp, tmp2, mmu_idx);
1521 } else {
1522 gen_st32(tmp, tmp2, mmu_idx);
1525 if (!UCOP_SET_P) {
1526 gen_add_data_offset(s, insn, tmp2);
1527 store_reg(s, UCOP_REG_N, tmp2);
1528 } else if (UCOP_SET_W) {
1529 store_reg(s, UCOP_REG_N, tmp2);
1530 } else {
1531 dead_tmp(tmp2);
1533 if (UCOP_SET_L) {
1534 /* Complete the load. */
1535 if (UCOP_REG_D == 31) {
1536 gen_bx(s, tmp);
1537 } else {
1538 store_reg(s, UCOP_REG_D, tmp);
1543 /* SWP instruction */
1544 static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1546 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1547 TCGv addr;
1548 TCGv tmp;
1549 TCGv tmp2;
1551 if ((insn & 0xff003fe0) != 0x40000120) {
1552 ILLEGAL;
1555 /* ??? This is not really atomic. However we know
1556 we never have multiple CPUs running in parallel,
1557 so it is good enough. */
1558 addr = load_reg(s, UCOP_REG_N);
1559 tmp = load_reg(s, UCOP_REG_M);
1560 if (UCOP_SET_B) {
1561 tmp2 = gen_ld8u(addr, IS_USER(s));
1562 gen_st8(tmp, addr, IS_USER(s));
1563 } else {
1564 tmp2 = gen_ld32(addr, IS_USER(s));
1565 gen_st32(tmp, addr, IS_USER(s));
1567 dead_tmp(addr);
1568 store_reg(s, UCOP_REG_D, tmp2);
1571 /* load/store hw/sb */
1572 static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1574 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1575 TCGv addr;
1576 TCGv tmp;
1578 if (UCOP_SH_OP == 0) {
1579 do_swap(env, s, insn);
1580 return;
1583 addr = load_reg(s, UCOP_REG_N);
1584 if (UCOP_SET_P) {
1585 gen_add_datah_offset(s, insn, addr);
1588 if (UCOP_SET_L) { /* load */
1589 switch (UCOP_SH_OP) {
1590 case 1:
1591 tmp = gen_ld16u(addr, IS_USER(s));
1592 break;
1593 case 2:
1594 tmp = gen_ld8s(addr, IS_USER(s));
1595 break;
1596 default: /* see do_swap */
1597 case 3:
1598 tmp = gen_ld16s(addr, IS_USER(s));
1599 break;
1601 } else { /* store */
1602 if (UCOP_SH_OP != 1) {
1603 ILLEGAL;
1605 tmp = load_reg(s, UCOP_REG_D);
1606 gen_st16(tmp, addr, IS_USER(s));
1608 /* Perform base writeback before the loaded value to
1609 ensure correct behavior with overlapping index registers. */
1610 if (!UCOP_SET_P) {
1611 gen_add_datah_offset(s, insn, addr);
1612 store_reg(s, UCOP_REG_N, addr);
1613 } else if (UCOP_SET_W) {
1614 store_reg(s, UCOP_REG_N, addr);
1615 } else {
1616 dead_tmp(addr);
1618 if (UCOP_SET_L) {
1619 /* Complete the load. */
1620 store_reg(s, UCOP_REG_D, tmp);
1624 /* load/store multiple words */
1625 static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1627 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1628 unsigned int val, i, mmu_idx;
1629 int j, n, reg, user, loaded_base;
1630 TCGv tmp;
1631 TCGv tmp2;
1632 TCGv addr;
1633 TCGv loaded_var;
1635 if (UCOP_SET(7)) {
1636 ILLEGAL;
1638 /* XXX: store correct base if write back */
1639 user = 0;
1640 if (UCOP_SET_B) { /* S bit in instruction table */
1641 if (IS_USER(s)) {
1642 ILLEGAL; /* only usable in supervisor mode */
1644 if (UCOP_SET(18) == 0) { /* pc reg */
1645 user = 1;
1649 mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
1650 addr = load_reg(s, UCOP_REG_N);
1652 /* compute total size */
1653 loaded_base = 0;
1654 TCGV_UNUSED(loaded_var);
1655 n = 0;
1656 for (i = 0; i < 6; i++) {
1657 if (UCOP_SET(i)) {
1658 n++;
1661 for (i = 9; i < 19; i++) {
1662 if (UCOP_SET(i)) {
1663 n++;
1666 /* XXX: test invalid n == 0 case ? */
1667 if (UCOP_SET_U) {
1668 if (UCOP_SET_P) {
1669 /* pre increment */
1670 tcg_gen_addi_i32(addr, addr, 4);
1671 } else {
1672 /* post increment */
1674 } else {
1675 if (UCOP_SET_P) {
1676 /* pre decrement */
1677 tcg_gen_addi_i32(addr, addr, -(n * 4));
1678 } else {
1679 /* post decrement */
1680 if (n != 1) {
1681 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1686 j = 0;
1687 reg = UCOP_SET(6) ? 16 : 0;
1688 for (i = 0; i < 19; i++, reg++) {
1689 if (i == 6) {
1690 i = i + 3;
1692 if (UCOP_SET(i)) {
1693 if (UCOP_SET_L) { /* load */
1694 tmp = gen_ld32(addr, mmu_idx);
1695 if (reg == 31) {
1696 gen_bx(s, tmp);
1697 } else if (user) {
1698 tmp2 = tcg_const_i32(reg);
1699 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
1700 tcg_temp_free_i32(tmp2);
1701 dead_tmp(tmp);
1702 } else if (reg == UCOP_REG_N) {
1703 loaded_var = tmp;
1704 loaded_base = 1;
1705 } else {
1706 store_reg(s, reg, tmp);
1708 } else { /* store */
1709 if (reg == 31) {
1710 /* special case: r31 = PC + 4 */
1711 val = (long)s->pc;
1712 tmp = new_tmp();
1713 tcg_gen_movi_i32(tmp, val);
1714 } else if (user) {
1715 tmp = new_tmp();
1716 tmp2 = tcg_const_i32(reg);
1717 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
1718 tcg_temp_free_i32(tmp2);
1719 } else {
1720 tmp = load_reg(s, reg);
1722 gen_st32(tmp, addr, mmu_idx);
1724 j++;
1725 /* no need to add after the last transfer */
1726 if (j != n) {
1727 tcg_gen_addi_i32(addr, addr, 4);
1731 if (UCOP_SET_W) { /* write back */
1732 if (UCOP_SET_U) {
1733 if (UCOP_SET_P) {
1734 /* pre increment */
1735 } else {
1736 /* post increment */
1737 tcg_gen_addi_i32(addr, addr, 4);
1739 } else {
1740 if (UCOP_SET_P) {
1741 /* pre decrement */
1742 if (n != 1) {
1743 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
1745 } else {
1746 /* post decrement */
1747 tcg_gen_addi_i32(addr, addr, -(n * 4));
1750 store_reg(s, UCOP_REG_N, addr);
1751 } else {
1752 dead_tmp(addr);
1754 if (loaded_base) {
1755 store_reg(s, UCOP_REG_N, loaded_var);
1757 if (UCOP_SET_B && !user) {
1758 /* Restore ASR from BSR. */
1759 tmp = load_cpu_field(bsr);
1760 gen_set_asr(tmp, 0xffffffff);
1761 dead_tmp(tmp);
1762 s->is_jmp = DISAS_UPDATE;
1766 /* branch (and link) */
1767 static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
1769 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1770 unsigned int val;
1771 int32_t offset;
1772 TCGv tmp;
1774 if (UCOP_COND == 0xf) {
1775 ILLEGAL;
1778 if (UCOP_COND != 0xe) {
1779 /* if the instruction is not always executed, generate a conditional
1780 jump to the next instruction */
1781 s->condlabel = gen_new_label();
1782 gen_test_cc(UCOP_COND ^ 1, s->condlabel);
1783 s->condjmp = 1;
1786 val = (int32_t)s->pc;
1787 if (UCOP_SET_L) {
1788 tmp = new_tmp();
1789 tcg_gen_movi_i32(tmp, val);
1790 store_reg(s, 30, tmp);
1792 offset = (((int32_t)insn << 8) >> 8);
1793 val += (offset << 2); /* unicore is pc+4 */
1794 gen_jmp(s, val);
1797 static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
1799 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1800 unsigned int insn;
1802 insn = cpu_ldl_code(env, s->pc);
1803 s->pc += 4;
1805 /* UniCore instructions class:
1806 * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
1807 * AAA : see switch case
1808 * BBBB : opcodes or cond or PUBW
1809 * C : S OR L
1810 * D : 8
1811 * E : 5
1812 */
1813 switch (insn >> 29) {
1814 case 0x0:
1815 if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
1816 do_mult(env, s, insn);
1817 break;
1820 if (UCOP_SET(8)) {
1821 do_misc(env, s, insn);
1822 break;
1824 case 0x1:
1825 if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
1826 do_misc(env, s, insn);
1827 break;
1829 do_datap(env, s, insn);
1830 break;
1832 case 0x2:
1833 if (UCOP_SET(8) && UCOP_SET(5)) {
1834 do_ldst_hwsb(env, s, insn);
1835 break;
1837 if (UCOP_SET(8) || UCOP_SET(5)) {
1838 ILLEGAL;
1840 case 0x3:
1841 do_ldst_ir(env, s, insn);
1842 break;
1844 case 0x4:
1845 if (UCOP_SET(8)) {
1846 ILLEGAL; /* extended instructions */
1848 do_ldst_m(env, s, insn);
1849 break;
1850 case 0x5:
1851 do_branch(env, s, insn);
1852 break;
1853 case 0x6:
1854 /* Coprocessor. */
1855 disas_coproc_insn(env, s, insn);
1856 break;
1857 case 0x7:
1858 if (!UCOP_SET(28)) {
1859 disas_coproc_insn(env, s, insn);
1860 break;
1862 if ((insn & 0xff000000) == 0xff000000) { /* syscall */
1863 gen_set_pc_im(s->pc);
1864 s->is_jmp = DISAS_SYSCALL;
1865 break;
1867 ILLEGAL;
1871 /* generate intermediate code for basic block 'tb'. */
1872 void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
1874 UniCore32CPU *cpu = uc32_env_get_cpu(env);
1875 CPUState *cs = CPU(cpu);
1876 DisasContext dc1, *dc = &dc1;
1877 target_ulong pc_start;
1878 uint32_t next_page_start;
1879 int num_insns;
1880 int max_insns;
1882 /* generate intermediate code */
1883 num_temps = 0;
1885 pc_start = tb->pc;
1887 dc->tb = tb;
1889 dc->is_jmp = DISAS_NEXT;
1890 dc->pc = pc_start;
1891 dc->singlestep_enabled = cs->singlestep_enabled;
1892 dc->condjmp = 0;
1893 cpu_F0s = tcg_temp_new_i32();
1894 cpu_F1s = tcg_temp_new_i32();
1895 cpu_F0d = tcg_temp_new_i64();
1896 cpu_F1d = tcg_temp_new_i64();
1897 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1898 num_insns = 0;
1899 max_insns = tb->cflags & CF_COUNT_MASK;
1900 if (max_insns == 0) {
1901 max_insns = CF_COUNT_MASK;
1903 if (max_insns > TCG_MAX_INSNS) {
1904 max_insns = TCG_MAX_INSNS;
1907 #ifndef CONFIG_USER_ONLY
1908 if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
1909 dc->user = 1;
1910 } else {
1911 dc->user = 0;
1913 #endif
1915 gen_tb_start(tb);
1916 do {
1917 tcg_gen_insn_start(dc->pc);
1918 num_insns++;
1920 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1921 gen_set_pc_im(dc->pc);
1922 gen_exception(EXCP_DEBUG);
1923 dc->is_jmp = DISAS_JUMP;
1924 /* The address covered by the breakpoint must be included in
1925 [tb->pc, tb->pc + tb->size) in order for it to be
1926 properly cleared -- thus we increment the PC here so that
1927 the logic setting tb->size below does the right thing. */
1928 dc->pc += 4;
1929 goto done_generating;
1932 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1933 gen_io_start();
1936 disas_uc32_insn(env, dc);
1938 if (num_temps) {
1939 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
1940 num_temps = 0;
1943 if (dc->condjmp && !dc->is_jmp) {
1944 gen_set_label(dc->condlabel);
1945 dc->condjmp = 0;
1947 /* Translation stops when a conditional branch is encountered.
1948 * Otherwise the subsequent code could get translated several times.
1949 * Also stop translation when a page boundary is reached. This
1950 * ensures prefetch aborts occur at the right place. */
1951 } while (!dc->is_jmp && !tcg_op_buf_full() &&
1952 !cs->singlestep_enabled &&
1953 !singlestep &&
1954 dc->pc < next_page_start &&
1955 num_insns < max_insns);
1957 if (tb->cflags & CF_LAST_IO) {
1958 if (dc->condjmp) {
1959 /* FIXME: This can theoretically happen with self-modifying
1960 code. */
1961 cpu_abort(cs, "IO on conditional branch instruction");
1963 gen_io_end();
1966 /* At this stage dc->condjmp will only be set when the skipped
1967 instruction was a conditional branch or trap, and the PC has
1968 already been written. */
1969 if (unlikely(cs->singlestep_enabled)) {
1970 /* Make sure the pc is updated, and raise a debug exception. */
1971 if (dc->condjmp) {
1972 if (dc->is_jmp == DISAS_SYSCALL) {
1973 gen_exception(UC32_EXCP_PRIV);
1974 } else {
1975 gen_exception(EXCP_DEBUG);
1977 gen_set_label(dc->condlabel);
1979 if (dc->condjmp || !dc->is_jmp) {
1980 gen_set_pc_im(dc->pc);
1981 dc->condjmp = 0;
1983 if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
1984 gen_exception(UC32_EXCP_PRIV);
1985 } else {
1986 gen_exception(EXCP_DEBUG);
1988 } else {
1989 /* While branches must always occur at the end of an IT block,
1990 there are a few other things that can cause us to terminate
1991 the TB in the middle of an IT block:
1992 - Exception generating instructions (bkpt, swi, undefined).
1993 - Page boundaries.
1994 - Hardware watchpoints.
1995 Hardware breakpoints have already been handled and skip this code.
1996 */
1997 switch (dc->is_jmp) {
1998 case DISAS_NEXT:
1999 gen_goto_tb(dc, 1, dc->pc);
2000 break;
2001 default:
2002 case DISAS_JUMP:
2003 case DISAS_UPDATE:
2004 /* indicate that the hash table must be used to find the next TB */
2005 tcg_gen_exit_tb(0);
2006 break;
2007 case DISAS_TB_JUMP:
2008 /* nothing more to generate */
2009 break;
2010 case DISAS_SYSCALL:
2011 gen_exception(UC32_EXCP_PRIV);
2012 break;
2014 if (dc->condjmp) {
2015 gen_set_label(dc->condlabel);
2016 gen_goto_tb(dc, 1, dc->pc);
2017 dc->condjmp = 0;
2021 done_generating:
2022 gen_tb_end(tb, num_insns);
2024 #ifdef DEBUG_DISAS
2025 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2026 && qemu_log_in_addr_range(pc_start)) {
2027 qemu_log("----------------\n");
2028 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2029 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
2030 qemu_log("\n");
2032 #endif
2033 tb->size = dc->pc - pc_start;
2034 tb->icount = num_insns;
2037 static const char *cpu_mode_names[16] = {
2038 "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
2039 "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
2042 #undef UCF64_DUMP_STATE
2043 #ifdef UCF64_DUMP_STATE
2044 static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
2045 fprintf_function cpu_fprintf, int flags)
2047 int i;
2048 union {
2049 uint32_t i;
2050 float s;
2051 } s0, s1;
2052 CPU_DoubleU d;
2053 /* ??? This assumes float64 and double have the same layout.
2054 Oh well, it's only debug dumps. */
2055 union {
2056 float64 f64;
2057 double d;
2058 } d0;
2060 for (i = 0; i < 16; i++) {
2061 d.d = env->ucf64.regs[i];
2062 s0.i = d.l.lower;
2063 s1.i = d.l.upper;
2064 d0.f64 = d.d;
2065 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
2066 i * 2, (int)s0.i, s0.s,
2067 i * 2 + 1, (int)s1.i, s1.s);
2068 cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
2069 i, (uint64_t)d0.f64, d0.d);
2071 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
2073 #else
2074 #define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
2075 #endif
2077 void uc32_cpu_dump_state(CPUState *cs, FILE *f,
2078 fprintf_function cpu_fprintf, int flags)
2080 UniCore32CPU *cpu = UNICORE32_CPU(cs);
2081 CPUUniCore32State *env = &cpu->env;
2082 int i;
2083 uint32_t psr;
2085 for (i = 0; i < 32; i++) {
2086 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2087 if ((i % 4) == 3) {
2088 cpu_fprintf(f, "\n");
2089 } else {
2090 cpu_fprintf(f, " ");
2093 psr = cpu_asr_read(env);
2094 cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
2095 psr,
2096 psr & (1 << 31) ? 'N' : '-',
2097 psr & (1 << 30) ? 'Z' : '-',
2098 psr & (1 << 29) ? 'C' : '-',
2099 psr & (1 << 28) ? 'V' : '-',
2100 cpu_mode_names[psr & 0xf]);
2102 cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
2105 void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
2106 target_ulong *data)
2108 env->regs[31] = data[0];
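/*
 * Editorial note: restore_state_to_opc() is the counterpart of the
 * tcg_gen_insn_start(dc->pc) call in gen_intermediate_code(); when execution
 * state must be reconstructed after a fault, the recorded per-instruction
 * data[0] (the guest PC) is written back into regs[31].
 */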