/*
 *  UniCore32 translation
 *
 *  Copyright (C) 2010-2011 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or (at your option) any
 * later version. See the COPYING file in the top-level directory.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

#define IS_USER(s) 1

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_SYSCALL 5

static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] = {
      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };

/* initialize TCG globals.  */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                        offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}

#define GEN_HELPER 2
#include "helper.h"

static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we already updated PC, r31 reads as the
           address of the next instruction */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define UCOP_REG_M              (((insn) >>  0) & 0x1f)
#define UCOP_REG_N              (((insn) >> 19) & 0x1f)
#define UCOP_REG_D              (((insn) >> 14) & 0x1f)
#define UCOP_REG_S              (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO             (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI             (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP              (((insn) >>  6) & 0x03)
#define UCOP_SH_IM              (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES            (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10              (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14              (((insn) >>  0) & 0x3fff)
#define UCOP_COND               (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM              (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)             ((insn) & (1 << (i)))
#define UCOP_SET_P              UCOP_SET(28)
#define UCOP_SET_U              UCOP_SET(27)
#define UCOP_SET_B              UCOP_SET(26)
#define UCOP_SET_W              UCOP_SET(25)
#define UCOP_SET_L              UCOP_SET(24)
#define UCOP_SET_S              UCOP_SET(24)
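
/*
 * Worked example (illustrative only, not taken from the UniCore32
 * manual): for an insn word with bits [23:19] = 2, [18:14] = 3 and
 * [4:0] = 1, the field macros above extract
 *     UCOP_REG_N = (insn >> 19) & 0x1f = 2
 *     UCOP_REG_D = (insn >> 14) & 0x1f = 3
 *     UCOP_REG_M = (insn >>  0) & 0x1f = 1
 * i.e. a data-processing insn of the form "rd <- rn OP rm" with
 * rd = r03, rn = r02, rm = r01.  Note that UCOP_REG_D/UCOP_REG_LO and
 * UCOP_REG_S/UCOP_REG_HI alias the same bit fields, as do
 * UCOP_SET_L/UCOP_SET_S.
 */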
#define ILLEGAL         cpu_abort(env,                                 \
                        "Illegal UniCore32 instruction %x at line %d!", \
                        insn, __LINE__)

static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
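
/*
 * Encoding note, summarized from the code above for illustration: an
 * immediate shift amount of 0 is special-cased.  LSL #0 leaves the
 * value untouched, LSR #0 and ASR #0 encode shifts by 32 (LSR #32
 * yields 0 with C = bit 31; ASR #32 fills the result with the sign
 * bit), and ROR #0 encodes RRX, a 1-bit rotate through the carry flag.
 */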
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}

static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

static const uint8_t table_logic_cc[16] = {
    1, /* and */        1, /* xor */        0, /* sub */        0, /* rsb */
    0, /* add */        0, /* adc */        0, /* sbc */        0, /* rsc */
    1, /* andl */       1, /* xorl */       0, /* cmp */        0, /* cmn */
    1, /* orr */        1, /* mov */        1, /* bic */        1, /* mvn */
};
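
/*
 * For illustration: table_logic_cc is indexed by UCOP_OPCODES.  A 1
 * marks a logical op whose S-bit form sets only N and Z from the
 * result (via gen_logic_CC); a 0 marks an arithmetic op whose S-bit
 * form goes through the *_cc helpers so that C and V are computed too.
 */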
/* Set PC state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* shift/register */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(26)) {
        /* immediate */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        if (val != 0) {
            tcg_gen_addi_i32(var, var, val);
        }
    } else {
        /* register */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}

static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))

/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (!UCOP_SET_P) {
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) { /* no transfer register selected in the mask */
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }

        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }

        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}

/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}

/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}

/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}

#define gen_helper_ucf64_movs(x, y)      do { } while (0)
#define gen_helper_ucf64_movd(x, y)      do { } while (0)

#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)

/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}

/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}

static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
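
/*
 * Note (illustrative): tcg_gen_exit_tb with (tb + n) returns a pointer
 * tagged with the jump slot index n, which lets the execution loop
 * patch a direct jump from this TB to the next one ("TB chaining").
 * Returning 0 instead forces a hash-table lookup for the next TB,
 * which is required when the destination lies on a different guest
 * page and the mapping may change.
 */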
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x) {
        tcg_gen_sari_i32(t0, t0, 16);
    } else {
        gen_sxth(t0);
    }
    if (y) {
        tcg_gen_sari_i32(t1, t1, 16);
    } else {
        gen_sxth(t1);
    }
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;
    if (bsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s)) {
            return 1;
        }

        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    switch (UCOP_CPNUM) {
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor.  */
        cpu_abort(env, "Unknown coprocessor!");
    }
}

/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = new_tmp();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    dead_tmp(tmpl);
    dead_tmp(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return.  */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}

/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
        } else {
            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
        }
        gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
        tcg_temp_free_i64(tmp64);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx.  */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}

/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    i = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, i);
        } else {
            tmp = gen_ld32(tmp2, i);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, i);
        } else {
            gen_st32(tmp, tmp2, i);
        }
    }
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}

/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic.  However we know
       we never have multiple CPUs running in parallel,
       so it is good enough.  */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}

/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load.  */
        store_reg(s, UCOP_REG_D, tmp);
    }
}

/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            /* skip bits 6..8, which are not register-mask bits */
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, IS_USER(s));
                if (reg == 31) {
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, IS_USER(s));
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }
    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }
    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR.  */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}

/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
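
/*
 * Worked example (illustrative): the low 24 bits of insn hold a signed
 * word offset.  "(insn << 8) >> 8" sign-extends bit 23 across the top
 * byte, so a 24-bit field of 0xffffff becomes -1, and the branch
 * target is s->pc + (-1 << 2).  Since s->pc has already been advanced
 * past this insn, that target is the branch instruction itself.
 */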
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
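    /*
     * For illustration: the top three bits (AAA, i.e. insn >> 29)
     * select the major class below, e.g. 0x5 is branch (and link) and
     * 0x6 is a coprocessor insn, while D (bit 8) and E (bit 5) further
     * split class 0 between multiply and miscellaneous instructions.
     */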
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }

        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;

    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;

    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor.  */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }

    return;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUUniCore32State *env,
        TranslationBlock *tb, int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#define UCF64_DUMP_STATE
void cpu_dump_state(CPUUniCore32State *env, FILE *f, fprintf_function cpu_fprintf,
        int flags)
{
    int i;
#ifdef UCF64_DUMP_STATE
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

#ifdef UCF64_DUMP_STATE
    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%" PRIx64 "(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
#endif
}

void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = gen_opc_pc[pc_pos];
}