/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
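
/*
 * Note (explanatory, not part of the original source): when the branch
 * destination lies on the same guest page as the TB itself, the
 * tcg_gen_goto_tb()/tcg_gen_exit_tb((uintptr_t)tb + n) pair allows the
 * runtime to patch a direct jump to the next TB once it has been
 * translated ("TB chaining").  Cross-page destinations exit with
 * tcg_gen_exit_tb(0) instead, forcing a hash-table lookup, since the
 * mapping of the other page may change independently of this TB.
 */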

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
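
/*
 * Note (explanatory): type A instructions take operand b from register
 * rb, while type B instructions carry a 16-bit immediate.  Without a
 * preceding "imm" prefix the immediate is sign-extended to 32 bits;
 * with the prefix, the low 16 bits are OR'ed into env_imm, whose upper
 * half was loaded by dec_imm() further below.
 */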

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
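
/*
 * Note (worked example, values chosen for illustration): rsub computes
 * d = b - a via the two's-complement identity b - a = b + ~a + 1, which
 * is why the carry-in above defaults to 1 and rsubc merely substitutes
 * the MSR carry for that constant.  E.g. with b = 5, a = 3:
 * 5 + 0xfffffffc + 1 = 0x100000002, i.e. d = 2 with carry out set
 * (meaning "no borrow").
 */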

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked value t (not v), so writes can never set PVR.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
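
/*
 * Note (explanatory): both helpers implement a 32x32->64 widening
 * multiply by extending the operands to 64 bits, multiplying once, and
 * splitting the product: d receives bits [31:0] and d2 bits [63:32].
 * The signed and unsigned variants differ only in the extension used
 * (ext_i32_i64 vs extu_i32_i64), which is what distinguishes mulh
 * from mulhu below.
 */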

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
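
/*
 * Note (illustrative example, values are hypothetical): the imm prefix
 * supplies the upper 16 bits of the next type B instruction's
 * immediate.  Roughly:
 *
 *     imm   0x1234          ; env_imm = 0x12340000, IMM_FLAG set
 *     addi  r3, r0, 0x5678  ; operand b becomes 0x12345678
 *
 * dc->clear_imm = 0 keeps IMM_FLAG alive across exactly one insn; the
 * translation loop resets clear_imm before each insn and drops the
 * flag again after the instruction following the prefix.
 */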

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment.  */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* No support for AXI exclusive, so always clear C.  */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment.  */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
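
/*
 * Note (explanatory): lwx/swx together implement a load-linked/
 * store-conditional pair on top of env_res_addr/env_res_val.  lwx
 * records the word-aligned address and the loaded value; swx succeeds
 * (carry cleared) only if the address still matches the reservation
 * and the word in memory still equals the recorded value, otherwise
 * the store is skipped and the carry set by write_carryi(dc, 1)
 * remains as the failure indication.  As the FIXME above notes, the
 * compare-and-store pair is only atomic in system emulation.
 */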

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
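
/*
 * Note on the delay-slot machinery (explanatory): dc->delayed_branch
 * counts down in the main translation loop.  Setting it to 2 here
 * means "translate one more insn (the delay slot) before resolving
 * the branch", while 1 resolves immediately after the branch insn
 * itself.  D_FLAG marks the slot so that a fault taken inside it can
 * restore the branch state; see sync_jmpstate() and the bimm store
 * above.
 */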

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
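
/*
 * Note (explanatory): each decinfo entry pairs an opcode pattern with
 * its handler, and decode() below picks the first entry for which
 * (dc->opcode & mask) == bits.  The DEC_* macros are bits/mask pairs
 * defined in microblaze-decode.h.  The {{0, 0}, dec_null} sentinel
 * matches any opcode, so unknown instructions fall through to
 * dec_null.
 */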

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && tcg_ctx.gen_opc_ptr < gen_opc_end
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}