/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
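/*
 * Illustrative note (not in the original source): EXTRACT_FIELD selects
 * bits start..end inclusive and shifts them down to bit 0.  For example,
 * with src = 0x0000abcd, EXTRACT_FIELD(src, 0, 15) yields 0xabcd and
 * EXTRACT_FIELD(src, 8, 11) yields 0xb.
 */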
static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
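
/*
 * Illustrative note (not in the original source): the shift-left-31 /
 * arithmetic-shift-right-31 pair replicates bit 0 of v across the whole
 * word, so masking with (MSR_C | MSR_CC) sets or clears both carry views
 * at once: MSR_C (the architected carry) and MSR_CC (the copy kept in the
 * MSR sign bit).  That copy is why read_carry can recover the carry with
 * a single shift right by 31.
 */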

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
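
/*
 * Illustrative note (not in the original source): an "imm 0x1234" prefix
 * leaves 0x12340000 in env_imm (see dec_imm below); a following type B
 * insn with imm = 0x5678 then ORs in the low half, so dec_alu_op_b
 * returns the full 32-bit operand 0x12345678.  Without the prefix the
 * 16-bit immediate is sign-extended instead.
 */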

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
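
/*
 * Illustrative note (not in the original source): rsub computes b - a as
 * b + ~a + 1 in two's complement.  For example, with a = 3 and b = 5,
 * ~a = 0xfffffffc and 5 + 0xfffffffc + 1 = 2 with a carry out, matching
 * the MicroBlaze convention that carry set means "no borrow".
 */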

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy (t), not the raw value v, so the PVR bit
       really is preserved.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
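
/*
 * Illustrative note (not in the original source): both helpers widen the
 * 32-bit operands to 64 bits, multiply once, then split the product.  For
 * example, unsigned 0x80000000 * 2 = 0x100000000, so d (low half) becomes
 * 0 and d2 (high half) becomes 1; the signed variant instead sees
 * -0x80000000 * 2 and returns d2 = 0xffffffff.
 */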

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
         && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
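
/*
 * Illustrative note (not in the original source): dec_imm only loads the
 * high half into env_imm and sets IMM_FLAG; clear_imm = 0 keeps the flag
 * alive for exactly one following insn (the translation loop clears
 * IMM_FLAG again whenever clear_imm is left at 1), which is how the
 * two-insn imm prefix sequence is stitched together.
 */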

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
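
/*
 * Illustrative note (not in the original source): for a reversed halfword
 * access, XORing the address with 2 swaps which half of the 32-bit word
 * is touched, e.g. 0x1000 becomes 0x1002 and vice versa; for bytes,
 * addr' = (addr & ~3) | (3 - (addr & 3)) mirrors the byte lane within
 * the word.  Combined with the MO_BSWAP flip on the data, this implements
 * the reversed (rev) addressing variants.
 */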

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
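
/*
 * Illustrative note (not in the original source): lwx/swx form a
 * load-linked/store-conditional pair.  lwx records the address and loaded
 * value in env_res_addr/env_res_val; swx presets carry to 1 ("failed"),
 * then clears it to 0 and performs the store only if the address still
 * matches the reservation and the memory word still holds the reserved
 * value; otherwise it branches to swx_skip past the store.
 */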

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
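
/*
 * Illustrative note (not in the original source): in the MSR layout the
 * saved-mode bits UMS/VMS sit one position above UM/VM, so "shift right
 * by one, then mask with MSR_VM | MSR_UM" copies the saved user/virtual
 * mode back into the active bits on return from interrupt, break or
 * exception; the three variants differ only in how they restore IE, BIP
 * and EE/EIP.
 */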

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
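
/*
 * Illustrative note (not in the original source): each DEC_* macro expands
 * to a {bits, mask} pair over the 6-bit major opcode; decode() walks this
 * table and dispatches on the first entry where (opcode & mask) == bits.
 * The terminating {{0, 0}, dec_null} entry matches every opcode, so
 * anything left undecoded falls through to dec_null's illegal-insn
 * handling.
 */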

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && tcg_ctx.gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif

    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}