target-microblaze: dec_barrel: Add braces around if-statements
[qemu/ar7.git] / target / microblaze / translate.c
blob504ed8871a8385efa09f63dd4687e589617ccd2c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/log.h"
#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
/* Disassembly tracing; only emitted when CPU_LOG_TB_IN_ASM logging is on. */
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

/* Debug-only statement wrapper; expands to nothing in normal builds. */
#define D(x)

/* Extract bits [start..end] (inclusive, little-endian bit numbering)
   from src.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
/* TCG handles for CPU state, created once at translator init.  */
static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];      /* General purpose registers r0-r31. */
static TCGv cpu_SR[18];     /* Special registers (PC, MSR, EAR, ESR, ...). */
static TCGv env_imm;        /* Accumulated value of an "imm" prefix insn. */
static TCGv env_btaken;     /* Nonzero if the pending branch is taken. */
static TCGv env_btarget;    /* Pending branch target address. */
static TCGv env_iflags;     /* Runtime copy of the tb dependent flags. */
static TCGv env_res_addr;   /* lwx/swx reservation address. */
static TCGv env_res_val;    /* lwx/swx reservation value. */
60 #include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;            /* Address of the insn being translated. */

    /* Decoder.  */
    int type_b;                 /* Nonzero for type-B (immediate) insns. */
    uint32_t ir;                /* Raw 32-bit instruction word. */
    uint8_t opcode;
    uint8_t rd, ra, rb;         /* Destination and source register fields. */
    uint16_t imm;               /* 16-bit immediate field. */

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;     /* Clear the IMM_FLAG after this insn. */
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;           /* Kind of pending jump (JMP_*). */
    uint32_t jmp_pc;            /* Target for direct jumps. */

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
/* Printable names for the 32 general purpose registers.  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
/* Printable names for the special registers (indexes match cpu_SR[]).  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
/* Flush the translation-time tb flags into env_iflags so the runtime
   state matches what the translator assumed.  Only emits a store when
   the two have diverged.  */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
/* Emit code to raise exception 'index' at the current pc.  Flags and
   SR_PC are synced first so the exception handler sees consistent state;
   the TB is then ended with DISAS_UPDATE.  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
/* Return true if we may chain directly to 'dest' with goto_tb.
   For system emulation this requires dest to share a page with the
   current TB (so a remap of the other page invalidates the link).  */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* End the TB with a jump to 'dest', using direct TB chaining (slot n)
   when allowed, otherwise a plain exit after updating SR_PC.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        /* Encode the chaining slot in the low bits of the tb pointer. */
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
/* Copy the MSR carry-copy bit (bit 31, MSR_CC) into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    /* Replicate bit 0 of v across the word, then keep only C and CC.  */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
/* Set the MSR carry bits to the constant 'carry' (0 or 1).  */
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
186 static inline TCGv *dec_alu_op_b(DisasContext *dc)
188 if (dc->type_b) {
189 if (dc->tb_flags & IMM_FLAG)
190 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
191 else
192 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
193 return &env_imm;
194 } else
195 return &cpu_R[dc->rb];
/* Decode add/addi/addc/addk and combinations: rd = ra + opB (+ carry).
   The k bit keeps MSR.C unchanged; otherwise the new carry is computed
   via gen_helper_carry and written back.  */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;     /* k - keep carry. */
    c = dc->opcode & 2;     /* c - add carry into the result. */

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry BEFORE clobbering rd (rd may alias ra). */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is updated. */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
/* Decode rsub/rsubi/rsubc/rsubk and cmp/cmpu: rd = opB - ra, computed
   as opB + ~ra + carry (carry defaults to 1).  cmp/cmpu are encoded as
   type-A rsubk with the low imm bits set.  */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;        /* u - unsigned compare. */
    k = dc->opcode & 4;     /* k - keep carry. */
    c = dc->opcode & 2;     /* c - use carry in. */
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* Compute the new carry BEFORE clobbering rd (rd may alias ra). */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is updated. */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
323 static void dec_pattern(DisasContext *dc)
325 unsigned int mode;
327 if ((dc->tb_flags & MSR_EE_FLAG)
328 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
329 && !dc->cpu->cfg.use_pcmp_instr) {
330 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
331 t_gen_raise_exception(dc, EXCP_HW_EXCP);
334 mode = dc->opcode & 3;
335 switch (mode) {
336 case 0:
337 /* pcmpbf. */
338 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
339 if (dc->rd)
340 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
341 break;
342 case 2:
343 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
344 if (dc->rd) {
345 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
346 cpu_R[dc->ra], cpu_R[dc->rb]);
348 break;
349 case 3:
350 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
351 if (dc->rd) {
352 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
353 cpu_R[dc->ra], cpu_R[dc->rb]);
355 break;
356 default:
357 cpu_abort(CPU(dc->cpu),
358 "unsupported pattern insn opcode=%x\n", dc->opcode);
359 break;
363 static void dec_and(DisasContext *dc)
365 unsigned int not;
367 if (!dc->type_b && (dc->imm & (1 << 10))) {
368 dec_pattern(dc);
369 return;
372 not = dc->opcode & (1 << 1);
373 LOG_DIS("and%s\n", not ? "n" : "");
375 if (!dc->rd)
376 return;
378 if (not) {
379 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
380 } else
381 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
384 static void dec_or(DisasContext *dc)
386 if (!dc->type_b && (dc->imm & (1 << 10))) {
387 dec_pattern(dc);
388 return;
391 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
392 if (dc->rd)
393 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396 static void dec_xor(DisasContext *dc)
398 if (!dc->type_b && (dc->imm & (1 << 10))) {
399 dec_pattern(dc);
400 return;
403 LOG_DIS("xor r%d\n", dc->rd);
404 if (dc->rd)
405 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
/* Copy the current MSR into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
413 static inline void msr_write(DisasContext *dc, TCGv v)
415 TCGv t;
417 t = tcg_temp_new();
418 dc->cpustate_changed = 1;
419 /* PVR bit is not writable. */
420 tcg_gen_andi_tl(t, v, ~MSR_PVR);
421 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
422 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
423 tcg_temp_free(t);
/* Decode the MSR / special-register insns: msrclr/msrset, mts/mfs
   (including the MMU register window 0x1000..0x10ff) and the PVR
   registers 0x2000..0x200c.  Privileged accesses from user mode raise
   ESR_EC_PRIVINSN.  */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);     /* Special register number. */
    to = dc->imm & (1 << 14);           /* Nonzero for mts (write). */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop???  */
            return;
        }

        /* Only the C and CC bits (imm 0 or 4) may be touched from
           user mode; anything else is privileged. */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR. */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        /* mts is privileged. */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* Only the low 5 bits of FSR are implemented. */
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
             case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                /* PVR registers 0-12. */
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* r0 must always read as zero. */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
/* Multiplier unit.  */
/* Decode mul/muli and the high-half variants mulh/mulhsu/mulhu.
   Raises illegal-opcode if the core has no hardware multiplier. */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop???  */
    }

    /* tmp receives the (discarded) low half of the 64-bit product. */
    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}
636 /* Div unit. */
637 static void dec_div(DisasContext *dc)
639 unsigned int u;
641 u = dc->imm & 2;
642 LOG_DIS("div\n");
644 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
645 && !dc->cpu->cfg.use_div) {
646 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
647 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 if (u)
651 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
652 cpu_R[dc->ra]);
653 else
654 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
655 cpu_R[dc->ra]);
656 if (!dc->rd)
657 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
660 static void dec_barrel(DisasContext *dc)
662 TCGv t0;
663 bool s, t;
665 if ((dc->tb_flags & MSR_EE_FLAG)
666 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
667 && !dc->cpu->cfg.use_barrel) {
668 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
669 t_gen_raise_exception(dc, EXCP_HW_EXCP);
670 return;
673 s = extract32(dc->imm, 10, 1);
674 t = extract32(dc->imm, 9, 1);
676 LOG_DIS("bs%s%s r%d r%d r%d\n",
677 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
679 t0 = tcg_temp_new();
681 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
682 tcg_gen_andi_tl(t0, t0, 31);
684 if (s) {
685 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
686 } else {
687 if (t) {
688 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
689 } else {
690 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
/* Decode the single-register bit-manipulation group: src/srl/sra,
   sign extensions, cache ops wdc/wic (privileged), clz and the
   byte/halfword swaps.  */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Save old carry (as bit 31) before write_carry clobbers it. */
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    /* srl - logical shift right by one. */
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    /* sra - arithmetic shift right by one. */
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            /* Cache ops are privileged; otherwise they are nops here. */
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz - count leading zeros (32 when ra == 0). */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /*swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
/* Materialize a translation-time direct branch into runtime state
   (env_btaken/env_btarget) so a fault in a delay slot sees a consistent
   branch state.  Converts the jmp kind to JMP_INDIRECT.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct branches are always taken. */
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
/* Decode the "imm" prefix insn: latch the high 16 bits into env_imm and
   flag that the next insn's immediate combines with it.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    /* Keep IMM_FLAG alive across exactly one following insn. */
    dc->clear_imm = 0;
}
/* Compute the effective address for a load/store.  Returns either a
   pointer directly to a register TCGv (when one operand is r0 or the
   immediate is zero) or to *t, which is then a freshly allocated temp
   the caller must free.  Emits a stack-protection check when r1 is
   involved and stackprot is configured.  */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        /* An imm prefix is pending; dec_alu_op_b combines it. */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
/* Decode the load insns (lbu/lhu/lw and the reversed/exclusive
   variants).  Handles endian-reversed sub-word address munging, the
   lwx reservation, and speculative load before the alignment check
   (MMU faults take priority over unaligned faults).  */
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;    /* r - endian reversed access. */
        ex = (dc->ir >> 10) & 1;    /* x - exclusive (lwx). */
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        /* Record the reservation for a later swx. */
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
/* Decode the store insns (sb/sh/sw and the reversed/exclusive
   variants).  swx only stores if the reservation address matches and
   the reserved value is unchanged; MSR.C reports success (0) or
   failure (1).  */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;    /* r - endian reversed access. */
        ex = (dc->ir >> 10) & 1;    /* x - exclusive (swx). */
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Local temp: it survives the brcond below. */
    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 10
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, thay way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
/* Emit d = (a <cc> b) for a branch condition code, as 0 or 1.  */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}
/* Set SR_PC to pc_true when env_btaken is nonzero, else pc_false.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
/* Decode conditional branches (beq/bne/blt/... and delay-slot forms).
   Records the branch as pending; the condition on ra is evaluated into
   env_btaken now, the jump itself happens after any delay slot.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the dslot insn carries an imm prefix. */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
/* Decode unconditional branches br/bra/brl/brk (and delay-slot forms),
   plus the mbar/sleep encodings that share the opcode.  brki to 8/0x18
   is the debug/break entry; brki rd, 0 from user mode is privileged.  */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Set cs->halted; cpu_env points at env inside the CPU struct,
               hence the negative offset back to the CPUState fields. */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the dslot insn carries an imm prefix. */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki rd, 8 / 0x18: software breakpoint entry points. */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
/* Return from interrupt: set MSR.IE and restore VM/UM from their
   interrupt-save copies (one bit to the left in MSR).  */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
1282 static inline void do_rtb(DisasContext *dc)
1284 TCGv t0, t1;
1285 t0 = tcg_temp_new();
1286 t1 = tcg_temp_new();
1287 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1288 tcg_gen_shri_tl(t0, t1, 1);
1289 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1291 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1292 tcg_gen_or_tl(t1, t1, t0);
1293 msr_write(dc, t1);
1294 tcg_temp_free(t1);
1295 tcg_temp_free(t0);
1296 dc->tb_flags &= ~DRTB_FLAG;
1299 static inline void do_rte(DisasContext *dc)
1301 TCGv t0, t1;
1302 t0 = tcg_temp_new();
1303 t1 = tcg_temp_new();
1305 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1306 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1307 tcg_gen_shri_tl(t0, t1, 1);
1308 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1310 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1311 tcg_gen_or_tl(t1, t1, t0);
1312 msr_write(dc, t1);
1313 tcg_temp_free(t1);
1314 tcg_temp_free(t0);
1315 dc->tb_flags &= ~DRTE_FLAG;
1318 static void dec_rts(DisasContext *dc)
1320 unsigned int b_bit, i_bit, e_bit;
1321 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1323 i_bit = dc->ir & (1 << 21);
1324 b_bit = dc->ir & (1 << 22);
1325 e_bit = dc->ir & (1 << 23);
1327 dc->delayed_branch = 2;
1328 dc->tb_flags |= D_FLAG;
1329 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1330 cpu_env, offsetof(CPUMBState, bimm));
1332 if (i_bit) {
1333 LOG_DIS("rtid ir=%x\n", dc->ir);
1334 if ((dc->tb_flags & MSR_EE_FLAG)
1335 && mem_index == MMU_USER_IDX) {
1336 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1337 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1339 dc->tb_flags |= DRTI_FLAG;
1340 } else if (b_bit) {
1341 LOG_DIS("rtbd ir=%x\n", dc->ir);
1342 if ((dc->tb_flags & MSR_EE_FLAG)
1343 && mem_index == MMU_USER_IDX) {
1344 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1345 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1347 dc->tb_flags |= DRTB_FLAG;
1348 } else if (e_bit) {
1349 LOG_DIS("rted ir=%x\n", dc->ir);
1350 if ((dc->tb_flags & MSR_EE_FLAG)
1351 && mem_index == MMU_USER_IDX) {
1352 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1353 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1355 dc->tb_flags |= DRTE_FLAG;
1356 } else
1357 LOG_DIS("rts ir=%x\n", dc->ir);
1359 dc->jmp = JMP_INDIRECT;
1360 tcg_gen_movi_tl(env_btaken, 1);
1361 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1364 static int dec_check_fpuv2(DisasContext *dc)
1366 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1367 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1368 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1370 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
/*
 * Decode floating point instructions.  Raises an illegal-opcode hw
 * exception when the CPU is configured without an FPU; otherwise
 * dispatches to the fadd/frsub/fmul/fdiv/fcmp/flt/fint/fsqrt helpers.
 * flt/fint/fsqrt additionally require FPU v2 (dec_check_fpuv2).
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured: illegal opcode exception if enabled.  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    /* Sub-opcode selecting the FPU operation.  */
    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* Compare family; bits 4-6 select the condition.  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt requires FPU v2; check may raise and abort decode.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1478 static void dec_null(DisasContext *dc)
1480 if ((dc->tb_flags & MSR_EE_FLAG)
1481 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1482 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1483 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1484 return;
1486 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1487 dc->abort_at_next_insn = 1;
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    /* rd == 0 selects put (write to stream), otherwise get.  */
    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    /* Stream insns are privileged.  */
    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    /* Stream id: low 4 bits of imm (immediate form) or of rb (dynamic).  */
    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
/*
 * Opcode dispatch table: the first entry whose (opcode & mask) == bits
 * wins (see decode()).  The all-zero terminator matches everything and
 * routes unknown opcodes to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1554 static inline void decode(DisasContext *dc, uint32_t ir)
1556 int i;
1558 dc->ir = ir;
1559 LOG_DIS("%8.8x\t", dc->ir);
1561 if (dc->ir)
1562 dc->nr_nops = 0;
1563 else {
1564 if ((dc->tb_flags & MSR_EE_FLAG)
1565 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1566 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1567 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1568 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1569 return;
1572 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1573 dc->nr_nops++;
1574 if (dc->nr_nops > 4) {
1575 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1578 /* bit 2 seems to indicate insn type. */
1579 dc->type_b = ir & (1 << 29);
1581 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1582 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1583 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1584 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1585 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1587 /* Large switch for all insns. */
1588 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1589 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1590 decinfo[i].dec(dc);
1591 break;
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    /* Remember the entry flags to detect per-tb state changes below.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB may start inside a delay slot (D_FLAG carried over).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* decode() may clear this to keep IMM_FLAG alive for imm insns.  */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Delay slot just executed; apply any deferred MSR
                   updates for rtid/rtbd/rted.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && !tcg_op_buf_full()
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended in a delay slot; force a state update so the
               branch resolves on re-entry.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1787 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1788 int flags)
1790 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1791 CPUMBState *env = &cpu->env;
1792 int i;
1794 if (!env || !f)
1795 return;
1797 cpu_fprintf(f, "IN: PC=%x %s\n",
1798 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1799 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1800 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1801 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1802 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1803 env->btaken, env->btarget,
1804 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1805 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1806 (env->sregs[SR_MSR] & MSR_EIP),
1807 (env->sregs[SR_MSR] & MSR_IE));
1809 for (i = 0; i < 32; i++) {
1810 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1811 if ((i + 1) % 4 == 0)
1812 cpu_fprintf(f, "\n");
1814 cpu_fprintf(f, "\n\n");
1817 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1819 MicroBlazeCPU *cpu;
1821 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1823 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1825 return cpu;
/*
 * One-time TCG setup: register the global TCG values backing the
 * translation-visible CPU state (env pointer, branch/imm/iflags
 * shadows, reservation state, and the general/special registers).
 */
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    /* Load/store-exclusive reservation address and value.  */
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
1868 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1869 target_ulong *data)
1871 env->sregs[SR_PC] = data[0];