target-microblaze: Break out trap_userspace()
qemu.git: target/microblaze/translate.c (blob ffdb36cf9478c4f7adb4bb5045556e5bc6bb59fb)
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
36 #define SIM_COMPAT 0
37 #define DISAS_GNU 1
38 #define DISAS_MB 1
39 #if DISAS_MB && !SIM_COMPAT
40 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 #else
42 # define LOG_DIS(...) do { } while (0)
43 #endif
45 #define D(x)
47 #define EXTRACT_FIELD(src, start, end) \
48 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50 /* is_jmp field values */
51 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
52 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
53 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55 static TCGv_i32 env_debug;
56 static TCGv_i32 cpu_R[32];
57 static TCGv_i32 cpu_SR[14];
58 static TCGv_i32 env_imm;
59 static TCGv_i32 env_btaken;
60 static TCGv_i32 env_btarget;
61 static TCGv_i32 env_iflags;
62 static TCGv env_res_addr;
63 static TCGv_i32 env_res_val;
65 #include "exec/gen-icount.h"
67 /* This is the state at translation time. */
68 typedef struct DisasContext {
69 MicroBlazeCPU *cpu;
70 uint32_t pc;
72 /* Decoder. */
73 int type_b;
74 uint32_t ir;
75 uint8_t opcode;
76 uint8_t rd, ra, rb;
77 uint16_t imm;
79 unsigned int cpustate_changed;
80 unsigned int delayed_branch;
81 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
82 unsigned int clear_imm;
83 int is_jmp;
85 #define JMP_NOJMP 0
86 #define JMP_DIRECT 1
87 #define JMP_DIRECT_CC 2
88 #define JMP_INDIRECT 3
89 unsigned int jmp;
90 uint32_t jmp_pc;
92 int abort_at_next_insn;
93 int nr_nops;
94 struct TranslationBlock *tb;
95 int singlestep_enabled;
96 } DisasContext;
98 static const char *regnames[] =
100 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
101 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
102 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
103 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
106 static const char *special_regnames[] =
108 "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
109 "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
112 static inline void t_sync_flags(DisasContext *dc)
114 /* Synch the tb dependent flags between translator and runtime. */
115 if (dc->tb_flags != dc->synced_flags) {
116 tcg_gen_movi_i32(env_iflags, dc->tb_flags);
117 dc->synced_flags = dc->tb_flags;
121 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
123 TCGv_i32 tmp = tcg_const_i32(index);
125 t_sync_flags(dc);
126 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
127 gen_helper_raise_exception(cpu_env, tmp);
128 tcg_temp_free_i32(tmp);
129 dc->is_jmp = DISAS_UPDATE;
132 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
134 #ifndef CONFIG_USER_ONLY
135 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
136 #else
137 return true;
138 #endif
141 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
143 if (use_goto_tb(dc, dest)) {
144 tcg_gen_goto_tb(n);
145 tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
146 tcg_gen_exit_tb((uintptr_t)dc->tb + n);
147 } else {
148 tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
149 tcg_gen_exit_tb(0);
153 static void read_carry(DisasContext *dc, TCGv_i32 d)
155 tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
159 * write_carry sets the carry bits in MSR based on bit 0 of v.
160 * v[31:1] are ignored.
162 static void write_carry(DisasContext *dc, TCGv_i32 v)
164 TCGv_i32 t0 = tcg_temp_new_i32();
165 tcg_gen_shli_i32(t0, v, 31);
166 tcg_gen_sari_i32(t0, t0, 31);
167 tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
168 tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
169 ~(MSR_C | MSR_CC));
170 tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
171 tcg_temp_free_i32(t0);
174 static void write_carryi(DisasContext *dc, bool carry)
176 TCGv_i32 t0 = tcg_temp_new_i32();
177 tcg_gen_movi_i32(t0, carry);
178 write_carry(dc, t0);
179 tcg_temp_free_i32(t0);
183 * Returns true if the insn is illegal in userspace.
184 * If exceptions are enabled, an exception is raised.
186 static bool trap_userspace(DisasContext *dc, bool cond)
188 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
189 bool cond_user = cond && mem_index == MMU_USER_IDX;
191 if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
192 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
193 t_gen_raise_exception(dc, EXCP_HW_EXCP);
195 return cond_user;
198 /* True if ALU operand b is a small immediate that may deserve
199 faster treatment. */
200 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
202 /* Immediate insn without the imm prefix ? */
203 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
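/* Return a pointer to the TCGv_i32 holding ALU operand B.  For type-B
   insns the immediate is materialized into env_imm (OR:ed into the value
   left by a preceding imm prefix, or sign-extended on its own);
   otherwise operand B is simply register rb.  */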
206 static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
208 if (dc->type_b) {
209 if (dc->tb_flags & IMM_FLAG)
210 tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
211 else
212 tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
213 return &env_imm;
214 } else
215 return &cpu_R[dc->rb];
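/* add/addc/addk/addkc and their immediate forms: opcode bit 2 selects
   "keep carry" (k) and bit 1 selects "add carry-in" (c).  */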
218 static void dec_add(DisasContext *dc)
220 unsigned int k, c;
221 TCGv_i32 cf;
223 k = dc->opcode & 4;
224 c = dc->opcode & 2;
226 LOG_DIS("add%s%s%s r%d r%d r%d\n",
227 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
228 dc->rd, dc->ra, dc->rb);
230 /* Take care of the easy cases first. */
231 if (k) {
232 /* k - keep carry, no need to update MSR. */
233 /* If rd == r0, it's a nop. */
234 if (dc->rd) {
235 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
237 if (c) {
238 /* c - Add carry into the result. */
239 cf = tcg_temp_new_i32();
241 read_carry(dc, cf);
242 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
243 tcg_temp_free_i32(cf);
246 return;
249 /* From now on, we can assume k is zero. So we need to update MSR. */
250 /* Extract carry. */
251 cf = tcg_temp_new_i32();
252 if (c) {
253 read_carry(dc, cf);
254 } else {
255 tcg_gen_movi_i32(cf, 0);
258 if (dc->rd) {
259 TCGv_i32 ncf = tcg_temp_new_i32();
260 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
261 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
262 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
263 write_carry(dc, ncf);
264 tcg_temp_free_i32(ncf);
265 } else {
266 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
267 write_carry(dc, cf);
269 tcg_temp_free_i32(cf);
272 static void dec_sub(DisasContext *dc)
274 unsigned int u, cmp, k, c;
275 TCGv_i32 cf, na;
277 u = dc->imm & 2;
278 k = dc->opcode & 4;
279 c = dc->opcode & 2;
280 cmp = (dc->imm & 1) && (!dc->type_b) && k;
282 if (cmp) {
283 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
284 if (dc->rd) {
285 if (u)
286 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
287 else
288 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
290 return;
293 LOG_DIS("sub%s%s r%d, r%d r%d\n",
294 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
296 /* Take care of the easy cases first. */
297 if (k) {
298 /* k - keep carry, no need to update MSR. */
299 /* If rd == r0, it's a nop. */
300 if (dc->rd) {
301 tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
303 if (c) {
304 /* c - Add carry into the result. */
305 cf = tcg_temp_new_i32();
307 read_carry(dc, cf);
308 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
309 tcg_temp_free_i32(cf);
312 return;
315 /* From now on, we can assume k is zero. So we need to update MSR. */
316 /* Extract carry. And complement a into na. */
317 cf = tcg_temp_new_i32();
318 na = tcg_temp_new_i32();
319 if (c) {
320 read_carry(dc, cf);
321 } else {
322 tcg_gen_movi_i32(cf, 1);
325 /* d = b + ~a + c. carry defaults to 1. */
326 tcg_gen_not_i32(na, cpu_R[dc->ra]);
328 if (dc->rd) {
329 TCGv_i32 ncf = tcg_temp_new_i32();
330 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
331 tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
332 tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
333 write_carry(dc, ncf);
334 tcg_temp_free_i32(ncf);
335 } else {
336 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
337 write_carry(dc, cf);
339 tcg_temp_free_i32(cf);
340 tcg_temp_free_i32(na);
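/* Pattern comparator unit: pcmpbf, pcmpeq and pcmpne, selected by the
   two low bits of the opcode.  */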
343 static void dec_pattern(DisasContext *dc)
345 unsigned int mode;
347 if ((dc->tb_flags & MSR_EE_FLAG)
348 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
349 && !dc->cpu->cfg.use_pcmp_instr) {
350 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
351 t_gen_raise_exception(dc, EXCP_HW_EXCP);
354 mode = dc->opcode & 3;
355 switch (mode) {
356 case 0:
357 /* pcmpbf. */
358 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
359 if (dc->rd)
360 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
361 break;
362 case 2:
363 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
364 if (dc->rd) {
365 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
366 cpu_R[dc->ra], cpu_R[dc->rb]);
368 break;
369 case 3:
370 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
371 if (dc->rd) {
372 tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
373 cpu_R[dc->ra], cpu_R[dc->rb]);
375 break;
376 default:
377 cpu_abort(CPU(dc->cpu),
378 "unsupported pattern insn opcode=%x\n", dc->opcode);
379 break;
383 static void dec_and(DisasContext *dc)
385 unsigned int not;
387 if (!dc->type_b && (dc->imm & (1 << 10))) {
388 dec_pattern(dc);
389 return;
392 not = dc->opcode & (1 << 1);
393 LOG_DIS("and%s\n", not ? "n" : "");
395 if (!dc->rd)
396 return;
398 if (not) {
399 tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
400 } else
401 tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
404 static void dec_or(DisasContext *dc)
406 if (!dc->type_b && (dc->imm & (1 << 10))) {
407 dec_pattern(dc);
408 return;
411 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
412 if (dc->rd)
413 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
416 static void dec_xor(DisasContext *dc)
418 if (!dc->type_b && (dc->imm & (1 << 10))) {
419 dec_pattern(dc);
420 return;
423 LOG_DIS("xor r%d\n", dc->rd);
424 if (dc->rd)
425 tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
428 static inline void msr_read(DisasContext *dc, TCGv_i32 d)
430 tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
433 static inline void msr_write(DisasContext *dc, TCGv_i32 v)
435 TCGv_i32 t;
437 t = tcg_temp_new_i32();
438 dc->cpustate_changed = 1;
439 /* PVR bit is not writable. */
440 tcg_gen_andi_i32(t, v, ~MSR_PVR);
441 tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
442 tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
443 tcg_temp_free(t);
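/* msrclr/msrset and mts/mfs: moves to and from MSR and the other special
   registers, including the MMU registers (the 0x1000 block) when not in
   user mode.  */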
446 static void dec_msr(DisasContext *dc)
448 CPUState *cs = CPU(dc->cpu);
449 TCGv_i32 t0, t1;
450 unsigned int sr, to, rn;
452 sr = dc->imm & ((1 << 14) - 1);
453 to = dc->imm & (1 << 14);
454 dc->type_b = 1;
455 if (to)
456 dc->cpustate_changed = 1;
458 /* msrclr and msrset. */
459 if (!(dc->imm & (1 << 15))) {
460 unsigned int clr = dc->ir & (1 << 16);
462 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
463 dc->rd, dc->imm);
465 if (!dc->cpu->cfg.use_msr_instr) {
466 /* nop??? */
467 return;
470 if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
471 return;
474 if (dc->rd)
475 msr_read(dc, cpu_R[dc->rd]);
477 t0 = tcg_temp_new_i32();
478 t1 = tcg_temp_new_i32();
479 msr_read(dc, t0);
480 tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));
482 if (clr) {
483 tcg_gen_not_i32(t1, t1);
484 tcg_gen_and_i32(t0, t0, t1);
485 } else
486 tcg_gen_or_i32(t0, t0, t1);
487 msr_write(dc, t0);
488 tcg_temp_free_i32(t0);
489 tcg_temp_free_i32(t1);
490 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
491 dc->is_jmp = DISAS_UPDATE;
492 return;
495 if (trap_userspace(dc, to)) {
496 return;
499 #if !defined(CONFIG_USER_ONLY)
500 /* Catch read/writes to the mmu block. */
501 if ((sr & ~0xff) == 0x1000) {
502 sr &= 7;
503 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
504 if (to)
505 gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
506 else
507 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
508 return;
510 #endif
512 if (to) {
513 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
514 switch (sr) {
515 case 0:
516 break;
517 case 1:
518 msr_write(dc, cpu_R[dc->ra]);
519 break;
520 case 0x3:
521 tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
522 break;
523 case 0x5:
524 tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
525 break;
526 case 0x7:
527 tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
528 break;
529 case 0x800:
530 tcg_gen_st_i32(cpu_R[dc->ra],
531 cpu_env, offsetof(CPUMBState, slr));
532 break;
533 case 0x802:
534 tcg_gen_st_i32(cpu_R[dc->ra],
535 cpu_env, offsetof(CPUMBState, shr));
536 break;
537 default:
538 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
539 break;
541 } else {
542 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
544 switch (sr) {
545 case 0:
546 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
547 break;
548 case 1:
549 msr_read(dc, cpu_R[dc->rd]);
550 break;
551 case 0x3:
552 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
553 break;
554 case 0x5:
555 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
556 break;
557 case 0x7:
558 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
559 break;
560 case 0xb:
561 tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
562 break;
563 case 0x800:
564 tcg_gen_ld_i32(cpu_R[dc->rd],
565 cpu_env, offsetof(CPUMBState, slr));
566 break;
567 case 0x802:
568 tcg_gen_ld_i32(cpu_R[dc->rd],
569 cpu_env, offsetof(CPUMBState, shr));
570 break;
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_i32(cpu_R[dc->rd],
586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
587 break;
588 default:
589 cpu_abort(cs, "unknown mfs reg %x\n", sr);
590 break;
594 if (dc->rd == 0) {
595 tcg_gen_movi_i32(cpu_R[0], 0);
599 /* Multiplier unit. */
600 static void dec_mul(DisasContext *dc)
602 TCGv_i32 tmp;
603 unsigned int subcode;
605 if ((dc->tb_flags & MSR_EE_FLAG)
606 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
607 && !dc->cpu->cfg.use_hw_mul) {
608 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
609 t_gen_raise_exception(dc, EXCP_HW_EXCP);
610 return;
613 subcode = dc->imm & 3;
615 if (dc->type_b) {
616 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
617 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
618 return;
621 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
622 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
623 /* nop??? */
626 tmp = tcg_temp_new_i32();
627 switch (subcode) {
628 case 0:
629 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
630 tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
631 break;
632 case 1:
633 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
634 tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
635 cpu_R[dc->ra], cpu_R[dc->rb]);
636 break;
637 case 2:
638 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
639 tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
640 cpu_R[dc->ra], cpu_R[dc->rb]);
641 break;
642 case 3:
643 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
644 tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
645 break;
646 default:
647 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
648 break;
650 tcg_temp_free_i32(tmp);
653 /* Div unit. */
654 static void dec_div(DisasContext *dc)
656 unsigned int u;
658 u = dc->imm & 2;
659 LOG_DIS("div\n");
661 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
662 && !dc->cpu->cfg.use_div) {
663 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
664 t_gen_raise_exception(dc, EXCP_HW_EXCP);
667 if (u)
668 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
669 cpu_R[dc->ra]);
670 else
671 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
672 cpu_R[dc->ra]);
673 if (!dc->rd)
674 tcg_gen_movi_i32(cpu_R[dc->rd], 0);
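/* Barrel shifter.  Bit s selects shift left vs. right and bit t selects
   arithmetic vs. logical right shifts; in immediate mode, bits e and i
   select the bsefi (extract) and bsifi (insert) forms.  */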
677 static void dec_barrel(DisasContext *dc)
679 TCGv_i32 t0;
680 unsigned int imm_w, imm_s;
681 bool s, t, e = false, i = false;
683 if ((dc->tb_flags & MSR_EE_FLAG)
684 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
685 && !dc->cpu->cfg.use_barrel) {
686 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
687 t_gen_raise_exception(dc, EXCP_HW_EXCP);
688 return;
691 if (dc->type_b) {
692 /* Insert and extract are only available in immediate mode. */
693 i = extract32(dc->imm, 15, 1);
694 e = extract32(dc->imm, 14, 1);
696 s = extract32(dc->imm, 10, 1);
697 t = extract32(dc->imm, 9, 1);
698 imm_w = extract32(dc->imm, 6, 5);
699 imm_s = extract32(dc->imm, 0, 5);
701 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
702 e ? "e" : "",
703 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
705 if (e) {
706 if (imm_w + imm_s > 32 || imm_w == 0) {
707 /* These inputs have an undefined behavior. */
708 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
709 imm_w, imm_s);
710 } else {
711 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
713 } else if (i) {
714 int width = imm_w - imm_s + 1;
716 if (imm_w < imm_s) {
717 /* These inputs have an undefined behavior. */
718 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
719 imm_w, imm_s);
720 } else {
721 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
722 imm_s, width);
724 } else {
725 t0 = tcg_temp_new_i32();
727 tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
728 tcg_gen_andi_i32(t0, t0, 31);
730 if (s) {
731 tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
732 } else {
733 if (t) {
734 tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
735 } else {
736 tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
739 tcg_temp_free_i32(t0);
743 static void dec_bit(DisasContext *dc)
745 CPUState *cs = CPU(dc->cpu);
746 TCGv_i32 t0;
747 unsigned int op;
749 op = dc->ir & ((1 << 9) - 1);
750 switch (op) {
751 case 0x21:
752 /* src. */
753 t0 = tcg_temp_new_i32();
755 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
756 tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
757 write_carry(dc, cpu_R[dc->ra]);
758 if (dc->rd) {
759 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
760 tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
762 tcg_temp_free_i32(t0);
763 break;
765 case 0x1:
766 case 0x41:
767 /* srl. */
768 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
770 /* Update carry. Note that write carry only looks at the LSB. */
771 write_carry(dc, cpu_R[dc->ra]);
772 if (dc->rd) {
773 if (op == 0x41)
774 tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
775 else
776 tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
778 break;
779 case 0x60:
780 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
781 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
782 break;
783 case 0x61:
784 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
785 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
786 break;
787 case 0x64:
788 case 0x66:
789 case 0x74:
790 case 0x76:
791 /* wdc. */
792 LOG_DIS("wdc r%d\n", dc->ra);
793 trap_userspace(dc, true);
794 break;
795 case 0x68:
796 /* wic. */
797 LOG_DIS("wic r%d\n", dc->ra);
798 trap_userspace(dc, true);
799 break;
800 case 0xe0:
801 if ((dc->tb_flags & MSR_EE_FLAG)
802 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
803 && !dc->cpu->cfg.use_pcmp_instr) {
804 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
805 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 if (dc->cpu->cfg.use_pcmp_instr) {
808 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
810 break;
811 case 0x1e0:
812 /* swapb */
813 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
814 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
815 break;
816 case 0x1e2:
817 /* swaph */
818 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
819 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
820 break;
821 default:
822 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
823 dc->pc, op, dc->rd, dc->ra, dc->rb);
824 break;
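/* Materialize pending direct-branch state into env_btaken/env_btarget so
   that a fault taken in a delay slot sees a consistent jump state; the
   branch is downgraded to JMP_INDIRECT.  */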
828 static inline void sync_jmpstate(DisasContext *dc)
830 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
831 if (dc->jmp == JMP_DIRECT) {
832 tcg_gen_movi_i32(env_btaken, 1);
834 dc->jmp = JMP_INDIRECT;
835 tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
839 static void dec_imm(DisasContext *dc)
841 LOG_DIS("imm %x\n", dc->imm << 16);
842 tcg_gen_movi_i32(env_imm, (dc->imm << 16));
843 dc->tb_flags |= IMM_FLAG;
844 dc->clear_imm = 0;
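/* Compute the load/store effective address into t: ra + rb for type-A
   insns, ra plus the sign-extended immediate for type-B.  Accesses that
   involve r1 go through the stackprot helper when stack protection is
   configured.  */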
847 static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
849 bool extimm = dc->tb_flags & IMM_FLAG;
850 /* Should be set to true if r1 is used by loadstores. */
851 bool stackprot = false;
852 TCGv_i32 t32;
854 /* All load/stores use ra. */
855 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
856 stackprot = true;
859 /* Treat the common cases first. */
860 if (!dc->type_b) {
861 /* If any of the regs is r0, set t to the value of the other reg. */
862 if (dc->ra == 0) {
863 tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
864 return;
865 } else if (dc->rb == 0) {
866 tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
867 return;
870 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
871 stackprot = true;
874 t32 = tcg_temp_new_i32();
875 tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
876 tcg_gen_extu_i32_tl(t, t32);
877 tcg_temp_free_i32(t32);
879 if (stackprot) {
880 gen_helper_stackprot(cpu_env, t);
882 return;
884 /* Immediate. */
885 t32 = tcg_temp_new_i32();
886 if (!extimm) {
887 if (dc->imm == 0) {
888 tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
889 } else {
890 tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
891 tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
893 } else {
894 tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
896 tcg_gen_extu_i32_tl(t, t32);
897 tcg_temp_free_i32(t32);
899 if (stackprot) {
900 gen_helper_stackprot(cpu_env, t);
902 return;
905 static void dec_load(DisasContext *dc)
907 TCGv_i32 v;
908 TCGv addr;
909 unsigned int size;
910 bool rev = false, ex = false;
911 TCGMemOp mop;
913 mop = dc->opcode & 3;
914 size = 1 << mop;
915 if (!dc->type_b) {
916 rev = extract32(dc->ir, 9, 1);
917 ex = extract32(dc->ir, 10, 1);
919 mop |= MO_TE;
920 if (rev) {
921 mop ^= MO_BSWAP;
924 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
925 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
926 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
927 t_gen_raise_exception(dc, EXCP_HW_EXCP);
928 return;
931 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
932 ex ? "x" : "");
934 t_sync_flags(dc);
935 addr = tcg_temp_new();
936 compute_ldst_addr(dc, addr);
939 * When doing reverse accesses we need to do two things.
941 * 1. Reverse the address wrt endianness.
942 * 2. Byteswap the data lanes on the way back into the CPU core.
944 if (rev && size != 4) {
945 /* Endian reverse the address. t is addr. */
946 switch (size) {
947 case 1:
949 /* 00 -> 11
950 01 -> 10
951 10 -> 01
952 11 -> 00 */
953 TCGv low = tcg_temp_new();
955 tcg_gen_andi_tl(low, addr, 3);
956 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
957 tcg_gen_andi_tl(addr, addr, ~3);
958 tcg_gen_or_tl(addr, addr, low);
959 tcg_temp_free(low);
960 break;
963 case 2:
964 /* 00 -> 10
965 10 -> 00. */
966 tcg_gen_xori_tl(addr, addr, 2);
967 break;
968 default:
969 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
970 break;
974 /* lwx does not throw unaligned access errors, so force alignment */
975 if (ex) {
976 tcg_gen_andi_tl(addr, addr, ~3);
979 /* If we get a fault on a dslot, the jmpstate better be in sync. */
980 sync_jmpstate(dc);
982 /* Verify alignment if needed. */
984 * Microblaze gives MMU faults priority over faults due to
985 * unaligned addresses. That's why we speculatively do the load
986 * into v. If the load succeeds, we verify alignment of the
987 * address and if that succeeds we write into the destination reg.
989 v = tcg_temp_new_i32();
990 tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);
992 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
993 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
994 gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
995 tcg_const_i32(0), tcg_const_i32(size - 1));
998 if (ex) {
999 tcg_gen_mov_tl(env_res_addr, addr);
1000 tcg_gen_mov_i32(env_res_val, v);
1002 if (dc->rd) {
1003 tcg_gen_mov_i32(cpu_R[dc->rd], v);
1005 tcg_temp_free_i32(v);
1007 if (ex) { /* lwx */
1008 /* no support for AXI exclusive so always clear C */
1009 write_carryi(dc, 0);
1012 tcg_temp_free(addr);
1015 static void dec_store(DisasContext *dc)
1017 TCGv addr;
1018 TCGLabel *swx_skip = NULL;
1019 unsigned int size;
1020 bool rev = false, ex = false;
1021 TCGMemOp mop;
1023 mop = dc->opcode & 3;
1024 size = 1 << mop;
1025 if (!dc->type_b) {
1026 rev = extract32(dc->ir, 9, 1);
1027 ex = extract32(dc->ir, 10, 1);
1029 mop |= MO_TE;
1030 if (rev) {
1031 mop ^= MO_BSWAP;
1034 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1035 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1036 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1037 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1038 return;
1041 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1042 ex ? "x" : "");
1043 t_sync_flags(dc);
1044 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1045 sync_jmpstate(dc);
1046 /* SWX needs a temp_local. */
1047 addr = ex ? tcg_temp_local_new() : tcg_temp_new();
1048 compute_ldst_addr(dc, addr);
1050 if (ex) { /* swx */
1051 TCGv_i32 tval;
1053 /* swx does not throw unaligned access errors, so force alignment */
1054 tcg_gen_andi_tl(addr, addr, ~3);
1056 write_carryi(dc, 1);
1057 swx_skip = gen_new_label();
1058 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);
1060 /* Compare the value loaded at lwx with current contents of
1061 the reserved location.
1062 FIXME: This only works for system emulation where we can expect
1063 this compare and the following write to be atomic. For user
1064 emulation we need to add atomicity between threads. */
1065 tval = tcg_temp_new_i32();
1066 tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
1067 MO_TEUL);
1068 tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
1069 write_carryi(dc, 0);
1070 tcg_temp_free_i32(tval);
1073 if (rev && size != 4) {
1074 /* Endian reverse the address. t is addr. */
1075 switch (size) {
1076 case 1:
1078 /* 00 -> 11
1079 01 -> 10
1080 10 -> 01
1081 11 -> 00 */
1082 TCGv low = tcg_temp_new();
1084 tcg_gen_andi_tl(low, addr, 3);
1085 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1086 tcg_gen_andi_tl(addr, addr, ~3);
1087 tcg_gen_or_tl(addr, addr, low);
1088 tcg_temp_free(low);
1089 break;
1092 case 2:
1093 /* 00 -> 10
1094 10 -> 00. */
1095 /* Force addr into the temp. */
1096 tcg_gen_xori_tl(addr, addr, 2);
1097 break;
1098 default:
1099 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1100 break;
1103 tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
1104 cpu_mmu_index(&dc->cpu->env, false), mop);
1106 /* Verify alignment if needed. */
1107 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1108 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
1109 /* FIXME: if the alignment is wrong, we should restore the value
1110 * in memory. One possible way to achieve this is to probe
1111 * the MMU prior to the memaccess, that way we could put
1112 * the alignment checks in between the probe and the mem
1113 * access.
1115 gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
1116 tcg_const_i32(1), tcg_const_i32(size - 1));
1119 if (ex) {
1120 gen_set_label(swx_skip);
1123 tcg_temp_free(addr);
1126 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1127 TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1129 switch (cc) {
1130 case CC_EQ:
1131 tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
1132 break;
1133 case CC_NE:
1134 tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
1135 break;
1136 case CC_LT:
1137 tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
1138 break;
1139 case CC_LE:
1140 tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
1141 break;
1142 case CC_GE:
1143 tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
1144 break;
1145 case CC_GT:
1146 tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
1147 break;
1148 default:
1149 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1150 break;
1154 static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
1156 TCGLabel *l1 = gen_new_label();
1157 /* Conditional jmp. */
1158 tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
1159 tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
1160 tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
1161 gen_set_label(l1);
1164 static void dec_bcc(DisasContext *dc)
1166 unsigned int cc;
1167 unsigned int dslot;
1169 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1170 dslot = dc->ir & (1 << 25);
1171 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1173 dc->delayed_branch = 1;
1174 if (dslot) {
1175 dc->delayed_branch = 2;
1176 dc->tb_flags |= D_FLAG;
1177 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1178 cpu_env, offsetof(CPUMBState, bimm));
1181 if (dec_alu_op_b_is_small_imm(dc)) {
1182 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1184 tcg_gen_movi_i32(env_btarget, dc->pc + offset);
1185 dc->jmp = JMP_DIRECT_CC;
1186 dc->jmp_pc = dc->pc + offset;
1187 } else {
1188 dc->jmp = JMP_INDIRECT;
1189 tcg_gen_movi_i32(env_btarget, dc->pc);
1190 tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1192 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
1195 static void dec_br(DisasContext *dc)
1197 unsigned int dslot, link, abs, mbar;
1199 dslot = dc->ir & (1 << 20);
1200 abs = dc->ir & (1 << 19);
1201 link = dc->ir & (1 << 18);
1203 /* Memory barrier. */
1204 mbar = (dc->ir >> 16) & 31;
1205 if (mbar == 2 && dc->imm == 4) {
1206 /* mbar IMM & 16 decodes to sleep. */
1207 if (dc->rd & 16) {
1208 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1209 TCGv_i32 tmp_1 = tcg_const_i32(1);
1211 LOG_DIS("sleep\n");
1213 t_sync_flags(dc);
1214 tcg_gen_st_i32(tmp_1, cpu_env,
1215 -offsetof(MicroBlazeCPU, env)
1216 +offsetof(CPUState, halted));
1217 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
1218 gen_helper_raise_exception(cpu_env, tmp_hlt);
1219 tcg_temp_free_i32(tmp_hlt);
1220 tcg_temp_free_i32(tmp_1);
1221 return;
1223 LOG_DIS("mbar %d\n", dc->rd);
1224 /* Break the TB. */
1225 dc->cpustate_changed = 1;
1226 return;
1229 LOG_DIS("br%s%s%s%s imm=%x\n",
1230 abs ? "a" : "", link ? "l" : "",
1231 dc->type_b ? "i" : "", dslot ? "d" : "",
1232 dc->imm);
1234 dc->delayed_branch = 1;
1235 if (dslot) {
1236 dc->delayed_branch = 2;
1237 dc->tb_flags |= D_FLAG;
1238 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1239 cpu_env, offsetof(CPUMBState, bimm));
1241 if (link && dc->rd)
1242 tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
1244 dc->jmp = JMP_INDIRECT;
1245 if (abs) {
1246 tcg_gen_movi_i32(env_btaken, 1);
1247 tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
1248 if (link && !dslot) {
1249 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1250 t_gen_raise_exception(dc, EXCP_BREAK);
1251 if (dc->imm == 0) {
1252 if (trap_userspace(dc, true)) {
1253 return;
1256 t_gen_raise_exception(dc, EXCP_DEBUG);
1259 } else {
1260 if (dec_alu_op_b_is_small_imm(dc)) {
1261 dc->jmp = JMP_DIRECT;
1262 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1263 } else {
1264 tcg_gen_movi_i32(env_btaken, 1);
1265 tcg_gen_movi_i32(env_btarget, dc->pc);
1266 tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
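/* rtid: return from interrupt.  Re-enable interrupts (IE) and restore
   UM/VM from their UMS/VMS save copies in MSR.  */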
1271 static inline void do_rti(DisasContext *dc)
1273 TCGv_i32 t0, t1;
1274 t0 = tcg_temp_new_i32();
1275 t1 = tcg_temp_new_i32();
1276 tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
1277 tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
1278 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1280 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1281 tcg_gen_or_i32(t1, t1, t0);
1282 msr_write(dc, t1);
1283 tcg_temp_free_i32(t1);
1284 tcg_temp_free_i32(t0);
1285 dc->tb_flags &= ~DRTI_FLAG;
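/* rtbd: return from break.  Clear BIP and restore UM/VM from their
   UMS/VMS save copies.  */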
1288 static inline void do_rtb(DisasContext *dc)
1290 TCGv_i32 t0, t1;
1291 t0 = tcg_temp_new_i32();
1292 t1 = tcg_temp_new_i32();
1293 tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1294 tcg_gen_shri_i32(t0, t1, 1);
1295 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1297 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1298 tcg_gen_or_i32(t1, t1, t0);
1299 msr_write(dc, t1);
1300 tcg_temp_free_i32(t1);
1301 tcg_temp_free_i32(t0);
1302 dc->tb_flags &= ~DRTB_FLAG;
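/* rted: return from exception.  Set EE, clear EIP and restore UM/VM
   from their UMS/VMS save copies.  */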
1305 static inline void do_rte(DisasContext *dc)
1307 TCGv_i32 t0, t1;
1308 t0 = tcg_temp_new_i32();
1309 t1 = tcg_temp_new_i32();
1311 tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
1312 tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
1313 tcg_gen_shri_i32(t0, t1, 1);
1314 tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
1316 tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
1317 tcg_gen_or_i32(t1, t1, t0);
1318 msr_write(dc, t1);
1319 tcg_temp_free_i32(t1);
1320 tcg_temp_free_i32(t0);
1321 dc->tb_flags &= ~DRTE_FLAG;
1324 static void dec_rts(DisasContext *dc)
1326 unsigned int b_bit, i_bit, e_bit;
1328 i_bit = dc->ir & (1 << 21);
1329 b_bit = dc->ir & (1 << 22);
1330 e_bit = dc->ir & (1 << 23);
1332 if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
1333 return;
1336 dc->delayed_branch = 2;
1337 dc->tb_flags |= D_FLAG;
1338 tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1339 cpu_env, offsetof(CPUMBState, bimm));
1341 if (i_bit) {
1342 LOG_DIS("rtid ir=%x\n", dc->ir);
1343 dc->tb_flags |= DRTI_FLAG;
1344 } else if (b_bit) {
1345 LOG_DIS("rtbd ir=%x\n", dc->ir);
1346 dc->tb_flags |= DRTB_FLAG;
1347 } else if (e_bit) {
1348 LOG_DIS("rted ir=%x\n", dc->ir);
1349 dc->tb_flags |= DRTE_FLAG;
1350 } else
1351 LOG_DIS("rts ir=%x\n", dc->ir);
1353 dc->jmp = JMP_INDIRECT;
1354 tcg_gen_movi_i32(env_btaken, 1);
1355 tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1358 static int dec_check_fpuv2(DisasContext *dc)
1360 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1361 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
1362 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1364 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
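/* FPU unit.  Bits [9:7] of the insn select the operation; the fcmp
   variants are further selected by bits [6:4].  */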
1367 static void dec_fpu(DisasContext *dc)
1369 unsigned int fpu_insn;
1371 if ((dc->tb_flags & MSR_EE_FLAG)
1372 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1373 && !dc->cpu->cfg.use_fpu) {
1374 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1375 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1376 return;
1379 fpu_insn = (dc->ir >> 7) & 7;
1381 switch (fpu_insn) {
1382 case 0:
1383 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1384 cpu_R[dc->rb]);
1385 break;
1387 case 1:
1388 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1389 cpu_R[dc->rb]);
1390 break;
1392 case 2:
1393 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1394 cpu_R[dc->rb]);
1395 break;
1397 case 3:
1398 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1399 cpu_R[dc->rb]);
1400 break;
1402 case 4:
1403 switch ((dc->ir >> 4) & 7) {
1404 case 0:
1405 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1406 cpu_R[dc->ra], cpu_R[dc->rb]);
1407 break;
1408 case 1:
1409 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1410 cpu_R[dc->ra], cpu_R[dc->rb]);
1411 break;
1412 case 2:
1413 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1414 cpu_R[dc->ra], cpu_R[dc->rb]);
1415 break;
1416 case 3:
1417 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1418 cpu_R[dc->ra], cpu_R[dc->rb]);
1419 break;
1420 case 4:
1421 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1422 cpu_R[dc->ra], cpu_R[dc->rb]);
1423 break;
1424 case 5:
1425 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1426 cpu_R[dc->ra], cpu_R[dc->rb]);
1427 break;
1428 case 6:
1429 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1430 cpu_R[dc->ra], cpu_R[dc->rb]);
1431 break;
1432 default:
1433 qemu_log_mask(LOG_UNIMP,
1434 "unimplemented fcmp fpu_insn=%x pc=%x"
1435 " opc=%x\n",
1436 fpu_insn, dc->pc, dc->opcode);
1437 dc->abort_at_next_insn = 1;
1438 break;
1440 break;
1442 case 5:
1443 if (!dec_check_fpuv2(dc)) {
1444 return;
1446 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1447 break;
1449 case 6:
1450 if (!dec_check_fpuv2(dc)) {
1451 return;
1453 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1454 break;
1456 case 7:
1457 if (!dec_check_fpuv2(dc)) {
1458 return;
1460 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1461 break;
1463 default:
1464 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1465 " opc=%x\n",
1466 fpu_insn, dc->pc, dc->opcode);
1467 dc->abort_at_next_insn = 1;
1468 break;
1472 static void dec_null(DisasContext *dc)
1474 if ((dc->tb_flags & MSR_EE_FLAG)
1475 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1476 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1477 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1478 return;
1480 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1481 dc->abort_at_next_insn = 1;
1484 /* Insns connected to FSL or AXI stream attached devices. */
1485 static void dec_stream(DisasContext *dc)
1487 TCGv_i32 t_id, t_ctrl;
1488 int ctrl;
1490 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1491 dc->type_b ? "" : "d", dc->imm);
1493 if (trap_userspace(dc, true)) {
1494 return;
1497 t_id = tcg_temp_new_i32();
1498 if (dc->type_b) {
1499 tcg_gen_movi_i32(t_id, dc->imm & 0xf);
1500 ctrl = dc->imm >> 10;
1501 } else {
1502 tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
1503 ctrl = dc->imm >> 5;
1506 t_ctrl = tcg_const_i32(ctrl);
1508 if (dc->rd == 0) {
1509 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1510 } else {
1511 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1513 tcg_temp_free_i32(t_id);
1514 tcg_temp_free_i32(t_ctrl);
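/* Decoder table, matched in order against the major opcode.  */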
1517 static struct decoder_info {
1518 struct {
1519 uint32_t bits;
1520 uint32_t mask;
1522 void (*dec)(DisasContext *dc);
1523 } decinfo[] = {
1524 {DEC_ADD, dec_add},
1525 {DEC_SUB, dec_sub},
1526 {DEC_AND, dec_and},
1527 {DEC_XOR, dec_xor},
1528 {DEC_OR, dec_or},
1529 {DEC_BIT, dec_bit},
1530 {DEC_BARREL, dec_barrel},
1531 {DEC_LD, dec_load},
1532 {DEC_ST, dec_store},
1533 {DEC_IMM, dec_imm},
1534 {DEC_BR, dec_br},
1535 {DEC_BCC, dec_bcc},
1536 {DEC_RTS, dec_rts},
1537 {DEC_FPU, dec_fpu},
1538 {DEC_MUL, dec_mul},
1539 {DEC_DIV, dec_div},
1540 {DEC_MSR, dec_msr},
1541 {DEC_STREAM, dec_stream},
1542 {{0, 0}, dec_null}
1545 static inline void decode(DisasContext *dc, uint32_t ir)
1547 int i;
1549 dc->ir = ir;
1550 LOG_DIS("%8.8x\t", dc->ir);
1552 if (dc->ir)
1553 dc->nr_nops = 0;
1554 else {
1555 if ((dc->tb_flags & MSR_EE_FLAG)
1556 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1557 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1558 tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1559 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1560 return;
1563 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1564 dc->nr_nops++;
1565 if (dc->nr_nops > 4) {
1566 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1569 /* bit 2 seems to indicate insn type. */
1570 dc->type_b = ir & (1 << 29);
1572 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1573 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1574 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1575 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1576 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1578 /* Large switch for all insns. */
1579 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1580 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1581 decinfo[i].dec(dc);
1582 break;
1587 /* generate intermediate code for basic block 'tb'. */
1588 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
1590 CPUMBState *env = cs->env_ptr;
1591 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1592 uint32_t pc_start;
1593 struct DisasContext ctx;
1594 struct DisasContext *dc = &ctx;
1595 uint32_t page_start, org_flags;
1596 uint32_t npc;
1597 int num_insns;
1598 int max_insns;
1600 pc_start = tb->pc;
1601 dc->cpu = cpu;
1602 dc->tb = tb;
1603 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1605 dc->is_jmp = DISAS_NEXT;
1606 dc->jmp = 0;
1607 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1608 if (dc->delayed_branch) {
1609 dc->jmp = JMP_INDIRECT;
1611 dc->pc = pc_start;
1612 dc->singlestep_enabled = cs->singlestep_enabled;
1613 dc->cpustate_changed = 0;
1614 dc->abort_at_next_insn = 0;
1615 dc->nr_nops = 0;
1617 if (pc_start & 3) {
1618 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1621 page_start = pc_start & TARGET_PAGE_MASK;
1622 num_insns = 0;
1623 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
1624 if (max_insns == 0) {
1625 max_insns = CF_COUNT_MASK;
1627 if (max_insns > TCG_MAX_INSNS) {
1628 max_insns = TCG_MAX_INSNS;
1631 gen_tb_start(tb);
1634 tcg_gen_insn_start(dc->pc);
1635 num_insns++;
1637 #if SIM_COMPAT
1638 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1639 tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
1640 gen_helper_debug();
1642 #endif
1644 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1645 t_gen_raise_exception(dc, EXCP_DEBUG);
1646 dc->is_jmp = DISAS_UPDATE;
1647 /* The address covered by the breakpoint must be included in
1648 [tb->pc, tb->pc + tb->size) in order for it to be
1649 properly cleared -- thus we increment the PC here so that
1650 the logic setting tb->size below does the right thing. */
1651 dc->pc += 4;
1652 break;
1655 /* Pretty disas. */
1656 LOG_DIS("%8.8x:\t", dc->pc);
1658 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1659 gen_io_start();
1662 dc->clear_imm = 1;
1663 decode(dc, cpu_ldl_code(env, dc->pc));
1664 if (dc->clear_imm)
1665 dc->tb_flags &= ~IMM_FLAG;
1666 dc->pc += 4;
1668 if (dc->delayed_branch) {
1669 dc->delayed_branch--;
1670 if (!dc->delayed_branch) {
1671 if (dc->tb_flags & DRTI_FLAG)
1672 do_rti(dc);
1673 if (dc->tb_flags & DRTB_FLAG)
1674 do_rtb(dc);
1675 if (dc->tb_flags & DRTE_FLAG)
1676 do_rte(dc);
1677 /* Clear the delay slot flag. */
1678 dc->tb_flags &= ~D_FLAG;
1679 /* If it is a direct jump, try direct chaining. */
1680 if (dc->jmp == JMP_INDIRECT) {
1681 eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
1682 dc->is_jmp = DISAS_JUMP;
1683 } else if (dc->jmp == JMP_DIRECT) {
1684 t_sync_flags(dc);
1685 gen_goto_tb(dc, 0, dc->jmp_pc);
1686 dc->is_jmp = DISAS_TB_JUMP;
1687 } else if (dc->jmp == JMP_DIRECT_CC) {
1688 TCGLabel *l1 = gen_new_label();
1689 t_sync_flags(dc);
1690 /* Conditional jmp. */
1691 tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
1692 gen_goto_tb(dc, 1, dc->pc);
1693 gen_set_label(l1);
1694 gen_goto_tb(dc, 0, dc->jmp_pc);
1696 dc->is_jmp = DISAS_TB_JUMP;
1698 break;
1701 if (cs->singlestep_enabled) {
1702 break;
1704 } while (!dc->is_jmp && !dc->cpustate_changed
1705 && !tcg_op_buf_full()
1706 && !singlestep
1707 && (dc->pc - page_start < TARGET_PAGE_SIZE)
1708 && num_insns < max_insns);
1710 npc = dc->pc;
1711 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1712 if (dc->tb_flags & D_FLAG) {
1713 dc->is_jmp = DISAS_UPDATE;
1714 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
1715 sync_jmpstate(dc);
1716 } else
1717 npc = dc->jmp_pc;
1720 if (tb_cflags(tb) & CF_LAST_IO)
1721 gen_io_end();
1722 /* Force an update if the per-tb cpu state has changed. */
1723 if (dc->is_jmp == DISAS_NEXT
1724 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1725 dc->is_jmp = DISAS_UPDATE;
1726 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
1728 t_sync_flags(dc);
1730 if (unlikely(cs->singlestep_enabled)) {
1731 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1733 if (dc->is_jmp != DISAS_JUMP) {
1734 tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
1736 gen_helper_raise_exception(cpu_env, tmp);
1737 tcg_temp_free_i32(tmp);
1738 } else {
1739 switch(dc->is_jmp) {
1740 case DISAS_NEXT:
1741 gen_goto_tb(dc, 1, npc);
1742 break;
1743 default:
1744 case DISAS_JUMP:
1745 case DISAS_UPDATE:
1746 /* indicate that the hash table must be used
1747 to find the next TB */
1748 tcg_gen_exit_tb(0);
1749 break;
1750 case DISAS_TB_JUMP:
1751 /* nothing more to generate */
1752 break;
1755 gen_tb_end(tb, num_insns);
1757 tb->size = dc->pc - pc_start;
1758 tb->icount = num_insns;
1760 #ifdef DEBUG_DISAS
1761 #if !SIM_COMPAT
1762 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1763 && qemu_log_in_addr_range(pc_start)) {
1764 qemu_log_lock();
1765 qemu_log("--------------\n");
1766 log_target_disas(cs, pc_start, dc->pc - pc_start);
1767 qemu_log_unlock();
1769 #endif
1770 #endif
1771 assert(!dc->abort_at_next_insn);
1774 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1775 int flags)
1777 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1778 CPUMBState *env = &cpu->env;
1779 int i;
1781 if (!env || !f)
1782 return;
1784 cpu_fprintf(f, "IN: PC=%x %s\n",
1785 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1786 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1787 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1788 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1789 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1790 env->btaken, env->btarget,
1791 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1792 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1793 (env->sregs[SR_MSR] & MSR_EIP),
1794 (env->sregs[SR_MSR] & MSR_IE));
1796 for (i = 0; i < 32; i++) {
1797 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1798 if ((i + 1) % 4 == 0)
1799 cpu_fprintf(f, "\n");
1801 cpu_fprintf(f, "\n\n");
1804 void mb_tcg_init(void)
1806 int i;
1808 env_debug = tcg_global_mem_new_i32(cpu_env,
1809 offsetof(CPUMBState, debug),
1810 "debug0");
1811 env_iflags = tcg_global_mem_new_i32(cpu_env,
1812 offsetof(CPUMBState, iflags),
1813 "iflags");
1814 env_imm = tcg_global_mem_new_i32(cpu_env,
1815 offsetof(CPUMBState, imm),
1816 "imm");
1817 env_btarget = tcg_global_mem_new_i32(cpu_env,
1818 offsetof(CPUMBState, btarget),
1819 "btarget");
1820 env_btaken = tcg_global_mem_new_i32(cpu_env,
1821 offsetof(CPUMBState, btaken),
1822 "btaken");
1823 env_res_addr = tcg_global_mem_new(cpu_env,
1824 offsetof(CPUMBState, res_addr),
1825 "res_addr");
1826 env_res_val = tcg_global_mem_new_i32(cpu_env,
1827 offsetof(CPUMBState, res_val),
1828 "res_val");
1829 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1830 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
1831 offsetof(CPUMBState, regs[i]),
1832 regnames[i]);
1834 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1835 cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
1836 offsetof(CPUMBState, sregs[i]),
1837 special_regnames[i]);
1841 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1842 target_ulong *data)
1844 env->sregs[SR_PC] = data[0];