cpu: Move can_do_io field from CPU_COMMON to CPUState
[qemu/ar7.git] / target-microblaze / translate.c
blob270138c6d285ec66a0788d17c40f4e4b6331672d
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "helper.h"
25 #include "microblaze-decode.h"
27 #define GEN_HELPER 1
28 #include "helper.h"
30 #define SIM_COMPAT 0
31 #define DISAS_GNU 1
32 #define DISAS_MB 1
33 #if DISAS_MB && !SIM_COMPAT
34 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35 #else
36 # define LOG_DIS(...) do { } while (0)
37 #endif
39 #define D(x)
/* Extract bits [start, end] (inclusive, end >= start) from src.
   Arguments are fully parenthesized so expression arguments such as
   EXTRACT_FIELD(x, a + 1, b) expand correctly.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
44 static TCGv env_debug;
45 static TCGv_ptr cpu_env;
46 static TCGv cpu_R[32];
47 static TCGv cpu_SR[18];
48 static TCGv env_imm;
49 static TCGv env_btaken;
50 static TCGv env_btarget;
51 static TCGv env_iflags;
52 static TCGv env_res_addr;
53 static TCGv env_res_val;
55 #include "exec/gen-icount.h"
57 /* This is the state at translation time. */
58 typedef struct DisasContext {
59 CPUMBState *env;
60 target_ulong pc;
62 /* Decoder. */
63 int type_b;
64 uint32_t ir;
65 uint8_t opcode;
66 uint8_t rd, ra, rb;
67 uint16_t imm;
69 unsigned int cpustate_changed;
70 unsigned int delayed_branch;
71 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
72 unsigned int clear_imm;
73 int is_jmp;
75 #define JMP_NOJMP 0
76 #define JMP_DIRECT 1
77 #define JMP_DIRECT_CC 2
78 #define JMP_INDIRECT 3
79 unsigned int jmp;
80 uint32_t jmp_pc;
82 int abort_at_next_insn;
83 int nr_nops;
84 struct TranslationBlock *tb;
85 int singlestep_enabled;
86 } DisasContext;
/* Printable names of the 32 general purpose registers.  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
/* Printable names of the special registers.  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
/* Sign extend at translation time: VAL's sign bit sits at bit WIDTH.
   Relies on arithmetic right shift of negative ints (true on all
   supported hosts).  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    unsigned int shift = 31 - width;

    /* Move the sign bit up to bit 31, then arithmetic-shift it back.  */
    return ((int)(val << shift)) >> shift;
}
116 static inline void t_sync_flags(DisasContext *dc)
118 /* Synch the tb dependent flags between translator and runtime. */
119 if (dc->tb_flags != dc->synced_flags) {
120 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
121 dc->synced_flags = dc->tb_flags;
125 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
127 TCGv_i32 tmp = tcg_const_i32(index);
129 t_sync_flags(dc);
130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
131 gen_helper_raise_exception(cpu_env, tmp);
132 tcg_temp_free_i32(tmp);
133 dc->is_jmp = DISAS_UPDATE;
136 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
138 TranslationBlock *tb;
139 tb = dc->tb;
140 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
141 tcg_gen_goto_tb(n);
142 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
143 tcg_gen_exit_tb((uintptr_t)tb + n);
144 } else {
145 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
146 tcg_gen_exit_tb(0);
150 static void read_carry(DisasContext *dc, TCGv d)
152 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
156 * write_carry sets the carry bits in MSR based on bit 0 of v.
157 * v[31:1] are ignored.
159 static void write_carry(DisasContext *dc, TCGv v)
161 TCGv t0 = tcg_temp_new();
162 tcg_gen_shli_tl(t0, v, 31);
163 tcg_gen_sari_tl(t0, t0, 31);
164 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
165 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
166 ~(MSR_C | MSR_CC));
167 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
168 tcg_temp_free(t0);
171 static void write_carryi(DisasContext *dc, bool carry)
173 TCGv t0 = tcg_temp_new();
174 tcg_gen_movi_tl(t0, carry);
175 write_carry(dc, t0);
176 tcg_temp_free(t0);
179 /* True if ALU operand b is a small immediate that may deserve
180 faster treatment. */
181 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
183 /* Immediate insn without the imm prefix ? */
184 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
187 static inline TCGv *dec_alu_op_b(DisasContext *dc)
189 if (dc->type_b) {
190 if (dc->tb_flags & IMM_FLAG)
191 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
192 else
193 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
194 return &env_imm;
195 } else
196 return &cpu_R[dc->rb];
199 static void dec_add(DisasContext *dc)
201 unsigned int k, c;
202 TCGv cf;
204 k = dc->opcode & 4;
205 c = dc->opcode & 2;
207 LOG_DIS("add%s%s%s r%d r%d r%d\n",
208 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
209 dc->rd, dc->ra, dc->rb);
211 /* Take care of the easy cases first. */
212 if (k) {
213 /* k - keep carry, no need to update MSR. */
214 /* If rd == r0, it's a nop. */
215 if (dc->rd) {
216 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
218 if (c) {
219 /* c - Add carry into the result. */
220 cf = tcg_temp_new();
222 read_carry(dc, cf);
223 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
224 tcg_temp_free(cf);
227 return;
230 /* From now on, we can assume k is zero. So we need to update MSR. */
231 /* Extract carry. */
232 cf = tcg_temp_new();
233 if (c) {
234 read_carry(dc, cf);
235 } else {
236 tcg_gen_movi_tl(cf, 0);
239 if (dc->rd) {
240 TCGv ncf = tcg_temp_new();
241 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
242 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
243 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
244 write_carry(dc, ncf);
245 tcg_temp_free(ncf);
246 } else {
247 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
248 write_carry(dc, cf);
250 tcg_temp_free(cf);
253 static void dec_sub(DisasContext *dc)
255 unsigned int u, cmp, k, c;
256 TCGv cf, na;
258 u = dc->imm & 2;
259 k = dc->opcode & 4;
260 c = dc->opcode & 2;
261 cmp = (dc->imm & 1) && (!dc->type_b) && k;
263 if (cmp) {
264 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
265 if (dc->rd) {
266 if (u)
267 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
268 else
269 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
271 return;
274 LOG_DIS("sub%s%s r%d, r%d r%d\n",
275 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
277 /* Take care of the easy cases first. */
278 if (k) {
279 /* k - keep carry, no need to update MSR. */
280 /* If rd == r0, it's a nop. */
281 if (dc->rd) {
282 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
284 if (c) {
285 /* c - Add carry into the result. */
286 cf = tcg_temp_new();
288 read_carry(dc, cf);
289 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
290 tcg_temp_free(cf);
293 return;
296 /* From now on, we can assume k is zero. So we need to update MSR. */
297 /* Extract carry. And complement a into na. */
298 cf = tcg_temp_new();
299 na = tcg_temp_new();
300 if (c) {
301 read_carry(dc, cf);
302 } else {
303 tcg_gen_movi_tl(cf, 1);
306 /* d = b + ~a + c. carry defaults to 1. */
307 tcg_gen_not_tl(na, cpu_R[dc->ra]);
309 if (dc->rd) {
310 TCGv ncf = tcg_temp_new();
311 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
312 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
313 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
314 write_carry(dc, ncf);
315 tcg_temp_free(ncf);
316 } else {
317 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
318 write_carry(dc, cf);
320 tcg_temp_free(cf);
321 tcg_temp_free(na);
324 static void dec_pattern(DisasContext *dc)
326 unsigned int mode;
327 int l1;
329 if ((dc->tb_flags & MSR_EE_FLAG)
330 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
331 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
332 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
333 t_gen_raise_exception(dc, EXCP_HW_EXCP);
336 mode = dc->opcode & 3;
337 switch (mode) {
338 case 0:
339 /* pcmpbf. */
340 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd)
342 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
343 break;
344 case 2:
345 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
346 if (dc->rd) {
347 TCGv t0 = tcg_temp_local_new();
348 l1 = gen_new_label();
349 tcg_gen_movi_tl(t0, 1);
350 tcg_gen_brcond_tl(TCG_COND_EQ,
351 cpu_R[dc->ra], cpu_R[dc->rb], l1);
352 tcg_gen_movi_tl(t0, 0);
353 gen_set_label(l1);
354 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
355 tcg_temp_free(t0);
357 break;
358 case 3:
359 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
360 l1 = gen_new_label();
361 if (dc->rd) {
362 TCGv t0 = tcg_temp_local_new();
363 tcg_gen_movi_tl(t0, 1);
364 tcg_gen_brcond_tl(TCG_COND_NE,
365 cpu_R[dc->ra], cpu_R[dc->rb], l1);
366 tcg_gen_movi_tl(t0, 0);
367 gen_set_label(l1);
368 tcg_gen_mov_tl(cpu_R[dc->rd], t0);
369 tcg_temp_free(t0);
371 break;
372 default:
373 cpu_abort(dc->env,
374 "unsupported pattern insn opcode=%x\n", dc->opcode);
375 break;
379 static void dec_and(DisasContext *dc)
381 unsigned int not;
383 if (!dc->type_b && (dc->imm & (1 << 10))) {
384 dec_pattern(dc);
385 return;
388 not = dc->opcode & (1 << 1);
389 LOG_DIS("and%s\n", not ? "n" : "");
391 if (!dc->rd)
392 return;
394 if (not) {
395 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
396 } else
397 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
400 static void dec_or(DisasContext *dc)
402 if (!dc->type_b && (dc->imm & (1 << 10))) {
403 dec_pattern(dc);
404 return;
407 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
408 if (dc->rd)
409 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
412 static void dec_xor(DisasContext *dc)
414 if (!dc->type_b && (dc->imm & (1 << 10))) {
415 dec_pattern(dc);
416 return;
419 LOG_DIS("xor r%d\n", dc->rd);
420 if (dc->rd)
421 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
424 static inline void msr_read(DisasContext *dc, TCGv d)
426 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
429 static inline void msr_write(DisasContext *dc, TCGv v)
431 TCGv t;
433 t = tcg_temp_new();
434 dc->cpustate_changed = 1;
435 /* PVR bit is not writable. */
436 tcg_gen_andi_tl(t, v, ~MSR_PVR);
437 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
438 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
439 tcg_temp_free(t);
442 static void dec_msr(DisasContext *dc)
444 TCGv t0, t1;
445 unsigned int sr, to, rn;
446 int mem_index = cpu_mmu_index(dc->env);
448 sr = dc->imm & ((1 << 14) - 1);
449 to = dc->imm & (1 << 14);
450 dc->type_b = 1;
451 if (to)
452 dc->cpustate_changed = 1;
454 /* msrclr and msrset. */
455 if (!(dc->imm & (1 << 15))) {
456 unsigned int clr = dc->ir & (1 << 16);
458 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
459 dc->rd, dc->imm);
461 if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
462 /* nop??? */
463 return;
466 if ((dc->tb_flags & MSR_EE_FLAG)
467 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
468 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
469 t_gen_raise_exception(dc, EXCP_HW_EXCP);
470 return;
473 if (dc->rd)
474 msr_read(dc, cpu_R[dc->rd]);
476 t0 = tcg_temp_new();
477 t1 = tcg_temp_new();
478 msr_read(dc, t0);
479 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
481 if (clr) {
482 tcg_gen_not_tl(t1, t1);
483 tcg_gen_and_tl(t0, t0, t1);
484 } else
485 tcg_gen_or_tl(t0, t0, t1);
486 msr_write(dc, t0);
487 tcg_temp_free(t0);
488 tcg_temp_free(t1);
489 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
490 dc->is_jmp = DISAS_UPDATE;
491 return;
494 if (to) {
495 if ((dc->tb_flags & MSR_EE_FLAG)
496 && mem_index == MMU_USER_IDX) {
497 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
498 t_gen_raise_exception(dc, EXCP_HW_EXCP);
499 return;
503 #if !defined(CONFIG_USER_ONLY)
504 /* Catch read/writes to the mmu block. */
505 if ((sr & ~0xff) == 0x1000) {
506 sr &= 7;
507 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
508 if (to)
509 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
510 else
511 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
512 return;
514 #endif
516 if (to) {
517 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
518 switch (sr) {
519 case 0:
520 break;
521 case 1:
522 msr_write(dc, cpu_R[dc->ra]);
523 break;
524 case 0x3:
525 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
526 break;
527 case 0x5:
528 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
529 break;
530 case 0x7:
531 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
532 break;
533 case 0x800:
534 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
535 break;
536 case 0x802:
537 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
538 break;
539 default:
540 cpu_abort(dc->env, "unknown mts reg %x\n", sr);
541 break;
543 } else {
544 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
546 switch (sr) {
547 case 0:
548 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
549 break;
550 case 1:
551 msr_read(dc, cpu_R[dc->rd]);
552 break;
553 case 0x3:
554 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
555 break;
556 case 0x5:
557 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
558 break;
559 case 0x7:
560 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
561 break;
562 case 0xb:
563 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
564 break;
565 case 0x800:
566 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
567 break;
568 case 0x802:
569 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
570 break;
571 case 0x2000:
572 case 0x2001:
573 case 0x2002:
574 case 0x2003:
575 case 0x2004:
576 case 0x2005:
577 case 0x2006:
578 case 0x2007:
579 case 0x2008:
580 case 0x2009:
581 case 0x200a:
582 case 0x200b:
583 case 0x200c:
584 rn = sr & 0xf;
585 tcg_gen_ld_tl(cpu_R[dc->rd],
586 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
587 break;
588 default:
589 cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
590 break;
594 if (dc->rd == 0) {
595 tcg_gen_movi_tl(cpu_R[0], 0);
599 /* 64-bit signed mul, lower result in d and upper in d2. */
600 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
602 TCGv_i64 t0, t1;
604 t0 = tcg_temp_new_i64();
605 t1 = tcg_temp_new_i64();
607 tcg_gen_ext_i32_i64(t0, a);
608 tcg_gen_ext_i32_i64(t1, b);
609 tcg_gen_mul_i64(t0, t0, t1);
611 tcg_gen_trunc_i64_i32(d, t0);
612 tcg_gen_shri_i64(t0, t0, 32);
613 tcg_gen_trunc_i64_i32(d2, t0);
615 tcg_temp_free_i64(t0);
616 tcg_temp_free_i64(t1);
619 /* 64-bit unsigned muls, lower result in d and upper in d2. */
620 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
622 TCGv_i64 t0, t1;
624 t0 = tcg_temp_new_i64();
625 t1 = tcg_temp_new_i64();
627 tcg_gen_extu_i32_i64(t0, a);
628 tcg_gen_extu_i32_i64(t1, b);
629 tcg_gen_mul_i64(t0, t0, t1);
631 tcg_gen_trunc_i64_i32(d, t0);
632 tcg_gen_shri_i64(t0, t0, 32);
633 tcg_gen_trunc_i64_i32(d2, t0);
635 tcg_temp_free_i64(t0);
636 tcg_temp_free_i64(t1);
639 /* Multiplier unit. */
640 static void dec_mul(DisasContext *dc)
642 TCGv d[2];
643 unsigned int subcode;
645 if ((dc->tb_flags & MSR_EE_FLAG)
646 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
647 && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
648 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
649 t_gen_raise_exception(dc, EXCP_HW_EXCP);
650 return;
653 subcode = dc->imm & 3;
654 d[0] = tcg_temp_new();
655 d[1] = tcg_temp_new();
657 if (dc->type_b) {
658 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
659 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
660 goto done;
663 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
664 if (subcode >= 1 && subcode <= 3
665 && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
666 /* nop??? */
669 switch (subcode) {
670 case 0:
671 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
672 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
673 break;
674 case 1:
675 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
676 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
677 break;
678 case 2:
679 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
680 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
681 break;
682 case 3:
683 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
684 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
685 break;
686 default:
687 cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
688 break;
690 done:
691 tcg_temp_free(d[0]);
692 tcg_temp_free(d[1]);
695 /* Div unit. */
696 static void dec_div(DisasContext *dc)
698 unsigned int u;
700 u = dc->imm & 2;
701 LOG_DIS("div\n");
703 if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
704 && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
705 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
706 t_gen_raise_exception(dc, EXCP_HW_EXCP);
709 if (u)
710 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
711 cpu_R[dc->ra]);
712 else
713 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
714 cpu_R[dc->ra]);
715 if (!dc->rd)
716 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
719 static void dec_barrel(DisasContext *dc)
721 TCGv t0;
722 unsigned int s, t;
724 if ((dc->tb_flags & MSR_EE_FLAG)
725 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
726 && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
727 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
728 t_gen_raise_exception(dc, EXCP_HW_EXCP);
729 return;
732 s = dc->imm & (1 << 10);
733 t = dc->imm & (1 << 9);
735 LOG_DIS("bs%s%s r%d r%d r%d\n",
736 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
738 t0 = tcg_temp_new();
740 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
741 tcg_gen_andi_tl(t0, t0, 31);
743 if (s)
744 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
745 else {
746 if (t)
747 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
748 else
749 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
753 static void dec_bit(DisasContext *dc)
755 TCGv t0;
756 unsigned int op;
757 int mem_index = cpu_mmu_index(dc->env);
759 op = dc->ir & ((1 << 9) - 1);
760 switch (op) {
761 case 0x21:
762 /* src. */
763 t0 = tcg_temp_new();
765 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
766 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
767 write_carry(dc, cpu_R[dc->ra]);
768 if (dc->rd) {
769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
770 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
772 tcg_temp_free(t0);
773 break;
775 case 0x1:
776 case 0x41:
777 /* srl. */
778 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
780 /* Update carry. Note that write carry only looks at the LSB. */
781 write_carry(dc, cpu_R[dc->ra]);
782 if (dc->rd) {
783 if (op == 0x41)
784 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
785 else
786 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
788 break;
789 case 0x60:
790 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
791 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
792 break;
793 case 0x61:
794 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
795 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
796 break;
797 case 0x64:
798 case 0x66:
799 case 0x74:
800 case 0x76:
801 /* wdc. */
802 LOG_DIS("wdc r%d\n", dc->ra);
803 if ((dc->tb_flags & MSR_EE_FLAG)
804 && mem_index == MMU_USER_IDX) {
805 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
806 t_gen_raise_exception(dc, EXCP_HW_EXCP);
807 return;
809 break;
810 case 0x68:
811 /* wic. */
812 LOG_DIS("wic r%d\n", dc->ra);
813 if ((dc->tb_flags & MSR_EE_FLAG)
814 && mem_index == MMU_USER_IDX) {
815 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
816 t_gen_raise_exception(dc, EXCP_HW_EXCP);
817 return;
819 break;
820 case 0xe0:
821 if ((dc->tb_flags & MSR_EE_FLAG)
822 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
823 && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
824 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
825 t_gen_raise_exception(dc, EXCP_HW_EXCP);
827 if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
828 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
830 break;
831 case 0x1e0:
832 /* swapb */
833 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
834 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
835 break;
836 case 0x1e2:
837 /*swaph */
838 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
839 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
840 break;
841 default:
842 cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
843 dc->pc, op, dc->rd, dc->ra, dc->rb);
844 break;
848 static inline void sync_jmpstate(DisasContext *dc)
850 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
851 if (dc->jmp == JMP_DIRECT) {
852 tcg_gen_movi_tl(env_btaken, 1);
854 dc->jmp = JMP_INDIRECT;
855 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
859 static void dec_imm(DisasContext *dc)
861 LOG_DIS("imm %x\n", dc->imm << 16);
862 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
863 dc->tb_flags |= IMM_FLAG;
864 dc->clear_imm = 0;
867 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
869 unsigned int extimm = dc->tb_flags & IMM_FLAG;
870 /* Should be set to one if r1 is used by loadstores. */
871 int stackprot = 0;
873 /* All load/stores use ra. */
874 if (dc->ra == 1) {
875 stackprot = 1;
878 /* Treat the common cases first. */
879 if (!dc->type_b) {
880 /* If any of the regs is r0, return a ptr to the other. */
881 if (dc->ra == 0) {
882 return &cpu_R[dc->rb];
883 } else if (dc->rb == 0) {
884 return &cpu_R[dc->ra];
887 if (dc->rb == 1) {
888 stackprot = 1;
891 *t = tcg_temp_new();
892 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
894 if (stackprot) {
895 gen_helper_stackprot(cpu_env, *t);
897 return t;
899 /* Immediate. */
900 if (!extimm) {
901 if (dc->imm == 0) {
902 return &cpu_R[dc->ra];
904 *t = tcg_temp_new();
905 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
906 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
907 } else {
908 *t = tcg_temp_new();
909 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
912 if (stackprot) {
913 gen_helper_stackprot(cpu_env, *t);
915 return t;
918 static void dec_load(DisasContext *dc)
920 TCGv t, v, *addr;
921 unsigned int size, rev = 0, ex = 0;
922 TCGMemOp mop;
924 mop = dc->opcode & 3;
925 size = 1 << mop;
926 if (!dc->type_b) {
927 rev = (dc->ir >> 9) & 1;
928 ex = (dc->ir >> 10) & 1;
930 mop |= MO_TE;
931 if (rev) {
932 mop ^= MO_BSWAP;
935 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
936 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
937 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
938 t_gen_raise_exception(dc, EXCP_HW_EXCP);
939 return;
942 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
943 ex ? "x" : "");
945 t_sync_flags(dc);
946 addr = compute_ldst_addr(dc, &t);
949 * When doing reverse accesses we need to do two things.
951 * 1. Reverse the address wrt endianness.
952 * 2. Byteswap the data lanes on the way back into the CPU core.
954 if (rev && size != 4) {
955 /* Endian reverse the address. t is addr. */
956 switch (size) {
957 case 1:
959 /* 00 -> 11
960 01 -> 10
961 10 -> 10
962 11 -> 00 */
963 TCGv low = tcg_temp_new();
965 /* Force addr into the temp. */
966 if (addr != &t) {
967 t = tcg_temp_new();
968 tcg_gen_mov_tl(t, *addr);
969 addr = &t;
972 tcg_gen_andi_tl(low, t, 3);
973 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
974 tcg_gen_andi_tl(t, t, ~3);
975 tcg_gen_or_tl(t, t, low);
976 tcg_gen_mov_tl(env_imm, t);
977 tcg_temp_free(low);
978 break;
981 case 2:
982 /* 00 -> 10
983 10 -> 00. */
984 /* Force addr into the temp. */
985 if (addr != &t) {
986 t = tcg_temp_new();
987 tcg_gen_xori_tl(t, *addr, 2);
988 addr = &t;
989 } else {
990 tcg_gen_xori_tl(t, t, 2);
992 break;
993 default:
994 cpu_abort(dc->env, "Invalid reverse size\n");
995 break;
999 /* lwx does not throw unaligned access errors, so force alignment */
1000 if (ex) {
1001 /* Force addr into the temp. */
1002 if (addr != &t) {
1003 t = tcg_temp_new();
1004 tcg_gen_mov_tl(t, *addr);
1005 addr = &t;
1007 tcg_gen_andi_tl(t, t, ~3);
1010 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1011 sync_jmpstate(dc);
1013 /* Verify alignment if needed. */
1015 * Microblaze gives MMU faults priority over faults due to
1016 * unaligned addresses. That's why we speculatively do the load
1017 * into v. If the load succeeds, we verify alignment of the
1018 * address and if that succeeds we write into the destination reg.
1020 v = tcg_temp_new();
1021 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop);
1023 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1024 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1025 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1026 tcg_const_tl(0), tcg_const_tl(size - 1));
1029 if (ex) {
1030 tcg_gen_mov_tl(env_res_addr, *addr);
1031 tcg_gen_mov_tl(env_res_val, v);
1033 if (dc->rd) {
1034 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1036 tcg_temp_free(v);
1038 if (ex) { /* lwx */
1039 /* no support for for AXI exclusive so always clear C */
1040 write_carryi(dc, 0);
1043 if (addr == &t)
1044 tcg_temp_free(t);
1047 static void dec_store(DisasContext *dc)
1049 TCGv t, *addr, swx_addr;
1050 int swx_skip = 0;
1051 unsigned int size, rev = 0, ex = 0;
1052 TCGMemOp mop;
1054 mop = dc->opcode & 3;
1055 size = 1 << mop;
1056 if (!dc->type_b) {
1057 rev = (dc->ir >> 9) & 1;
1058 ex = (dc->ir >> 10) & 1;
1060 mop |= MO_TE;
1061 if (rev) {
1062 mop ^= MO_BSWAP;
1065 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1066 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1067 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1068 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1069 return;
1072 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1073 ex ? "x" : "");
1074 t_sync_flags(dc);
1075 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1076 sync_jmpstate(dc);
1077 addr = compute_ldst_addr(dc, &t);
1079 swx_addr = tcg_temp_local_new();
1080 if (ex) { /* swx */
1081 TCGv tval;
1083 /* Force addr into the swx_addr. */
1084 tcg_gen_mov_tl(swx_addr, *addr);
1085 addr = &swx_addr;
1086 /* swx does not throw unaligned access errors, so force alignment */
1087 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1089 write_carryi(dc, 1);
1090 swx_skip = gen_new_label();
1091 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1093 /* Compare the value loaded at lwx with current contents of
1094 the reserved location.
1095 FIXME: This only works for system emulation where we can expect
1096 this compare and the following write to be atomic. For user
1097 emulation we need to add atomicity between threads. */
1098 tval = tcg_temp_new();
1099 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL);
1100 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1101 write_carryi(dc, 0);
1102 tcg_temp_free(tval);
1105 if (rev && size != 4) {
1106 /* Endian reverse the address. t is addr. */
1107 switch (size) {
1108 case 1:
1110 /* 00 -> 11
1111 01 -> 10
1112 10 -> 10
1113 11 -> 00 */
1114 TCGv low = tcg_temp_new();
1116 /* Force addr into the temp. */
1117 if (addr != &t) {
1118 t = tcg_temp_new();
1119 tcg_gen_mov_tl(t, *addr);
1120 addr = &t;
1123 tcg_gen_andi_tl(low, t, 3);
1124 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1125 tcg_gen_andi_tl(t, t, ~3);
1126 tcg_gen_or_tl(t, t, low);
1127 tcg_gen_mov_tl(env_imm, t);
1128 tcg_temp_free(low);
1129 break;
1132 case 2:
1133 /* 00 -> 10
1134 10 -> 00. */
1135 /* Force addr into the temp. */
1136 if (addr != &t) {
1137 t = tcg_temp_new();
1138 tcg_gen_xori_tl(t, *addr, 2);
1139 addr = &t;
1140 } else {
1141 tcg_gen_xori_tl(t, t, 2);
1143 break;
1144 default:
1145 cpu_abort(dc->env, "Invalid reverse size\n");
1146 break;
1149 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop);
1151 /* Verify alignment if needed. */
1152 if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1153 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1154 /* FIXME: if the alignment is wrong, we should restore the value
1155 * in memory. One possible way to achieve this is to probe
1156 * the MMU prior to the memaccess, thay way we could put
1157 * the alignment checks in between the probe and the mem
1158 * access.
1160 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1161 tcg_const_tl(1), tcg_const_tl(size - 1));
1164 if (ex) {
1165 gen_set_label(swx_skip);
1167 tcg_temp_free(swx_addr);
1169 if (addr == &t)
1170 tcg_temp_free(t);
1173 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1174 TCGv d, TCGv a, TCGv b)
1176 switch (cc) {
1177 case CC_EQ:
1178 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1179 break;
1180 case CC_NE:
1181 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1182 break;
1183 case CC_LT:
1184 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1185 break;
1186 case CC_LE:
1187 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1188 break;
1189 case CC_GE:
1190 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1191 break;
1192 case CC_GT:
1193 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1194 break;
1195 default:
1196 cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
1197 break;
1201 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1203 int l1;
1205 l1 = gen_new_label();
1206 /* Conditional jmp. */
1207 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1208 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1209 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1210 gen_set_label(l1);
1213 static void dec_bcc(DisasContext *dc)
1215 unsigned int cc;
1216 unsigned int dslot;
1218 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1219 dslot = dc->ir & (1 << 25);
1220 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1222 dc->delayed_branch = 1;
1223 if (dslot) {
1224 dc->delayed_branch = 2;
1225 dc->tb_flags |= D_FLAG;
1226 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1227 cpu_env, offsetof(CPUMBState, bimm));
1230 if (dec_alu_op_b_is_small_imm(dc)) {
1231 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1233 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
1234 dc->jmp = JMP_DIRECT_CC;
1235 dc->jmp_pc = dc->pc + offset;
1236 } else {
1237 dc->jmp = JMP_INDIRECT;
1238 tcg_gen_movi_tl(env_btarget, dc->pc);
1239 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1241 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
/* Decode unconditional branches (br/bra/brl/bri/brai, delay-slot
 * variants) plus the mbar/sleep encodings that share this opcode.
 * Also handles the brki-based syscall/debug entry points.
 */
1244 static void dec_br(DisasContext *dc)
1246 unsigned int dslot, link, abs, mbar;
1247 int mem_index = cpu_mmu_index(dc->env);
1249 dslot = dc->ir & (1 << 20);
1250 abs = dc->ir & (1 << 19);
1251 link = dc->ir & (1 << 18);
1253 /* Memory barrier. */
1254 mbar = (dc->ir >> 16) & 31;
1255 if (mbar == 2 && dc->imm == 4) {
1256 /* mbar IMM & 16 decodes to sleep. */
1257 if (dc->rd & 16) {
1258 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1259 TCGv_i32 tmp_1 = tcg_const_i32(1);
1261 LOG_DIS("sleep\n");
1263 t_sync_flags(dc);
/* Set cs->halted; the store address is computed relative to env
   because cpu_env points at the CPUMBState inside MicroBlazeCPU. */
1264 tcg_gen_st_i32(tmp_1, cpu_env,
1265 -offsetof(MicroBlazeCPU, env)
1266 +offsetof(CPUState, halted));
1267 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1268 gen_helper_raise_exception(cpu_env, tmp_hlt);
1269 tcg_temp_free_i32(tmp_hlt);
1270 tcg_temp_free_i32(tmp_1);
1271 return;
1273 LOG_DIS("mbar %d\n", dc->rd);
1274 /* Break the TB. */
1275 dc->cpustate_changed = 1;
1276 return;
1279 LOG_DIS("br%s%s%s%s imm=%x\n",
1280 abs ? "a" : "", link ? "l" : "",
1281 dc->type_b ? "i" : "", dslot ? "d" : "",
1282 dc->imm);
1284 dc->delayed_branch = 1;
1285 if (dslot) {
1286 dc->delayed_branch = 2;
1287 dc->tb_flags |= D_FLAG;
/* Record whether the delay-slot insn is preceded by an imm. */
1288 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1289 cpu_env, offsetof(CPUMBState, bimm));
/* Link forms save the return address in r[rd] (r0 stays zero). */
1291 if (link && dc->rd)
1292 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1294 dc->jmp = JMP_INDIRECT;
1295 if (abs) {
1296 tcg_gen_movi_tl(env_btaken, 1);
1297 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1298 if (link && !dslot) {
/* brki to vectors 8/0x18 are the break entry points. */
1299 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1300 t_gen_raise_exception(dc, EXCP_BREAK);
1301 if (dc->imm == 0) {
/* brki rd, 0 is privileged (debug entry). */
1302 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1303 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1304 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1305 return;
1308 t_gen_raise_exception(dc, EXCP_DEBUG);
1311 } else {
/* PC-relative: small immediates give direct-chainable targets. */
1312 if (dec_alu_op_b_is_small_imm(dc)) {
1313 dc->jmp = JMP_DIRECT;
1314 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1315 } else {
1316 tcg_gen_movi_tl(env_btaken, 1);
1317 tcg_gen_movi_tl(env_btarget, dc->pc);
1318 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1323 static inline void do_rti(DisasContext *dc)
1325 TCGv t0, t1;
1326 t0 = tcg_temp_new();
1327 t1 = tcg_temp_new();
1328 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1329 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1330 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1332 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1333 tcg_gen_or_tl(t1, t1, t0);
1334 msr_write(dc, t1);
1335 tcg_temp_free(t1);
1336 tcg_temp_free(t0);
1337 dc->tb_flags &= ~DRTI_FLAG;
1340 static inline void do_rtb(DisasContext *dc)
1342 TCGv t0, t1;
1343 t0 = tcg_temp_new();
1344 t1 = tcg_temp_new();
1345 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1346 tcg_gen_shri_tl(t0, t1, 1);
1347 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1349 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1350 tcg_gen_or_tl(t1, t1, t0);
1351 msr_write(dc, t1);
1352 tcg_temp_free(t1);
1353 tcg_temp_free(t0);
1354 dc->tb_flags &= ~DRTB_FLAG;
1357 static inline void do_rte(DisasContext *dc)
1359 TCGv t0, t1;
1360 t0 = tcg_temp_new();
1361 t1 = tcg_temp_new();
1363 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1364 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1365 tcg_gen_shri_tl(t0, t1, 1);
1366 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1368 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1369 tcg_gen_or_tl(t1, t1, t0);
1370 msr_write(dc, t1);
1371 tcg_temp_free(t1);
1372 tcg_temp_free(t0);
1373 dc->tb_flags &= ~DRTE_FLAG;
/* Decode the return family rtsd/rtid/rtbd/rted.  All forms execute a
 * delay slot and then branch indirectly to r[ra] + (rb or imm); the
 * i/b/e variants additionally restore MSR state, deferred via the
 * DRTI/DRTB/DRTE flags until the delay slot has run, and are
 * privileged instructions.
 */
1376 static void dec_rts(DisasContext *dc)
1378 unsigned int b_bit, i_bit, e_bit;
1379 int mem_index = cpu_mmu_index(dc->env);
1381 i_bit = dc->ir & (1 << 21);
1382 b_bit = dc->ir & (1 << 22);
1383 e_bit = dc->ir & (1 << 23);
/* Every rts variant has a mandatory delay slot. */
1385 dc->delayed_branch = 2;
1386 dc->tb_flags |= D_FLAG;
/* Record whether the delay-slot insn is preceded by an imm. */
1387 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1388 cpu_env, offsetof(CPUMBState, bimm));
1390 if (i_bit) {
1391 LOG_DIS("rtid ir=%x\n", dc->ir);
/* rtid is privileged: user mode raises a privileged-insn exception. */
1392 if ((dc->tb_flags & MSR_EE_FLAG)
1393 && mem_index == MMU_USER_IDX) {
1394 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1395 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1397 dc->tb_flags |= DRTI_FLAG;
1398 } else if (b_bit) {
1399 LOG_DIS("rtbd ir=%x\n", dc->ir);
/* rtbd is privileged as well. */
1400 if ((dc->tb_flags & MSR_EE_FLAG)
1401 && mem_index == MMU_USER_IDX) {
1402 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1403 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1405 dc->tb_flags |= DRTB_FLAG;
1406 } else if (e_bit) {
1407 LOG_DIS("rted ir=%x\n", dc->ir);
/* rted is privileged as well. */
1408 if ((dc->tb_flags & MSR_EE_FLAG)
1409 && mem_index == MMU_USER_IDX) {
1410 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1411 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1413 dc->tb_flags |= DRTE_FLAG;
1414 } else
1415 LOG_DIS("rts ir=%x\n", dc->ir);
1417 dc->jmp = JMP_INDIRECT;
1418 tcg_gen_movi_tl(env_btaken, 1);
/* Target is r[ra] plus either rb or the immediate. */
1419 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)))
1422 static int dec_check_fpuv2(DisasContext *dc)
1424 int r;
1426 r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
1428 if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
1429 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1430 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1432 return r;
/* Decode floating point instructions.  Bits 7..9 of ir select the
 * operation (add/rsub/mul/div/cmp/flt/fint/fsqrt); fcmp sub-decodes on
 * bits 4..6.  A CPU configured without an FPU raises an illegal-opcode
 * exception; flt/fint/fsqrt additionally require FPU v2.
 */
1435 static void dec_fpu(DisasContext *dc)
1437 unsigned int fpu_insn;
/* No FPU configured: treat as an illegal opcode when enabled. */
1439 if ((dc->tb_flags & MSR_EE_FLAG)
1440 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1441 && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
1442 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1443 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1444 return;
1447 fpu_insn = (dc->ir >> 7) & 7;
1449 switch (fpu_insn) {
1450 case 0:
1451 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1452 cpu_R[dc->rb]);
1453 break;
1455 case 1:
1456 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1457 cpu_R[dc->rb]);
1458 break;
1460 case 2:
1461 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1462 cpu_R[dc->rb]);
1463 break;
1465 case 3:
1466 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1467 cpu_R[dc->rb]);
1468 break;
/* fcmp: the comparison kind lives in bits 4..6. */
1470 case 4:
1471 switch ((dc->ir >> 4) & 7) {
1472 case 0:
1473 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1474 cpu_R[dc->ra], cpu_R[dc->rb]);
1475 break;
1476 case 1:
1477 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1478 cpu_R[dc->ra], cpu_R[dc->rb]);
1479 break;
1480 case 2:
1481 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1482 cpu_R[dc->ra], cpu_R[dc->rb]);
1483 break;
1484 case 3:
1485 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1486 cpu_R[dc->ra], cpu_R[dc->rb]);
1487 break;
1488 case 4:
1489 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1490 cpu_R[dc->ra], cpu_R[dc->rb]);
1491 break;
1492 case 5:
1493 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1494 cpu_R[dc->ra], cpu_R[dc->rb]);
1495 break;
1496 case 6:
1497 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1498 cpu_R[dc->ra], cpu_R[dc->rb]);
1499 break;
1500 default:
1501 qemu_log_mask(LOG_UNIMP,
1502 "unimplemented fcmp fpu_insn=%x pc=%x"
1503 " opc=%x\n",
1504 fpu_insn, dc->pc, dc->opcode);
1505 dc->abort_at_next_insn = 1;
1506 break;
1508 break;
/* flt/fint/fsqrt exist only on FPU v2 configurations. */
1510 case 5:
1511 if (!dec_check_fpuv2(dc)) {
1512 return;
1514 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1515 break;
1517 case 6:
1518 if (!dec_check_fpuv2(dc)) {
1519 return;
1521 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1522 break;
1524 case 7:
1525 if (!dec_check_fpuv2(dc)) {
1526 return;
1528 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1529 break;
1531 default:
1532 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1533 " opc=%x\n",
1534 fpu_insn, dc->pc, dc->opcode);
1535 dc->abort_at_next_insn = 1;
1536 break;
1540 static void dec_null(DisasContext *dc)
1542 if ((dc->tb_flags & MSR_EE_FLAG)
1543 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1544 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1545 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1546 return;
1548 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1549 dc->abort_at_next_insn = 1;
1552 /* Insns connected to FSL or AXI stream attached devices. */
1553 static void dec_stream(DisasContext *dc)
1555 int mem_index = cpu_mmu_index(dc->env);
1556 TCGv_i32 t_id, t_ctrl;
1557 int ctrl;
1559 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1560 dc->type_b ? "" : "d", dc->imm);
1562 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1563 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1564 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1565 return;
1568 t_id = tcg_temp_new();
1569 if (dc->type_b) {
1570 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1571 ctrl = dc->imm >> 10;
1572 } else {
1573 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1574 ctrl = dc->imm >> 5;
1577 t_ctrl = tcg_const_tl(ctrl);
1579 if (dc->rd == 0) {
1580 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1581 } else {
1582 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1584 tcg_temp_free(t_id);
1585 tcg_temp_free(t_ctrl);
/* Instruction decoder dispatch table.  An entry matches when
 * (opcode & mask) == bits; decode() tries entries in order and the
 * first match wins, so the {0, 0} catch-all (dec_null) must stay last.
 */
1588 static struct decoder_info {
1589 struct {
1590 uint32_t bits;
1591 uint32_t mask;
1593 void (*dec)(DisasContext *dc);
1594 } decinfo[] = {
1595 {DEC_ADD, dec_add},
1596 {DEC_SUB, dec_sub},
1597 {DEC_AND, dec_and},
1598 {DEC_XOR, dec_xor},
1599 {DEC_OR, dec_or},
1600 {DEC_BIT, dec_bit},
1601 {DEC_BARREL, dec_barrel},
1602 {DEC_LD, dec_load},
1603 {DEC_ST, dec_store},
1604 {DEC_IMM, dec_imm},
1605 {DEC_BR, dec_br},
1606 {DEC_BCC, dec_bcc},
1607 {DEC_RTS, dec_rts},
1608 {DEC_FPU, dec_fpu},
1609 {DEC_MUL, dec_mul},
1610 {DEC_DIV, dec_div},
1611 {DEC_MSR, dec_msr},
1612 {DEC_STREAM, dec_stream},
/* Catch-all: matches every opcode; must remain the final entry. */
1613 {{0, 0}, dec_null}
/* Decode one instruction word: extract the common fields (opcode,
 * rd/ra/rb, imm, type) into dc and dispatch through decinfo.  Also
 * implements the configurable illegal zero-opcode exception and a
 * runaway-nop detector for fetches of zeroed memory.
 */
1616 static inline void decode(DisasContext *dc, uint32_t ir)
1618 int i;
1620 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1621 tcg_gen_debug_insn_start(dc->pc);
1624 dc->ir = ir;
1625 LOG_DIS("%8.8x\t", dc->ir);
1627 if (dc->ir)
1628 dc->nr_nops = 0;
1629 else {
/* ir == 0: optionally an illegal opcode, depending on PVR config. */
1630 if ((dc->tb_flags & MSR_EE_FLAG)
1631 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1632 && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1633 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1634 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1635 return;
1638 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
/* A long run of zero words means we are executing garbage. */
1639 dc->nr_nops++;
1640 if (dc->nr_nops > 4)
1641 cpu_abort(dc->env, "fetching nop sequence\n");
1643 /* bit 2 seems to indicate insn type. */
1644 dc->type_b = ir & (1 << 29);
1646 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1647 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1648 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1649 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1650 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1652 /* Large switch for all insns. */
1653 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1654 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1655 decinfo[i].dec(dc);
1656 break;
1661 static void check_breakpoint(CPUMBState *env, DisasContext *dc)
1663 CPUBreakpoint *bp;
1665 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1666 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1667 if (bp->pc == dc->pc) {
1668 t_gen_raise_exception(dc, EXCP_DEBUG);
1669 dc->is_jmp = DISAS_UPDATE;
1675 /* generate intermediate code for basic block 'tb'. */
/* Main translation loop: decode guest insns starting at tb->pc until a
 * branch, page boundary, op-buffer limit or singlestep ends the TB.
 * With search_pc, also record the PC/icount for each generated op so
 * exceptions can restore state (see restore_state_to_opc).
 */
1676 static inline void
1677 gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
1678 bool search_pc)
1680 CPUState *cs = CPU(cpu);
1681 CPUMBState *env = &cpu->env;
1682 uint16_t *gen_opc_end;
1683 uint32_t pc_start;
1684 int j, lj;
1685 struct DisasContext ctx;
1686 struct DisasContext *dc = &ctx;
1687 uint32_t next_page_start, org_flags;
1688 target_ulong npc;
1689 int num_insns;
1690 int max_insns;
1692 pc_start = tb->pc;
1693 dc->env = env;
1694 dc->tb = tb;
/* Remember the entry flags so we can detect per-TB state changes. */
1695 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1697 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1699 dc->is_jmp = DISAS_NEXT;
1700 dc->jmp = 0;
/* The TB may start inside a delay slot (D_FLAG carried in tb->flags). */
1701 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1702 if (dc->delayed_branch) {
1703 dc->jmp = JMP_INDIRECT;
1705 dc->pc = pc_start;
1706 dc->singlestep_enabled = cs->singlestep_enabled;
1707 dc->cpustate_changed = 0;
1708 dc->abort_at_next_insn = 0;
1709 dc->nr_nops = 0;
1711 if (pc_start & 3)
1712 cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);
1714 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1715 #if !SIM_COMPAT
1716 qemu_log("--------------\n");
1717 log_cpu_state(CPU(cpu), 0);
1718 #endif
1721 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1722 lj = -1;
1723 num_insns = 0;
1724 max_insns = tb->cflags & CF_COUNT_MASK;
1725 if (max_insns == 0)
1726 max_insns = CF_COUNT_MASK;
1728 gen_tb_start();
1731 #if SIM_COMPAT
1732 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1733 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1734 gen_helper_debug();
1736 #endif
1737 check_breakpoint(env, dc);
/* Record the op index -> guest PC mapping for exception unwinding. */
1739 if (search_pc) {
1740 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1741 if (lj < j) {
1742 lj++;
1743 while (lj < j)
1744 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1746 tcg_ctx.gen_opc_pc[lj] = dc->pc;
1747 tcg_ctx.gen_opc_instr_start[lj] = 1;
1748 tcg_ctx.gen_opc_icount[lj] = num_insns;
1751 /* Pretty disas. */
1752 LOG_DIS("%8.8x:\t", dc->pc);
1754 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1755 gen_io_start();
/* Decoders that consume a pending imm prefix clear this themselves. */
1757 dc->clear_imm = 1;
1758 decode(dc, cpu_ldl_code(env, dc->pc));
1759 if (dc->clear_imm)
1760 dc->tb_flags &= ~IMM_FLAG;
1761 dc->pc += 4;
1762 num_insns++;
/* A pending branch resolves once its delay slot has executed. */
1764 if (dc->delayed_branch) {
1765 dc->delayed_branch--;
1766 if (!dc->delayed_branch) {
1767 if (dc->tb_flags & DRTI_FLAG)
1768 do_rti(dc);
1769 if (dc->tb_flags & DRTB_FLAG)
1770 do_rtb(dc);
1771 if (dc->tb_flags & DRTE_FLAG)
1772 do_rte(dc);
1773 /* Clear the delay slot flag. */
1774 dc->tb_flags &= ~D_FLAG;
1775 /* If it is a direct jump, try direct chaining. */
1776 if (dc->jmp == JMP_INDIRECT) {
1777 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1778 dc->is_jmp = DISAS_JUMP;
1779 } else if (dc->jmp == JMP_DIRECT) {
1780 t_sync_flags(dc);
1781 gen_goto_tb(dc, 0, dc->jmp_pc);
1782 dc->is_jmp = DISAS_TB_JUMP;
1783 } else if (dc->jmp == JMP_DIRECT_CC) {
1784 int l1;
1786 t_sync_flags(dc);
1787 l1 = gen_new_label();
1788 /* Conditional jmp. */
1789 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1790 gen_goto_tb(dc, 1, dc->pc);
1791 gen_set_label(l1);
1792 gen_goto_tb(dc, 0, dc->jmp_pc);
1794 dc->is_jmp = DISAS_TB_JUMP;
1796 break;
1799 if (cs->singlestep_enabled) {
1800 break;
1802 } while (!dc->is_jmp && !dc->cpustate_changed
1803 && tcg_ctx.gen_opc_ptr < gen_opc_end
1804 && !singlestep
1805 && (dc->pc < next_page_start)
1806 && num_insns < max_insns);
1808 npc = dc->pc;
/* A direct branch whose delay slot crosses the TB boundary must fall
   back to an indirect-style update of SR_PC. */
1809 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1810 if (dc->tb_flags & D_FLAG) {
1811 dc->is_jmp = DISAS_UPDATE;
1812 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1813 sync_jmpstate(dc);
1814 } else
1815 npc = dc->jmp_pc;
1818 if (tb->cflags & CF_LAST_IO)
1819 gen_io_end();
1820 /* Force an update if the per-tb cpu state has changed. */
1821 if (dc->is_jmp == DISAS_NEXT
1822 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1823 dc->is_jmp = DISAS_UPDATE;
1824 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1826 t_sync_flags(dc);
1828 if (unlikely(cs->singlestep_enabled)) {
1829 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1831 if (dc->is_jmp != DISAS_JUMP) {
1832 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1834 gen_helper_raise_exception(cpu_env, tmp);
1835 tcg_temp_free_i32(tmp);
1836 } else {
1837 switch(dc->is_jmp) {
1838 case DISAS_NEXT:
1839 gen_goto_tb(dc, 1, npc);
1840 break;
1841 default:
1842 case DISAS_JUMP:
1843 case DISAS_UPDATE:
1844 /* indicate that the hash table must be used
1845 to find the next TB */
1846 tcg_gen_exit_tb(0);
1847 break;
1848 case DISAS_TB_JUMP:
1849 /* nothing more to generate */
1850 break;
1853 gen_tb_end(tb, num_insns);
1854 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1855 if (search_pc) {
1856 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1857 lj++;
1858 while (lj <= j)
1859 tcg_ctx.gen_opc_instr_start[lj++] = 0;
1860 } else {
1861 tb->size = dc->pc - pc_start;
1862 tb->icount = num_insns;
1865 #ifdef DEBUG_DISAS
1866 #if !SIM_COMPAT
1867 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1868 qemu_log("\n");
1869 #if DISAS_GNU
1870 log_target_disas(env, pc_start, dc->pc - pc_start, 0);
1871 #endif
1872 qemu_log("\nisize=%d osize=%td\n",
1873 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
1874 tcg_ctx.gen_opc_buf);
1876 #endif
1877 #endif
1878 assert(!dc->abort_at_next_insn);
/* Translate one TB without recording PC-mapping data. */
1881 void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
1883 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
/* Translate one TB and record per-op PC data for exception restore. */
1886 void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
1888 gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
1891 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1892 int flags)
1894 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1895 CPUMBState *env = &cpu->env;
1896 int i;
1898 if (!env || !f)
1899 return;
1901 cpu_fprintf(f, "IN: PC=%x %s\n",
1902 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1903 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1904 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1905 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1906 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1907 env->btaken, env->btarget,
1908 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1909 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1910 (env->sregs[SR_MSR] & MSR_EIP),
1911 (env->sregs[SR_MSR] & MSR_IE));
1913 for (i = 0; i < 32; i++) {
1914 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1915 if ((i + 1) % 4 == 0)
1916 cpu_fprintf(f, "\n");
1918 cpu_fprintf(f, "\n\n");
1921 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1923 MicroBlazeCPU *cpu;
1925 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1927 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1929 return cpu;
1932 void mb_tcg_init(void)
1934 int i;
1936 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1938 env_debug = tcg_global_mem_new(TCG_AREG0,
1939 offsetof(CPUMBState, debug),
1940 "debug0");
1941 env_iflags = tcg_global_mem_new(TCG_AREG0,
1942 offsetof(CPUMBState, iflags),
1943 "iflags");
1944 env_imm = tcg_global_mem_new(TCG_AREG0,
1945 offsetof(CPUMBState, imm),
1946 "imm");
1947 env_btarget = tcg_global_mem_new(TCG_AREG0,
1948 offsetof(CPUMBState, btarget),
1949 "btarget");
1950 env_btaken = tcg_global_mem_new(TCG_AREG0,
1951 offsetof(CPUMBState, btaken),
1952 "btaken");
1953 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1954 offsetof(CPUMBState, res_addr),
1955 "res_addr");
1956 env_res_val = tcg_global_mem_new(TCG_AREG0,
1957 offsetof(CPUMBState, res_val),
1958 "res_val");
1959 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1960 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
1961 offsetof(CPUMBState, regs[i]),
1962 regnames[i]);
1964 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1965 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
1966 offsetof(CPUMBState, sregs[i]),
1967 special_regnames[i]);
/* Resynchronise architectural state after an exception: restore SR_PC
 * from the translation-time PC recorded for TCG op index pc_pos (see
 * the search_pc path of gen_intermediate_code_internal). */
1971 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
1973 env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];