/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "helper.h"
#include "microblaze-decode.h"

#define GEN_HELPER 1
#include "helper.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
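
/* For example, decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull the
   6-bit opcode field out of the top bits of the instruction word. */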

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    CPUMBState *env;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}
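
/* For instance, sign_extend(0x8000, 15) shifts the value up to bit 31 and
   arithmetically back down, yielding (int)0xffff8000. */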

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
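
/* Direct chaining is only attempted when the destination lies on the same
   guest page as the current TB; otherwise we exit with tcg_gen_exit_tb(0)
   and let the main loop look up the next TB. */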

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
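
/* MSR_C and MSR_CC are kept as two identical copies of the carry flag:
   write_carry() clears and sets both together, and read_carry() reads the
   copy held in bit 31. */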

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
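
/* For type B (immediate) instructions the second operand lives in env_imm:
   with an active imm prefix the low 16 bits are OR'ed into the value already
   loaded by dec_imm(), otherwise the 16-bit immediate is sign-extended.
   Type A instructions simply use register rb. */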

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
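    /* This is just the identity b - a == b + ~a + 1; without a carry-in,
       cf was seeded with 1 above. */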
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf.  */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd)
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            TCGv t0 = tcg_temp_local_new();
            l1 = gen_new_label();
            tcg_gen_movi_tl(t0, 1);
            tcg_gen_brcond_tl(TCG_COND_EQ,
                              cpu_R[dc->ra], cpu_R[dc->rb], l1);
            tcg_gen_movi_tl(t0, 0);
            gen_set_label(l1);
            tcg_gen_mov_tl(cpu_R[dc->rd], t0);
            tcg_temp_free(t0);
        }
        break;
    case 3:
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        l1 = gen_new_label();
        if (dc->rd) {
            TCGv t0 = tcg_temp_local_new();
            tcg_gen_movi_tl(t0, 1);
            tcg_gen_brcond_tl(TCG_COND_NE,
                              cpu_R[dc->ra], cpu_R[dc->rb], l1);
            tcg_gen_movi_tl(t0, 0);
            gen_set_label(l1);
            tcg_gen_mov_tl(cpu_R[dc->rd], t0);
            tcg_temp_free(t0);
        }
        break;
    default:
        cpu_abort(dc->env,
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
            break;
        case 0x7:
            tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
            break;
        case 0x800:
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(dc->env, "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case 0x3:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
            break;
        case 0x5:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
            break;
        case 0x7:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
            break;
        case 0xb:
            tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
            break;
        case 0x800:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000:
        case 0x2001:
        case 0x2002:
        case 0x2003:
        case 0x2004:
        case 0x2005:
        case 0x2006:
        case 0x2007:
        case 0x2008:
        case 0x2009:
        case 0x200a:
        case 0x200b:
        case 0x200c:
            rn = sr & 0xf;
            tcg_gen_ld_tl(cpu_R[dc->rd],
                          cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
        break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src.  */
        t0 = tcg_temp_new();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl.  */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41)
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            else
                tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc.  */
        LOG_DIS("wdc r%d\n", dc->ra);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0x68:
        /* wic.  */
        LOG_DIS("wic r%d\n", dc->ra);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
        break;
    case 0xe0:
        if ((dc->tb_flags & MSR_EE_FLAG)
            && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
            && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
            gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /* swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
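
/* sync_jmpstate() is called before ops that may fault so that a branch
   pending in a delay slot is reflected in env_btaken/env_btarget before a
   possible exception. */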

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
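
/* imm only loads the high 16 bits of env_imm and sets IMM_FLAG; the next
   instruction ORs its own 16-bit immediate in via dec_alu_op_b().  Because
   dec_imm() clears dc->clear_imm, the translation loop keeps IMM_FLAG alive
   for exactly one following instruction. */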

static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                            unsigned int size, bool exclusive)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1) {
        tcg_gen_qemu_ld8u(dst, addr, mem_index);
    } else if (size == 2) {
        tcg_gen_qemu_ld16u(dst, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect load size %d\n", size);

    if (exclusive) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_tl(env_res_val, dst);
    }
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static inline void dec_byteswap(DisasContext *dc, TCGv dst, TCGv src, int size)
{
    if (size == 4) {
        tcg_gen_bswap32_tl(dst, src);
    } else if (size == 2) {
        TCGv t = tcg_temp_new();

        /* bswap16 assumes the high bits are zero.  */
        tcg_gen_andi_tl(t, src, 0xffff);
        tcg_gen_bswap16_tl(dst, t);
        tcg_temp_free(t);
    } else {
        /* Ignore.
        cpu_abort(dc->env, "Invalid ldst byteswap size %d\n", size);
        */
    }
}

static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size, rev = 0, ex = 0;

    size = 1 << (dc->opcode & 3);

    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 01
               11 -> 00 */
            TCGv low = tcg_temp_new();

            /* Force addr into the temp.  */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_mov_tl(t, *addr);
                addr = &t;
            }

            tcg_gen_andi_tl(low, t, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
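            /* Equivalent to mirroring the low two address bits (addr ^ 3). */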
            tcg_gen_andi_tl(t, t, ~3);
            tcg_gen_or_tl(t, t, low);
            tcg_gen_mov_tl(env_imm, t);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00.  */
            /* Force addr into the temp.  */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_xori_tl(t, *addr, 2);
                addr = &t;
            } else {
                tcg_gen_xori_tl(t, t, 2);
            }
            break;
        default:
            cpu_abort(dc->env, "Invalid reverse size\n");
            break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv v = tcg_temp_new();

        /*
         * Microblaze gives MMU faults priority over faults due to
         * unaligned addresses. That's why we speculatively do the load
         * into v. If the load succeeds, we verify alignment of the
         * address and if that succeeds we write into the destination reg.
         */
        gen_load(dc, v, *addr, size, ex);

        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
        if (dc->rd) {
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], v, size);
            } else {
                tcg_gen_mov_tl(cpu_R[dc->rd], v);
            }
        }
        tcg_temp_free(v);
    } else {
        if (dc->rd) {
            gen_load(dc, cpu_R[dc->rd], *addr, size, ex);
            if (rev) {
                dec_byteswap(dc, cpu_R[dc->rd], cpu_R[dc->rd], size);
            }
        } else {
            /* We are loading into r0, no need to reverse.  */
            gen_load(dc, env_imm, *addr, size, ex);
        }
    }

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
                      unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1)
        tcg_gen_qemu_st8(val, addr, mem_index);
    else if (size == 2) {
        tcg_gen_qemu_st16(val, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_st32(val, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect store size %d\n", size);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;

    size = 1 << (dc->opcode & 3);
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
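        /* swx presets the carry flag; it is cleared again below only if the
           reservation address and value both match and the store proceeds. */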
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        gen_load(dc, tval, swx_addr, 4, false);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 01
               11 -> 00 */
            TCGv low = tcg_temp_new();

            /* Force addr into the temp.  */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_mov_tl(t, *addr);
                addr = &t;
            }

            tcg_gen_andi_tl(low, t, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(t, t, ~3);
            tcg_gen_or_tl(t, t, low);
            tcg_gen_mov_tl(env_imm, t);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00.  */
            /* Force addr into the temp.  */
            if (addr != &t) {
                t = tcg_temp_new();
                tcg_gen_xori_tl(t, *addr, 2);
                addr = &t;
            } else {
                tcg_gen_xori_tl(t, t, 2);
            }
            break;
        default:
            cpu_abort(dc->env, "Invalid reverse size\n");
            break;
        }

        if (size != 1) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    } else {
        if (rev) {
            TCGv bs_data = tcg_temp_new();
            dec_byteswap(dc, bs_data, cpu_R[dc->rd], size);
            gen_store(dc, *addr, bs_data, size);
            tcg_temp_free(bs_data);
        } else {
            gen_store(dc, *addr, cpu_R[dc->rd], size);
        }
    }

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         * in memory. One possible way to achieve this is to probe
         * the MMU prior to the memaccess, that way we could put
         * the alignment checks in between the probe and the mem
         * access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
    case CC_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
        break;
    case CC_NE:
        tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
        break;
    case CC_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
        break;
    case CC_LE:
        tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
        break;
    case CC_GE:
        tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
        break;
    case CC_GT:
        tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
        break;
    default:
        cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
        break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 1:
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;

    case 2:
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 3:
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;

    case 4:
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;

    case 5:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 6:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    case 7:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
        && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(dc->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1687 int i;
1689 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1690 tcg_gen_debug_insn_start(dc->pc);
1693 dc->ir = ir;
1694 LOG_DIS("%8.8x\t", dc->ir);
1696 if (dc->ir)
1697 dc->nr_nops = 0;
1698 else {
1699 if ((dc->tb_flags & MSR_EE_FLAG)
1700 && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1701 && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1702 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1703 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1704 return;
1707 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1708 dc->nr_nops++;
1709 if (dc->nr_nops > 4)
1710 cpu_abort(dc->env, "fetching nop sequence\n");
1712 /* bit 2 seems to indicate insn type. */
1713 dc->type_b = ir & (1 << 29);
1715 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1716 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1717 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1718 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1719 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1721 /* Large switch for all insns. */
1722 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1723 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1724 decinfo[i].dec(dc);
1725 break;

static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && tcg_ctx.gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUMBState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUMBState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUMBState, btaken),
                                    "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, res_addr),
                                      "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUMBState, res_val),
                                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUMBState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUMBState, sregs[i]),
                                       special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}