/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
32 #include "trace-tcg.h"
#if DISAS_MB && !SIM_COMPAT
/* Disassembly trace: forwarded to the -d in_asm log stream. */
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
/* Disassembly trace disabled: expands to a no-op statement. */
# define LOG_DIS(...) do { } while (0)
#endif
/*
 * Extract the bit field [start, end] (inclusive) from src.
 * All parameters are parenthesized so that expression arguments
 * (e.g. "base + 4") keep the intended precedence.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
50 /* is_jmp field values */
51 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
52 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
53 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55 static TCGv_i32 env_debug
;
56 static TCGv_i32 cpu_R
[32];
57 static TCGv_i32 cpu_SR
[14];
58 static TCGv_i32 env_imm
;
59 static TCGv_i32 env_btaken
;
60 static TCGv_i32 env_btarget
;
61 static TCGv_i32 env_iflags
;
62 static TCGv env_res_addr
;
63 static TCGv_i32 env_res_val
;
65 #include "exec/gen-icount.h"
67 /* This is the state at translation time. */
68 typedef struct DisasContext
{
79 unsigned int cpustate_changed
;
80 unsigned int delayed_branch
;
81 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
82 unsigned int clear_imm
;
87 #define JMP_DIRECT_CC 2
88 #define JMP_INDIRECT 3
92 int abort_at_next_insn
;
94 struct TranslationBlock
*tb
;
95 int singlestep_enabled
;
/* Printable names of the 32 general purpose registers, indexed by regno. */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
/* Printable names of the special registers; unnamed slots use "srN". */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
112 static inline void t_sync_flags(DisasContext
*dc
)
114 /* Synch the tb dependent flags between translator and runtime. */
115 if (dc
->tb_flags
!= dc
->synced_flags
) {
116 tcg_gen_movi_i32(env_iflags
, dc
->tb_flags
);
117 dc
->synced_flags
= dc
->tb_flags
;
121 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
123 TCGv_i32 tmp
= tcg_const_i32(index
);
126 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
);
127 gen_helper_raise_exception(cpu_env
, tmp
);
128 tcg_temp_free_i32(tmp
);
129 dc
->is_jmp
= DISAS_UPDATE
;
132 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
134 #ifndef CONFIG_USER_ONLY
135 return (dc
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
141 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
143 if (use_goto_tb(dc
, dest
)) {
145 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dest
);
146 tcg_gen_exit_tb((uintptr_t)dc
->tb
+ n
);
148 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dest
);
153 static void read_carry(DisasContext
*dc
, TCGv_i32 d
)
155 tcg_gen_shri_i32(d
, cpu_SR
[SR_MSR
], 31);
159 * write_carry sets the carry bits in MSR based on bit 0 of v.
160 * v[31:1] are ignored.
162 static void write_carry(DisasContext
*dc
, TCGv_i32 v
)
164 TCGv_i32 t0
= tcg_temp_new_i32();
165 tcg_gen_shli_i32(t0
, v
, 31);
166 tcg_gen_sari_i32(t0
, t0
, 31);
167 tcg_gen_andi_i32(t0
, t0
, (MSR_C
| MSR_CC
));
168 tcg_gen_andi_i32(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
],
170 tcg_gen_or_i32(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], t0
);
171 tcg_temp_free_i32(t0
);
174 static void write_carryi(DisasContext
*dc
, bool carry
)
176 TCGv_i32 t0
= tcg_temp_new_i32();
177 tcg_gen_movi_i32(t0
, carry
);
179 tcg_temp_free_i32(t0
);
183 * Returns true if the insn is illegal in userspace.
184 * If exceptions are enabled, an exception is raised.
186 static bool trap_userspace(DisasContext
*dc
, bool cond
)
188 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
189 bool cond_user
= cond
&& mem_index
== MMU_USER_IDX
;
191 if (cond_user
&& (dc
->tb_flags
& MSR_EE_FLAG
)) {
192 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
193 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
198 /* True if ALU operand b is a small immediate that may deserve
200 static inline int dec_alu_op_b_is_small_imm(DisasContext
*dc
)
202 /* Immediate insn without the imm prefix ? */
203 return dc
->type_b
&& !(dc
->tb_flags
& IMM_FLAG
);
206 static inline TCGv_i32
*dec_alu_op_b(DisasContext
*dc
)
209 if (dc
->tb_flags
& IMM_FLAG
)
210 tcg_gen_ori_i32(env_imm
, env_imm
, dc
->imm
);
212 tcg_gen_movi_i32(env_imm
, (int32_t)((int16_t)dc
->imm
));
215 return &cpu_R
[dc
->rb
];
218 static void dec_add(DisasContext
*dc
)
226 LOG_DIS("add%s%s%s r%d r%d r%d\n",
227 dc
->type_b
? "i" : "", k
? "k" : "", c
? "c" : "",
228 dc
->rd
, dc
->ra
, dc
->rb
);
230 /* Take care of the easy cases first. */
232 /* k - keep carry, no need to update MSR. */
233 /* If rd == r0, it's a nop. */
235 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
238 /* c - Add carry into the result. */
239 cf
= tcg_temp_new_i32();
242 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
243 tcg_temp_free_i32(cf
);
249 /* From now on, we can assume k is zero. So we need to update MSR. */
251 cf
= tcg_temp_new_i32();
255 tcg_gen_movi_i32(cf
, 0);
259 TCGv_i32 ncf
= tcg_temp_new_i32();
260 gen_helper_carry(ncf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
261 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
262 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
263 write_carry(dc
, ncf
);
264 tcg_temp_free_i32(ncf
);
266 gen_helper_carry(cf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
269 tcg_temp_free_i32(cf
);
272 static void dec_sub(DisasContext
*dc
)
274 unsigned int u
, cmp
, k
, c
;
280 cmp
= (dc
->imm
& 1) && (!dc
->type_b
) && k
;
283 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u
? "u" : "", dc
->rd
, dc
->ra
, dc
->ir
);
286 gen_helper_cmpu(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
288 gen_helper_cmp(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
293 LOG_DIS("sub%s%s r%d, r%d r%d\n",
294 k
? "k" : "", c
? "c" : "", dc
->rd
, dc
->ra
, dc
->rb
);
296 /* Take care of the easy cases first. */
298 /* k - keep carry, no need to update MSR. */
299 /* If rd == r0, it's a nop. */
301 tcg_gen_sub_i32(cpu_R
[dc
->rd
], *(dec_alu_op_b(dc
)), cpu_R
[dc
->ra
]);
304 /* c - Add carry into the result. */
305 cf
= tcg_temp_new_i32();
308 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
309 tcg_temp_free_i32(cf
);
315 /* From now on, we can assume k is zero. So we need to update MSR. */
316 /* Extract carry. And complement a into na. */
317 cf
= tcg_temp_new_i32();
318 na
= tcg_temp_new_i32();
322 tcg_gen_movi_i32(cf
, 1);
325 /* d = b + ~a + c. carry defaults to 1. */
326 tcg_gen_not_i32(na
, cpu_R
[dc
->ra
]);
329 TCGv_i32 ncf
= tcg_temp_new_i32();
330 gen_helper_carry(ncf
, na
, *(dec_alu_op_b(dc
)), cf
);
331 tcg_gen_add_i32(cpu_R
[dc
->rd
], na
, *(dec_alu_op_b(dc
)));
332 tcg_gen_add_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
333 write_carry(dc
, ncf
);
334 tcg_temp_free_i32(ncf
);
336 gen_helper_carry(cf
, na
, *(dec_alu_op_b(dc
)), cf
);
339 tcg_temp_free_i32(cf
);
340 tcg_temp_free_i32(na
);
343 static void dec_pattern(DisasContext
*dc
)
347 if ((dc
->tb_flags
& MSR_EE_FLAG
)
348 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
349 && !dc
->cpu
->cfg
.use_pcmp_instr
) {
350 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
351 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
354 mode
= dc
->opcode
& 3;
358 LOG_DIS("pcmpbf r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
360 gen_helper_pcmpbf(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
363 LOG_DIS("pcmpeq r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
365 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_R
[dc
->rd
],
366 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
370 LOG_DIS("pcmpne r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
372 tcg_gen_setcond_i32(TCG_COND_NE
, cpu_R
[dc
->rd
],
373 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
377 cpu_abort(CPU(dc
->cpu
),
378 "unsupported pattern insn opcode=%x\n", dc
->opcode
);
383 static void dec_and(DisasContext
*dc
)
387 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
392 not = dc
->opcode
& (1 << 1);
393 LOG_DIS("and%s\n", not ? "n" : "");
399 tcg_gen_andc_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
401 tcg_gen_and_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
404 static void dec_or(DisasContext
*dc
)
406 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
411 LOG_DIS("or r%d r%d r%d imm=%x\n", dc
->rd
, dc
->ra
, dc
->rb
, dc
->imm
);
413 tcg_gen_or_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
416 static void dec_xor(DisasContext
*dc
)
418 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
423 LOG_DIS("xor r%d\n", dc
->rd
);
425 tcg_gen_xor_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
428 static inline void msr_read(DisasContext
*dc
, TCGv_i32 d
)
430 tcg_gen_mov_i32(d
, cpu_SR
[SR_MSR
]);
433 static inline void msr_write(DisasContext
*dc
, TCGv_i32 v
)
437 t
= tcg_temp_new_i32();
438 dc
->cpustate_changed
= 1;
439 /* PVR bit is not writable. */
440 tcg_gen_andi_i32(t
, v
, ~MSR_PVR
);
441 tcg_gen_andi_i32(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], MSR_PVR
);
442 tcg_gen_or_i32(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], t
);
446 static void dec_msr(DisasContext
*dc
)
448 CPUState
*cs
= CPU(dc
->cpu
);
450 unsigned int sr
, to
, rn
;
452 sr
= dc
->imm
& ((1 << 14) - 1);
453 to
= dc
->imm
& (1 << 14);
456 dc
->cpustate_changed
= 1;
458 /* msrclr and msrset. */
459 if (!(dc
->imm
& (1 << 15))) {
460 unsigned int clr
= dc
->ir
& (1 << 16);
462 LOG_DIS("msr%s r%d imm=%x\n", clr
? "clr" : "set",
465 if (!dc
->cpu
->cfg
.use_msr_instr
) {
470 if (trap_userspace(dc
, dc
->imm
!= 4 && dc
->imm
!= 0)) {
475 msr_read(dc
, cpu_R
[dc
->rd
]);
477 t0
= tcg_temp_new_i32();
478 t1
= tcg_temp_new_i32();
480 tcg_gen_mov_i32(t1
, *(dec_alu_op_b(dc
)));
483 tcg_gen_not_i32(t1
, t1
);
484 tcg_gen_and_i32(t0
, t0
, t1
);
486 tcg_gen_or_i32(t0
, t0
, t1
);
488 tcg_temp_free_i32(t0
);
489 tcg_temp_free_i32(t1
);
490 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
+ 4);
491 dc
->is_jmp
= DISAS_UPDATE
;
495 if (trap_userspace(dc
, to
)) {
499 #if !defined(CONFIG_USER_ONLY)
500 /* Catch read/writes to the mmu block. */
501 if ((sr
& ~0xff) == 0x1000) {
503 LOG_DIS("m%ss sr%d r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
505 gen_helper_mmu_write(cpu_env
, tcg_const_i32(sr
), cpu_R
[dc
->ra
]);
507 gen_helper_mmu_read(cpu_R
[dc
->rd
], cpu_env
, tcg_const_i32(sr
));
513 LOG_DIS("m%ss sr%x r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
518 msr_write(dc
, cpu_R
[dc
->ra
]);
521 tcg_gen_mov_i32(cpu_SR
[SR_EAR
], cpu_R
[dc
->ra
]);
524 tcg_gen_mov_i32(cpu_SR
[SR_ESR
], cpu_R
[dc
->ra
]);
527 tcg_gen_andi_i32(cpu_SR
[SR_FSR
], cpu_R
[dc
->ra
], 31);
530 tcg_gen_st_i32(cpu_R
[dc
->ra
],
531 cpu_env
, offsetof(CPUMBState
, slr
));
534 tcg_gen_st_i32(cpu_R
[dc
->ra
],
535 cpu_env
, offsetof(CPUMBState
, shr
));
538 cpu_abort(CPU(dc
->cpu
), "unknown mts reg %x\n", sr
);
542 LOG_DIS("m%ss r%d sr%x imm=%x\n", to
? "t" : "f", dc
->rd
, sr
, dc
->imm
);
546 tcg_gen_movi_i32(cpu_R
[dc
->rd
], dc
->pc
);
549 msr_read(dc
, cpu_R
[dc
->rd
]);
552 tcg_gen_mov_i32(cpu_R
[dc
->rd
], cpu_SR
[SR_EAR
]);
555 tcg_gen_mov_i32(cpu_R
[dc
->rd
], cpu_SR
[SR_ESR
]);
558 tcg_gen_mov_i32(cpu_R
[dc
->rd
], cpu_SR
[SR_FSR
]);
561 tcg_gen_mov_i32(cpu_R
[dc
->rd
], cpu_SR
[SR_BTR
]);
564 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
565 cpu_env
, offsetof(CPUMBState
, slr
));
568 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
569 cpu_env
, offsetof(CPUMBState
, shr
));
585 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
586 cpu_env
, offsetof(CPUMBState
, pvr
.regs
[rn
]));
589 cpu_abort(cs
, "unknown mfs reg %x\n", sr
);
595 tcg_gen_movi_i32(cpu_R
[0], 0);
599 /* Multiplier unit. */
600 static void dec_mul(DisasContext
*dc
)
603 unsigned int subcode
;
605 if ((dc
->tb_flags
& MSR_EE_FLAG
)
606 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
607 && !dc
->cpu
->cfg
.use_hw_mul
) {
608 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
609 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
613 subcode
= dc
->imm
& 3;
616 LOG_DIS("muli r%d r%d %x\n", dc
->rd
, dc
->ra
, dc
->imm
);
617 tcg_gen_mul_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
621 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
622 if (subcode
>= 1 && subcode
<= 3 && dc
->cpu
->cfg
.use_hw_mul
< 2) {
626 tmp
= tcg_temp_new_i32();
629 LOG_DIS("mul r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
630 tcg_gen_mul_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
633 LOG_DIS("mulh r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
634 tcg_gen_muls2_i32(tmp
, cpu_R
[dc
->rd
],
635 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
638 LOG_DIS("mulhsu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
639 tcg_gen_mulsu2_i32(tmp
, cpu_R
[dc
->rd
],
640 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
643 LOG_DIS("mulhu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
644 tcg_gen_mulu2_i32(tmp
, cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
647 cpu_abort(CPU(dc
->cpu
), "unknown MUL insn %x\n", subcode
);
650 tcg_temp_free_i32(tmp
);
654 static void dec_div(DisasContext
*dc
)
661 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
662 && !dc
->cpu
->cfg
.use_div
) {
663 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
664 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
668 gen_helper_divu(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
671 gen_helper_divs(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
674 tcg_gen_movi_i32(cpu_R
[dc
->rd
], 0);
677 static void dec_barrel(DisasContext
*dc
)
680 unsigned int imm_w
, imm_s
;
681 bool s
, t
, e
= false, i
= false;
683 if ((dc
->tb_flags
& MSR_EE_FLAG
)
684 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
685 && !dc
->cpu
->cfg
.use_barrel
) {
686 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
687 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
692 /* Insert and extract are only available in immediate mode. */
693 i
= extract32(dc
->imm
, 15, 1);
694 e
= extract32(dc
->imm
, 14, 1);
696 s
= extract32(dc
->imm
, 10, 1);
697 t
= extract32(dc
->imm
, 9, 1);
698 imm_w
= extract32(dc
->imm
, 6, 5);
699 imm_s
= extract32(dc
->imm
, 0, 5);
701 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
703 s
? "l" : "r", t
? "a" : "l", dc
->rd
, dc
->ra
, dc
->rb
);
706 if (imm_w
+ imm_s
> 32 || imm_w
== 0) {
707 /* These inputs have an undefined behavior. */
708 qemu_log_mask(LOG_GUEST_ERROR
, "bsefi: Bad input w=%d s=%d\n",
711 tcg_gen_extract_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], imm_s
, imm_w
);
714 int width
= imm_w
- imm_s
+ 1;
717 /* These inputs have an undefined behavior. */
718 qemu_log_mask(LOG_GUEST_ERROR
, "bsifi: Bad input w=%d s=%d\n",
721 tcg_gen_deposit_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
],
725 t0
= tcg_temp_new_i32();
727 tcg_gen_mov_i32(t0
, *(dec_alu_op_b(dc
)));
728 tcg_gen_andi_i32(t0
, t0
, 31);
731 tcg_gen_shl_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
734 tcg_gen_sar_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
736 tcg_gen_shr_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
739 tcg_temp_free_i32(t0
);
743 static void dec_bit(DisasContext
*dc
)
745 CPUState
*cs
= CPU(dc
->cpu
);
749 op
= dc
->ir
& ((1 << 9) - 1);
753 t0
= tcg_temp_new_i32();
755 LOG_DIS("src r%d r%d\n", dc
->rd
, dc
->ra
);
756 tcg_gen_andi_i32(t0
, cpu_SR
[SR_MSR
], MSR_CC
);
757 write_carry(dc
, cpu_R
[dc
->ra
]);
759 tcg_gen_shri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
760 tcg_gen_or_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], t0
);
762 tcg_temp_free_i32(t0
);
768 LOG_DIS("srl r%d r%d\n", dc
->rd
, dc
->ra
);
770 /* Update carry. Note that write carry only looks at the LSB. */
771 write_carry(dc
, cpu_R
[dc
->ra
]);
774 tcg_gen_shri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
776 tcg_gen_sari_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
780 LOG_DIS("ext8s r%d r%d\n", dc
->rd
, dc
->ra
);
781 tcg_gen_ext8s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
784 LOG_DIS("ext16s r%d r%d\n", dc
->rd
, dc
->ra
);
785 tcg_gen_ext16s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
792 LOG_DIS("wdc r%d\n", dc
->ra
);
793 trap_userspace(dc
, true);
797 LOG_DIS("wic r%d\n", dc
->ra
);
798 trap_userspace(dc
, true);
801 if ((dc
->tb_flags
& MSR_EE_FLAG
)
802 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
803 && !dc
->cpu
->cfg
.use_pcmp_instr
) {
804 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
805 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
807 if (dc
->cpu
->cfg
.use_pcmp_instr
) {
808 tcg_gen_clzi_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 32);
813 LOG_DIS("swapb r%d r%d\n", dc
->rd
, dc
->ra
);
814 tcg_gen_bswap32_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
818 LOG_DIS("swaph r%d r%d\n", dc
->rd
, dc
->ra
);
819 tcg_gen_rotri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 16);
822 cpu_abort(cs
, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
823 dc
->pc
, op
, dc
->rd
, dc
->ra
, dc
->rb
);
828 static inline void sync_jmpstate(DisasContext
*dc
)
830 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
831 if (dc
->jmp
== JMP_DIRECT
) {
832 tcg_gen_movi_i32(env_btaken
, 1);
834 dc
->jmp
= JMP_INDIRECT
;
835 tcg_gen_movi_i32(env_btarget
, dc
->jmp_pc
);
839 static void dec_imm(DisasContext
*dc
)
841 LOG_DIS("imm %x\n", dc
->imm
<< 16);
842 tcg_gen_movi_i32(env_imm
, (dc
->imm
<< 16));
843 dc
->tb_flags
|= IMM_FLAG
;
847 static inline void compute_ldst_addr(DisasContext
*dc
, TCGv t
)
849 bool extimm
= dc
->tb_flags
& IMM_FLAG
;
850 /* Should be set to true if r1 is used by loadstores. */
851 bool stackprot
= false;
854 /* All load/stores use ra. */
855 if (dc
->ra
== 1 && dc
->cpu
->cfg
.stackprot
) {
859 /* Treat the common cases first. */
861 /* If any of the regs is r0, set t to the value of the other reg. */
863 tcg_gen_extu_i32_tl(t
, cpu_R
[dc
->rb
]);
865 } else if (dc
->rb
== 0) {
866 tcg_gen_extu_i32_tl(t
, cpu_R
[dc
->ra
]);
870 if (dc
->rb
== 1 && dc
->cpu
->cfg
.stackprot
) {
874 t32
= tcg_temp_new_i32();
875 tcg_gen_add_i32(t32
, cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
876 tcg_gen_extu_i32_tl(t
, t32
);
877 tcg_temp_free_i32(t32
);
880 gen_helper_stackprot(cpu_env
, t
);
885 t32
= tcg_temp_new_i32();
888 tcg_gen_mov_i32(t32
, cpu_R
[dc
->ra
]);
890 tcg_gen_movi_i32(t32
, (int32_t)((int16_t)dc
->imm
));
891 tcg_gen_add_i32(t32
, cpu_R
[dc
->ra
], t32
);
894 tcg_gen_add_i32(t32
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
896 tcg_gen_extu_i32_tl(t
, t32
);
897 tcg_temp_free_i32(t32
);
900 gen_helper_stackprot(cpu_env
, t
);
905 static void dec_load(DisasContext
*dc
)
910 bool rev
= false, ex
= false;
913 mop
= dc
->opcode
& 3;
916 rev
= extract32(dc
->ir
, 9, 1);
917 ex
= extract32(dc
->ir
, 10, 1);
924 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
925 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
926 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
927 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
931 LOG_DIS("l%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
935 addr
= tcg_temp_new();
936 compute_ldst_addr(dc
, addr
);
939 * When doing reverse accesses we need to do two things.
941 * 1. Reverse the address wrt endianness.
942 * 2. Byteswap the data lanes on the way back into the CPU core.
944 if (rev
&& size
!= 4) {
945 /* Endian reverse the address. t is addr. */
953 TCGv low
= tcg_temp_new();
955 tcg_gen_andi_tl(low
, addr
, 3);
956 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
957 tcg_gen_andi_tl(addr
, addr
, ~3);
958 tcg_gen_or_tl(addr
, addr
, low
);
966 tcg_gen_xori_tl(addr
, addr
, 2);
969 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
974 /* lwx does not throw unaligned access errors, so force alignment */
976 tcg_gen_andi_tl(addr
, addr
, ~3);
979 /* If we get a fault on a dslot, the jmpstate better be in sync. */
982 /* Verify alignment if needed. */
984 * Microblaze gives MMU faults priority over faults due to
985 * unaligned addresses. That's why we speculatively do the load
986 * into v. If the load succeeds, we verify alignment of the
987 * address and if that succeeds we write into the destination reg.
989 v
= tcg_temp_new_i32();
990 tcg_gen_qemu_ld_i32(v
, addr
, cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
992 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
993 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
);
994 gen_helper_memalign(cpu_env
, addr
, tcg_const_i32(dc
->rd
),
995 tcg_const_i32(0), tcg_const_i32(size
- 1));
999 tcg_gen_mov_tl(env_res_addr
, addr
);
1000 tcg_gen_mov_i32(env_res_val
, v
);
1003 tcg_gen_mov_i32(cpu_R
[dc
->rd
], v
);
1005 tcg_temp_free_i32(v
);
1008 /* no support for AXI exclusive so always clear C */
1009 write_carryi(dc
, 0);
1012 tcg_temp_free(addr
);
1015 static void dec_store(DisasContext
*dc
)
1018 TCGLabel
*swx_skip
= NULL
;
1020 bool rev
= false, ex
= false;
1023 mop
= dc
->opcode
& 3;
1026 rev
= extract32(dc
->ir
, 9, 1);
1027 ex
= extract32(dc
->ir
, 10, 1);
1034 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
1035 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1036 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1037 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1041 LOG_DIS("s%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
1044 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1046 /* SWX needs a temp_local. */
1047 addr
= ex
? tcg_temp_local_new() : tcg_temp_new();
1048 compute_ldst_addr(dc
, addr
);
1053 /* swx does not throw unaligned access errors, so force alignment */
1054 tcg_gen_andi_tl(addr
, addr
, ~3);
1056 write_carryi(dc
, 1);
1057 swx_skip
= gen_new_label();
1058 tcg_gen_brcond_tl(TCG_COND_NE
, env_res_addr
, addr
, swx_skip
);
1060 /* Compare the value loaded at lwx with current contents of
1061 the reserved location.
1062 FIXME: This only works for system emulation where we can expect
1063 this compare and the following write to be atomic. For user
1064 emulation we need to add atomicity between threads. */
1065 tval
= tcg_temp_new_i32();
1066 tcg_gen_qemu_ld_i32(tval
, addr
, cpu_mmu_index(&dc
->cpu
->env
, false),
1068 tcg_gen_brcond_i32(TCG_COND_NE
, env_res_val
, tval
, swx_skip
);
1069 write_carryi(dc
, 0);
1070 tcg_temp_free_i32(tval
);
1073 if (rev
&& size
!= 4) {
1074 /* Endian reverse the address. t is addr. */
1082 TCGv low
= tcg_temp_new();
1084 tcg_gen_andi_tl(low
, addr
, 3);
1085 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
1086 tcg_gen_andi_tl(addr
, addr
, ~3);
1087 tcg_gen_or_tl(addr
, addr
, low
);
1095 /* Force addr into the temp. */
1096 tcg_gen_xori_tl(addr
, addr
, 2);
1099 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
1103 tcg_gen_qemu_st_i32(cpu_R
[dc
->rd
], addr
,
1104 cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
1106 /* Verify alignment if needed. */
1107 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
1108 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
);
1109 /* FIXME: if the alignment is wrong, we should restore the value
1110 * in memory. One possible way to achieve this is to probe
1111 * the MMU prior to the memaccess, thay way we could put
1112 * the alignment checks in between the probe and the mem
1115 gen_helper_memalign(cpu_env
, addr
, tcg_const_i32(dc
->rd
),
1116 tcg_const_i32(1), tcg_const_i32(size
- 1));
1120 gen_set_label(swx_skip
);
1123 tcg_temp_free(addr
);
1126 static inline void eval_cc(DisasContext
*dc
, unsigned int cc
,
1127 TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1131 tcg_gen_setcond_i32(TCG_COND_EQ
, d
, a
, b
);
1134 tcg_gen_setcond_i32(TCG_COND_NE
, d
, a
, b
);
1137 tcg_gen_setcond_i32(TCG_COND_LT
, d
, a
, b
);
1140 tcg_gen_setcond_i32(TCG_COND_LE
, d
, a
, b
);
1143 tcg_gen_setcond_i32(TCG_COND_GE
, d
, a
, b
);
1146 tcg_gen_setcond_i32(TCG_COND_GT
, d
, a
, b
);
1149 cpu_abort(CPU(dc
->cpu
), "Unknown condition code %x.\n", cc
);
1154 static void eval_cond_jmp(DisasContext
*dc
, TCGv_i32 pc_true
, TCGv_i32 pc_false
)
1156 TCGLabel
*l1
= gen_new_label();
1157 /* Conditional jmp. */
1158 tcg_gen_mov_i32(cpu_SR
[SR_PC
], pc_false
);
1159 tcg_gen_brcondi_i32(TCG_COND_EQ
, env_btaken
, 0, l1
);
1160 tcg_gen_mov_i32(cpu_SR
[SR_PC
], pc_true
);
1164 static void dec_bcc(DisasContext
*dc
)
1169 cc
= EXTRACT_FIELD(dc
->ir
, 21, 23);
1170 dslot
= dc
->ir
& (1 << 25);
1171 LOG_DIS("bcc%s r%d %x\n", dslot
? "d" : "", dc
->ra
, dc
->imm
);
1173 dc
->delayed_branch
= 1;
1175 dc
->delayed_branch
= 2;
1176 dc
->tb_flags
|= D_FLAG
;
1177 tcg_gen_st_i32(tcg_const_i32(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1178 cpu_env
, offsetof(CPUMBState
, bimm
));
1181 if (dec_alu_op_b_is_small_imm(dc
)) {
1182 int32_t offset
= (int32_t)((int16_t)dc
->imm
); /* sign-extend. */
1184 tcg_gen_movi_i32(env_btarget
, dc
->pc
+ offset
);
1185 dc
->jmp
= JMP_DIRECT_CC
;
1186 dc
->jmp_pc
= dc
->pc
+ offset
;
1188 dc
->jmp
= JMP_INDIRECT
;
1189 tcg_gen_movi_i32(env_btarget
, dc
->pc
);
1190 tcg_gen_add_i32(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1192 eval_cc(dc
, cc
, env_btaken
, cpu_R
[dc
->ra
], tcg_const_i32(0));
1195 static void dec_br(DisasContext
*dc
)
1197 unsigned int dslot
, link
, abs
, mbar
;
1199 dslot
= dc
->ir
& (1 << 20);
1200 abs
= dc
->ir
& (1 << 19);
1201 link
= dc
->ir
& (1 << 18);
1203 /* Memory barrier. */
1204 mbar
= (dc
->ir
>> 16) & 31;
1205 if (mbar
== 2 && dc
->imm
== 4) {
1206 /* mbar IMM & 16 decodes to sleep. */
1208 TCGv_i32 tmp_hlt
= tcg_const_i32(EXCP_HLT
);
1209 TCGv_i32 tmp_1
= tcg_const_i32(1);
1214 tcg_gen_st_i32(tmp_1
, cpu_env
,
1215 -offsetof(MicroBlazeCPU
, env
)
1216 +offsetof(CPUState
, halted
));
1217 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
+ 4);
1218 gen_helper_raise_exception(cpu_env
, tmp_hlt
);
1219 tcg_temp_free_i32(tmp_hlt
);
1220 tcg_temp_free_i32(tmp_1
);
1223 LOG_DIS("mbar %d\n", dc
->rd
);
1225 dc
->cpustate_changed
= 1;
1229 LOG_DIS("br%s%s%s%s imm=%x\n",
1230 abs
? "a" : "", link
? "l" : "",
1231 dc
->type_b
? "i" : "", dslot
? "d" : "",
1234 dc
->delayed_branch
= 1;
1236 dc
->delayed_branch
= 2;
1237 dc
->tb_flags
|= D_FLAG
;
1238 tcg_gen_st_i32(tcg_const_i32(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1239 cpu_env
, offsetof(CPUMBState
, bimm
));
1242 tcg_gen_movi_i32(cpu_R
[dc
->rd
], dc
->pc
);
1244 dc
->jmp
= JMP_INDIRECT
;
1246 tcg_gen_movi_i32(env_btaken
, 1);
1247 tcg_gen_mov_i32(env_btarget
, *(dec_alu_op_b(dc
)));
1248 if (link
&& !dslot
) {
1249 if (!(dc
->tb_flags
& IMM_FLAG
) && (dc
->imm
== 8 || dc
->imm
== 0x18))
1250 t_gen_raise_exception(dc
, EXCP_BREAK
);
1252 if (trap_userspace(dc
, true)) {
1256 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1260 if (dec_alu_op_b_is_small_imm(dc
)) {
1261 dc
->jmp
= JMP_DIRECT
;
1262 dc
->jmp_pc
= dc
->pc
+ (int32_t)((int16_t)dc
->imm
);
1264 tcg_gen_movi_i32(env_btaken
, 1);
1265 tcg_gen_movi_i32(env_btarget
, dc
->pc
);
1266 tcg_gen_add_i32(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1271 static inline void do_rti(DisasContext
*dc
)
1274 t0
= tcg_temp_new_i32();
1275 t1
= tcg_temp_new_i32();
1276 tcg_gen_shri_i32(t0
, cpu_SR
[SR_MSR
], 1);
1277 tcg_gen_ori_i32(t1
, cpu_SR
[SR_MSR
], MSR_IE
);
1278 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1280 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1281 tcg_gen_or_i32(t1
, t1
, t0
);
1283 tcg_temp_free_i32(t1
);
1284 tcg_temp_free_i32(t0
);
1285 dc
->tb_flags
&= ~DRTI_FLAG
;
1288 static inline void do_rtb(DisasContext
*dc
)
1291 t0
= tcg_temp_new_i32();
1292 t1
= tcg_temp_new_i32();
1293 tcg_gen_andi_i32(t1
, cpu_SR
[SR_MSR
], ~MSR_BIP
);
1294 tcg_gen_shri_i32(t0
, t1
, 1);
1295 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1297 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1298 tcg_gen_or_i32(t1
, t1
, t0
);
1300 tcg_temp_free_i32(t1
);
1301 tcg_temp_free_i32(t0
);
1302 dc
->tb_flags
&= ~DRTB_FLAG
;
1305 static inline void do_rte(DisasContext
*dc
)
1308 t0
= tcg_temp_new_i32();
1309 t1
= tcg_temp_new_i32();
1311 tcg_gen_ori_i32(t1
, cpu_SR
[SR_MSR
], MSR_EE
);
1312 tcg_gen_andi_i32(t1
, t1
, ~MSR_EIP
);
1313 tcg_gen_shri_i32(t0
, t1
, 1);
1314 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1316 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1317 tcg_gen_or_i32(t1
, t1
, t0
);
1319 tcg_temp_free_i32(t1
);
1320 tcg_temp_free_i32(t0
);
1321 dc
->tb_flags
&= ~DRTE_FLAG
;
1324 static void dec_rts(DisasContext
*dc
)
1326 unsigned int b_bit
, i_bit
, e_bit
;
1328 i_bit
= dc
->ir
& (1 << 21);
1329 b_bit
= dc
->ir
& (1 << 22);
1330 e_bit
= dc
->ir
& (1 << 23);
1332 if (trap_userspace(dc
, i_bit
|| b_bit
|| e_bit
)) {
1336 dc
->delayed_branch
= 2;
1337 dc
->tb_flags
|= D_FLAG
;
1338 tcg_gen_st_i32(tcg_const_i32(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1339 cpu_env
, offsetof(CPUMBState
, bimm
));
1342 LOG_DIS("rtid ir=%x\n", dc
->ir
);
1343 dc
->tb_flags
|= DRTI_FLAG
;
1345 LOG_DIS("rtbd ir=%x\n", dc
->ir
);
1346 dc
->tb_flags
|= DRTB_FLAG
;
1348 LOG_DIS("rted ir=%x\n", dc
->ir
);
1349 dc
->tb_flags
|= DRTE_FLAG
;
1351 LOG_DIS("rts ir=%x\n", dc
->ir
);
1353 dc
->jmp
= JMP_INDIRECT
;
1354 tcg_gen_movi_i32(env_btaken
, 1);
1355 tcg_gen_add_i32(env_btarget
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
1358 static int dec_check_fpuv2(DisasContext
*dc
)
1360 if ((dc
->cpu
->cfg
.use_fpu
!= 2) && (dc
->tb_flags
& MSR_EE_FLAG
)) {
1361 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_FPU
);
1362 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1364 return (dc
->cpu
->cfg
.use_fpu
== 2) ? 0 : PVR2_USE_FPU2_MASK
;
1367 static void dec_fpu(DisasContext
*dc
)
1369 unsigned int fpu_insn
;
1371 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1372 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1373 && !dc
->cpu
->cfg
.use_fpu
) {
1374 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1375 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1379 fpu_insn
= (dc
->ir
>> 7) & 7;
1383 gen_helper_fadd(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1388 gen_helper_frsub(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1393 gen_helper_fmul(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1398 gen_helper_fdiv(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1403 switch ((dc
->ir
>> 4) & 7) {
1405 gen_helper_fcmp_un(cpu_R
[dc
->rd
], cpu_env
,
1406 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1409 gen_helper_fcmp_lt(cpu_R
[dc
->rd
], cpu_env
,
1410 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1413 gen_helper_fcmp_eq(cpu_R
[dc
->rd
], cpu_env
,
1414 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1417 gen_helper_fcmp_le(cpu_R
[dc
->rd
], cpu_env
,
1418 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1421 gen_helper_fcmp_gt(cpu_R
[dc
->rd
], cpu_env
,
1422 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1425 gen_helper_fcmp_ne(cpu_R
[dc
->rd
], cpu_env
,
1426 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1429 gen_helper_fcmp_ge(cpu_R
[dc
->rd
], cpu_env
,
1430 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1433 qemu_log_mask(LOG_UNIMP
,
1434 "unimplemented fcmp fpu_insn=%x pc=%x"
1436 fpu_insn
, dc
->pc
, dc
->opcode
);
1437 dc
->abort_at_next_insn
= 1;
1443 if (!dec_check_fpuv2(dc
)) {
1446 gen_helper_flt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1450 if (!dec_check_fpuv2(dc
)) {
1453 gen_helper_fint(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1457 if (!dec_check_fpuv2(dc
)) {
1460 gen_helper_fsqrt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1464 qemu_log_mask(LOG_UNIMP
, "unimplemented FPU insn fpu_insn=%x pc=%x"
1466 fpu_insn
, dc
->pc
, dc
->opcode
);
1467 dc
->abort_at_next_insn
= 1;
1472 static void dec_null(DisasContext
*dc
)
1474 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1475 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1476 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1477 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1480 qemu_log_mask(LOG_GUEST_ERROR
, "unknown insn pc=%x opc=%x\n", dc
->pc
, dc
->opcode
);
1481 dc
->abort_at_next_insn
= 1;
1484 /* Insns connected to FSL or AXI stream attached devices. */
1485 static void dec_stream(DisasContext
*dc
)
1487 TCGv_i32 t_id
, t_ctrl
;
1490 LOG_DIS("%s%s imm=%x\n", dc
->rd
? "get" : "put",
1491 dc
->type_b
? "" : "d", dc
->imm
);
1493 if (trap_userspace(dc
, true)) {
1497 t_id
= tcg_temp_new_i32();
1499 tcg_gen_movi_i32(t_id
, dc
->imm
& 0xf);
1500 ctrl
= dc
->imm
>> 10;
1502 tcg_gen_andi_i32(t_id
, cpu_R
[dc
->rb
], 0xf);
1503 ctrl
= dc
->imm
>> 5;
1506 t_ctrl
= tcg_const_i32(ctrl
);
1509 gen_helper_put(t_id
, t_ctrl
, cpu_R
[dc
->ra
]);
1511 gen_helper_get(cpu_R
[dc
->rd
], t_id
, t_ctrl
);
1513 tcg_temp_free_i32(t_id
);
1514 tcg_temp_free_i32(t_ctrl
);
1517 static struct decoder_info
{
1522 void (*dec
)(DisasContext
*dc
);
1530 {DEC_BARREL
, dec_barrel
},
1532 {DEC_ST
, dec_store
},
1541 {DEC_STREAM
, dec_stream
},
1545 static inline void decode(DisasContext
*dc
, uint32_t ir
)
1550 LOG_DIS("%8.8x\t", dc
->ir
);
1555 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1556 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1557 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_OPCODE_0x0_ILL_MASK
)) {
1558 tcg_gen_movi_i32(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1559 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1563 LOG_DIS("nr_nops=%d\t", dc
->nr_nops
);
1565 if (dc
->nr_nops
> 4) {
1566 cpu_abort(CPU(dc
->cpu
), "fetching nop sequence\n");
1569 /* bit 2 seems to indicate insn type. */
1570 dc
->type_b
= ir
& (1 << 29);
1572 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1573 dc
->rd
= EXTRACT_FIELD(ir
, 21, 25);
1574 dc
->ra
= EXTRACT_FIELD(ir
, 16, 20);
1575 dc
->rb
= EXTRACT_FIELD(ir
, 11, 15);
1576 dc
->imm
= EXTRACT_FIELD(ir
, 0, 15);
1578 /* Large switch for all insns. */
1579 for (i
= 0; i
< ARRAY_SIZE(decinfo
); i
++) {
1580 if ((dc
->opcode
& decinfo
[i
].mask
) == decinfo
[i
].bits
) {
1587 /* generate intermediate code for basic block 'tb'. */
1588 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
1590 CPUMBState
*env
= cs
->env_ptr
;
1591 MicroBlazeCPU
*cpu
= mb_env_get_cpu(env
);
1593 struct DisasContext ctx
;
1594 struct DisasContext
*dc
= &ctx
;
1595 uint32_t page_start
, org_flags
;
1603 org_flags
= dc
->synced_flags
= dc
->tb_flags
= tb
->flags
;
1605 dc
->is_jmp
= DISAS_NEXT
;
1607 dc
->delayed_branch
= !!(dc
->tb_flags
& D_FLAG
);
1608 if (dc
->delayed_branch
) {
1609 dc
->jmp
= JMP_INDIRECT
;
1612 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
1613 dc
->cpustate_changed
= 0;
1614 dc
->abort_at_next_insn
= 0;
1618 cpu_abort(cs
, "Microblaze: unaligned PC=%x\n", pc_start
);
1621 page_start
= pc_start
& TARGET_PAGE_MASK
;
1623 max_insns
= tb_cflags(tb
) & CF_COUNT_MASK
;
1624 if (max_insns
== 0) {
1625 max_insns
= CF_COUNT_MASK
;
1627 if (max_insns
> TCG_MAX_INSNS
) {
1628 max_insns
= TCG_MAX_INSNS
;
1634 tcg_gen_insn_start(dc
->pc
);
1638 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1639 tcg_gen_movi_i32(cpu_SR
[SR_PC
], dc
->pc
);
1644 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
1645 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1646 dc
->is_jmp
= DISAS_UPDATE
;
1647 /* The address covered by the breakpoint must be included in
1648 [tb->pc, tb->pc + tb->size) in order to for it to be
1649 properly cleared -- thus we increment the PC here so that
1650 the logic setting tb->size below does the right thing. */
1656 LOG_DIS("%8.8x:\t", dc
->pc
);
1658 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
1663 decode(dc
, cpu_ldl_code(env
, dc
->pc
));
1665 dc
->tb_flags
&= ~IMM_FLAG
;
1668 if (dc
->delayed_branch
) {
1669 dc
->delayed_branch
--;
1670 if (!dc
->delayed_branch
) {
1671 if (dc
->tb_flags
& DRTI_FLAG
)
1673 if (dc
->tb_flags
& DRTB_FLAG
)
1675 if (dc
->tb_flags
& DRTE_FLAG
)
1677 /* Clear the delay slot flag. */
1678 dc
->tb_flags
&= ~D_FLAG
;
1679 /* If it is a direct jump, try direct chaining. */
1680 if (dc
->jmp
== JMP_INDIRECT
) {
1681 eval_cond_jmp(dc
, env_btarget
, tcg_const_i32(dc
->pc
));
1682 dc
->is_jmp
= DISAS_JUMP
;
1683 } else if (dc
->jmp
== JMP_DIRECT
) {
1685 gen_goto_tb(dc
, 0, dc
->jmp_pc
);
1686 dc
->is_jmp
= DISAS_TB_JUMP
;
1687 } else if (dc
->jmp
== JMP_DIRECT_CC
) {
1688 TCGLabel
*l1
= gen_new_label();
1690 /* Conditional jmp. */
1691 tcg_gen_brcondi_i32(TCG_COND_NE
, env_btaken
, 0, l1
);
1692 gen_goto_tb(dc
, 1, dc
->pc
);
1694 gen_goto_tb(dc
, 0, dc
->jmp_pc
);
1696 dc
->is_jmp
= DISAS_TB_JUMP
;
1701 if (cs
->singlestep_enabled
) {
1704 } while (!dc
->is_jmp
&& !dc
->cpustate_changed
1705 && !tcg_op_buf_full()
1707 && (dc
->pc
- page_start
< TARGET_PAGE_SIZE
)
1708 && num_insns
< max_insns
);
1711 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
1712 if (dc
->tb_flags
& D_FLAG
) {
1713 dc
->is_jmp
= DISAS_UPDATE
;
1714 tcg_gen_movi_i32(cpu_SR
[SR_PC
], npc
);
1720 if (tb_cflags(tb
) & CF_LAST_IO
)
1722 /* Force an update if the per-tb cpu state has changed. */
1723 if (dc
->is_jmp
== DISAS_NEXT
1724 && (dc
->cpustate_changed
|| org_flags
!= dc
->tb_flags
)) {
1725 dc
->is_jmp
= DISAS_UPDATE
;
1726 tcg_gen_movi_i32(cpu_SR
[SR_PC
], npc
);
1730 if (unlikely(cs
->singlestep_enabled
)) {
1731 TCGv_i32 tmp
= tcg_const_i32(EXCP_DEBUG
);
1733 if (dc
->is_jmp
!= DISAS_JUMP
) {
1734 tcg_gen_movi_i32(cpu_SR
[SR_PC
], npc
);
1736 gen_helper_raise_exception(cpu_env
, tmp
);
1737 tcg_temp_free_i32(tmp
);
1739 switch(dc
->is_jmp
) {
1741 gen_goto_tb(dc
, 1, npc
);
1746 /* indicate that the hash table must be used
1747 to find the next TB */
1751 /* nothing more to generate */
1755 gen_tb_end(tb
, num_insns
);
1757 tb
->size
= dc
->pc
- pc_start
;
1758 tb
->icount
= num_insns
;
1762 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
1763 && qemu_log_in_addr_range(pc_start
)) {
1765 qemu_log("--------------\n");
1766 log_target_disas(cs
, pc_start
, dc
->pc
- pc_start
);
1771 assert(!dc
->abort_at_next_insn
);
1774 void mb_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
1777 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
1778 CPUMBState
*env
= &cpu
->env
;
1784 cpu_fprintf(f
, "IN: PC=%x %s\n",
1785 env
->sregs
[SR_PC
], lookup_symbol(env
->sregs
[SR_PC
]));
1786 cpu_fprintf(f
, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1787 env
->sregs
[SR_MSR
], env
->sregs
[SR_ESR
], env
->sregs
[SR_EAR
],
1788 env
->debug
, env
->imm
, env
->iflags
, env
->sregs
[SR_FSR
]);
1789 cpu_fprintf(f
, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1790 env
->btaken
, env
->btarget
,
1791 (env
->sregs
[SR_MSR
] & MSR_UM
) ? "user" : "kernel",
1792 (env
->sregs
[SR_MSR
] & MSR_UMS
) ? "user" : "kernel",
1793 (env
->sregs
[SR_MSR
] & MSR_EIP
),
1794 (env
->sregs
[SR_MSR
] & MSR_IE
));
1796 for (i
= 0; i
< 32; i
++) {
1797 cpu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1798 if ((i
+ 1) % 4 == 0)
1799 cpu_fprintf(f
, "\n");
1801 cpu_fprintf(f
, "\n\n");
1804 void mb_tcg_init(void)
1808 env_debug
= tcg_global_mem_new_i32(cpu_env
,
1809 offsetof(CPUMBState
, debug
),
1811 env_iflags
= tcg_global_mem_new_i32(cpu_env
,
1812 offsetof(CPUMBState
, iflags
),
1814 env_imm
= tcg_global_mem_new_i32(cpu_env
,
1815 offsetof(CPUMBState
, imm
),
1817 env_btarget
= tcg_global_mem_new_i32(cpu_env
,
1818 offsetof(CPUMBState
, btarget
),
1820 env_btaken
= tcg_global_mem_new_i32(cpu_env
,
1821 offsetof(CPUMBState
, btaken
),
1823 env_res_addr
= tcg_global_mem_new(cpu_env
,
1824 offsetof(CPUMBState
, res_addr
),
1826 env_res_val
= tcg_global_mem_new_i32(cpu_env
,
1827 offsetof(CPUMBState
, res_val
),
1829 for (i
= 0; i
< ARRAY_SIZE(cpu_R
); i
++) {
1830 cpu_R
[i
] = tcg_global_mem_new_i32(cpu_env
,
1831 offsetof(CPUMBState
, regs
[i
]),
1834 for (i
= 0; i
< ARRAY_SIZE(cpu_SR
); i
++) {
1835 cpu_SR
[i
] = tcg_global_mem_new_i32(cpu_env
,
1836 offsetof(CPUMBState
, sregs
[i
]),
1837 special_regnames
[i
]);
1841 void restore_state_to_opc(CPUMBState
*env
, TranslationBlock
*tb
,
1844 env
->sregs
[SR_PC
] = data
[0];