/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#define SIM_COMPAT 0
#define DISAS_MB 1

#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
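
/*
 * Illustrative note (not in the original source): for a MicroBlaze
 * instruction word ir, the rd field occupies bits 21..25, so
 *   EXTRACT_FIELD(ir, 21, 25)
 * expands to ((ir >> 21) & 0x1f).
 */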
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
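
/*
 * Illustrative note (not in the original source): with 4KiB target
 * pages, a branch from 0x1008 to 0x1ffc stays on the same page and may
 * be chained directly, while a branch to 0x2000 crosses a page boundary
 * and must take the slow exit so the mapping can be rechecked.
 */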
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
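
/*
 * Illustrative note (not in the original source): tcg_gen_deposit_i64
 * with (pos, len) = (2, 1) copies bit 0 of t0 into bit 2 of MSR, i.e.
 *   MSR = (MSR & ~4) | ((t0 & 1) << 2);
 * the second deposit does the same for the MSR_CC alias at bit 31.
 */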
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }
    return &cpu_R[dc->rb];
}
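
/*
 * Illustrative note (not in the original source): for register-form
 * insns this returns &cpu_R[rb]; for immediate-form insns it
 * materialises the operand in env_imm, either sign-extending the
 * 16-bit field or OR-ing it into the high half latched by a preceding
 * imm prefix (see dec_imm below).
 */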
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                    cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                    cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case SR_PC:
                break;
            case SR_MSR:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case SR_PC:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case SR_MSR:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
                /* fallthrough */
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
            case SR_EDR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
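
/*
 * Illustrative example (not in the original source): the sequence
 *   imm   0x1234
 *   addik r5, r0, 0x5678
 * first latches 0x12340000 into env_imm and sets IMM_FLAG; the addik
 * then ORs in its own 16-bit field via dec_alu_op_b(), yielding the
 * full 32-bit operand 0x12345678.
 */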
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
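
/*
 * Illustrative note (not in the original source): the branch decoders
 * call this as eval_cc(dc, cc, env_btaken, cpu_R[ra]), emitting e.g. a
 * setcond of (ra_val < 0) for blt, so env_btaken holds the
 * branch-taken flag that eval_cond_jmp() consumes later.
 */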
static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
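
/*
 * Illustrative example (not in the original source): "beqid r3, -8"
 * (delay slot, small immediate, no imm prefix) takes the JMP_DIRECT_CC
 * path: jmp_pc is known at translation time, so the translator can
 * later emit goto_tb chains for both the taken and fall-through edges.
 */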
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                   cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else {
        LOG_DIS("rts ir=%x\n", dc->ir);
    }

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
                 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                 "eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->sregs[SR_MSR] & MSR_EIP),
                 (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            qemu_fprintf(f, "\n");
    }
    qemu_fprintf(f, "\n\n");
}
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}