2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
32 #include "trace-tcg.h"
39 #if DISAS_MB && !SIM_COMPAT
40 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 # define LOG_DIS(...) do { } while (0)
47 #define EXTRACT_FIELD(src, start, end) \
48 (((src) >> start) & ((1 << (end - start + 1)) - 1))
50 /* is_jmp field values */
51 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
52 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
53 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
55 static TCGv env_debug
;
56 static TCGv cpu_R
[32];
57 static TCGv cpu_SR
[18];
59 static TCGv env_btaken
;
60 static TCGv env_btarget
;
61 static TCGv env_iflags
;
62 static TCGv env_res_addr
;
63 static TCGv env_res_val
;
65 #include "exec/gen-icount.h"
67 /* This is the state at translation time. */
68 typedef struct DisasContext
{
79 unsigned int cpustate_changed
;
80 unsigned int delayed_branch
;
81 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
82 unsigned int clear_imm
;
87 #define JMP_DIRECT_CC 2
88 #define JMP_INDIRECT 3
92 int abort_at_next_insn
;
94 struct TranslationBlock
*tb
;
95 int singlestep_enabled
;
98 static const char *regnames
[] =
100 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
101 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
102 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
103 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
106 static const char *special_regnames
[] =
108 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
109 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
110 "sr16", "sr17", "sr18"
113 static inline void t_sync_flags(DisasContext
*dc
)
115 /* Synch the tb dependent flags between translator and runtime. */
116 if (dc
->tb_flags
!= dc
->synced_flags
) {
117 tcg_gen_movi_tl(env_iflags
, dc
->tb_flags
);
118 dc
->synced_flags
= dc
->tb_flags
;
122 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
124 TCGv_i32 tmp
= tcg_const_i32(index
);
127 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
128 gen_helper_raise_exception(cpu_env
, tmp
);
129 tcg_temp_free_i32(tmp
);
130 dc
->is_jmp
= DISAS_UPDATE
;
133 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
135 #ifndef CONFIG_USER_ONLY
136 return (dc
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
142 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
144 if (use_goto_tb(dc
, dest
)) {
146 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dest
);
147 tcg_gen_exit_tb((uintptr_t)dc
->tb
+ n
);
149 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dest
);
154 static void read_carry(DisasContext
*dc
, TCGv d
)
156 tcg_gen_shri_tl(d
, cpu_SR
[SR_MSR
], 31);
160 * write_carry sets the carry bits in MSR based on bit 0 of v.
161 * v[31:1] are ignored.
163 static void write_carry(DisasContext
*dc
, TCGv v
)
165 TCGv t0
= tcg_temp_new();
166 tcg_gen_shli_tl(t0
, v
, 31);
167 tcg_gen_sari_tl(t0
, t0
, 31);
168 tcg_gen_andi_tl(t0
, t0
, (MSR_C
| MSR_CC
));
169 tcg_gen_andi_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
],
171 tcg_gen_or_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], t0
);
175 static void write_carryi(DisasContext
*dc
, bool carry
)
177 TCGv t0
= tcg_temp_new();
178 tcg_gen_movi_tl(t0
, carry
);
183 /* True if ALU operand b is a small immediate that may deserve
185 static inline int dec_alu_op_b_is_small_imm(DisasContext
*dc
)
187 /* Immediate insn without the imm prefix ? */
188 return dc
->type_b
&& !(dc
->tb_flags
& IMM_FLAG
);
191 static inline TCGv
*dec_alu_op_b(DisasContext
*dc
)
194 if (dc
->tb_flags
& IMM_FLAG
)
195 tcg_gen_ori_tl(env_imm
, env_imm
, dc
->imm
);
197 tcg_gen_movi_tl(env_imm
, (int32_t)((int16_t)dc
->imm
));
200 return &cpu_R
[dc
->rb
];
203 static void dec_add(DisasContext
*dc
)
211 LOG_DIS("add%s%s%s r%d r%d r%d\n",
212 dc
->type_b
? "i" : "", k
? "k" : "", c
? "c" : "",
213 dc
->rd
, dc
->ra
, dc
->rb
);
215 /* Take care of the easy cases first. */
217 /* k - keep carry, no need to update MSR. */
218 /* If rd == r0, it's a nop. */
220 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
223 /* c - Add carry into the result. */
227 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
234 /* From now on, we can assume k is zero. So we need to update MSR. */
240 tcg_gen_movi_tl(cf
, 0);
244 TCGv ncf
= tcg_temp_new();
245 gen_helper_carry(ncf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
246 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
247 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
248 write_carry(dc
, ncf
);
251 gen_helper_carry(cf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
257 static void dec_sub(DisasContext
*dc
)
259 unsigned int u
, cmp
, k
, c
;
265 cmp
= (dc
->imm
& 1) && (!dc
->type_b
) && k
;
268 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u
? "u" : "", dc
->rd
, dc
->ra
, dc
->ir
);
271 gen_helper_cmpu(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
273 gen_helper_cmp(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
278 LOG_DIS("sub%s%s r%d, r%d r%d\n",
279 k
? "k" : "", c
? "c" : "", dc
->rd
, dc
->ra
, dc
->rb
);
281 /* Take care of the easy cases first. */
283 /* k - keep carry, no need to update MSR. */
284 /* If rd == r0, it's a nop. */
286 tcg_gen_sub_tl(cpu_R
[dc
->rd
], *(dec_alu_op_b(dc
)), cpu_R
[dc
->ra
]);
289 /* c - Add carry into the result. */
293 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
300 /* From now on, we can assume k is zero. So we need to update MSR. */
301 /* Extract carry. And complement a into na. */
307 tcg_gen_movi_tl(cf
, 1);
310 /* d = b + ~a + c. carry defaults to 1. */
311 tcg_gen_not_tl(na
, cpu_R
[dc
->ra
]);
314 TCGv ncf
= tcg_temp_new();
315 gen_helper_carry(ncf
, na
, *(dec_alu_op_b(dc
)), cf
);
316 tcg_gen_add_tl(cpu_R
[dc
->rd
], na
, *(dec_alu_op_b(dc
)));
317 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
318 write_carry(dc
, ncf
);
321 gen_helper_carry(cf
, na
, *(dec_alu_op_b(dc
)), cf
);
328 static void dec_pattern(DisasContext
*dc
)
332 if ((dc
->tb_flags
& MSR_EE_FLAG
)
333 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
334 && !dc
->cpu
->cfg
.use_pcmp_instr
) {
335 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
336 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
339 mode
= dc
->opcode
& 3;
343 LOG_DIS("pcmpbf r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
345 gen_helper_pcmpbf(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
348 LOG_DIS("pcmpeq r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
350 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_R
[dc
->rd
],
351 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
355 LOG_DIS("pcmpne r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
357 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_R
[dc
->rd
],
358 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
362 cpu_abort(CPU(dc
->cpu
),
363 "unsupported pattern insn opcode=%x\n", dc
->opcode
);
368 static void dec_and(DisasContext
*dc
)
372 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
377 not = dc
->opcode
& (1 << 1);
378 LOG_DIS("and%s\n", not ? "n" : "");
384 tcg_gen_andc_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
386 tcg_gen_and_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
389 static void dec_or(DisasContext
*dc
)
391 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
396 LOG_DIS("or r%d r%d r%d imm=%x\n", dc
->rd
, dc
->ra
, dc
->rb
, dc
->imm
);
398 tcg_gen_or_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
401 static void dec_xor(DisasContext
*dc
)
403 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
408 LOG_DIS("xor r%d\n", dc
->rd
);
410 tcg_gen_xor_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
413 static inline void msr_read(DisasContext
*dc
, TCGv d
)
415 tcg_gen_mov_tl(d
, cpu_SR
[SR_MSR
]);
418 static inline void msr_write(DisasContext
*dc
, TCGv v
)
423 dc
->cpustate_changed
= 1;
424 /* PVR bit is not writable. */
425 tcg_gen_andi_tl(t
, v
, ~MSR_PVR
);
426 tcg_gen_andi_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], MSR_PVR
);
427 tcg_gen_or_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], v
);
431 static void dec_msr(DisasContext
*dc
)
433 CPUState
*cs
= CPU(dc
->cpu
);
435 unsigned int sr
, to
, rn
;
436 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
438 sr
= dc
->imm
& ((1 << 14) - 1);
439 to
= dc
->imm
& (1 << 14);
442 dc
->cpustate_changed
= 1;
444 /* msrclr and msrset. */
445 if (!(dc
->imm
& (1 << 15))) {
446 unsigned int clr
= dc
->ir
& (1 << 16);
448 LOG_DIS("msr%s r%d imm=%x\n", clr
? "clr" : "set",
451 if (!dc
->cpu
->cfg
.use_msr_instr
) {
456 if ((dc
->tb_flags
& MSR_EE_FLAG
)
457 && mem_index
== MMU_USER_IDX
&& (dc
->imm
!= 4 && dc
->imm
!= 0)) {
458 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
459 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
464 msr_read(dc
, cpu_R
[dc
->rd
]);
469 tcg_gen_mov_tl(t1
, *(dec_alu_op_b(dc
)));
472 tcg_gen_not_tl(t1
, t1
);
473 tcg_gen_and_tl(t0
, t0
, t1
);
475 tcg_gen_or_tl(t0
, t0
, t1
);
479 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
+ 4);
480 dc
->is_jmp
= DISAS_UPDATE
;
485 if ((dc
->tb_flags
& MSR_EE_FLAG
)
486 && mem_index
== MMU_USER_IDX
) {
487 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
488 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
493 #if !defined(CONFIG_USER_ONLY)
494 /* Catch read/writes to the mmu block. */
495 if ((sr
& ~0xff) == 0x1000) {
497 LOG_DIS("m%ss sr%d r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
499 gen_helper_mmu_write(cpu_env
, tcg_const_tl(sr
), cpu_R
[dc
->ra
]);
501 gen_helper_mmu_read(cpu_R
[dc
->rd
], cpu_env
, tcg_const_tl(sr
));
507 LOG_DIS("m%ss sr%x r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
512 msr_write(dc
, cpu_R
[dc
->ra
]);
515 tcg_gen_mov_tl(cpu_SR
[SR_EAR
], cpu_R
[dc
->ra
]);
518 tcg_gen_mov_tl(cpu_SR
[SR_ESR
], cpu_R
[dc
->ra
]);
521 tcg_gen_andi_tl(cpu_SR
[SR_FSR
], cpu_R
[dc
->ra
], 31);
524 tcg_gen_st_tl(cpu_R
[dc
->ra
], cpu_env
, offsetof(CPUMBState
, slr
));
527 tcg_gen_st_tl(cpu_R
[dc
->ra
], cpu_env
, offsetof(CPUMBState
, shr
));
530 cpu_abort(CPU(dc
->cpu
), "unknown mts reg %x\n", sr
);
534 LOG_DIS("m%ss r%d sr%x imm=%x\n", to
? "t" : "f", dc
->rd
, sr
, dc
->imm
);
538 tcg_gen_movi_tl(cpu_R
[dc
->rd
], dc
->pc
);
541 msr_read(dc
, cpu_R
[dc
->rd
]);
544 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_EAR
]);
547 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_ESR
]);
550 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_FSR
]);
553 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_BTR
]);
556 tcg_gen_ld_tl(cpu_R
[dc
->rd
], cpu_env
, offsetof(CPUMBState
, slr
));
559 tcg_gen_ld_tl(cpu_R
[dc
->rd
], cpu_env
, offsetof(CPUMBState
, shr
));
575 tcg_gen_ld_tl(cpu_R
[dc
->rd
],
576 cpu_env
, offsetof(CPUMBState
, pvr
.regs
[rn
]));
579 cpu_abort(cs
, "unknown mfs reg %x\n", sr
);
585 tcg_gen_movi_tl(cpu_R
[0], 0);
589 /* Multiplier unit. */
590 static void dec_mul(DisasContext
*dc
)
593 unsigned int subcode
;
595 if ((dc
->tb_flags
& MSR_EE_FLAG
)
596 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
597 && !dc
->cpu
->cfg
.use_hw_mul
) {
598 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
599 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
603 subcode
= dc
->imm
& 3;
606 LOG_DIS("muli r%d r%d %x\n", dc
->rd
, dc
->ra
, dc
->imm
);
607 tcg_gen_mul_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
611 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
612 if (subcode
>= 1 && subcode
<= 3 && dc
->cpu
->cfg
.use_hw_mul
< 2) {
616 tmp
= tcg_temp_new();
619 LOG_DIS("mul r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
620 tcg_gen_mul_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
623 LOG_DIS("mulh r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
624 tcg_gen_muls2_tl(tmp
, cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
627 LOG_DIS("mulhsu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
628 tcg_gen_mulsu2_tl(tmp
, cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
631 LOG_DIS("mulhu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
632 tcg_gen_mulu2_tl(tmp
, cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
635 cpu_abort(CPU(dc
->cpu
), "unknown MUL insn %x\n", subcode
);
642 static void dec_div(DisasContext
*dc
)
649 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
650 && !dc
->cpu
->cfg
.use_div
) {
651 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
652 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
656 gen_helper_divu(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
659 gen_helper_divs(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
662 tcg_gen_movi_tl(cpu_R
[dc
->rd
], 0);
665 static void dec_barrel(DisasContext
*dc
)
668 unsigned int imm_w
, imm_s
;
669 bool s
, t
, e
= false, i
= false;
671 if ((dc
->tb_flags
& MSR_EE_FLAG
)
672 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
673 && !dc
->cpu
->cfg
.use_barrel
) {
674 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
675 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
680 /* Insert and extract are only available in immediate mode. */
681 i
= extract32(dc
->imm
, 15, 1);
682 e
= extract32(dc
->imm
, 14, 1);
684 s
= extract32(dc
->imm
, 10, 1);
685 t
= extract32(dc
->imm
, 9, 1);
686 imm_w
= extract32(dc
->imm
, 6, 5);
687 imm_s
= extract32(dc
->imm
, 0, 5);
689 LOG_DIS("bs%s%s%s r%d r%d r%d\n",
691 s
? "l" : "r", t
? "a" : "l", dc
->rd
, dc
->ra
, dc
->rb
);
694 if (imm_w
+ imm_s
> 32 || imm_w
== 0) {
695 /* These inputs have an undefined behavior. */
696 qemu_log_mask(LOG_GUEST_ERROR
, "bsefi: Bad input w=%d s=%d\n",
699 tcg_gen_extract_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], imm_s
, imm_w
);
702 int width
= imm_w
- imm_s
+ 1;
705 /* These inputs have an undefined behavior. */
706 qemu_log_mask(LOG_GUEST_ERROR
, "bsifi: Bad input w=%d s=%d\n",
709 tcg_gen_deposit_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
],
715 tcg_gen_mov_tl(t0
, *(dec_alu_op_b(dc
)));
716 tcg_gen_andi_tl(t0
, t0
, 31);
719 tcg_gen_shl_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
722 tcg_gen_sar_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
724 tcg_gen_shr_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
731 static void dec_bit(DisasContext
*dc
)
733 CPUState
*cs
= CPU(dc
->cpu
);
736 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
738 op
= dc
->ir
& ((1 << 9) - 1);
744 LOG_DIS("src r%d r%d\n", dc
->rd
, dc
->ra
);
745 tcg_gen_andi_tl(t0
, cpu_SR
[SR_MSR
], MSR_CC
);
746 write_carry(dc
, cpu_R
[dc
->ra
]);
748 tcg_gen_shri_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
749 tcg_gen_or_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], t0
);
757 LOG_DIS("srl r%d r%d\n", dc
->rd
, dc
->ra
);
759 /* Update carry. Note that write carry only looks at the LSB. */
760 write_carry(dc
, cpu_R
[dc
->ra
]);
763 tcg_gen_shri_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
765 tcg_gen_sari_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
769 LOG_DIS("ext8s r%d r%d\n", dc
->rd
, dc
->ra
);
770 tcg_gen_ext8s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
773 LOG_DIS("ext16s r%d r%d\n", dc
->rd
, dc
->ra
);
774 tcg_gen_ext16s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
781 LOG_DIS("wdc r%d\n", dc
->ra
);
782 if ((dc
->tb_flags
& MSR_EE_FLAG
)
783 && mem_index
== MMU_USER_IDX
) {
784 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
785 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
791 LOG_DIS("wic r%d\n", dc
->ra
);
792 if ((dc
->tb_flags
& MSR_EE_FLAG
)
793 && mem_index
== MMU_USER_IDX
) {
794 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
795 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
800 if ((dc
->tb_flags
& MSR_EE_FLAG
)
801 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
802 && !dc
->cpu
->cfg
.use_pcmp_instr
) {
803 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
804 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
806 if (dc
->cpu
->cfg
.use_pcmp_instr
) {
807 tcg_gen_clzi_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 32);
812 LOG_DIS("swapb r%d r%d\n", dc
->rd
, dc
->ra
);
813 tcg_gen_bswap32_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
817 LOG_DIS("swaph r%d r%d\n", dc
->rd
, dc
->ra
);
818 tcg_gen_rotri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 16);
821 cpu_abort(cs
, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
822 dc
->pc
, op
, dc
->rd
, dc
->ra
, dc
->rb
);
827 static inline void sync_jmpstate(DisasContext
*dc
)
829 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
830 if (dc
->jmp
== JMP_DIRECT
) {
831 tcg_gen_movi_tl(env_btaken
, 1);
833 dc
->jmp
= JMP_INDIRECT
;
834 tcg_gen_movi_tl(env_btarget
, dc
->jmp_pc
);
838 static void dec_imm(DisasContext
*dc
)
840 LOG_DIS("imm %x\n", dc
->imm
<< 16);
841 tcg_gen_movi_tl(env_imm
, (dc
->imm
<< 16));
842 dc
->tb_flags
|= IMM_FLAG
;
846 static inline TCGv
*compute_ldst_addr(DisasContext
*dc
, TCGv
*t
)
848 unsigned int extimm
= dc
->tb_flags
& IMM_FLAG
;
849 /* Should be set to one if r1 is used by loadstores. */
852 /* All load/stores use ra. */
853 if (dc
->ra
== 1 && dc
->cpu
->cfg
.stackprot
) {
857 /* Treat the common cases first. */
859 /* If any of the regs is r0, return a ptr to the other. */
861 return &cpu_R
[dc
->rb
];
862 } else if (dc
->rb
== 0) {
863 return &cpu_R
[dc
->ra
];
866 if (dc
->rb
== 1 && dc
->cpu
->cfg
.stackprot
) {
871 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
874 gen_helper_stackprot(cpu_env
, *t
);
881 return &cpu_R
[dc
->ra
];
884 tcg_gen_movi_tl(*t
, (int32_t)((int16_t)dc
->imm
));
885 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], *t
);
888 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
892 gen_helper_stackprot(cpu_env
, *t
);
897 static void dec_load(DisasContext
*dc
)
900 unsigned int size
, rev
= 0, ex
= 0;
903 mop
= dc
->opcode
& 3;
906 rev
= (dc
->ir
>> 9) & 1;
907 ex
= (dc
->ir
>> 10) & 1;
914 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
915 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
916 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
917 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
921 LOG_DIS("l%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
925 addr
= compute_ldst_addr(dc
, &t
);
928 * When doing reverse accesses we need to do two things.
930 * 1. Reverse the address wrt endianness.
931 * 2. Byteswap the data lanes on the way back into the CPU core.
933 if (rev
&& size
!= 4) {
934 /* Endian reverse the address. t is addr. */
942 TCGv low
= tcg_temp_new();
944 /* Force addr into the temp. */
947 tcg_gen_mov_tl(t
, *addr
);
951 tcg_gen_andi_tl(low
, t
, 3);
952 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
953 tcg_gen_andi_tl(t
, t
, ~3);
954 tcg_gen_or_tl(t
, t
, low
);
955 tcg_gen_mov_tl(env_imm
, t
);
963 /* Force addr into the temp. */
966 tcg_gen_xori_tl(t
, *addr
, 2);
969 tcg_gen_xori_tl(t
, t
, 2);
973 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
978 /* lwx does not throw unaligned access errors, so force alignment */
980 /* Force addr into the temp. */
983 tcg_gen_mov_tl(t
, *addr
);
986 tcg_gen_andi_tl(t
, t
, ~3);
989 /* If we get a fault on a dslot, the jmpstate better be in sync. */
992 /* Verify alignment if needed. */
994 * Microblaze gives MMU faults priority over faults due to
995 * unaligned addresses. That's why we speculatively do the load
996 * into v. If the load succeeds, we verify alignment of the
997 * address and if that succeeds we write into the destination reg.
1000 tcg_gen_qemu_ld_tl(v
, *addr
, cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
1002 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
1003 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
1004 gen_helper_memalign(cpu_env
, *addr
, tcg_const_tl(dc
->rd
),
1005 tcg_const_tl(0), tcg_const_tl(size
- 1));
1009 tcg_gen_mov_tl(env_res_addr
, *addr
);
1010 tcg_gen_mov_tl(env_res_val
, v
);
1013 tcg_gen_mov_tl(cpu_R
[dc
->rd
], v
);
1018 /* no support for AXI exclusive so always clear C */
1019 write_carryi(dc
, 0);
1026 static void dec_store(DisasContext
*dc
)
1028 TCGv t
, *addr
, swx_addr
;
1029 TCGLabel
*swx_skip
= NULL
;
1030 unsigned int size
, rev
= 0, ex
= 0;
1033 mop
= dc
->opcode
& 3;
1036 rev
= (dc
->ir
>> 9) & 1;
1037 ex
= (dc
->ir
>> 10) & 1;
1044 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
1045 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1046 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1047 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1051 LOG_DIS("s%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
1054 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1056 addr
= compute_ldst_addr(dc
, &t
);
1058 swx_addr
= tcg_temp_local_new();
1062 /* Force addr into the swx_addr. */
1063 tcg_gen_mov_tl(swx_addr
, *addr
);
1065 /* swx does not throw unaligned access errors, so force alignment */
1066 tcg_gen_andi_tl(swx_addr
, swx_addr
, ~3);
1068 write_carryi(dc
, 1);
1069 swx_skip
= gen_new_label();
1070 tcg_gen_brcond_tl(TCG_COND_NE
, env_res_addr
, swx_addr
, swx_skip
);
1072 /* Compare the value loaded at lwx with current contents of
1073 the reserved location.
1074 FIXME: This only works for system emulation where we can expect
1075 this compare and the following write to be atomic. For user
1076 emulation we need to add atomicity between threads. */
1077 tval
= tcg_temp_new();
1078 tcg_gen_qemu_ld_tl(tval
, swx_addr
, cpu_mmu_index(&dc
->cpu
->env
, false),
1080 tcg_gen_brcond_tl(TCG_COND_NE
, env_res_val
, tval
, swx_skip
);
1081 write_carryi(dc
, 0);
1082 tcg_temp_free(tval
);
1085 if (rev
&& size
!= 4) {
1086 /* Endian reverse the address. t is addr. */
1094 TCGv low
= tcg_temp_new();
1096 /* Force addr into the temp. */
1099 tcg_gen_mov_tl(t
, *addr
);
1103 tcg_gen_andi_tl(low
, t
, 3);
1104 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
1105 tcg_gen_andi_tl(t
, t
, ~3);
1106 tcg_gen_or_tl(t
, t
, low
);
1107 tcg_gen_mov_tl(env_imm
, t
);
1115 /* Force addr into the temp. */
1118 tcg_gen_xori_tl(t
, *addr
, 2);
1121 tcg_gen_xori_tl(t
, t
, 2);
1125 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
1129 tcg_gen_qemu_st_tl(cpu_R
[dc
->rd
], *addr
, cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
1131 /* Verify alignment if needed. */
1132 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
1133 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
1134 /* FIXME: if the alignment is wrong, we should restore the value
1135 * in memory. One possible way to achieve this is to probe
1136 * the MMU prior to the memaccess, thay way we could put
1137 * the alignment checks in between the probe and the mem
1140 gen_helper_memalign(cpu_env
, *addr
, tcg_const_tl(dc
->rd
),
1141 tcg_const_tl(1), tcg_const_tl(size
- 1));
1145 gen_set_label(swx_skip
);
1147 tcg_temp_free(swx_addr
);
1153 static inline void eval_cc(DisasContext
*dc
, unsigned int cc
,
1154 TCGv d
, TCGv a
, TCGv b
)
1158 tcg_gen_setcond_tl(TCG_COND_EQ
, d
, a
, b
);
1161 tcg_gen_setcond_tl(TCG_COND_NE
, d
, a
, b
);
1164 tcg_gen_setcond_tl(TCG_COND_LT
, d
, a
, b
);
1167 tcg_gen_setcond_tl(TCG_COND_LE
, d
, a
, b
);
1170 tcg_gen_setcond_tl(TCG_COND_GE
, d
, a
, b
);
1173 tcg_gen_setcond_tl(TCG_COND_GT
, d
, a
, b
);
1176 cpu_abort(CPU(dc
->cpu
), "Unknown condition code %x.\n", cc
);
1181 static void eval_cond_jmp(DisasContext
*dc
, TCGv pc_true
, TCGv pc_false
)
1183 TCGLabel
*l1
= gen_new_label();
1184 /* Conditional jmp. */
1185 tcg_gen_mov_tl(cpu_SR
[SR_PC
], pc_false
);
1186 tcg_gen_brcondi_tl(TCG_COND_EQ
, env_btaken
, 0, l1
);
1187 tcg_gen_mov_tl(cpu_SR
[SR_PC
], pc_true
);
1191 static void dec_bcc(DisasContext
*dc
)
1196 cc
= EXTRACT_FIELD(dc
->ir
, 21, 23);
1197 dslot
= dc
->ir
& (1 << 25);
1198 LOG_DIS("bcc%s r%d %x\n", dslot
? "d" : "", dc
->ra
, dc
->imm
);
1200 dc
->delayed_branch
= 1;
1202 dc
->delayed_branch
= 2;
1203 dc
->tb_flags
|= D_FLAG
;
1204 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1205 cpu_env
, offsetof(CPUMBState
, bimm
));
1208 if (dec_alu_op_b_is_small_imm(dc
)) {
1209 int32_t offset
= (int32_t)((int16_t)dc
->imm
); /* sign-extend. */
1211 tcg_gen_movi_tl(env_btarget
, dc
->pc
+ offset
);
1212 dc
->jmp
= JMP_DIRECT_CC
;
1213 dc
->jmp_pc
= dc
->pc
+ offset
;
1215 dc
->jmp
= JMP_INDIRECT
;
1216 tcg_gen_movi_tl(env_btarget
, dc
->pc
);
1217 tcg_gen_add_tl(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1219 eval_cc(dc
, cc
, env_btaken
, cpu_R
[dc
->ra
], tcg_const_tl(0));
1222 static void dec_br(DisasContext
*dc
)
1224 unsigned int dslot
, link
, abs
, mbar
;
1225 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1227 dslot
= dc
->ir
& (1 << 20);
1228 abs
= dc
->ir
& (1 << 19);
1229 link
= dc
->ir
& (1 << 18);
1231 /* Memory barrier. */
1232 mbar
= (dc
->ir
>> 16) & 31;
1233 if (mbar
== 2 && dc
->imm
== 4) {
1234 /* mbar IMM & 16 decodes to sleep. */
1236 TCGv_i32 tmp_hlt
= tcg_const_i32(EXCP_HLT
);
1237 TCGv_i32 tmp_1
= tcg_const_i32(1);
1242 tcg_gen_st_i32(tmp_1
, cpu_env
,
1243 -offsetof(MicroBlazeCPU
, env
)
1244 +offsetof(CPUState
, halted
));
1245 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
+ 4);
1246 gen_helper_raise_exception(cpu_env
, tmp_hlt
);
1247 tcg_temp_free_i32(tmp_hlt
);
1248 tcg_temp_free_i32(tmp_1
);
1251 LOG_DIS("mbar %d\n", dc
->rd
);
1253 dc
->cpustate_changed
= 1;
1257 LOG_DIS("br%s%s%s%s imm=%x\n",
1258 abs
? "a" : "", link
? "l" : "",
1259 dc
->type_b
? "i" : "", dslot
? "d" : "",
1262 dc
->delayed_branch
= 1;
1264 dc
->delayed_branch
= 2;
1265 dc
->tb_flags
|= D_FLAG
;
1266 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1267 cpu_env
, offsetof(CPUMBState
, bimm
));
1270 tcg_gen_movi_tl(cpu_R
[dc
->rd
], dc
->pc
);
1272 dc
->jmp
= JMP_INDIRECT
;
1274 tcg_gen_movi_tl(env_btaken
, 1);
1275 tcg_gen_mov_tl(env_btarget
, *(dec_alu_op_b(dc
)));
1276 if (link
&& !dslot
) {
1277 if (!(dc
->tb_flags
& IMM_FLAG
) && (dc
->imm
== 8 || dc
->imm
== 0x18))
1278 t_gen_raise_exception(dc
, EXCP_BREAK
);
1280 if ((dc
->tb_flags
& MSR_EE_FLAG
) && mem_index
== MMU_USER_IDX
) {
1281 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1282 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1286 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1290 if (dec_alu_op_b_is_small_imm(dc
)) {
1291 dc
->jmp
= JMP_DIRECT
;
1292 dc
->jmp_pc
= dc
->pc
+ (int32_t)((int16_t)dc
->imm
);
1294 tcg_gen_movi_tl(env_btaken
, 1);
1295 tcg_gen_movi_tl(env_btarget
, dc
->pc
);
1296 tcg_gen_add_tl(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1301 static inline void do_rti(DisasContext
*dc
)
1304 t0
= tcg_temp_new();
1305 t1
= tcg_temp_new();
1306 tcg_gen_shri_tl(t0
, cpu_SR
[SR_MSR
], 1);
1307 tcg_gen_ori_tl(t1
, cpu_SR
[SR_MSR
], MSR_IE
);
1308 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1310 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1311 tcg_gen_or_tl(t1
, t1
, t0
);
1315 dc
->tb_flags
&= ~DRTI_FLAG
;
1318 static inline void do_rtb(DisasContext
*dc
)
1321 t0
= tcg_temp_new();
1322 t1
= tcg_temp_new();
1323 tcg_gen_andi_tl(t1
, cpu_SR
[SR_MSR
], ~MSR_BIP
);
1324 tcg_gen_shri_tl(t0
, t1
, 1);
1325 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1327 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1328 tcg_gen_or_tl(t1
, t1
, t0
);
1332 dc
->tb_flags
&= ~DRTB_FLAG
;
1335 static inline void do_rte(DisasContext
*dc
)
1338 t0
= tcg_temp_new();
1339 t1
= tcg_temp_new();
1341 tcg_gen_ori_tl(t1
, cpu_SR
[SR_MSR
], MSR_EE
);
1342 tcg_gen_andi_tl(t1
, t1
, ~MSR_EIP
);
1343 tcg_gen_shri_tl(t0
, t1
, 1);
1344 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1346 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1347 tcg_gen_or_tl(t1
, t1
, t0
);
1351 dc
->tb_flags
&= ~DRTE_FLAG
;
1354 static void dec_rts(DisasContext
*dc
)
1356 unsigned int b_bit
, i_bit
, e_bit
;
1357 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1359 i_bit
= dc
->ir
& (1 << 21);
1360 b_bit
= dc
->ir
& (1 << 22);
1361 e_bit
= dc
->ir
& (1 << 23);
1363 dc
->delayed_branch
= 2;
1364 dc
->tb_flags
|= D_FLAG
;
1365 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1366 cpu_env
, offsetof(CPUMBState
, bimm
));
1369 LOG_DIS("rtid ir=%x\n", dc
->ir
);
1370 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1371 && mem_index
== MMU_USER_IDX
) {
1372 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1373 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1375 dc
->tb_flags
|= DRTI_FLAG
;
1377 LOG_DIS("rtbd ir=%x\n", dc
->ir
);
1378 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1379 && mem_index
== MMU_USER_IDX
) {
1380 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1381 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1383 dc
->tb_flags
|= DRTB_FLAG
;
1385 LOG_DIS("rted ir=%x\n", dc
->ir
);
1386 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1387 && mem_index
== MMU_USER_IDX
) {
1388 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1389 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1391 dc
->tb_flags
|= DRTE_FLAG
;
1393 LOG_DIS("rts ir=%x\n", dc
->ir
);
1395 dc
->jmp
= JMP_INDIRECT
;
1396 tcg_gen_movi_tl(env_btaken
, 1);
1397 tcg_gen_add_tl(env_btarget
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
1400 static int dec_check_fpuv2(DisasContext
*dc
)
1402 if ((dc
->cpu
->cfg
.use_fpu
!= 2) && (dc
->tb_flags
& MSR_EE_FLAG
)) {
1403 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_FPU
);
1404 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1406 return (dc
->cpu
->cfg
.use_fpu
== 2) ? 0 : PVR2_USE_FPU2_MASK
;
1409 static void dec_fpu(DisasContext
*dc
)
1411 unsigned int fpu_insn
;
1413 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1414 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1415 && (dc
->cpu
->cfg
.use_fpu
!= 1)) {
1416 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1417 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1421 fpu_insn
= (dc
->ir
>> 7) & 7;
1425 gen_helper_fadd(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1430 gen_helper_frsub(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1435 gen_helper_fmul(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1440 gen_helper_fdiv(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1445 switch ((dc
->ir
>> 4) & 7) {
1447 gen_helper_fcmp_un(cpu_R
[dc
->rd
], cpu_env
,
1448 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1451 gen_helper_fcmp_lt(cpu_R
[dc
->rd
], cpu_env
,
1452 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1455 gen_helper_fcmp_eq(cpu_R
[dc
->rd
], cpu_env
,
1456 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1459 gen_helper_fcmp_le(cpu_R
[dc
->rd
], cpu_env
,
1460 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1463 gen_helper_fcmp_gt(cpu_R
[dc
->rd
], cpu_env
,
1464 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1467 gen_helper_fcmp_ne(cpu_R
[dc
->rd
], cpu_env
,
1468 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1471 gen_helper_fcmp_ge(cpu_R
[dc
->rd
], cpu_env
,
1472 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1475 qemu_log_mask(LOG_UNIMP
,
1476 "unimplemented fcmp fpu_insn=%x pc=%x"
1478 fpu_insn
, dc
->pc
, dc
->opcode
);
1479 dc
->abort_at_next_insn
= 1;
1485 if (!dec_check_fpuv2(dc
)) {
1488 gen_helper_flt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1492 if (!dec_check_fpuv2(dc
)) {
1495 gen_helper_fint(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1499 if (!dec_check_fpuv2(dc
)) {
1502 gen_helper_fsqrt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1506 qemu_log_mask(LOG_UNIMP
, "unimplemented FPU insn fpu_insn=%x pc=%x"
1508 fpu_insn
, dc
->pc
, dc
->opcode
);
1509 dc
->abort_at_next_insn
= 1;
1514 static void dec_null(DisasContext
*dc
)
1516 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1517 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1518 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1519 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1522 qemu_log_mask(LOG_GUEST_ERROR
, "unknown insn pc=%x opc=%x\n", dc
->pc
, dc
->opcode
);
1523 dc
->abort_at_next_insn
= 1;
1526 /* Insns connected to FSL or AXI stream attached devices. */
1527 static void dec_stream(DisasContext
*dc
)
1529 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1530 TCGv_i32 t_id
, t_ctrl
;
1533 LOG_DIS("%s%s imm=%x\n", dc
->rd
? "get" : "put",
1534 dc
->type_b
? "" : "d", dc
->imm
);
1536 if ((dc
->tb_flags
& MSR_EE_FLAG
) && (mem_index
== MMU_USER_IDX
)) {
1537 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1538 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1542 t_id
= tcg_temp_new();
1544 tcg_gen_movi_tl(t_id
, dc
->imm
& 0xf);
1545 ctrl
= dc
->imm
>> 10;
1547 tcg_gen_andi_tl(t_id
, cpu_R
[dc
->rb
], 0xf);
1548 ctrl
= dc
->imm
>> 5;
1551 t_ctrl
= tcg_const_tl(ctrl
);
1554 gen_helper_put(t_id
, t_ctrl
, cpu_R
[dc
->ra
]);
1556 gen_helper_get(cpu_R
[dc
->rd
], t_id
, t_ctrl
);
1558 tcg_temp_free(t_id
);
1559 tcg_temp_free(t_ctrl
);
/*
 * Decode dispatch table: each entry matches (opcode & mask) == bits and
 * routes to the group decoder.  Scanned in order by decode(); the final
 * {0, dec_null} entry matches everything and handles unknown opcodes.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {0, dec_null}
};
/*
 * Decode one instruction word: split it into the common fields
 * (opcode, rd, ra, rb, imm) and dispatch to the first matching entry
 * in decinfo[].  A stream of zero words is treated as a nop sequence
 * and aborted after a few repetitions.
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Opcode 0x0 may be configured to raise illegal-opcode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        /* We are in a stream of zeroes (nops); catch runaway fetch.  */
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    /* Remember the flags the TB was compiled with so we can detect
       per-tb cpu-state changes below.  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* A TB that starts inside a delay slot carries D_FLAG.  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        /* dec_imm clears clear_imm to keep IMM_FLAG alive for the next
           insn; everything else drops the immediate prefix state.  */
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* The TB ended inside a delay slot; fall back to a dynamic
               jump through SR_PC.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
/*
 * Dump the MicroBlaze CPU state (PC, special registers, mode flags and
 * all 32 general-purpose registers) to the given stream.  Used by the
 * monitor / -d cpu logging.
 */
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    /* Print r0..r31, four per line.  */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
/*
 * Register the TCG globals that mirror CPUMBState fields (debug, iflags,
 * imm, branch state, reservation state, the GPR file and the special
 * registers).  Called once at target init.
 */
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    /* General-purpose registers r0..r31.  */
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    /* Special registers (PC, MSR, ESR, ...).  */
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
1886 void restore_state_to_opc(CPUMBState
*env
, TranslationBlock
*tb
,
1889 env
->sregs
[SR_PC
] = data
[0];