/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

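/* For example, decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull the
   6-bit major opcode out of bits [31:26] of the instruction word. */
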
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

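/* The shift-left/arithmetic-shift-right pair above smears bit 0 of v across
   the whole word, so the following AND/OR sequence can set or clear both
   MSR[C] and the carry copy MSR[CC] without any conditional branches. */
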
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

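/* The add group (add/addc/addk/addkc and the type-b addi forms) encodes two
   modifier bits in the opcode: bit 1 (c) adds the carry into the result and
   bit 2 (k) keeps the MSR carry bits unchanged. */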
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */

    /* k - keep carry, no need to update MSR.  */
    /* If rd == r0, it's a nop.  */
    if (dc->rd && k) {
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

        if (c) {
            /* c - Add carry into the result.  */
            cf = tcg_temp_new();

            read_carry(dc, cf);
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
            tcg_temp_free(cf);
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry before writing rd, since rd may alias ra
           or the operand b register.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* Writes to r0 are discarded; only the carry is of interest.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

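/* rsub subtracts ra from operand b.  The carry variants below follow the
   hardware identity b - a = b + ~a + carry, with the carry-in defaulting
   to 1 when no carry modifier is given. */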
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */

    /* k - keep carry, no need to update MSR.  */
    /* If rd == r0, it's a nop.  */
    if (dc->rd && k) {
        tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

        if (c) {
            /* c - Add carry into the result.  */
            cf = tcg_temp_new();

            read_carry(dc, cf);
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
            tcg_temp_free(cf);
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

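/* Pattern-compare unit: pcmpbf/pcmpeq/pcmpne, selected by the low two bits
   of the opcode.  pcmpbf (done in a helper) looks for a matching byte lane
   between ra and rb, while pcmpeq/pcmpne are plain word compares that
   produce 0 or 1. */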
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy, not v, so the guest cannot flip PVR.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

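/* dec_msr handles two instruction families: msrset/msrclr, which set or
   clear MSR bits given by an immediate, and mts/mfs, which move to or from
   the special registers (values 0x1000 and up address the MMU block). */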
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}

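/* Division is done in helpers (which get cpu_env) so that divide-by-zero
   can raise a hardware exception at runtime; the insn itself is only legal
   when the core is configured with C_USE_DIV. */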
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

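/* Barrel shifter: bsll/bsrl/bsra and their immediate forms, plus, in
   immediate mode only, the bit-field extract/insert variants bsefi/bsifi.
   All of them are distinguished by flag bits inside the immediate field. */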
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}

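/* Bit-manipulation group: shifts by one with or without carry (src/srl),
   sign extensions, cache ops (wdc/wic), count-leading-zeros and the
   byte/halfword swaps, distinguished by the low nine bits of the insn. */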
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

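/* imm is a prefix insn: it parks the high 16 bits of a 32-bit constant in
   env_imm and sets IMM_FLAG, so that the following type-b insn ORs in its
   own 16-bit immediate (see dec_alu_op_b) instead of sign-extending it. */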
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

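/* lwx above and swx below form a load-linked/store-conditional pair: lwx
   records the reserved address and loaded value in env_res_addr/env_res_val,
   and swx only performs its store when both still match, reporting the
   outcome through MSR.C (cleared on success, set on failure). */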
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

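/* MicroBlaze conditional branches (beq/bne/blt/ble/bge/bgt and their d-slot
   forms) always test ra against zero, which is why eval_cc is given a
   constant 0 as its second operand below. */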
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

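/* The rti/rtb/rte paths below rebuild MSR the same way: the saved UMS/VMS
   shadow bits sit one position above UM/VM, so shifting MSR right by one
   and masking with (MSR_VM | MSR_UM) restores the pre-exception mode. */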
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero when the FPU v2 insns are actually available, so
       callers can bail out with "if (!dec_check_fpuv2(dc))".  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

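/* Restore handler for exception unwind: tcg_gen_insn_start() in the
   translation loop records only the guest PC, so recovering the state of a
   faulting insn just means rewinding SR_PC from the insn_start data. */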
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}