/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

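/* Note: helpers that can raise an exception rely on env->iflags being in
   sync with the translation-time flags; t_gen_raise_exception below calls
   t_sync_flags() first for exactly that reason. */
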
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

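/* Note: the shli/sari pair above replicates bit 0 of v into every bit
   position, so the mask against (MSR_C | MSR_CC) leaves both carry copies
   either set or clear, and the single OR updates them together. */
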
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}

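/* When an imm prefix is pending (IMM_FLAG set), env_imm already holds the
   high 16 bits, so only the low half is OR:ed in above; otherwise the
   16-bit immediate is sign-extended directly into env_imm. */
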
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

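/* Note: gen_helper_carry computes the new carry from the original operands
   before rd is written; this matters when rd aliases ra, as the add would
   otherwise clobber an input of the carry calculation. */
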
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

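/* Subtraction is emitted as rd = b + ~a + c with the carry-in defaulting
   to 1, matching the MicroBlaze convention that the carry flag holds the
   complement of the borrow. */
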
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

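/* t_gen_muls above and t_gen_mulu below share one scheme: widen both
   32-bit operands to 64 bits, do a single 64-bit multiply, then split the
   product into a low word (d) and, via a 32-bit right shift, a high
   word (d2). */
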
/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}

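/* The shift amount is masked to 5 bits above so that the TCG shift ops,
   whose behaviour is undefined for counts >= 32, always see a valid
   count. */
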
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

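/* Demoting a direct jump to the indirect form materialises the branch
   state in env_btaken/env_btarget, so a load or store that faults inside
   a delay slot still finds a consistent branch state when the exception
   is taken. */
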
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

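/* Returning a TCGv pointer rather than a value lets the common cases
   (an operand of r0, or a zero immediate) hand back the source register
   itself and skip allocating a temporary; callers copy into their own
   temp before modifying the address. */
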
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

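/* For reversed byte accesses the low address bits are complemented
   (3 - (addr & 3)) so each big-endian byte lane maps onto its
   little-endian counterpart; for half-words a single XOR with 2 flips
   between the two 16-bit lanes of the word. */
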
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

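/* The swx sequence emulates a store-conditional: carry is preset to 1
   (failure), and only when both the reserved address and the remembered
   value still match is the store performed and carry cleared to signal
   success. */
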
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

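/* All three return sequences restore state the same way: MSR keeps shadow
   copies (UMS/VMS) one bit position above UM/VM, so shifting MSR right by
   one and masking with (MSR_VM | MSR_UM) moves the saved user/virtual-mode
   bits back into their active positions. */
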
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

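/* The DRT{I,B,E}_FLAG bits only record which return instruction was seen;
   the actual MSR restore (do_rti/do_rtb/do_rte) is deferred until the
   delay slot has executed, in the main translation loop below. */
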
static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

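/* Each decinfo entry carries a bits/mask pair: an opcode matches when its
   bits selected by mask equal the entry's bits. The terminating {0, 0}
   entry matches anything, so dec_null is only reached when no earlier
   pattern fired. */
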
static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();
    do {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && tcg_ctx.gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}