/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
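
/* A worked example of the field macro above: for the MicroBlaze major
   opcode in bits [26:31], EXTRACT_FIELD(ir, 26, 31) evaluates to
   ((ir >> 26) & 0x3f); for the 16-bit immediate in bits [0:15] it is
   simply (ir & 0xffff).  */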

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();

    /* Replicate bit 0 of v into every bit of t0, then keep only the
       two carry bits.  */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
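
/* For example, after an add that produced a carry-out, write_carry is
   called with a value whose bit 0 is 1, which sets both MSR[C] and its
   shadow copy MSR[CC]; a value with bit 0 clear clears them both.  */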

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
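
/* The imm-prefix mechanism above: a 32-bit constant is built from an
   "imm" prefix insn holding the upper 16 bits, followed by a type-b
   insn holding the lower 16 bits.  Roughly:

       imm   0x1234          ; env_imm = 0x12340000, IMM_FLAG set
       addik r5, r0, 0x5678  ; operand b = 0x12340000 | 0x5678

   Without the prefix, the 16-bit immediate is sign-extended instead.  */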

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
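
/* Example of the k/c variants handled above: "add r3, r4, r5" updates
   MSR[C] with the carry-out, "addk r3, r4, r5" leaves MSR[C] alone, and
   "addc r3, r4, r5" additionally adds the old MSR[C] into the sum,
   which is what makes multi-word additions possible.  */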

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
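
/* The identity used above: rsub computes b - a as b + ~a + 1, so with
   no carry-in the default cf of 1 yields exactly b - a, and the carry
   variant replaces the 1 with MSR[C] to chain multi-word subtractions.  */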

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable; OR in the masked copy, not v itself.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
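
/* Special-register numbering used above, e.g.: "mfs r5, rmsr" reads
   sr 1, "mts rslr, r5" writes the stack-protection low register at
   0x800, and sr 0x2000..0x200c map to the read-only PVR registers.  */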

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
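
/* Worked example of the widening multiplies above: with a = b =
   0xffffffff, the unsigned product is 0xfffffffe00000001 (d2 =
   0xfffffffe, d = 0x00000001), while the signed product is
   (-1) * (-1) = 1 (d2 = 0, d = 1).  */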

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    /* Persist the pending direct-jump state into the env globals.  */
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
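
/* Addressing modes resolved above, for example: "lw r3, r4, r5"
   computes EA = r4 + r5, while "lwi r3, r4, -4" computes EA = r4 +
   sext(-4).  When r1 is one of the address registers and stack
   protection is configured, the generated code also calls the
   stackprot helper on the effective address.  */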

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
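
/* A worked example of the reverse accesses handled above: a reversed
   halfword access flips address bit 1, so an access to address 2
   really touches address 0, and the MO_BSWAP folded into the memop
   swaps the data lanes on the way into or out of the core.  */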

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
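
/* The lwx/swx pair above implements a load-linked/store-conditional
   protocol: lwx records the address and loaded value in env_res_addr /
   env_res_val, and swx only performs the store (and clears MSR[C]) if
   both still match; otherwise MSR[C] stays set to signal failure.  */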

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
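
/* Example of the delay-slot bookkeeping above: for a "bneid r3, imm"
   the branch condition is evaluated here, delayed_branch is set to 2,
   and the insn in the delay slot still executes before the PC actually
   changes in the main translation loop.  */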

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
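
/* The shift-by-one in do_rti/do_rtb/do_rte restores the saved mode
   bits: MSR[UMS] and MSR[VMS] sit one bit above MSR[UM] and MSR[VM],
   so "msr >> 1" moves the saved copies into the active positions while
   the surrounding masks re-enable interrupts or exceptions.  */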

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero iff the FPUv2 is available.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
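
/* Example of the table-driven match above: each DEC_* entry provides
   {bits, mask}; an opcode matches the first entry whose masked bits
   are equal, and the catch-all {0, 0} entry routes everything else to
   dec_null.  */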

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif

    assert(!dc->abort_at_next_insn);
}
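
/* Translation-loop invariant: a TB ends at the first branch, MSR-state
   change, page boundary, or instruction budget limit, and npc then
   names the sequential successor used when chaining to the next TB.  */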

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}