/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "disas/disas.h"
#include "microblaze-decode.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1

#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
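
/*
 * Worked example (values illustrative only): with src = 0xdeadbeef,
 * EXTRACT_FIELD(src, 4, 7) shifts right by 4 and masks with
 * ((1 << 4) - 1) == 0xf, yielding 0xe.  decode() below relies on this
 * macro to slice opcode, rd, ra, rb and imm out of the 32-bit
 * instruction word.
 */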

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    CPUMBState *env;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
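
/*
 * How the replication trick above works (illustrative): shifting bit 0
 * of v up to bit 31 and arithmetic-shifting it back down by 31 yields
 * all-ones when the carry-in is 1 and all-zeroes when it is 0.  Masking
 * that with (MSR_C | MSR_CC) therefore sets or clears both carry bits
 * with a single and/or pair.
 */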

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
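
/*
 * Example (illustrative): "addi r3, r4, -42" with no preceding imm
 * prefix is a type_b insn with IMM_FLAG clear, so it qualifies; the
 * same addi in the shadow of an "imm" prefix does not, because its
 * 16-bit field is then only the low half of a 32-bit constant.
 */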

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
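
/*
 * The identity used above (illustrative): in two's complement,
 * b - a == b + ~a + 1, so rsub is generated as b plus the complement
 * of a with a carry-in that defaults to 1.  E.g. b = 7, a = 3:
 * ~3 == 0xfffffffc, and 7 + 0xfffffffc + 1 == 4 (mod 2^32).
 */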

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(dc->env,
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(dc->env, "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
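
/*
 * Illustrative numbers for the widening multiply above: with
 * a = 0x80000000 (INT32_MIN) and b = 2, the sign-extended 64-bit
 * product is 0xffffffff00000000, so d receives 0x00000000 and d2 the
 * sign-extended high part 0xffffffff.  t_gen_mulu below differs only
 * in zero- rather than sign-extending its operands.
 */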

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}
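
/*
 * Illustrative effect of the masking above: shift amounts are taken
 * modulo 32, so "bsll r3, r4, r5" with r5 == 33 shifts left by 1, as
 * would an immediate form whose field encodes 0x21.
 */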

static void dec_bit(DisasContext *dc)
{
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
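
/*
 * Example of the imm prefix mechanism (illustrative): the pair
 *     imm   0x1234
 *     addik r3, r0, 0x5678
 * makes dec_alu_op_b() see env_imm == 0x12340000 | 0x5678, i.e. the
 * full 32-bit constant 0x12345678, whereas a lone addik would have
 * sign-extended its 16-bit field instead.
 */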

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
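                /*
                 * Illustrative mapping of the three ops above: the low
                 * two address bits become (3 - low), so a byte access
                 * at offset 5 (low bits 01) is redirected to offset 6
                 * (low bits 10), matching the table in the comment.
                 */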
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop);

    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
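
        /*
         * Illustrative lwx/swx pairing (not from this file):
         *     lwx r5, r6, r0    ; load and set the reservation
         *     ...
         *     swx r5, r6, r0    ; stores only if the reservation holds
         * A mismatch on either brcond above skips the store with MSR[C]
         * still set, which the guest's retry loop tests for.
         */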
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(dc->env, "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(dc->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
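
/*
 * Illustrative bit layout for the shift above: MSR keeps saved copies
 * of UM and VM one bit above the live flags (UMS and VMS), so restoring
 * "live = saved" on return from interrupt is (MSR >> 1) & (MSR_VM | MSR_UM),
 * or'ed back in after the live bits are cleared.  do_rtb and do_rte
 * below do the same dance while also touching BIP resp. EE/EIP.
 */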

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

*dc
)
1437 unsigned int fpu_insn
;
1439 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1440 && (dc
->env
->pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1441 && !((dc
->env
->pvr
.regs
[2] & PVR2_USE_FPU_MASK
))) {
1442 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1443 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1447 fpu_insn
= (dc
->ir
>> 7) & 7;
1451 gen_helper_fadd(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1456 gen_helper_frsub(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1461 gen_helper_fmul(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1466 gen_helper_fdiv(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1471 switch ((dc
->ir
>> 4) & 7) {
1473 gen_helper_fcmp_un(cpu_R
[dc
->rd
], cpu_env
,
1474 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1477 gen_helper_fcmp_lt(cpu_R
[dc
->rd
], cpu_env
,
1478 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1481 gen_helper_fcmp_eq(cpu_R
[dc
->rd
], cpu_env
,
1482 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1485 gen_helper_fcmp_le(cpu_R
[dc
->rd
], cpu_env
,
1486 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1489 gen_helper_fcmp_gt(cpu_R
[dc
->rd
], cpu_env
,
1490 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1493 gen_helper_fcmp_ne(cpu_R
[dc
->rd
], cpu_env
,
1494 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1497 gen_helper_fcmp_ge(cpu_R
[dc
->rd
], cpu_env
,
1498 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1501 qemu_log_mask(LOG_UNIMP
,
1502 "unimplemented fcmp fpu_insn=%x pc=%x"
1504 fpu_insn
, dc
->pc
, dc
->opcode
);
1505 dc
->abort_at_next_insn
= 1;
1511 if (!dec_check_fpuv2(dc
)) {
1514 gen_helper_flt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1518 if (!dec_check_fpuv2(dc
)) {
1521 gen_helper_fint(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1525 if (!dec_check_fpuv2(dc
)) {
1528 gen_helper_fsqrt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1532 qemu_log_mask(LOG_UNIMP
, "unimplemented FPU insn fpu_insn=%x pc=%x"
1534 fpu_insn
, dc
->pc
, dc
->opcode
);
1535 dc
->abort_at_next_insn
= 1;
static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(dc->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);
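
    /*
     * Worked example (hypothetical encoding): ir = 0x00642800 decodes
     * to opcode = 0x00, rd = 3, ra = 4, rb = 5, i.e. "add r3, r4, r5";
     * bit 29 is clear, so type_b = 0 and rb rather than imm is used.
     */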

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->env = env;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && tcg_ctx.gen_opc_ptr < gen_opc_end
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}