/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#define SIM_COMPAT 0
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
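
/* Example: decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull the 6-bit
   major opcode out of bits 31..26 of the instruction word, and
   EXTRACT_FIELD(ir, 21, 25) for the 5-bit rd field.  */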
static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;
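
/* The globals above are TCG values bound to fields of CPUMBState in
   mb_tcg_init() at the bottom of this file, so writes to them are writes
   to the architectural state of the virtual CPU.  */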
#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
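
/* The shl/sar pair above replicates bit 0 of v across all 32 bits, so t0
   is either 0 or 0xffffffff; masking with MSR_C | MSR_CC then yields
   exactly the carry and carry-copy bits, which are merged into MSR.  */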
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG) {
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        } else {
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        }
        return &env_imm;
    }
    return &cpu_R[dc->rb];
}
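
/* Type A instructions take operand b from register rb; type B instructions
   carry a 16-bit immediate. With a preceding imm prefix (IMM_FLAG set),
   env_imm already holds the upper 16 bits and the OR above merges in the
   lower half; otherwise the immediate is sign-extended to 32 bits.  */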
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
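
/* The subtraction above relies on the two's-complement identity
   b - a == b + ~a + 1; with carry, rsub computes b + ~a + c, which is why
   cf defaults to 1 when the insn does not consume the incoming carry.  */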
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy, not v, so the PVR bit really is preserved.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
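
/* Both helpers widen to 64 bits, multiply once, and return the two 32-bit
   halves. dec_mul below picks the half it needs: the low word for mul/muli
   and the high word for mulh, mulhsu and mulhu.  */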
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
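
/* Note the operand order: the dividend comes from rb (or the immediate)
   and the divisor from ra, matching the MicroBlaze idiv/idivu definition
   rd = rb / ra.  */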
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
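
/* The imm prefix only lives for one instruction. The translation loop in
   gen_intermediate_code() sets dc->clear_imm before decoding each insn;
   dec_imm resets it so that IMM_FLAG survives into the following insn,
   where dec_alu_op_b() consumes the combined 32-bit immediate.  */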
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
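
/* Worked example of the address reversal above (dec_store below does the
   same): a reversed byte access to address A goes to
   (A & ~3) | (3 - (A & 3)), i.e. bytes 0 <-> 3 and 1 <-> 2 within the
   word; a reversed halfword access simply flips bit 1 (A ^ 2).  */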
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
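
/* All three return helpers above rebuild MSR the same way: shifting MSR
   right by one moves the saved UMS/VMS bits into the UM/VM positions, so
   the pre-exception user/VM mode is restored. They differ only in which
   of IE, BIP or EE/EIP they additionally touch.  */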
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
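
/* Each DEC_* macro from microblaze-decode.h expands to a {bits, mask} pair
   for the 6-bit major opcode; decode() below walks this table and calls
   the first entry whose mask matches, with the {0, 0} dec_null entry as
   the catch-all for unknown opcodes.  */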
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
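
/* MicroBlaze instruction layout as extracted above (bit 31 = MSB):
     opcode = ir[31:26], rd = ir[25:21], ra = ir[20:16],
     rb = ir[15:11] (type A), imm = ir[15:0] (type B).  */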
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}