/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
32 #include "trace-tcg.h"
#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;
#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
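
/*
 * Note on chaining: when use_goto_tb says the destination shares the
 * guest page with this TB, the goto_tb/exit_tb(dc->tb, n) pair lets
 * the generated code be patched to jump straight to the next TB.
 * Otherwise we exit with exit_tb(NULL, 0) and the new PC is looked up
 * in the TB hash table at run time.
 */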
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG) {
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        } else {
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        }
        return &env_imm;
    }
    return &cpu_R[dc->rb];
}
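
/*
 * Operand b is resolved in one place for all ALU insns: type A insns
 * use register rb, type B insns use an immediate.  Without a pending
 * "imm" prefix the 16-bit field is sign-extended; with IMM_FLAG set,
 * env_imm already holds the upper half-word, so e.g. "imm 0x1234"
 * followed by "addi r3, r0, 0x5678" yields the operand 0x12345678.
 */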
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
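
/*
 * In the non-k paths the carry-out is computed by gen_helper_carry
 * before rd is overwritten, since ra (or the immediate temp) may alias
 * rd.  Worked example: 0xffffffff + 0x00000001 with cf = 0 produces
 * rd = 0 with carry-out 1, which write_carry deposits into MSR_C/CC.
 */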
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u) {
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            } else {
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            }
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
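
/*
 * Subtraction uses the two's complement identity b - a = b + ~a + 1,
 * hence cf defaulting to 1.  E.g. ra = 3, rb = 5: the generated code
 * computes 5 + 0xfffffffc + 1 = 2 with carry-out 1 (MicroBlaze carry
 * means "no borrow" for subtracts).
 */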
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf.  */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 2:
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd) {
        return;
    }

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else {
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd) {
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd) {
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd) {
            msr_read(dc, cpu_R[dc->rd]);
        }

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else {
            tcg_gen_or_i32(t0, t0, t1);
        }
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case SR_EAR:
        case SR_ESR:
        case SR_FSR:
            tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
            break;
        case 0x800:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case SR_EAR:
            if (extended) {
                tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            }
            /* fall through */
        case SR_ESR:
        case SR_FSR:
        case SR_BTR:
        case SR_EDR:
            tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
            break;
        case 0x800:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000 ... 0x200c:
            rn = sr & 0xf;
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                          cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                           cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free_i32(tmp);
}
/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u) {
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    } else {
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    }
    if (!dc->rd) {
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
    }
}
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src.  */
        t0 = tcg_temp_new_i32();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
        tcg_gen_andi_i32(t0, t0, MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free_i32(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl.  */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            } else {
                tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc.  */
        LOG_DIS("wdc r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0x68:
        /* wic.  */
        LOG_DIS("wic r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0xe0:
        if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
            return;
        }
        if (dc->cpu->cfg.use_pcmp_instr) {
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /* swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
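
/*
 * sync_jmpstate is invoked before anything that can fault (loads and
 * stores below): if an exception hits a delay slot, the branch target
 * and taken flag must already be in env_btarget/env_btaken so the
 * branch can be resumed after the exception is handled.
 */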
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
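
/*
 * "imm" only latches the high half-word: IMM_FLAG makes the following
 * insn OR its own 16-bit field into env_imm (see dec_alu_op_b), and
 * clear_imm = 0 stops the translation loop from dropping the flag
 * before that consumer has executed.
 */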
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
}
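
/*
 * For extended (ea) accesses the 64-bit address is the concatenation
 * {ra:rb}, ra supplying the upper 32 bits, masked down when addr_size
 * is between 33 and 63 bits.  All other paths zero-extend a 32-bit
 * sum into the target-long temporary.
 */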
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "", ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 01
               11 -> 00 */
            TCGv low = tcg_temp_new();

            tcg_gen_andi_tl(low, addr, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(addr, addr, ~3);
            tcg_gen_or_tl(addr, addr, low);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00.  */
            tcg_gen_xori_tl(addr, addr, 2);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
            break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
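
/*
 * Reverse-access example: a 16-bit load with rev set flips address
 * bit 1 (halfword 0 <-> 2 within the word) and the MO_BSWAP in mop
 * swaps the two data bytes, matching what endian-reversed hardware
 * would put on the data lanes.
 */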
static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
            ex ? "x" : "", ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            /* 00 -> 11
               01 -> 10
               10 -> 01
               11 -> 00 */
            TCGv low = tcg_temp_new();

            tcg_gen_andi_tl(low, addr, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(addr, addr, ~3);
            tcg_gen_or_tl(addr, addr, low);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00.  */
            /* Force addr into the temp.  */
            tcg_gen_xori_tl(addr, addr, 2);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
            break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
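
/*
 * lwx/swx together form a load-linked/store-conditional pair: lwx
 * records the address and loaded value in env_res_addr/env_res_val,
 * and swx only performs the store if the address still matches and
 * memory still holds the recorded value, reporting success by
 * clearing the carry flag (C set means the store was skipped).
 */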
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd) {
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
    }

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18)) {
                t_gen_raise_exception(dc, EXCP_BREAK);
            }
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
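
/*
 * All three return paths restore the pre-exception mode the same way:
 * MSR keeps the saved copies UMS/VMS one bit position above UM/VM, so
 * shifting MSR right by one and masking with (MSR_VM | MSR_UM) moves
 * the saved bits back into the live positions before OR-ing them in.
 */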
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                   cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else {
        LOG_DIS("rts ir=%x\n", dc->ir);
    }

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Non-zero (FPU v2 present) lets the callers below emit the insn.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 1:
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;
    case 2:
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 3:
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 4:
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;
    case 5:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    case 6:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    case 7:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}
static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
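
/*
 * MicroBlaze insns are fixed 32-bit words: opcode in bits 31:26, rd in
 * 25:21, ra in 20:16, then either rb (15:11, type A) or a 16-bit
 * immediate (15:0, type B).  E.g. 0x20604321 decodes to opcode 0x08
 * (addi), rd=r3, ra=r0, imm=0x4321.
 */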
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
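
/*
 * The translation loop above ends a TB on the first of: a branch or
 * exception, a change to per-TB cpu state, a full TCG op buffer,
 * single-stepping, crossing a guest page boundary, or exhausting the
 * insn budget from tb->cflags.
 */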
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                "eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (bool)(env->sregs[SR_MSR] & MSR_EIP),
                (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}