/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
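/*
 * Both bit indices are inclusive; e.g. EXTRACT_FIELD(ir, 26, 31) in
 * decode() below yields the 6-bit major opcode, i.e. (ir >> 26) & 0x3f.
 */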
static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;
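/*
 * env_res_addr/env_res_val hold the lwx reservation: lwx records the
 * address and the value it loaded, and swx only lets the store through
 * while both still match (see dec_load()/dec_store() below).
 */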
#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
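/*
 * SR_PC is written before the helper call so that the exception entry
 * code sees the address of the faulting insn, and the TB is ended
 * (DISAS_UPDATE) since the helper does not return normally.
 */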
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
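/*
 * Direct TB chaining is only attempted when the destination lies on the
 * same guest page as this TB; otherwise SR_PC is set and we exit to the
 * main loop so the next-TB lookup happens there.
 */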
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
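/*
 * The shl/sar pair replicates bit 0 of v across the whole word, so the
 * mask applied afterwards is either (MSR_C | MSR_CC) or 0 and both carry
 * copies in MSR are set or cleared together.
 */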
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}
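/*
 * For type B insns the 16-bit immediate is normally sign-extended into
 * env_imm; if an imm prefix is pending (IMM_FLAG), dec_imm() already put
 * the upper 16 bits in env_imm and the low half is or'ed in instead.
 */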
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
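/*
 * The carry out (ncf) is computed by gen_helper_carry from the original
 * operands before the adds run, so a destination that aliases ra still
 * yields the right MSR carry.
 */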
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
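/*
 * Subtraction uses the usual two's complement identity b - a == b + ~a + 1:
 * rd = rb + ~ra + c, with the carry-in defaulting to 1 for plain rsub.
 */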
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    TCGLabel *l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable; or in the masked copy, not v itself.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env,
                              offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env,
                              offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env,
                              offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env,
                              offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
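/*
 * The imm prefix is only live for the insn that follows: the translation
 * loop sets dc->clear_imm before each insn, dec_imm() zeroes it, and the
 * loop drops IMM_FLAG again whenever clear_imm survives.
 */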
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11, 01 -> 10, 10 -> 01, 11 -> 00.  */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10, 10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment.  */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
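/*
 * Reversed accesses swap the data lanes via the MO_BSWAP memop and
 * additionally flip the sub-word address bits, matching the byte-reversed
 * view of sub-word data these insns are defined to provide.
 */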
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment.  */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11, 01 -> 10, 10 -> 01, 11 -> 00.  */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10, 10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
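/*
 * swx behaves as a store-conditional: carry is preset to 1 (failure) and
 * only cleared once the reservation address and the remembered value both
 * still match, immediately before the store is allowed through.
 */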
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
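/*
 * env_btaken is only known at runtime: SR_PC is first set to the
 * fallthrough target and then conditionally overwritten with pc_true.
 */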
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTI_FLAG;
}
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTB_FLAG;
}
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTE_FLAG;
}
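/*
 * do_rti/do_rtb/do_rte share one pattern: the saved UMS/VMS copies are
 * shifted down into UM/VM while the flag specific to the return flavour
 * (IE, BIP, or EE/EIP) is restored.
 */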
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    uint32_t ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
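/*
 * An insn matches entry i when (opcode & mask) == bits; the DEC_*
 * initializers come from microblaze-decode.h, and the final {{0, 0},
 * dec_null} entry matches anything left over.
 */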
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}
/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start(tb);
    do
    {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
void gen_intermediate_code (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
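/*
 * Every TCG global registered above aliases a CPUMBState field, so
 * register state is shared directly between generated code and the
 * C helpers.
 */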
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}