/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

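/* TCG globals: each of these is backed by a field of CPUMBState (wired up
   in mb_tcg_init() at the bottom of this file), so generated code can refer
   to guest registers and branch state directly.  */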
static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

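/* The carry is kept in two places in MSR: the architected MSR_C bit and a
   carry-copy (MSR_CC) in bit 31, which is why read_carry() below can fetch
   it with a single shift and write_carry() updates both bits together.  */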
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

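/* Returns a pointer to the TCGv holding ALU operand b: the register rb for
   type A insns, or env_imm for type B.  A preceding imm prefix leaves
   IMM_FLAG set with the upper 16 bits already in env_imm, so only the low
   half is OR'ed in; otherwise the 16-bit immediate is sign-extended.  */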
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

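/* Pattern-compare insns share primary opcodes with and/or/xor; they are
   distinguished by bit 10 of the immediate field, which is why
   dec_and/dec_or/dec_xor below route here first.  */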
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the PVR-masked copy, not v, so the guest cannot set MSR_PVR.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

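/* dec_msr handles both the msrset/msrclr forms (bit 15 of the immediate
   clear) and the mts/mfs special-register moves, including the MMU
   registers on system-mode builds.  */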
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

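/* Flush a pending direct branch into the runtime btaken/btarget state so
   that a fault taken inside a delay slot observes a consistent picture.  */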
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

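/* dc->clear_imm is zeroed above so the translation loop keeps IMM_FLAG
   alive across exactly one following insn, the consumer of the prefix.  */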
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

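/* Stores mirror the load path.  The exclusive swx variant additionally
   compares the reservation made by lwx (env_res_addr/env_res_val) and skips
   the store, leaving carry set, when the reservation is gone.  */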
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

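/* Unconditional branches.  A branch with a delay slot sets
   dc->delayed_branch = 2; the translation loop decrements it per insn and
   resolves the branch once it hits zero, i.e. after the delay-slot insn
   has been translated.  */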
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

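/* rtid/rtbd/rted return handlers: the saved user/VM mode bits (MSR_UMS and
   MSR_VMS) sit one position above MSR_UM/MSR_VM, so they are shifted down
   into place while the interrupt/break/exception-in-progress state is
   updated.  */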
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

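/* decode() walks this table and calls the first entry whose mask/bits match
   the primary opcode; the {0, 0} sentinel makes dec_null catch everything
   that no other entry claimed.  */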
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

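/* Translation loop: insns are translated until the TB must end (branch
   resolved, cpu state changed, page boundary, op buffer full, or the icount
   limit is reached), then the epilogue appropriate to how we stopped is
   emitted.  */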
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}