/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
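/* e.g. EXTRACT_FIELD(insn, 4, 7) extracts bits [7:4] of insn. */
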
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

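/*
 * TCG globals for the architectural state; they are bound to fields
 * of CPUMBState in mb_tcg_init() at the end of this file.
 */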
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

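/*
 * Combine a 16-bit Type B immediate with a preceding "imm" prefix:
 * when IMM_FLAG is set, ext_imm already holds the high 16 bits (see
 * trans_imm); otherwise the decoder's value is returned unchanged.
 */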
static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the TB-dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

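/*
 * r0 is architecturally always zero.  cpu_R[0] is deliberately left
 * NULL (see mb_tcg_init); reads of r0 come from a zeroed temp, and
 * writes to r0 go to a scratch temp that is discarded after the insn.
 */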
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

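/*
 * SE is the side_effects flag: when false, an insn whose only effect
 * would be writing r0 can be skipped entirely.  The _CFG variants
 * return false when the configuration flag is absent, which makes
 * the insn decode as illegal.
 */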
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

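/*
 * For the carry-in case the sum ina + inb + MSR[C] is built from two
 * add2 steps; each step's carry-out accumulates in cpu_msr_c, and
 * since the carry value is only ever 0 or 1, at most one of the two
 * steps can actually carry.
 */
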
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

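/*
 * cmp/cmpu compute inb - ina and then overwrite the sign bit of the
 * result with the true signed/unsigned "inb < ina" relation, so a
 * conditional branch can test the MSB of rd directly.
 */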
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

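/*
 * The "imm" prefix supplies the high 16 bits of a 32-bit immediate
 * for the following insn.  Using tb_flags_to_set keeps IMM_FLAG live
 * for exactly one subsequent instruction.
 */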
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

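/*
 * rsub computes inb - ina (operands reversed).  The carry out is that
 * of inb + ~ina + 1, which is set exactly when inb >= ina unsigned;
 * hence the GEU setcond in gen_rsub below.
 */
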
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either register is r0, use the value of the other register. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If the base register is r0, use the immediate alone. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

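/*
 * The *ea insns form an extended address from the ra:rb register pair
 * when the configured address size exceeds 32 bits, masking off any
 * bits beyond addr_size; with a 32-bit configuration (or ra == r0)
 * only rb is used.
 */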
#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

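/*
 * Details of a potentially unaligned access (destination register,
 * access size, store flag) are folded into the iflags argument of the
 * insn_start op, so they can be recovered if the access faults.
 */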
#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}

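/*
 * Example: a reversed halfword load (lhur) XORs the address with
 * 3 - MO_16 == 2 and byteswaps the data, yielding an
 * opposite-endianness access within the aligned 32-bit word.
 */
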
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

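/*
 * lwx/swx implement load-reserved / store-conditional.  lwx records
 * the address and loaded value in cpu_res_addr/cpu_res_val; trans_swx
 * below approximates the reservation with an atomic compare-and-swap
 * against that remembered value.
 */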
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C. */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with the current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}

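/*
 * Delay slots are tracked with D_FLAG in tb_flags.  BIMM_FLAG
 * additionally records that the branch insn itself was prefixed by
 * imm while its delay slot is pending.
 */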
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                                \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)           \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }   \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)          \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

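/*
 * Conditional branches test ra against zero.  The value is cached in
 * cpu_bvalue so that it survives a delay slot, and btarget is resolved
 * with a movcond to either the taken destination or the fall-through pc.
 */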
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }

    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

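/*
 * MSR[C] lives in its own global (cpu_msr_c) so carry updates are
 * cheap; cpu_msr itself never holds C or CC.  Reads reconstruct the
 * architectural MSR by replicating the 0/1 carry into both bits.
 */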
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}

static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC are handled above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}

static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

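/*
 * The rtid/rtbd/rted return paths below restore the saved mode bits:
 * UMS/VMS sit one bit above UM/VM, so shifting MSR right by one and
 * masking moves the saved copies back into place, while IE, BIP or
 * EE/EIP are adjusted per instruction.
 */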
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

/* Insns connected to FSL or AXI stream attached devices. */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Bound the number of insns to those remaining on this guest page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled). */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}