Jobs based on custom runners: add job definitions for QEMU's machines
[qemu.git] / target / microblaze / translate.c
blobc68a84a219e5b5ff51db1d68f1f7e3f3bd5b732b
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "qemu/qemu-print.h"
32 #include "exec/log.h"
/*
 * Extract the bit field [start, end] (inclusive, little-endian bit
 * numbering) from src.  All macro arguments are parenthesized so that
 * compound expressions (e.g. "base + 2") expand correctly; the original
 * form evaluated "end - start" with unparenthesized operands, which
 * mis-computes the field width for such arguments.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
37 /* is_jmp field values */
38 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
39 #define DISAS_EXIT DISAS_TARGET_1 /* all cpu state modified dynamically */
41 /* cpu state besides pc was modified dynamically; update pc to next */
42 #define DISAS_EXIT_NEXT DISAS_TARGET_2
43 /* cpu state besides pc was modified dynamically; update pc to btarget */
44 #define DISAS_EXIT_JUMP DISAS_TARGET_3
46 static TCGv_i32 cpu_R[32];
47 static TCGv_i32 cpu_pc;
48 static TCGv_i32 cpu_msr;
49 static TCGv_i32 cpu_msr_c;
50 static TCGv_i32 cpu_imm;
51 static TCGv_i32 cpu_bvalue;
52 static TCGv_i32 cpu_btarget;
53 static TCGv_i32 cpu_iflags;
54 static TCGv cpu_res_addr;
55 static TCGv_i32 cpu_res_val;
57 #include "exec/gen-icount.h"
/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    /* Lazily allocated scratch temp that stands in for r0 reads/writes. */
    TCGv_i32 r0;
    bool r0_set;        /* true once r0 has been zeroed for reading */

    /* Decoder. */
    uint32_t ext_imm;             /* high half installed by a prior IMM insn */
    unsigned int tb_flags;        /* iflags in effect for the current insn */
    unsigned int tb_flags_to_set; /* iflags to apply to the following insn */
    int mem_index;                /* MMU index for data accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
83 static int typeb_imm(DisasContext *dc, int x)
85 if (dc->tb_flags & IMM_FLAG) {
86 return deposit32(dc->ext_imm, 0, 16, x);
88 return x;
91 /* Include the auto-generated decoder. */
92 #include "decode-insns.c.inc"
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

/* Raise exception 'index' via helper; terminates the TB (NORETURN). */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}

/* As gen_raise_exception, but first sync iflags and pc into env. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

/* Raise a hardware exception, recording the error code in env->esr. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

/* Emit an exit to 'dest': debug exception when singlestepping, direct
 * TB chaining when allowed, otherwise a runtime TB lookup. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (translator_use_goto_tb(&dc->base, dest)) {
        /* NB: goto_tb must be emitted before the pc update. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

/* Source operand for register 'reg'; r0 reads as a constant zero. */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    /* Lazily allocate and zero a scratch temp standing in for r0. */
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

/* Destination operand for register 'reg'; writes to r0 land in a
 * discarded scratch temp rather than a real register. */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
/* Emit a type-A (register/register/register) op via FN.
 * A write to r0 with no side effects is a nop and is skipped. */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

/* Emit a two-operand (rd, ra) op via FN; same r0 short-circuit. */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

/* Emit a type-B op whose FN takes the immediate as a host constant. */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

/* Emit a type-B op whose FN needs the immediate in a TCG value. */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}

/* Boilerplate generators for trans_* callbacks; the _CFG variants
 * additionally gate the insn on a CPU configuration flag. */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap helpers that take cpu_env as their first argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* The high half of the double-word add is the carry out. */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* tmp = ina + carry-in (carry into msr_c), then out = tmp + inb,
     * accumulating the second carry on top of the first. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

/* and-not with immediate: and ina with the complement of imm. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
/* Barrel shifters: the shift amount is inb masked to 5 bits. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Bit-slice extract: out = ina[imm_s +: imm_w]. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-slice insert: out[imm_s .. imm_w] = ina; out otherwise kept. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
/* Count leading zeros; a zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

/* Signed compare: out = inb - ina, with bit 31 forced to (inb < ina). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* Unsigned compare: as gen_cmp but with an unsigned less-than. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

/* FPU ops go through helpers, which need cpu_env. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversion/sqrt require the extended (level 2) FPU. */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

/* IMM: stash the upper 16 immediate bits for the following type-B insn. */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

/* High half of the signed 32x32->64 product. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of the unsigned product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of the signed-by-unsigned product. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High-part multiplies require the full (level 2) multiplier. */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)
/* Pattern compare equal: out = (ina == inb). */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

/* Pattern compare not-equal: out = (ina != inb). */
static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    /* Carry out of inb - ina is set when no borrow occurs (inb >= ina). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* inb - ina - !carry == inb + ~ina + carry, using add2 chains. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Shift right arithmetic by one; bit 0 goes to carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right with carry: old carry shifts in at bit 31. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save carry-in before clobbering it with the new carry-out. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

/* Shift right logical by one; bit 0 goes to carry. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap halfwords: a rotate by 16 exchanges the two 16-bit lanes. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
/* Effective address for a type-A load/store: ra + rb (r0 reads as 0). */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* r1 is the stack pointer; optionally validate against the guard. */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

/* Effective address for a type-B load/store: ra + sign-extended imm. */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Extended address for *ea insns: ra holds the high 32 bits, rb the low. */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        /* High half is zero; the address is just rb (or 0). */
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
/* Record unaligned-access details (reg, size, direction) into the
 * iflags slot of the current insn_start, for the exception path. */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}

/* Emit a load of 'mop' at 'addr' into rd; consumes (frees) addr. */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Only enable alignment checks when they can actually trap. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
/* Load translators: byte/half/word, plain/reversed/extended/immediate. */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    /* Extended-address loads are privileged and bypass the MMU. */
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* Load-exclusive: record the reservation address and loaded value. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
/* Emit a store of 'mop' from rd to 'addr'; consumes (frees) addr. */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Only enable alignment checks when they can actually trap. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}

/* Store translators: byte/half/word, plain/reversed/extended/immediate. */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    /* Extended-address stores are privileged and bypass the MMU. */
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
/* Store-exclusive: succeeds (C=0) only if the reservation address
 * still matches and the reserved location is unchanged. */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1065 static void setup_dslot(DisasContext *dc, bool type_b)
1067 dc->tb_flags_to_set |= D_FLAG;
1068 if (type_b && (dc->tb_flags & IMM_FLAG)) {
1069 dc->tb_flags_to_set |= BIMM_FLAG;
/* Unconditional branch: dest_rb >= 0 selects a register destination,
 * otherwise dest_imm is used; abs vs pc-relative; optional link reg. */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

/* Generate the register (NAME) and immediate (NAMEI) trans_* pair. */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

/* Conditional branch: compares reg ra against zero with 'cond'. */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}

/* Generate all four variants: register/immediate x plain/delay-slot. */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
/*
 * Break (register form): jump to rb, optionally link pc into rd,
 * set MSR[BIP], and invalidate any lwx/swx reservation.
 * Privileged; invalid inside a delay slot.
 */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Invalidate the load-locked/store-conditional reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    /* MSR changed: leave the TB and return to the main loop. */
    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
/*
 * Break immediate: jump to the absolute vector IMM, optionally linking
 * pc into rd.  Userspace may only use the syscall (0x8) and debug (0x18)
 * vectors; anything else traps as privileged.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Invalidate the load-locked/store-conditional reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    /* The debug vector does not set BIP. */
    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* Save current mode into the MSR_UMS/MSR_VMS bits (one bit up). */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
/*
 * Memory barrier / sleep.  Bit 1 clear requests a data barrier,
 * bit 4 requests sleep (privileged: halt the cpu until woken).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted; cpu_env points at env inside MicroBlazeCPU. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume at the insn after the mbar when woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
/*
 * Return-from-subroutine family.  to_set carries the DRT[IBE]_FLAG for
 * the rtid/rtbd/rted variants (0 for plain rtsd); the flag is processed
 * after the delay slot completes.  All forms are register+imm indirect
 * jumps with a mandatory delay slot.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    /* The interrupt/break/exception returns are privileged. */
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;  /* target is not known at translation time */
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
/* Expand the rts variants; IFLAG selects the matching return type flag. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1315 static bool trans_zero(DisasContext *dc, arg_zero *arg)
1317 /* If opcode_0_illegal, trap. */
1318 if (dc->cfg->opcode_0_illegal) {
1319 trap_illegal(dc, true);
1320 return true;
1323 * Otherwise, this is "add r0, r0, r0".
1324 * Continue to trans_add so that MSR[C] gets cleared.
1326 return false;
1329 static void msr_read(DisasContext *dc, TCGv_i32 d)
1331 TCGv_i32 t;
1333 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
1334 t = tcg_temp_new_i32();
1335 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
1336 tcg_gen_or_i32(d, cpu_msr, t);
1337 tcg_temp_free_i32(t);
/*
 * Common code for msrclr/msrset: optionally read the old MSR into rd,
 * then clear (set=false) or set (set=true) the bits in arg->imm.
 * Userspace may only touch MSR_C; anything else traps as privileged.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* MSR changed: end the TB and return to the main loop. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
/* msrclr: clear the MSR bits named by the immediate. */
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

/* msrset: set the MSR bits named by the immediate. */
static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
/*
 * mts: move register ra to a special register.  Privileged.
 * arg->e selects the extended (MMU) register space; the only
 * valid extended destination is TLBLO (0x1003).
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* Unreachable: trap_userspace(dc, true) always traps here. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are handled by a helper. */
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* System state changed: end the TB and return to the main loop. */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
/*
 * mfs: move a special register into rd.  arg->e selects the extended
 * register space (high half of EAR, high PVR words, MMU TLBLO).
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended read of EAR returns its high 32 bits. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended read of EAR returns its low 32 bits. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            /* MMU registers are handled by a helper. */
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the cfg, outside of CPUMBState. */
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1542 static void do_rti(DisasContext *dc)
1544 TCGv_i32 tmp = tcg_temp_new_i32();
1546 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1547 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
1548 tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
1549 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
1550 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1552 tcg_temp_free_i32(tmp);
1555 static void do_rtb(DisasContext *dc)
1557 TCGv_i32 tmp = tcg_temp_new_i32();
1559 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1560 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
1561 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
1562 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1564 tcg_temp_free_i32(tmp);
1567 static void do_rte(DisasContext *dc)
1569 TCGv_i32 tmp = tcg_temp_new_i32();
1571 tcg_gen_shri_i32(tmp, cpu_msr, 1);
1572 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
1573 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
1574 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
1575 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
1577 tcg_temp_free_i32(tmp);
/* Insns connected to FSL or AXI stream attached devices. */

/*
 * get/getd: read from stream channel t_id (immediate or rb & 0xf)
 * into rd, with ctrl encoding the insn's mode bits.  Privileged.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        /* Dynamic form: channel id comes from rb, limited to 0-15. */
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
/* get: stream read with immediate channel id. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

/* getd: stream read with channel id taken from rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
/*
 * put/putd: write ra to stream channel t_id (immediate or rb & 0xf),
 * with ctrl encoding the insn's mode bits.  Privileged.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        /* Dynamic form: channel id comes from rb, limited to 0-15. */
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
/* put: stream write with immediate channel id. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

/* putd: stream write with channel id taken from rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
/* TranslatorOps hook: reset per-TB translation state from the new TB. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* The extended-immediate prefix value is carried in cs_base. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* Entering mid-branch (delay slot at TB start) forces a taken jump. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Bound the number of insns to the remainder of the page (4 bytes each). */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
/* TranslatorOps hook: nothing to do at TB start for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
/*
 * TranslatorOps hook: record pc and the non-MSR iflags for this insn,
 * and remember the op so that the insn_start arguments can be patched
 * later (e.g. by t_sync_flags).
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
/* TranslatorOps hook: raise EXCP_DEBUG when a breakpoint is hit. */
static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}
/*
 * TranslatorOps hook: translate one insn, then perform the bookkeeping
 * for extended immediates, delay slots, and branch completion.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Release the temp standing in for r0, if the insn created one. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-insn flags expire now; this insn's new flags take effect. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch is pending and its delay slot (if any) has completed. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
/*
 * TranslatorOps hook: emit the code that ends the TB, according to
 * how translation stopped (is_jmp).
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated by the insn. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ singlestep) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
/* TranslatorOps hook: log the guest disassembly of the TB. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .breakpoint_check   = mb_tr_breakpoint_check,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
/* Entry point from the common code: translate one TB for MicroBlaze. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
/* Dump the architectural CPU state (for "info registers" and logging). */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the per-insn translation flags symbolically. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
/* Create the TCG globals backing the CPUMBState fields, once at startup. */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence the untyped allocator. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1960 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1961 target_ulong *data)
1963 env->pc = data[0];
1964 env->iflags = data[1];