/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
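/* For example, EXTRACT_FIELD(0x12345678, 4, 11) == 0x67: bits [11:4] of src. */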
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"
/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start. */
    TCGOp *insn_start;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
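/*
 * typeb_imm has the (DisasContext *, int) shape of a decodetree !function
 * field hook; it is the generated decoder included below that applies it
 * while extracting type-B immediates, so the trans_* functions can use
 * arg->imm directly with any pending IMM prefix already merged in.
 */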
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
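/*
 * Direct chaining via goto_tb is only valid when translator_use_goto_tb
 * allows it (in particular, when dest lies on the same guest page as this
 * TB); otherwise we fall back to a dynamic lookup through the main loop.
 */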
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}
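/*
 * r0 is architecturally hard-wired to zero: reads come from a scratch temp
 * zeroed at most once per insn (tracked by r0_set), and writes land in the
 * same temp, which mb_tr_translate_insn simply frees afterwards.
 */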
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
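/*
 * As an illustration, DO_TYPEA(add, true, gen_add) expands to:
 *
 *   static bool trans_add(DisasContext *dc, arg_typea *a)
 *   { return do_typea(dc, a, true, gen_add); }
 *
 * The _CFG variants additionally gate the insn on a MicroBlazeCPUConfig
 * feature flag; when the feature is absent they return false, which the
 * decoder reports as an unrecognized (illegal) opcode.
 */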
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
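/*
 * The two add2 steps above build the full 33-bit sum: the first adds the
 * carry-in to ina, the second adds inb while accumulating both carry-outs
 * (at most one of which can be set).  E.g. ina = 0xffffffff, carry-in = 1,
 * inb = 0 gives tmp = 0 with cpu_msr_c = 1 after step one, and out = 0
 * with cpu_msr_c still 1 after step two.
 */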
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
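/*
 * cmp/cmpu compute rd = rb - ra, then overwrite bit 31 with the signed or
 * unsigned "rb < ra" result, so a following branch on rd's sign observes
 * the comparison even when the subtraction wraps around.
 */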
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
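/*
 * imm only stages state for the following insn: cpu_imm/ext_imm hold the
 * upper half-word, and IMM_FLAG is routed through tb_flags_to_set so that
 * it takes effect on the next insn, where typeb_imm merges the two halves.
 */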
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)
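/*
 * Subtraction is modeled as rb + ~ra + 1, with MSR[C] being the carry of
 * that addition rather than a borrow: C = 1 exactly when inb >= ina
 * unsigned, hence TCG_COND_GEU in gen_rsub, and the not/add2 chain in
 * gen_rsubc that feeds the previous carry back in.
 */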
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif
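/*
 * Instead of emitting extra runtime stores, the details of a potentially
 * faulting unaligned access are patched into the iflags word already
 * recorded by this insn's insn_start; on a fault, restore_state_to_opc
 * copies that word back into env->iflags for the exception path to use.
 */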
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
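/*
 * Worked example of the reversal above: a halfword access (size == MO_16)
 * flips address bit 1 (3 - size == 2) and toggles MO_BSWAP, selecting the
 * other halfword of the 32-bit word with its bytes swapped; a byte access
 * flips the low two address bits and needs no lane swap.
 */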
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
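/*
 * This is the usual cmpxchg approximation of load-locked/store-conditional:
 * the reservation is the (cpu_res_addr, cpu_res_val) pair and the store
 * succeeds only if memory still holds the value seen by lwx.  A write that
 * restores the original value in between (ABA) cannot be detected, an
 * accepted weakening relative to real hardware.
 */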
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
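/*
 * The taken/not-taken decision is folded into btarget up front: the movcond
 * selects between the branch destination and the fall-through address, and
 * rA is cached in cpu_bvalue so the comparison survives the delay slot.
 * mb_tr_tb_stop later turns this into a conditional goto_tb when possible.
 */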
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
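/*
 * Vectors 0x8 and 0x18 are the user-accessible entry points (syscall and
 * debug trap), which is why the trap_userspace check above exempts them;
 * in system mode entry also saves UM/VM into UMS/VMS (the << 1 above),
 * mirroring hardware exception entry.
 */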
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
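/*
 * The sleep path above stores 1 directly into CPUState.halted: cpu_env
 * points at the CPUMBState embedded in MicroBlazeCPU, so subtracting
 * offsetof(MicroBlazeCPU, env) rebases to the containing CPU object,
 * whose CPUState is its first member; EXCP_HLT then returns control to
 * the main loop with the vCPU halted.
 */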
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }

    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
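/*
 * The saved copies UMS/VMS sit one bit to the left of UM/VM, so the
 * "shri 1" aligns them for restoration.  Beyond that, rti re-enables IE,
 * rtb clears BIP, and rte re-enables EE while clearing EIP, matching the
 * three hardware return paths.
 */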
/* Insns connected to FSL or AXI stream attached devices. */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
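/*
 * Note the flag pipeline above: IMM_FLAG, BIMM_FLAG and D_FLAG are cleared
 * for the insn just translated and re-seeded from tb_flags_to_set, so a
 * flag staged by imm or by a delayed branch governs the *next* insn; a
 * branch is only completed once its delay slot has been translated.
 */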
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
static void mb_tr_disas_log(const DisasContextBase *dcb,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
    target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
}
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}
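/*
 * data[0] and data[1] are the two values recorded by tcg_gen_insn_start in
 * mb_tr_insn_start (pc_next and the masked tb_flags, possibly updated by
 * record_unaligned_ess); this is the unwind path used after a fault.
 */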
[1];