/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    target_ulong misa;
    uint32_t opcode;
    uint32_t mstatus_fs;
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
} DisasContext;

/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};
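
/*
 * Reference (added; from the RISC-V base ISA, not the original comment):
 * funct3 encodes the access width and signedness -- 0 byte, 1 half,
 * 2 word, with 4/5 their zero-extending LBU/LHU forms, and on RV64
 * also 3 double and 6 LWU. Slot 7 stays -1 and is rejected as an
 * illegal instruction by gen_load/gen_store below.
 */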

#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
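
/*
 * Illustrative expansion (added commentary): on TARGET_RISCV64,
 * CASE_OP_32_64(OPC_RISC_ADD) becomes "case OPC_RISC_ADD: case
 * OPC_RISC_ADDW:", so a single switch arm covers an operation and its
 * 32-bit W-suffixed variant; on RV32 it degenerates to the plain case
 * label.
 */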

static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa & ext;
}

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write
 * to $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
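
/*
 * Derivation (added commentary, not original text): if arg1 has unsigned
 * value a_u and signed value a_s, then a_s = a_u - 2^XLEN * (arg1 < 0),
 * so the signed-by-unsigned high word is
 *     hi(a_s * b) = hi(a_u * b) - (arg1 < 0 ? b : 0).
 * tcg_gen_sari_tl above produces an all-ones mask exactly when arg1 is
 * negative, so "mask & arg2" is precisely the correction term that is
 * subtracted from the unsigned high word.
 */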

static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
                      uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
    }
}
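
/*
 * Note (added commentary): "min" doubles as the sign-bit mask --
 * INT32_MIN is bit 31 for single precision, INT64_MIN bit 63 for
 * double -- which is why FNEG is an XOR with min, FABS an AND with
 * ~min, and the deposit width is 31 or 63 bits accordingly.
 */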

static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* first, trick to get it to act like working on 32 bits (get rid of
           upper 32, sign extend to fill space) */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        /* Handle by altering args to tcg_gen_div to produce req'd results:
         * For overflow: want source1 in source1 and 1 in source2
         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                           resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
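
    /*
     * Worked corner cases (added; per the RISC-V M extension): x / 0
     * must return -1 (all ones) and the overflow case INT_MIN / -1
     * must return INT_MIN. Forcing the divisor to 1 and, for a zero
     * divisor, the dividend to -1 makes the plain tcg_gen_div_tl above
     * produce exactly those results without trapping the host.
     */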
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                           resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                           resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
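
    /*
     * Worked corner cases (added; per the RISC-V M extension): REM by
     * zero returns the dividend unchanged, and INT_MIN % -1 returns 0.
     * With the divisor forced to 1 in both special cases, the rem above
     * yields 0 for the overflow case, and the final movcond restores
     * the original dividend for division by zero.
     */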
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        if (!has_ext(ctx, RVM)) {
            goto do_illegal;
        }
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign extend for W instructions, whose opcodes
                        differ from the base ones only in bit 3 */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}

static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLIW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SLLI:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* differentiate on IMM */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions. */
            opc &= ~0x8;
        }
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}
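
/*
 * Encoding note (added commentary): for SRLI/SRAI the shift amount and
 * the shift type share the I-type immediate -- bit 10 (0x400, i.e.
 * instruction bit 30) selects arithmetic vs. logical -- so gen_arith_imm
 * masks it off before range-checking the shift amount. The C.SRAI
 * decoder further below reuses this by OR-ing 0x400 into the immediate.
 */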

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jalr(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        /* per the spec, clear bit 0 of the computed target */
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!has_ext(ctx, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
        }
        tcg_gen_lookup_and_goto_ptr();

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

static void gen_branch(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
        target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);

    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
    gen_set_label(l); /* branch taken */
    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}

#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB. */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_temp_free(tmp);
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
}
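
/*
 * NaN-boxing example (added commentary): after FLW of 1.0f
 * (0x3f800000), the 64-bit register holds 0xffffffff3f800000. Viewed
 * at double precision that pattern is a quiet NaN, which is exactly
 * how the spec requires a narrower value to appear at wider width.
 */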

static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

static void gen_atomic(DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    bool aq, rl;

    /* Extract the size of the atomic operation. */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val. */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path. */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure. However, we still need to
           provide the memory barrier implied by AQ/RL. */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path. */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMIN:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAX:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMINU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAXU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}
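
/*
 * Design note (added commentary): SC is modeled as a cmpxchg against
 * the value observed by the matching LR rather than as a true
 * reservation, so an intervening write that restores the old value
 * (ABA) can still let the SC succeed; if load_res no longer matches
 * the rs1 address, the store fails immediately with rd = 1.
 */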

static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    mark_fs_dirty(ctx);
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    mark_fs_dirty(ctx);
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    mark_fs_dirty(ctx);
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    mark_fs_dirty(ctx);
}

static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;
    bool fp_output = true;

    if (ctx->mstatus_fs == 0) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1: /* FCLASS */
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FMV_S_X:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;
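
    /*
     * Note (added commentary): for the sign-injection, min/max, compare
     * and move/class cases handled above and below, the rm field is not
     * a rounding mode but a sub-opcode selector; only the arithmetic and
     * convert cases call gen_set_rm().
     */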
    case OPC_RISC_FADD_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        switch (rm) {
        case 0x0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        switch (rs2) {
        case 0x1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        switch (rs2) {
        case 0x0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        fp_output = false;
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        switch (rm) {
#if defined(TARGET_RISCV64)
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
#endif
        case 1: /* FCLASS */
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        fp_output = false;
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_D_X:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        return;
    }

    if (fp_output) {
        mark_fs_dirty(ctx);
    }
}

static void gen_system(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */

#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        if (ctx->priv_ver == PRIV_VERSION_1_10_0) {
            /* sfence.vma */
            /* TODO: handle ASID specific fences */
            gen_helper_tlb_flush(cpu_env);
            return;
        } else {
            gen_exception_illegal(ctx);
        }
    }
#endif

    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (has_ext(ctx, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(NULL, 0); /* no chaining */
                ctx->base.is_jmp = DISAS_NORETURN;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            if (ctx->priv_ver <= PRIV_VERSION_1_09_1) {
                gen_helper_tlb_flush(cpu_env);
            } else {
                gen_exception_illegal(ctx);
            }
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
        tcg_gen_movi_tl(imm_rs1, rs1);
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            return;
        }
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
        tcg_gen_exit_tb(NULL, 0); /* no chaining */
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}
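
/*
 * Reference (added commentary): in the SYSTEM major opcode the csr
 * argument above is insn[31:20]. When funct3 is 0 (the "ECALL" group)
 * that field acts as a sub-opcode -- 0x000 ECALL, 0x001 EBREAK,
 * 0x102 SRET, 0x302 MRET, 0x105 WFI -- and its upper seven bits are
 * funct7, which is how ((csr >> 5) == 9) recognizes SFENCE.VMA
 * (funct7 == 0b0001001).
 */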

static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1')*/
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C1(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1]*/
        gen_jal(ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}

static void decode_RV32_64C2(DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0*/
                gen_jalr(ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak*/
                gen_system(ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0*/
                    gen_jalr(ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2)*/
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2)*/
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(Rv64/128) -> sd rs2, offset[8:3](x2)*/
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C(DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(ctx);
        break;
    case 2:
        decode_RV32_64C2(ctx);
        break;
    }
}

static void decode_RV32_64G(DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->base.pc_next);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        if (!has_ext(ctx, RVA)) {
            goto do_illegal;
        }
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            tcg_gen_exit_tb(NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        break;
    case OPC_RISC_SYSTEM:
        gen_system(ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void decode_opc(DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            decode_RV32_64C(ctx);
        }
    } else {
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_RV32_64G(ctx);
    }
}
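
/*
 * Encoding note (added commentary): every full 32-bit RISC-V
 * instruction has insn[1:0] == 0b11; any other value in the two low
 * bits marks a 16-bit compressed encoding, which is the entire test
 * used above to route between the C and G decoders.
 */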

static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
}

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing. */
    ctx->base.pc_next += 4;
    return true;
}

static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;

    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
}

void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}