2 * RISC-V emulation for qemu: main translation routines.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
/* LR/SC reservation state, shared with the atomic helpers. */
static TCGv load_res;
static TCGv load_val;
40 #include "exec/gen-icount.h"
43 * If an operation is being performed on less than TARGET_LONG_BITS,
44 * it may require the inputs to be sign- or zero-extended; which will
45 * depend on the exact operation being performed.
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    target_ulong misa;          /* copy of env->misa for this TB */
    uint32_t opcode;
    uint32_t mstatus_fs;        /* known FS state, see mark_fs_dirty() */
    uint32_t mem_idx;           /* MMU index for loads/stores in this TB */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status. Or -1 for
       no previous fp instruction. Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value. */
    int frm;
    bool w;                     /* current insn is a word (32-bit) op */
    bool virt_enabled;          /* V=1, i.e. executing in a guest */
    bool ext_ifencei;
    bool hlsx;
    /* vector extension */
    bool vill;
    uint8_t lmul;
    uint8_t sew;
    uint16_t vlen;
    uint16_t mlen;
    bool vl_eq_vlmax;
    CPUState *cs;
    TCGv zero;                  /* constant-zero source for reads of x0 */
    /* Space for 3 operands plus 1 extra for address computation. */
    TCGv temp[4];
    uint8_t ntemp;              /* number of temp[] slots currently live */
} DisasContext;
86 static inline bool has_ext(DisasContext
*ctx
, uint32_t ext
)
88 return ctx
->misa
& ext
;
/*
 * Whether we are currently emulating RV32.  This is a compile-time
 * constant for the riscv32 target and for user-mode emulation;
 * otherwise it is read from the MISA copy in the context.
 */
#ifdef TARGET_RISCV32
# define is_32bit(ctx)  true
#elif defined(CONFIG_USER_ONLY)
# define is_32bit(ctx)  false
#else
static inline bool is_32bit(DisasContext *ctx)
{
    return (ctx->misa & RV32) == RV32;
}
#endif
/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Force all upper 32 bits to ones: the nan-box pattern. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used, otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always nan-boxed, even the canonical nan.
 */
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);

    /*
     * An unsigned compare against t_max is true exactly when the upper
     * 32 bits are all ones, i.e. the input is properly boxed: pass it
     * through; otherwise substitute the (boxed) 32-bit canonical NaN.
     */
    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
/* Raise exception @excp at the current instruction and end the TB. */
static void generate_exception(DisasContext *ctx, int excp)
{
    /* Make the guest pc precise before the helper longjmps out. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}
/*
 * Raise exception @excp with the current pc also recorded as the bad
 * address (env->badaddr, reported via mtval/stval), then end the TB.
 */
static void generate_exception_mtval(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}
/* Emit a debug exception (used for single-stepping into the debugger). */
static void gen_exception_debug(void)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
}
/* Wrapper around tcg_gen_exit_tb that handles single stepping */
static void exit_tb(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
static void lookup_and_goto_ptr(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}
/* Raise an illegal-instruction exception for the current insn. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
/* Raise an instruction-address-misaligned exception (pc goes to mtval). */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
/*
 * Jump to @dest, chaining directly to the next TB (slot @n) when the
 * translator allows it, otherwise falling back to a TB lookup.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}
/*
 * Wrappers for getting reg values.
 *
 * The $zero register does not have cpu_gpr[0] allocated -- we supply the
 * constant zero as a source, and an uninitialized sink as destination.
 *
 * Further, we may provide an extension for word operations.
 */
/* Allocate a per-insn temporary, tracked for bulk free after the insn. */
static TCGv temp_new(DisasContext *ctx)
{
    assert(ctx->ntemp < ARRAY_SIZE(ctx->temp));
    return ctx->temp[ctx->ntemp++] = tcg_temp_new();
}
/*
 * Return a readable source for register @reg_num.  Reads of x0 yield the
 * constant zero.  When the insn is a word op (ctx->w), @ext selects a
 * sign- or zero-extended 32-bit view in a fresh temporary.
 */
static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
{
    TCGv t;

    if (reg_num == 0) {
        return ctx->zero;
    }

    switch (ctx->w ? ext : EXT_NONE) {
    case EXT_NONE:
        /* Full-width read: hand out the global directly. */
        return cpu_gpr[reg_num];
    case EXT_SIGN:
        t = temp_new(ctx);
        tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
        return t;
    case EXT_ZERO:
        t = temp_new(ctx);
        tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
        return t;
    }
    g_assert_not_reached();
}
228 static void gen_get_gpr(DisasContext
*ctx
, TCGv t
, int reg_num
)
230 tcg_gen_mov_tl(t
, get_gpr(ctx
, reg_num
, EXT_NONE
));
233 static TCGv
dest_gpr(DisasContext
*ctx
, int reg_num
)
235 if (reg_num
== 0 || ctx
->w
) {
236 return temp_new(ctx
);
238 return cpu_gpr
[reg_num
];
/*
 * Commit @t to register @reg_num.  Writes to x0 are discarded; word ops
 * sign-extend the low 32 bits into the full register.
 */
static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
{
    if (reg_num != 0) {
        if (ctx->w) {
            tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
        } else {
            tcg_gen_mov_tl(cpu_gpr[reg_num], t);
        }
    }
}
/* Emit a JAL: link pc_succ_insn into @rd and jump to pc + @imm. */
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        /* Without the C extension, targets must be 4-byte aligned. */
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        /* Link register: address of the following instruction. */
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    target_ulong sd;

    /* Already known dirty for the rest of this TB -- nothing to do. */
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }

    /* Remember the state change for the rest of the TB. */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD;

    /* Set FS (and the SD summary bit) in mstatus. */
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    /* With V=1, mirror the update into the HS-level mstatus copy. */
    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }

    tcg_temp_free(tmp);
}
#else
/* User-mode emulation has no mstatus to maintain. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
/*
 * Install fp rounding mode @rm into env->fp_status, skipping the helper
 * call when the previously-installed mode is already @rm.
 */
static void gen_set_rm(DisasContext *ctx, int rm)
{
    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
}
/* Decoder transform: the nf field encodes (number of fields - 1). */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}
/* Decoder transforms: scale an immediate left by a fixed shift. */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
/* Abort translation (return false) unless extension @ext is enabled. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

/* Abort translation (return false) when executing in RV32 mode. */
#define REQUIRE_64BIT(ctx) do { \
    if (is_32bit(ctx)) {        \
        return false;           \
    }                           \
} while (0)
/* Decoder transform: RVC 3-bit register fields map onto x8..x15. */
static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}
349 static int ex_rvc_shifti(DisasContext
*ctx
, int imm
)
351 /* For RV128 a shamt of 0 means a shift by 64. */
352 return imm
? imm
: 64;
355 /* Include the auto-generated decoder for 32 bit insn */
356 #include "decode-insn32.c.inc"
358 static bool gen_arith_imm_fn(DisasContext
*ctx
, arg_i
*a
, DisasExtend ext
,
359 void (*func
)(TCGv
, TCGv
, target_long
))
361 TCGv dest
= dest_gpr(ctx
, a
->rd
);
362 TCGv src1
= get_gpr(ctx
, a
->rs1
, ext
);
364 func(dest
, src1
, a
->imm
);
366 gen_set_gpr(ctx
, a
->rd
, dest
);
370 static bool gen_arith_imm_tl(DisasContext
*ctx
, arg_i
*a
, DisasExtend ext
,
371 void (*func
)(TCGv
, TCGv
, TCGv
))
373 TCGv dest
= dest_gpr(ctx
, a
->rd
);
374 TCGv src1
= get_gpr(ctx
, a
->rs1
, ext
);
375 TCGv src2
= tcg_constant_tl(a
->imm
);
377 func(dest
, src1
, src2
);
379 gen_set_gpr(ctx
, a
->rd
, dest
);
383 static bool gen_arith(DisasContext
*ctx
, arg_r
*a
, DisasExtend ext
,
384 void (*func
)(TCGv
, TCGv
, TCGv
))
386 TCGv dest
= dest_gpr(ctx
, a
->rd
);
387 TCGv src1
= get_gpr(ctx
, a
->rs1
, ext
);
388 TCGv src2
= get_gpr(ctx
, a
->rs2
, ext
);
390 func(dest
, src1
, src2
);
392 gen_set_gpr(ctx
, a
->rd
, dest
);
396 static bool gen_shift(DisasContext
*ctx
, arg_r
*a
,
397 void(*func
)(TCGv
, TCGv
, TCGv
))
399 TCGv source1
= tcg_temp_new();
400 TCGv source2
= tcg_temp_new();
402 gen_get_gpr(ctx
, source1
, a
->rs1
);
403 gen_get_gpr(ctx
, source2
, a
->rs2
);
405 tcg_gen_andi_tl(source2
, source2
, TARGET_LONG_BITS
- 1);
406 (*func
)(source1
, source1
, source2
);
408 gen_set_gpr(ctx
, a
->rd
, source1
);
409 tcg_temp_free(source1
);
410 tcg_temp_free(source2
);
414 static uint32_t opcode_at(DisasContextBase
*dcbase
, target_ulong pc
)
416 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
417 CPUState
*cpu
= ctx
->cs
;
418 CPURISCVState
*env
= cpu
->env_ptr
;
420 return cpu_ldl_code(env
, pc
);
423 static bool gen_shifti(DisasContext
*ctx
, arg_shift
*a
,
424 void(*func
)(TCGv
, TCGv
, TCGv
))
426 if (a
->shamt
>= TARGET_LONG_BITS
) {
430 TCGv source1
= tcg_temp_new();
431 TCGv source2
= tcg_temp_new();
433 gen_get_gpr(ctx
, source1
, a
->rs1
);
435 tcg_gen_movi_tl(source2
, a
->shamt
);
436 (*func
)(source1
, source1
, source2
);
438 gen_set_gpr(ctx
, a
->rd
, source1
);
439 tcg_temp_free(source1
);
440 tcg_temp_free(source2
);
444 static bool gen_shiftw(DisasContext
*ctx
, arg_r
*a
,
445 void(*func
)(TCGv
, TCGv
, TCGv
))
447 TCGv source1
= tcg_temp_new();
448 TCGv source2
= tcg_temp_new();
450 gen_get_gpr(ctx
, source1
, a
->rs1
);
451 gen_get_gpr(ctx
, source2
, a
->rs2
);
453 tcg_gen_andi_tl(source2
, source2
, 31);
454 (*func
)(source1
, source1
, source2
);
455 tcg_gen_ext32s_tl(source1
, source1
);
457 gen_set_gpr(ctx
, a
->rd
, source1
);
458 tcg_temp_free(source1
);
459 tcg_temp_free(source2
);
463 static bool gen_shiftiw(DisasContext
*ctx
, arg_shift
*a
,
464 void(*func
)(TCGv
, TCGv
, TCGv
))
466 TCGv source1
= tcg_temp_new();
467 TCGv source2
= tcg_temp_new();
469 gen_get_gpr(ctx
, source1
, a
->rs1
);
470 tcg_gen_movi_tl(source2
, a
->shamt
);
472 (*func
)(source1
, source1
, source2
);
473 tcg_gen_ext32s_tl(source1
, source1
);
475 gen_set_gpr(ctx
, a
->rd
, source1
);
476 tcg_temp_free(source1
);
477 tcg_temp_free(source2
);
481 static bool gen_unary(DisasContext
*ctx
, arg_r2
*a
,
482 void(*func
)(TCGv
, TCGv
))
484 TCGv source
= tcg_temp_new();
486 gen_get_gpr(ctx
, source
, a
->rs1
);
488 (*func
)(source
, source
);
490 gen_set_gpr(ctx
, a
->rd
, source
);
491 tcg_temp_free(source
);
495 /* Include insn module translation function */
496 #include "insn_trans/trans_rvi.c.inc"
497 #include "insn_trans/trans_rvm.c.inc"
498 #include "insn_trans/trans_rva.c.inc"
499 #include "insn_trans/trans_rvf.c.inc"
500 #include "insn_trans/trans_rvd.c.inc"
501 #include "insn_trans/trans_rvh.c.inc"
502 #include "insn_trans/trans_rvv.c.inc"
503 #include "insn_trans/trans_rvb.c.inc"
504 #include "insn_trans/trans_privileged.c.inc"
506 /* Include the auto-generated decoder for 16 bit insn */
507 #include "decode-insn16.c.inc"
/*
 * Decode and translate one instruction starting with halfword @opcode.
 * Compressed (16-bit) insns are dispatched to the RVC decoder; otherwise
 * the second halfword is fetched and the 32-bit decoder is used.
 */
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            /* Compressed encoding without the C extension is illegal. */
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                gen_exception_illegal(ctx);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
/* TranslatorOps hook: seed the DisasContext from cpu/env and tb flags. */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    /* Vector state is pre-digested into tb_flags at translation time. */
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->cs = cs;
    ctx->w = false;
    ctx->ntemp = 0;
    memset(ctx->temp, 0, sizeof(ctx->temp));

    ctx->zero = tcg_constant_tl(0);
}
/* TranslatorOps hook: nothing to emit at the start of a TB. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
/* TranslatorOps hook: record the pc for this insn in the op stream. */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
/* TranslatorOps hook: translate one guest instruction. */
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);

    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    /* Free temporaries allocated while translating this insn. */
    for (int i = ctx->ntemp - 1; i >= 0; --i) {
        tcg_temp_free(ctx->temp[i]);
        ctx->temp[i] = NULL;
    }
    ctx->ntemp = 0;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        /* Stop translation before crossing out of the starting page. */
        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
/* TranslatorOps hook: close out the TB according to how it ended. */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fell off the end (insn budget/page limit): chain to next pc. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* Exit already emitted (branch, exception, ...). */
        break;
    default:
        g_assert_not_reached();
    }
}
/* TranslatorOps hook: log a disassembly of the translated guest code. */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    /* System emulation also records the privilege and virt state. */
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
/* Entry point: translate one TB by driving the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
652 void riscv_translate_init(void)
656 /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
657 /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
658 /* registers, unless you specifically block reads/writes to reg 0 */
661 for (i
= 1; i
< 32; i
++) {
662 cpu_gpr
[i
] = tcg_global_mem_new(cpu_env
,
663 offsetof(CPURISCVState
, gpr
[i
]), riscv_int_regnames
[i
]);
666 for (i
= 0; i
< 32; i
++) {
667 cpu_fpr
[i
] = tcg_global_mem_new_i64(cpu_env
,
668 offsetof(CPURISCVState
, fpr
[i
]), riscv_fpr_regnames
[i
]);
671 cpu_pc
= tcg_global_mem_new(cpu_env
, offsetof(CPURISCVState
, pc
), "pc");
672 cpu_vl
= tcg_global_mem_new(cpu_env
, offsetof(CPURISCVState
, vl
), "vl");
673 load_res
= tcg_global_mem_new(cpu_env
, offsetof(CPURISCVState
, load_res
),
675 load_val
= tcg_global_mem_new(cpu_env
, offsetof(CPURISCVState
, load_val
),