/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    target_ulong next_pc;
    uint32_t opcode;
    uint32_t flags;
    uint32_t mem_idx;
    int singlestep_enabled;
    int bstate;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status. Or -1 for
       no previous fp instruction. Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value. */
    int frm;
} DisasContext;
enum {
    BS_NONE   = 0, /* When seen outside of translation while loop, indicates
                      need to exit tb due to end of page. */
    BS_STOP   = 1, /* Need to exit tb for syscall, sret, etc. */
    BS_BRANCH = 2, /* Need to exit tb for branch, jal, etc. */
};
/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};
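/* Note on the table layout: funct3 bits [1:0] encode the access size
 * (0 = byte, 1 = half, 2 = word, 3 = double) and bit 2 selects the
 * zero-extending LBU/LHU/LWU variants, which is why the unsigned memops
 * sit at indices 4-6. Encodings left at -1 are rejected as illegal
 * instructions by gen_load/gen_store below. */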
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}
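/* The two exits above differ in cost: a goto_tb exit can later be
 * patched to jump straight into the successor TB ("chaining"), while
 * the plain exit_tb(0) always returns to the main loop for a fresh TB
 * lookup. use_goto_tb() only permits chaining when the target is on
 * the same guest page and we are not single-stepping. */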
/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
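/* Why the fixup above yields the signed x unsigned high part: viewing
 * arg1 as signed subtracts 2^XLEN * sign(arg1) from its unsigned value,
 * so HI_signed(arg1 * arg2) = HI_unsigned(arg1 * arg2) - (arg1 < 0 ? arg2 : 0).
 * The sari/and pair materializes that conditional term branch-free:
 * (arg1 >> (XLEN - 1)) is all-ones exactly when arg1 is negative. */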
static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
                      uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
    }
}
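/* A note on the deposit trick above: tcg_gen_deposit_i64(dst, base, val,
 * 0, len) copies bits [len-1:0] of `val` (the magnitude from rs1) into
 * `base` (derived from rs2), so the sign bit -- and, in the single
 * precision case, the NaN-box in the upper half -- comes from rs2.
 * `min` doubles as the sign-bit mask: INT32_MIN selects the 32-bit
 * layout (len 31), INT64_MIN the 64-bit one (len 63). */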
static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                      int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* first, trick to get it to act like working on 32 bits (get rid of
           upper 32, sign extend to fill space) */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
        /* fall through to SRA */
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
        /* Handle by altering args to tcg_gen_div to produce req'd results:
         * For overflow: want source1 in source1 and 1 in source2
         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                           resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
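    /* Worked example of the operand rewriting above, for RV64:
     * INT64_MIN / -1 overflows, so source1 is left at INT64_MIN and
     * source2 is forced to 1, producing INT64_MIN as the spec requires;
     * x / 0 forces source1 to -1 and source2 to 1, producing -1. All
     * other operand pairs reach tcg_gen_div_tl unchanged, so the common
     * case stays branch-free. */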
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                           resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                           resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    if (opc & 0x8) { /* sign extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}
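/* The `opc & 0x8` test above relies on the OPC_RISC_* encodings: the
 * W-form major opcode (0x3B) differs from the base one (0x33) exactly
 * in bit 3, so every *W result gets sign-extended from 32 bits into
 * the full register, as RV64 requires. */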
static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_LEFT_IW:
        shift_len = 32;
        /* fall through to SHIFT_LEFT_I */
#endif
    case OPC_RISC_SHIFT_LEFT_I:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* fall through to SHIFT_RIGHT_I */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* differentiate on IMM */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions. */
            opc &= ~0x8;
        }
        break;
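    /* extract/sextract with offset `imm` and length `shift_len - imm` is
     * exactly a logical/arithmetic right shift confined to the low
     * shift_len bits, and for the W forms it already leaves a correctly
     * sign-extended 32-bit result -- hence `opc &= ~0x8` above to skip
     * the final ext32s. */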
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}
static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
                    target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->pc + imm;
    if (!riscv_has_ext(env, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
    }

    gen_goto_tb(ctx, 0, ctx->pc + imm); /* must use this for safety */
    ctx->bstate = BS_BRANCH;
}
static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!riscv_has_ext(env, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
        }
        tcg_gen_exit_tb(0);

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->bstate = BS_BRANCH;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}
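/* Per the JALR rule in the ISA spec, bit 0 of the computed target is
 * silently cleared (the andi with -2 above) rather than trapping.
 * Only bit 1 can still leave the target misaligned, and only when the
 * C extension is absent -- the case handled at the `misaligned` label. */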
static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rs1, int rs2, target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);

    gen_goto_tb(ctx, 1, ctx->next_pc);
    gen_set_label(l); /* branch taken */
    if (!riscv_has_ext(env, RVC) && ((ctx->pc + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->pc + bimm);
    }
    ctx->bstate = BS_BRANCH;
}
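/* The not-taken path is emitted first (goto_tb slot 1, falling through
 * to next_pc) and the taken path after the label (slot 0), so each of
 * the two successors gets its own chaining slot in this TB. */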
static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                     target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
                      target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}
static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
                        int rs1, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}
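/* NaN-boxing example: a single-precision value loaded into a 64-bit FP
 * register must carry all-ones in bits 63:32, so 1.0f (0x3f800000) is
 * held as 0xffffffff3f800000. A register whose upper half is not
 * all-ones is treated as a canonical NaN by single-precision ops. */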
static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
                         int rs2, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

static void gen_atomic(DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    TCGCond cond;
    bool aq, rl;

    /* Extract the size of the atomic operation. */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val. */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;
    case OPC_RISC_SC:
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path. */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure. However, we still need to
           provide the memory barrier implied by AQ/RL. */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;
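    /* A caveat of modelling SC with cmpxchg: the store succeeds when the
     * word at load_res still *equals* load_val, not when the reservation
     * is literally intact, so an ABA change between LR and SC is reported
     * as success. This is the usual approximation for LR/SC under binary
     * translation and is sufficient for the retry loops the sequence is
     * used to build. */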
    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path. */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    case OPC_RISC_AMOMIN:
        cond = TCG_COND_LT;
        goto do_minmax;
    case OPC_RISC_AMOMAX:
        cond = TCG_COND_GT;
        goto do_minmax;
    case OPC_RISC_AMOMINU:
        cond = TCG_COND_LTU;
        goto do_minmax;
    case OPC_RISC_AMOMAXU:
        cond = TCG_COND_GTU;
        goto do_minmax;
    do_minmax:
        /* Handle the RL barrier. The AQ barrier is handled along the
           parallel path by the SC atomic cmpxchg. On the serial path,
           of course, barriers do not matter. */
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            l1 = gen_new_label();
            gen_set_label(l1);
        } else {
            l1 = NULL;
        }

        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        if ((mop & MO_SSIZE) == MO_SL) {
            /* Sign-extend the register comparison input. */
            tcg_gen_ext32s_tl(src2, src2);
        }
        dat = tcg_temp_local_new();
        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);

        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            /* Parallel context. Make this operation atomic by verifying
               that the memory didn't change while we computed the result. */
            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);

            /* If the cmpxchg failed, retry. */
            /* ??? There is an assumption here that this will eventually
               succeed, such that we don't live-lock. This is not unlike
               a similar loop that the compiler would generate for e.g.
               __atomic_fetch_and_xor, so don't worry about it. */
            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
        } else {
            /* Serial context. Directly store the result. */
            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
        }
        gen_set_gpr(rd, dat);
        tcg_temp_free(dat);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}

static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}
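/* Caching the last-installed rounding mode in ctx->frm is safe within
 * a TB: the only way the effective mode changes underneath us -- a CSR
 * write to frm/fcsr -- always ends the TB (see gen_system), as the
 * comment in DisasContext notes. */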
static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}
static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                              cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                              cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;
    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;
    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1: /* FCLASS */
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;
    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                              cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                              cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;
    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 0x1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0x0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_D */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_D */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_D */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_D */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;
    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_D_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_D_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_D_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_D_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
        case 1: /* FCLASS */
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        break;
    }
}
static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */

#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        /* sfence.vma */
        /* TODO: handle ASID specific fences */
        gen_helper_tlb_flush(cpu_env);
        return;
    }
#endif
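    /* `csr` holds instruction bits 31:20 at this point, so csr >> 5 is
     * the funct7 field; SFENCE.VMA encodes funct7 = 0b0001001 = 9. */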
    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (riscv_has_ext(env, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(0); /* no chaining */
                ctx->bstate = BS_BRANCH;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            gen_helper_tlb_flush(cpu_env);
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
        tcg_gen_movi_tl(imm_rs1, rs1);
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            return;
        }
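        /* rs1_pass carries the rs1 register *number* into the helper,
         * not its value: CSRRS/CSRRC with rs1 = x0 must read the CSR
         * without writing it, and the helper cannot distinguish x0
         * from a register that happens to contain 0 by value alone.
         * imm_rs1 similarly reuses the rs1 field as the zimm immediate
         * for the CSRR*I forms. */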
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
        tcg_gen_exit_tb(0); /* no chaining */
        ctx->bstate = BS_BRANCH;
        break;
    }

    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1')*/
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}
static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.SUBW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1]*/
        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}
static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64(RV128) */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0*/
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak*/
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0*/
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2)*/
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2)*/
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(Rv64/128) -> sd rs2, offset[8:3](x2)*/
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}
static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(env, ctx);
        break;
    case 2:
        decode_RV32_64C2(env, ctx);
        break;
    }
}
static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->pc);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
#ifndef CONFIG_USER_ONLY
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
#endif
        break;
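    /* On the FENCE.I path above (bit 12 set), later fetches must see
     * prior stores to code; since QEMU already invalidates affected TBs
     * when guest code pages are written, ending this TB so the front
     * end re-fetches is believed sufficient, while the plain FENCE case
     * only needs the barrier and no TB exit. */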
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}
static void decode_opc(CPURISCVState *env, DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!riscv_has_ext(env, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->next_pc = ctx->pc + 2;
            decode_RV32_64C(env, ctx);
        }
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_RV32_64G(env, ctx);
    }
}
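/* In the base encoding, bits [1:0] == 0b11 mark a full 32-bit
 * instruction; any other value means a 16-bit compressed one, which is
 * why next_pc advances by 2 or 4 accordingly. */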
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPURISCVState *env = cs->env_ptr;
    DisasContext ctx;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    ctx.pc = pc_start;

    /* once we have GDB, the rest of the translate.c implementation should be
       ready for singlestep */
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.tb = tb;
    ctx.bstate = BS_NONE;
    ctx.flags = tb->flags;
    ctx.mem_idx = tb->flags & TB_FLAGS_MMU_MASK;
    ctx.frm = -1;  /* unknown rounding mode */

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, ctx.pc);
            ctx.bstate = BS_BRANCH;
            gen_exception_debug();
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            ctx.pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx);
        ctx.pc = ctx.next_pc;

        if (cs->singlestep_enabled) {
            break;
        }
        if (ctx.pc >= next_page_start) {
            break;
        }
        if (tcg_op_buf_full()) {
            break;
        }
        if (num_insns >= max_insns) {
            break;
        }
        if (singlestep) {
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    switch (ctx.bstate) {
    case BS_STOP:
        gen_goto_tb(&ctx, 0, ctx.pc);
        break;
    case BS_NONE: /* handle end of page - DO NOT CHAIN. See gen_goto_tb. */
        tcg_gen_movi_tl(cpu_pc, ctx.pc);
        if (cs->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    case BS_BRANCH: /* ops using BS_BRANCH generate own exit seq */
    default:
        break;
    }
done_generating:
    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start);
        qemu_log("\n");
    }
#endif
}
void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}