accel/tcg: Remove TranslatorOps.breakpoint_check
[qemu/ar7.git] / target/riscv/translate.c
/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    bool virt_enabled;
    uint32_t opcode;
    uint32_t mstatus_fs;
    target_ulong misa;
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;
    bool hlsx;
    /* vector extension */
    bool vill;
    uint8_t lmul;
    uint8_t sew;
    uint16_t vlen;
    uint16_t mlen;
    bool vl_eq_vlmax;
    CPUState *cs;
} DisasContext;

static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa & ext;
}

#ifdef TARGET_RISCV32
# define is_32bit(ctx)  true
#elif defined(CONFIG_USER_ONLY)
# define is_32bit(ctx)  false
#else
static inline bool is_32bit(DisasContext *ctx)
{
    return (ctx->misa & RV32) == RV32;
}
#endif

/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}

/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used, otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always nan-boxed, even the canonical nan.
 */
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);

    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
    tcg_temp_free_i64(t_max);
    tcg_temp_free_i64(t_nan);
}

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_exception_mtval(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

/* Wrapper around tcg_gen_exit_tb that handles single stepping */
static void exit_tb(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
static void lookup_and_goto_ptr(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

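/*
 * mulhsu: high word of a signed x unsigned multiply.  Computed from the
 * unsigned high product: (s * u) >> XLEN == ((u)s * u) >> XLEN - (s < 0 ? u : 0).
 * The arithmetic shift below yields an all-ones mask exactly when arg1 is
 * negative, so the AND selects either arg2 or 0 for the correction term.
 */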
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                       resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                       resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

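/*
 * divu: unsigned division.  Only the divide-by-zero case needs fixing up;
 * RISC-V defines the quotient of x / 0 as all ones, so the dividend is
 * forced to -1 and the divisor to 1 before the TCG divide is emitted.
 */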
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                       resultopt1);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                       resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

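/*
 * rem: signed remainder.  For overflow (INT_MIN % -1) and remainder by zero
 * the divisor is forced to 1 so the TCG rem cannot trap; the final movcond
 * then returns 0 for the overflow case and the original dividend for the
 * divide-by-zero case, as the ISA requires.
 */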
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                       resultopt1);
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
                       source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

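/*
 * remu: unsigned remainder.  Only remainder by zero needs special casing;
 * the divisor is forced to 1 and the final movcond returns the original
 * dividend in that case.
 */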
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                       resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
                       source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    target_ulong sd;

    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB. */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD;

    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

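/*
 * Install the rounding mode encoded by an fp instruction into fp_status via
 * the helper, skipping the call when the mode already active for this TB
 * (tracked in ctx->frm) matches.
 */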
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

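/*
 * Field-extraction helpers referenced by the generated decoders
 * (decode-insn32.c.inc / decode-insn16.c.inc below): they rewrite raw
 * immediate fields before the trans_* callbacks see them.
 */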
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}

#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)

#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

#define REQUIRE_64BIT(ctx) do { \
    if (is_32bit(ctx)) {        \
        return false;           \
    }                           \
} while (0)

static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}

static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}

/* Include the auto-generated decoder for 32 bit insn */
#include "decode-insn32.c.inc"

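/*
 * Common translation pattern for register-immediate ALU ops: fetch rs1,
 * apply func with the immediate, write the result back to rd.
 */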
static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv source1;
    source1 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);

    (*func)(source1, source1, a->imm);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    return true;
}

static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    tcg_gen_movi_tl(source2, a->imm);

    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
                            void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);
    tcg_gen_ext32s_tl(source1, source1);
    tcg_gen_ext32s_tl(source2, source2);

    (*func)(source1, source1, source2);

    tcg_gen_ext32s_tl(source1, source1);
    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
                             void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);
    tcg_gen_ext32u_tl(source1, source1);
    tcg_gen_ext32u_tl(source2, source2);

    (*func)(source1, source1, source2);

    tcg_gen_ext32s_tl(source1, source1);
    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

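/*
 * Bitmanip pack helpers: pack puts the low half of rs1 in the low half of rd
 * and the low half of rs2 in the upper half; packu does the same with the
 * upper halves; packh packs the low bytes of rs1 and rs2 into a zero-extended
 * 16-bit value.
 */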
static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_deposit_tl(ret, arg1, arg2,
                       TARGET_LONG_BITS / 2,
                       TARGET_LONG_BITS / 2);
}

static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv t = tcg_temp_new();
    tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2);
    tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2);
    tcg_temp_free(t);
}

static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv t = tcg_temp_new();
    tcg_gen_ext8u_tl(t, arg2);
    tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8);
    tcg_temp_free(t);
}

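/*
 * Single-bit instructions (bset/bclr/binv/bext) operate on the bit selected
 * by shamt; gen_sbop_mask builds the shared 1 << shamt mask.
 */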
static void gen_sbop_mask(TCGv ret, TCGv shamt)
{
    tcg_gen_movi_tl(ret, 1);
    tcg_gen_shl_tl(ret, ret, shamt);
}

static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
{
    TCGv t = tcg_temp_new();

    gen_sbop_mask(t, shamt);
    tcg_gen_or_tl(ret, arg1, t);

    tcg_temp_free(t);
}

static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
{
    TCGv t = tcg_temp_new();

    gen_sbop_mask(t, shamt);
    tcg_gen_andc_tl(ret, arg1, t);

    tcg_temp_free(t);
}

static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
{
    TCGv t = tcg_temp_new();

    gen_sbop_mask(t, shamt);
    tcg_gen_xor_tl(ret, arg1, t);

    tcg_temp_free(t);
}

static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
{
    tcg_gen_shr_tl(ret, arg1, shamt);
    tcg_gen_andi_tl(ret, ret, 1);
}

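/*
 * slo/sro: shift left/right with ones shifted in, implemented by shifting
 * the complement and complementing the result again.
 */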
static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_not_tl(ret, arg1);
    tcg_gen_shl_tl(ret, ret, arg2);
    tcg_gen_not_tl(ret, ret);
}

static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_not_tl(ret, arg1);
    tcg_gen_shr_tl(ret, ret, arg2);
    tcg_gen_not_tl(ret, ret);
}

static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
{
    TCGv source1 = tcg_temp_new();
    TCGv source2;

    gen_get_gpr(source1, a->rs1);

    if (a->shamt == (TARGET_LONG_BITS - 8)) {
        /* rev8, byte swaps */
        tcg_gen_bswap_tl(source1, source1);
    } else {
        source2 = tcg_temp_new();
        tcg_gen_movi_tl(source2, a->shamt);
        gen_helper_grev(source1, source1, source2);
        tcg_temp_free(source2);
    }

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    return true;
}

#define GEN_SHADD(SHAMT)                                       \
static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \
{                                                              \
    TCGv t = tcg_temp_new();                                   \
                                                               \
    tcg_gen_shli_tl(t, arg1, SHAMT);                           \
    tcg_gen_add_tl(ret, t, arg2);                              \
                                                               \
    tcg_temp_free(t);                                          \
}

GEN_SHADD(1)
GEN_SHADD(2)
GEN_SHADD(3)

static void gen_ctzw(TCGv ret, TCGv arg1)
{
    tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32));
    tcg_gen_ctzi_tl(ret, ret, 64);
}

static void gen_clzw(TCGv ret, TCGv arg1)
{
    tcg_gen_ext32u_tl(ret, arg1);
    tcg_gen_clzi_tl(ret, ret, 64);
    tcg_gen_subi_tl(ret, ret, 32);
}

static void gen_cpopw(TCGv ret, TCGv arg1)
{
    tcg_gen_ext32u_tl(arg1, arg1);
    tcg_gen_ctpop_tl(ret, arg1);
}

static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv t = tcg_temp_new();
    tcg_gen_ext16s_tl(t, arg2);
    tcg_gen_deposit_tl(ret, arg1, t, 16, 48);
    tcg_temp_free(t);
}

static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv t = tcg_temp_new();
    tcg_gen_shri_tl(t, arg1, 16);
    tcg_gen_deposit_tl(ret, arg2, t, 0, 16);
    tcg_gen_ext32s_tl(ret, ret);
    tcg_temp_free(t);
}

static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* truncate to 32 bits */
    tcg_gen_trunc_tl_i32(t1, arg1);
    tcg_gen_trunc_tl_i32(t2, arg2);

    tcg_gen_rotr_i32(t1, t1, t2);

    /* sign-extend back to the target register width */
    tcg_gen_ext_i32_tl(ret, t1);

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* truncate to 32 bits */
    tcg_gen_trunc_tl_i32(t1, arg1);
    tcg_gen_trunc_tl_i32(t2, arg2);

    tcg_gen_rotl_i32(t1, t1, t2);

    /* sign-extend back to the target register width */
    tcg_gen_ext_i32_tl(ret, t1);

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_ext32u_tl(arg1, arg1);
    gen_helper_grev(ret, arg1, arg2);
}

static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_ext32u_tl(arg1, arg1);
    gen_helper_gorcw(ret, arg1, arg2);
}

#define GEN_SHADD_UW(SHAMT)                                       \
static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \
{                                                                 \
    TCGv t = tcg_temp_new();                                      \
                                                                  \
    tcg_gen_ext32u_tl(t, arg1);                                   \
                                                                  \
    tcg_gen_shli_tl(t, t, SHAMT);                                 \
    tcg_gen_add_tl(ret, t, arg2);                                 \
                                                                  \
    tcg_temp_free(t);                                             \
}

GEN_SHADD_UW(1)
GEN_SHADD_UW(2)
GEN_SHADD_UW(3)

static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_ext32u_tl(arg1, arg1);
    tcg_gen_add_tl(ret, arg1, arg2);
}

static bool gen_arith(DisasContext *ctx, arg_r *a,
                      void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);

    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static bool gen_shift(DisasContext *ctx, arg_r *a,
                      void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1 = tcg_temp_new();
    TCGv source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);

    tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

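/*
 * Load a raw 32-bit instruction word from pc at translation time; this reads
 * guest memory directly rather than generating TCG code.
 */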
static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUState *cpu = ctx->cs;
    CPURISCVState *env = cpu->env_ptr;

    return cpu_ldl_code(env, pc);
}

static bool gen_shifti(DisasContext *ctx, arg_shift *a,
                       void(*func)(TCGv, TCGv, TCGv))
{
    if (a->shamt >= TARGET_LONG_BITS) {
        return false;
    }

    TCGv source1 = tcg_temp_new();
    TCGv source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);

    tcg_gen_movi_tl(source2, a->shamt);
    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static bool gen_shiftw(DisasContext *ctx, arg_r *a,
                       void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1 = tcg_temp_new();
    TCGv source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);

    tcg_gen_andi_tl(source2, source2, 31);
    (*func)(source1, source1, source2);
    tcg_gen_ext32s_tl(source1, source1);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
                        void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1 = tcg_temp_new();
    TCGv source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    tcg_gen_movi_tl(source2, a->shamt);

    (*func)(source1, source1, source2);
    tcg_gen_ext32s_tl(source1, source1);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static void gen_ctz(TCGv ret, TCGv arg1)
{
    tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS);
}

static void gen_clz(TCGv ret, TCGv arg1)
{
    tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS);
}

static bool gen_unary(DisasContext *ctx, arg_r2 *a,
                      void(*func)(TCGv, TCGv))
{
    TCGv source = tcg_temp_new();

    gen_get_gpr(source, a->rs1);

    (*func)(source, source);

    gen_set_gpr(a->rd, source);
    tcg_temp_free(source);
    return true;
}

/* Include the insn module translation functions */
#include "insn_trans/trans_rvi.c.inc"
#include "insn_trans/trans_rvm.c.inc"
#include "insn_trans/trans_rva.c.inc"
#include "insn_trans/trans_rvf.c.inc"
#include "insn_trans/trans_rvd.c.inc"
#include "insn_trans/trans_rvh.c.inc"
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_privileged.c.inc"

/* Include the auto-generated decoder for 16 bit insn */
#include "decode-insn16.c.inc"

static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                gen_exception_illegal(ctx);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}

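/*
 * TranslatorOps hooks driven by the generic translator_loop().  Note that,
 * per the commit this blob belongs to ("accel/tcg: Remove
 * TranslatorOps.breakpoint_check"), there is no per-target breakpoint_check
 * hook any more; breakpoint handling is done in common accel/tcg code.
 */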
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->cs = cs;
}

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);

    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        /* Stop translation when the next insn would leave the start page. */
        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}

void riscv_translate_init(void)
{
    int i;

    /*
     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
     * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
     * registers, unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}