/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    target_ulong next_pc;
    uint32_t opcode;
    uint32_t flags;
    uint32_t mem_idx;
    int singlestep_enabled;
    int bstate;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
} DisasContext;

enum {
    BS_NONE = 0,    /* When seen outside of translation while loop, indicates
                       need to exit tb due to end of page. */
    BS_STOP = 1,    /* Need to exit tb for syscall, sret, etc. */
    BS_BRANCH = 2,  /* Need to exit tb for branch, jal, etc. */
};

/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};

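/*
 * Example of the mapping above: LW encodes funct3 = 2, which selects
 * MO_TESL (target-endian, sign-extended 32-bit load), while LBU encodes
 * funct3 = 4 and selects MO_UB.  Entries left at -1 (e.g. LD/LWU, which
 * exist only on RV64) are rejected by gen_load/gen_store as illegal
 * instructions.
 */
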
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}

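/*
 * Note on the two exits above: tcg_gen_exit_tb((uintptr_t)ctx->tb + n)
 * returns a TB pointer tagged with the jump slot index n, which lets the
 * execution loop patch this TB so it chains directly to its successor.
 * Chaining is only safe for in-page, non-singlestep jumps, hence the
 * use_goto_tb() guard; tcg_gen_exit_tb(0) always returns to the loop.
 */
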
/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

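/*
 * The fix-up above implements the identity
 *     mulhsu(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
 * since reinterpreting a as signed subtracts 2^XLEN * b from the unsigned
 * product exactly when a's sign bit is set.  The arithmetic shift by
 * TARGET_LONG_BITS - 1 yields an all-ones mask for negative a, so the AND
 * selects either b or 0 to subtract from the high half of the product.
 */
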
static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
                      uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

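/*
 * Sign-injection above works on the raw 64-bit register image: "min" is
 * the sign-bit mask (INT32_MIN for .S, INT64_MIN for .D), and the deposit
 * copies the low 31 or 63 bits from rs1 while keeping rs2's sign bit (and,
 * for .S, rs2's upper NaN-boxing bits).  The rs1 == rs2 special cases
 * recover the canonical FMOV/FNEG/FABS idioms.
 */
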
static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                      int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* first, trick to get it to act like working on 32 bits (get rid of
           upper 32, sign extend to fill space) */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
        /* Handle by altering args to tcg_gen_div to produce req'd results:
         * For overflow: want source1 in source1 and 1 in source2
         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                           resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                           resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                           resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}

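/*
 * RISC-V M-extension division never traps; the movcond fix-ups above encode
 * the specified edge-case results by rewriting the operands before a single
 * branch-free tcg_gen_div/rem op.  For DIV:
 *     x / 0        -> -1           INT_MIN / -1 -> INT_MIN (overflow)
 * and for REM:
 *     x % 0        -> x            INT_MIN % -1 -> 0
 */
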
static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLIW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SLLI:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* differentiate on IMM */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions. */
            opc &= ~0x8;
        }
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}

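/*
 * For the right-shift immediates, instruction bit 30 (0x400 in the decoded
 * I-type immediate) selects arithmetic vs logical: srli and srai differ
 * only in that bit.  Extracting the top (shift_len - imm) bits, signed or
 * unsigned, is equivalent to the shift itself and maps onto a single
 * TCG (s)extract op, which also handles the 32-bit W forms for free.
 */
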
static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
                    target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->pc + imm;
    if (!riscv_has_ext(env, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
    }

    gen_goto_tb(ctx, 0, ctx->pc + imm); /* must use this for safety */
    ctx->bstate = BS_BRANCH;
}

static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!riscv_has_ext(env, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
        }
        tcg_gen_exit_tb(0);

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->bstate = BS_BRANCH;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

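/*
 * The andi with (target_ulong)-2 above implements the JALR rule that the
 * least-significant bit of the computed target is cleared.  Only bit 1
 * then needs a runtime misalignment check, and only when the C extension
 * (which legalizes 2-byte alignment) is absent.
 */
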
static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rs1, int rs2, target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);

    gen_goto_tb(ctx, 1, ctx->next_pc);
    gen_set_label(l); /* branch taken */
    if (!riscv_has_ext(env, RVC) && ((ctx->pc + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->pc + bimm);
    }
    ctx->bstate = BS_BRANCH;
}

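/*
 * Both branch edges use gen_goto_tb with distinct slots: slot 1 for the
 * fall-through to next_pc, slot 0 for the taken target, so each side of
 * the conditional branch can be chained independently to its successor TB.
 */
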
static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                     target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
                      target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}

static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
                        int rs1, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

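/*
 * NaN-boxing example: after flw loads 1.0f (0x3f800000), the 64-bit
 * register holds 0xffffffff3f800000.  Read as a double, any properly
 * NaN-boxed single is a quiet NaN (sign 1, exponent all ones, quiet bit
 * set), which is exactly what the ori above guarantees.
 */
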
static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
                         int rs2, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

static void gen_atomic(DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    TCGCond cond;
    bool aq, rl;

    /* Extract the size of the atomic operation. */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val.  */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path.  */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure.  However, we still need to
           provide the memory barrier implied by AQ/RL.  */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path.  */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    case OPC_RISC_AMOMIN:
        cond = TCG_COND_LT;
        goto do_minmax;
    case OPC_RISC_AMOMAX:
        cond = TCG_COND_GT;
        goto do_minmax;
    case OPC_RISC_AMOMINU:
        cond = TCG_COND_LTU;
        goto do_minmax;
    case OPC_RISC_AMOMAXU:
        cond = TCG_COND_GTU;
        goto do_minmax;
    do_minmax:
        /* Handle the RL barrier.  The AQ barrier is handled along the
           parallel path by the SC atomic cmpxchg.  On the serial path,
           of course, barriers do not matter.  */
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            l1 = gen_new_label();
            gen_set_label(l1);
        } else {
            l1 = NULL;
        }

        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        if ((mop & MO_SSIZE) == MO_SL) {
            /* Sign-extend the register comparison input.  */
            tcg_gen_ext32s_tl(src2, src2);
        }
        dat = tcg_temp_local_new();
        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);

        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            /* Parallel context.  Make this operation atomic by verifying
               that the memory didn't change while we computed the result.  */
            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);

            /* If the cmpxchg failed, retry. */
            /* ??? There is an assumption here that this will eventually
               succeed, such that we don't live-lock.  This is not unlike
               a similar loop that the compiler would generate for e.g.
               __atomic_fetch_and_xor, so don't worry about it.  */
            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
        } else {
            /* Serial context.  Directly store the result.  */
            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
        }
        gen_set_gpr(rd, dat);
        tcg_temp_free(dat);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}

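/*
 * Modeling note: SC is emulated with tcg_gen_atomic_cmpxchg_tl against the
 * value saved by LR, so a racing store that writes back the original value
 * between the LR and the SC is not detected -- the classic ABA limitation
 * of emulating load-reserved/store-conditional with compare-and-swap.
 * This is an accepted approximation; a real reservation is a per-address
 * monitor rather than a value comparison.
 */
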
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

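/*
 * gen_set_rm caches the rounding mode last installed into env->fp_status
 * in ctx->frm, so consecutive fp instructions with the same static rm
 * field call the helper only once per TB.  The cache needs no
 * invalidation for CSR writes (e.g. to frm) because, as noted in
 * DisasContext, any CSR instruction ends the TB.
 */
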
static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

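/*
 * The four fused generators follow the R4-type semantics: FMADD computes
 * (rs1 * rs2) + rs3, FMSUB (rs1 * rs2) - rs3, FNMSUB -(rs1 * rs2) + rs3,
 * and FNMADD -(rs1 * rs2) - rs3, each with a single rounding step applied
 * in the corresponding softfloat helper.
 */
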
static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1:
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;

    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
        case 1:
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */

#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        /* sfence.vma */
        /* TODO: handle ASID specific fences */
        gen_helper_tlb_flush(cpu_env);
        return;
    }
#endif

    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (riscv_has_ext(env, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(0); /* no chaining */
                ctx->bstate = BS_BRANCH;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            gen_helper_tlb_flush(cpu_env);
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
        tcg_gen_movi_tl(imm_rs1, rs1);
        gen_io_start();
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            return;
        }
        gen_io_end();
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
        tcg_gen_exit_tb(0); /* no chaining */
        ctx->bstate = BS_BRANCH;
        break;
    }
    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}

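/*
 * The (csr >> 5) == 9 test above is a funct7 check: the csr argument
 * carries instruction bits [31:20], so csr >> 5 yields bits [31:25], and
 * SFENCE.VMA is encoded with funct7 = 0b0001001 (9) under the SYSTEM
 * major opcode.
 */
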
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        /* C.LQ(RV128) */
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        /* C.SQ (RV128) */
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1')*/
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1]*/
        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}

static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0*/
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak*/
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0*/
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2)*/
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        /* C.SQSP */
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2)*/
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(Rv64/128) -> sd rs2, offset[8:3](x2)*/
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(env, ctx);
        break;
    case 2:
        decode_RV32_64C2(env, ctx);
        break;
    }
}

static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->pc);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
#ifndef CONFIG_USER_ONLY
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
#endif
        break;
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void decode_opc(CPURISCVState *env, DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!riscv_has_ext(env, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->next_pc = ctx->pc + 2;
            decode_RV32_64C(env, ctx);
        }
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_RV32_64G(env, ctx);
    }
}

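/*
 * Compressed-instruction detection follows the base ISA encoding rule: the
 * two low bits of a 16-bit instruction are 00, 01 or 10, while all 32-bit
 * instructions end in 11.  cpu_ldl_code fetches 4 bytes either way; for a
 * compressed insn the decoder simply ignores the upper half and next_pc
 * advances by 2 instead of 4.
 */
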
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPURISCVState *env = cs->env_ptr;
    DisasContext ctx;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    ctx.pc = pc_start;

    /* once we have GDB, the rest of the translate.c implementation should be
       ready for singlestep */
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.tb = tb;
    ctx.bstate = BS_NONE;
    ctx.flags = tb->flags;
    ctx.mem_idx = tb->flags & TB_FLAGS_MMU_MASK;
    ctx.frm = -1;  /* unknown rounding mode */

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);

    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, ctx.pc);
            ctx.bstate = BS_BRANCH;
            gen_exception_debug();
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            ctx.pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx);
        ctx.pc = ctx.next_pc;

        if (cs->singlestep_enabled) {
            break;
        }
        if (ctx.pc >= next_page_start) {
            break;
        }
        if (tcg_op_buf_full()) {
            break;
        }
        if (num_insns >= max_insns) {
            break;
        }
        if (singlestep) {
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    switch (ctx.bstate) {
    case BS_STOP:
        gen_goto_tb(&ctx, 0, ctx.pc);
        break;
    case BS_NONE: /* handle end of page - DO NOT CHAIN. See gen_goto_tb. */
        tcg_gen_movi_tl(cpu_pc, ctx.pc);
        if (cs->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    case BS_BRANCH: /* ops using BS_BRANCH generate own exit seq */
    default:
        break;
    }
done_generating:
    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start);
        qemu_log("\n");
    }
#endif
}

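/*
 * Translation of a TB stops when an instruction forces an exit (bstate
 * leaves BS_NONE), or at a page boundary, op-buffer exhaustion, the
 * max_insns/icount limit, or singlestep -- whichever comes first.  A
 * minimal sketch of the per-TB control flow, with the loop body elided:
 *
 *     gen_tb_start(tb);
 *     while (ctx.bstate == BS_NONE) {
 *         ... translate one insn, maybe break ...
 *     }
 *     gen_tb_end(tb, num_insns);
 */
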
void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}