/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    target_ulong next_pc;
    uint32_t opcode;
    uint32_t flags;
    uint32_t mem_idx;
    int singlestep_enabled;
    int bstate;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value. */
    int frm;
} DisasContext;

enum {
    BS_NONE = 0, /* When seen outside of translation while loop, indicates
                    need to exit tb due to end of page. */
    BS_STOP = 1, /* Need to exit tb for syscall, sret, etc. */
    BS_BRANCH = 2, /* Need to exit tb for branch, jal, etc. */
};

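/*
 * The load/store funct3 field encodes the access width in bits [1:0]
 * (0 = byte, 1 = half, 2 = word, 3 = double) and zero- vs sign-extension
 * in bit 2.  Entries left at -1 (funct3 == 7 everywhere, plus LD/LWU on
 * RV32) are rejected as illegal instructions.
 */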
/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};

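/*
 * CASE_OP_32_64(X) expands to "case X: case XW" on RV64 (glue pastes the
 * W suffix onto the opcode name), letting one switch arm handle both the
 * XLEN-wide instruction and its 32-bit W-suffixed variant; the W result
 * is sign-extended afterwards.
 */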
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->bstate = BS_BRANCH;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

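    /* Direct TB chaining is only safe within the same guest page: a
       chained jump bypasses the lookup that would notice the page being
       remapped or invalidated.  User-mode has a flat address space, so
       any target may be chained there. */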
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated.  This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write
 * to $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
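    /* When arg1 is negative, its signed value is (unsigned)arg1 - 2^XLEN,
       so the signed x unsigned high part is the unsigned high part minus
       arg2.  The arithmetic shift below yields all-ones exactly when arg1
       is negative, turning the AND into "arg2 or 0". */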
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
                      uint32_t rs2, int rm, uint64_t min)
{
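    /* "min" doubles as the sign-bit mask: INT32_MIN selects single
       precision, INT64_MIN double.  The deposits below copy the low 31
       (or 63) bits from rs1 over rs2's value, so the result takes its
       magnitude from rs1 and its sign from rs2. */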
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                      int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* first, trick to get it to act like working on 32 bits (get rid of
           upper 32, sign extend to fill space) */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
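        /* RISC-V division does not trap: x / 0 returns all ones, and the
           signed overflow case (-2^(XLEN-1) / -1) returns the dividend. */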
        /* Handle by altering args to tcg_gen_div to produce req'd results:
         * For overflow: want source1 in source1 and 1 in source2
         * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                           resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                           resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                           resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                           resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                           source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }

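    /* The OP-32 major opcode (0x3B) differs from OP (0x33) in bit 3, so
       opc & 0x8 identifies the W-suffixed variants whose 32-bit results
       must be sign-extended to XLEN. */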
    if (opc & 0x8) { /* sign extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}

static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLIW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SLLI:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* The shift amount lives in imm[9:0], while imm[10] (bit 30 of
           the instruction) distinguishes SRAI from SRLI. */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions. */
            opc &= ~0x8;
        }
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}

static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
                    target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->pc + imm;
    if (!riscv_has_ext(env, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
    }

    gen_goto_tb(ctx, 0, ctx->pc + imm); /* must use this for safety */
    ctx->bstate = BS_BRANCH;
}

static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
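
        /* JALR discards bit 0 of the computed target (the andi with -2
           above); without the C extension a set bit 1 still leaves the
           target misaligned, which the check below traps. */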
        if (!riscv_has_ext(env, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->next_pc);
        }
        tcg_gen_exit_tb(0);

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->bstate = BS_BRANCH;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rs1, int rs2, target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);
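
    /* The not-taken path falls through to goto_tb slot 1; the taken path
       branches to the label and leaves through slot 0, so both exits of
       the TB remain chainable. */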
    gen_goto_tb(ctx, 1, ctx->next_pc);
    gen_set_label(l); /* branch taken */
    if (!riscv_has_ext(env, RVC) && ((ctx->pc + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->pc + bimm);
    }
    ctx->bstate = BS_BRANCH;
}

static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
                     target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
                      target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}

static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
                        int rs1, target_long imm)
{
    TCGv t0;

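    /* TB_FLAGS_FP_ENABLE mirrors mstatus.FS into the TB flags, so an FP
       access with the unit disabled can be trapped as an illegal
       instruction at translate time. */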
    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
                         int rs2, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

static void gen_atomic(DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    TCGCond cond;
    bool aq, rl;

    /* Extract the size of the atomic operation. */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val. */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
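        /* SC is modelled as a cmpxchg against the value seen by the last
           LR: it succeeds whenever memory still holds that value.  This
           is weaker than a true reservation (an ABA change goes
           undetected) but matches QEMU's usual cmpxchg-based modelling
           of load-locked/store-conditional. */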
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are sequentially
           consistent, so we can ignore AQ/RL along this path. */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure.  However, we still need to
           provide the memory barrier implied by AQ/RL. */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are sequentially
           consistent, so we can ignore AQ/RL along this path. */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    case OPC_RISC_AMOMIN:
        cond = TCG_COND_LT;
        goto do_minmax;
    case OPC_RISC_AMOMAX:
        cond = TCG_COND_GT;
        goto do_minmax;
    case OPC_RISC_AMOMINU:
        cond = TCG_COND_LTU;
        goto do_minmax;
    case OPC_RISC_AMOMAXU:
        cond = TCG_COND_GTU;
        goto do_minmax;
    do_minmax:
        /* Handle the RL barrier.  The AQ barrier is handled along the
           parallel path by the sequentially consistent atomic cmpxchg.
           On the serial path, of course, barriers do not matter. */
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            l1 = gen_new_label();
            gen_set_label(l1);
        } else {
            l1 = NULL;
        }

        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        if ((mop & MO_SSIZE) == MO_SL) {
            /* Sign-extend the register comparison input. */
            tcg_gen_ext32s_tl(src2, src2);
        }
        dat = tcg_temp_local_new();
        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);

        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
            /* Parallel context.  Make this operation atomic by verifying
               that the memory didn't change while we computed the result. */
            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);

            /* If the cmpxchg failed, retry. */
            /* ??? There is an assumption here that this will eventually
               succeed, such that we don't live-lock.  This is not unlike
               a similar loop that the compiler would generate for e.g.
               __atomic_fetch_and_xor, so don't worry about it. */
            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
        } else {
            /* Serial context.  Directly store the result. */
            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
        }
        gen_set_gpr(rd, dat);
        tcg_temp_free(dat);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}

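/* Rounding modes are installed lazily: ctx->frm caches the mode most
   recently written into env->fp_status, so consecutive FP instructions
   with the same static rm field skip the helper call.  An rm of 7
   requests dynamic rounding from the frm CSR, which the helper resolves
   and validates. */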
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
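            /* FMV.X.S moves the raw single-precision bits into the
               integer register; on RV64 the 32-bit value is
               sign-extended into the 64-bit destination, per the spec. */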
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1:
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;

    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
        case 1:
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rd, int rs1, int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->pc);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
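
    /* For ECALL/EBREAK/xRET/WFI/SFENCE the 12-bit CSR address field is
       really funct12, so those instructions are dispatched on the csr
       value below rather than on a CSR number. */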
#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        /* sfence.vma */
        /* TODO: handle ASID specific fences */
        gen_helper_tlb_flush(cpu_env);
        return;
    }
#endif

    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (riscv_has_ext(env, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(0); /* no chaining */
                ctx->bstate = BS_BRANCH;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(0); /* no chaining */
            ctx->bstate = BS_BRANCH;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            gen_helper_tlb_flush(cpu_env);
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
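        /* The CSRR*I forms reuse the rs1 field as a 5-bit zero-extended
           immediate, so the field's value itself is passed to the helper
           through imm_rs1. */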
        tcg_gen_movi_tl(imm_rs1, rs1);
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            return;
        }
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
        tcg_gen_exit_tb(0); /* no chaining */
        ctx->bstate = BS_BRANCH;
        break;
    }

    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}

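/* The RVC decoders below expand each 16-bit instruction into a call to
   the generator for its 32-bit equivalent, so compressed and full-size
   encodings share one implementation. */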
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2] */
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1') */
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        /* C.LQ(RV128) */
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1') */
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1') */
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        /* C.SQ (RV128) */
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1') */
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1') */
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4] */
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12] */
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                          GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0] */
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1] */
        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1] */
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1] */
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}

static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0 */
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak */
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0 */
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2) */
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        /* C.SQSP */
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2) */
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(Rv64/128) -> sd rs2, offset[8:3](x2) */
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(env, ctx);
        break;
    case 2:
        decode_RV32_64C2(env, ctx);
        break;
    }
}

static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->pc);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
#ifndef CONFIG_USER_ONLY
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->next_pc);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
#endif
        break;
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void decode_opc(CPURISCVState *env, DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!riscv_has_ext(env, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->next_pc = ctx->pc + 2;
            decode_RV32_64C(env, ctx);
        }
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_RV32_64G(env, ctx);
    }
}

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPURISCVState *env = cs->env_ptr;
    DisasContext ctx;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    ctx.pc = pc_start;

    /* once we have GDB, the rest of the translate.c implementation should be
       ready for singlestep */
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.tb = tb;
    ctx.bstate = BS_NONE;
    ctx.flags = tb->flags;
    ctx.mem_idx = tb->flags & TB_FLAGS_MMU_MASK;
    ctx.frm = -1;  /* unknown rounding mode */

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);

    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, ctx.pc);
            ctx.bstate = BS_BRANCH;
            gen_exception_debug();
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            ctx.pc += 4;
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx);
        ctx.pc = ctx.next_pc;

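        /* Stop translation at a singlestep request, a guest page
           boundary, a full TCG op buffer, or the instruction budget;
           anything that set bstate already ends the loop. */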
        if (cs->singlestep_enabled) {
            break;
        }
        if (ctx.pc >= next_page_start) {
            break;
        }
        if (tcg_op_buf_full()) {
            break;
        }
        if (num_insns >= max_insns) {
            break;
        }
        if (singlestep) {
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    switch (ctx.bstate) {
    case BS_STOP:
        gen_goto_tb(&ctx, 0, ctx.pc);
        break;
    case BS_NONE: /* handle end of page - DO NOT CHAIN. See gen_goto_tb. */
        tcg_gen_movi_tl(cpu_pc, ctx.pc);
        if (cs->singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    case BS_BRANCH: /* ops using BS_BRANCH generate own exit seq */
    default:
        break;
    }
done_generating:
    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start);
        qemu_log("\n");
    }
#endif
}

void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register.  Do not use it.
     * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
     * registers, unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}