/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    TCGv target_pc = dest_gpr(ctx, a->rd);
    gen_pc_plus_diff(target_pc, ctx, a->imm);
    gen_set_gpr(ctx, a->rd, target_pc);
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();
    TCGv succ_pc = dest_gpr(ctx, a->rd);

    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, a->rd, succ_pc);

    tcg_gen_mov_tl(cpu_pc, target_pc);
    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

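/*
 * Helpers for RV128: comparisons on a 128-bit register pair are lowered
 * onto 64-bit TCG operations.  gen_compare_i128() leaves a value in 'rl'
 * whose relation to zero, tested under the returned condition, is
 * equivalent to the requested comparison of {ah:al} against {bh:bl};
 * 'bz' indicates that the second operand is known to be zero, which
 * allows cheaper sequences for the EQ/NE and GE/LT cases.
 */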
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
            break;
        }

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

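/*
 * Common code for conditional branches: compare rs1 and rs2 (via the i128
 * helper above when XLEN is 128), branch to a "taken" label, and end the
 * TB with two goto_tb exits -- the fall-through exit first, then the
 * branch target.  Without the C/Zca extension, a taken branch to a target
 * that is not 4-byte aligned raises an instruction address misaligned
 * exception instead of chaining to the target TB.
 */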
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    target_ulong orig_pc_save = ctx->pc_save;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);
    ctx->pc_save = orig_pc_save;

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

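/*
 * Loads and stores.  The TL variants handle XLEN <= 64; the i128 variants
 * below split a 128-bit access into two little-endian 64-bit accesses.
 * When the Ztso extension is enabled, an extra TCG barrier gives loads
 * acquire semantics and stores release semantics.
 */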
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    bool out;

    decode_save_opc(ctx);
    if (get_xl(ctx) == MXL_RV128) {
        out = gen_load_i128(ctx, a, memop);
    } else {
        out = gen_load_tl(ctx, a, memop);
    }

    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    return out;
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

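/*
 * Store path: mirrors the load path above, except that the Ztso barrier
 * (release) is emitted before the memory access rather than after it.
 */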
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    decode_save_opc(ctx);
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

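/*
 * 128-bit add-immediate: the immediate is materialized as a 128-bit value
 * by sign-extending it into the high word (-(imm < 0) is all-ones for a
 * negative immediate, zero otherwise), then added with the carry out of
 * the low word via tcg_gen_add2_tl().
 */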
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml = tcg_constant_tl(imm);
    TCGv immh = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

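/*
 * Immediate shifts on a 128-bit register pair.  The shift amount is a
 * translation-time constant here, so the two cases -- shamt >= 64, where
 * the result comes entirely from the other half, and shamt < 64, where
 * extract2 stitches bits from both halves together -- can be selected
 * while translating rather than at run time.
 */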
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

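/*
 * Variable-amount shifts on a 128-bit register pair.  The amount (expected
 * to have been reduced modulo 128 by the common shift helper) is only
 * known at run time, so both candidate results are computed and the final
 * value is selected with movcond on bit 6 of the amount ('hs', i.e.
 * whether shamt >= 64).  The movcond on the (negated) amount against zero
 * discards the cross-half term for shamt == 0, where a 64-bit shift by
 * (-shamt & 63) == 0 would otherwise pull in the whole other half.
 * gen_srl_i128 and gen_sra_i128 below follow the same pattern.
 */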
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

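/*
 * *W variants (and, on RV128, *D variants): narrow the operation length
 * for this instruction by setting ctx->ol, then reuse the common
 * arithmetic/shift generators; the result is written back sign-extended
 * to the full register width when the operand length is narrower than
 * XLEN.
 */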
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

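/*
 * Hint and fence instructions.  PAUSE and FENCE.I have no work to do in
 * TCG beyond ending the current translation block (and both are rejected
 * when Zihintpause / Zifencei is not present); FENCE is lowered to a full
 * TCG memory barrier.
 */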
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_zifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

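/*
 * CSR accesses.  All forms funnel into one of three helper-call wrappers:
 * do_csrr (read only), do_csrw (write only) and do_csrrw (read/modify/write
 * under a mask), plus i128 counterparts that return the high half of the
 * result through env->retxh.  Every CSR access then exits to the main loop,
 * since the helper may have changed CPU state that affects execution.
 */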
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, tcg_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(tcg_env, csr, src);
    return do_csr_post(ctx);
}

static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, tcg_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, tcg_env, csr);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

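/*
 * Per-instruction CSR translators.  The rd == 0 / rs1 == 0 special cases
 * below implement the Zicsr rules that CSRRW with rd == x0 must not read
 * the CSR and CSRRS/CSRRC with rs1 == x0 must not write it; the mask
 * passed to do_csrrw selects between "write src", "set bits" and
 * "clear bits" behaviour in the helper.
 */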
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}