/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T6,
    TCG_REG_T5,
    TCG_REG_T4,
    TCG_REG_T3,
    TCG_REG_T2,
    TCG_REG_T1,
    TCG_REG_T0,

    /* Argument registers */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

#if defined(__riscv_arch_test) && defined(__riscv_zba)
# define have_zba true
#else
static bool have_zba;
#endif

#if defined(__riscv_arch_test) && defined(__riscv_zbb)
# define have_zbb true
#else
static bool have_zbb;
#endif

#if defined(__riscv_arch_test) && defined(__riscv_zicond)
# define have_zicond true
#else
static bool have_zicond;
#endif

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
#define TCG_CT_CONST_J12  0x1000

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

#define sextreg  sextract64
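
/*
 * The one-letter operand constraints used in tcg_target_op_def() below
 * (rI, rN, rM, rJ, rZ) are expected to map onto these flags via
 * tcg-target-con-str.h: 'I' -> S12, 'N' -> N12, 'M' -> M12, 'J' -> J12,
 * 'Z' -> the constant zero.  The mapping here is inferred from how the
 * letters are used by the opcodes below.
 */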
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the isa field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2 and movcond, which may need the negative value,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    /*
     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
     * Used to map ANDN back to ANDI, etc.
     */
    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
        return 1;
    }
    return 0;
}
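
/*
 * Worked J12 example: for an andc whose constant operand is
 * val = 0xffffffffffffff00, ~val = 0xff fits the signed 12-bit field,
 * so tcg_out_op() below can emit "andi rd, rs, 0xff" directly instead
 * of materializing val in a register for ANDN.
 */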
/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */

    /* Zba: Bit manipulation extension, address generation */
    OPC_ADD_UW = 0x0800003b,

    /* Zbb: Bit manipulation extension, basic bit manipulation */
    OPC_ANDN = 0x40007033,
    OPC_CLZ = 0x60001013,
    OPC_CLZW = 0x6000101b,
    OPC_CPOP = 0x60201013,
    OPC_CPOPW = 0x6020101b,
    OPC_CTZ = 0x60101013,
    OPC_CTZW = 0x6010101b,
    OPC_ORN = 0x40006033,
    OPC_REV8 = 0x6b805013,
    OPC_ROL = 0x60001033,
    OPC_ROLW = 0x6000103b,
    OPC_ROR = 0x60005033,
    OPC_RORW = 0x6000503b,
    OPC_RORI = 0x60005013,
    OPC_RORIW = 0x6000501b,
    OPC_SEXT_B = 0x60401013,
    OPC_SEXT_H = 0x60501013,
    OPC_XNOR = 0x40004033,
    OPC_ZEXT_H = 0x0800403b,

    /* Zicond: integer conditional operations */
    OPC_CZERO_EQZ = 0x0e005033,
    OPC_CZERO_NEZ = 0x0e007033,
} RISCVInsn;
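
/*
 * These values are the instruction word with all register fields zero.
 * For example, OPC_SUB = 0x40000033 decomposes into the R-type fields
 * funct7 = 0x20 (bits 31:25), funct3 = 0 (bits 14:12) and opcode = 0x33
 * (bits 6:0); rd/rs1/rs2 are OR'd in later by encode_r() below.
 */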
/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
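
/*
 * Worked example for the SB-type scatter: a branch displacement of
 * imm = 0x1e (+30 bytes) has imm[12] = 0, imm[11] = 0, imm[10:5] = 0 and
 * imm[4:1] = 0xf, so encode_sbimm12() returns 0x1e << 7 = 0xf00, placing
 * the four low displacement bits into insn[11:8].
 */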
/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
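
/*
 * Worked example for reloc_call, which patches an AUIPC + I-type pair:
 * for offset = 0x12345fff, lo = sextreg(offset, 0, 12) = -1 and
 * hi = offset - lo = 0x12346000, so AUIPC receives the upper 20 bits of
 * hi and the following insn adds -1; hi + lo recombines exactly.  The
 * "offset == hi + lo" test fails only when the 64-bit offset cannot be
 * represented by the 32-bit hi/lo pair.
 */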
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
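
/*
 * Worked example for the LUI+ADDIW path: val = 0x12345aaa gives
 * lo = sextreg(val, 0, 12) = -0x556 (low 12 bits, sign-extended) and
 * hi = val - lo = 0x12346000, emitting
 *     lui   rd, 0x12346000
 *     addiw rd, rd, -0x556
 * The borrow folded into hi compensates for the sign-extension that the
 * 12-bit ADDIW immediate undergoes.
 */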
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
    }
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (have_zba) {
        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
    }
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
    }
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
    }
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
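
/*
 * RISC-V has no condition codes, so the carry is recovered
 * arithmetically above: after "add rl, al, bl", "sltu tmp0, rl, bl" is
 * 1 exactly when the addition wrapped, and for subtraction
 * "sltu tmp0, al, bl" is 1 exactly when a borrow occurs.  That bit is
 * then folded into the high half through th.
 */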
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
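
/*
 * tcg_out_setcond_int() returns a register number with these flags OR'd
 * in above the register field (register numbers are all below
 * SETCOND_INV, i.e. < 32).  For example, TCG_REG_TMP0 | SETCOND_NEZ
 * means "TMP0 holds a zero/non-zero value; compare it against zero to
 * obtain the boolean", and additionally OR'ing in SETCOND_INV inverts
 * the sense of the comparison.
 */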
static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is constrained to signed 12-bit, and 0x800 is representable in the
         * temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            tcg_debug_assert(arg2 <= 0x7ff);
            if (++arg2 == 0x800) {
                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
                arg2 = TCG_REG_TMP0;
                c2 = false;
            }
        } else {
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else {
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LT:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LTU:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags;
    TCGReg tmp;

    /* For LT/GE comparison against 0, replicate the sign bit. */
    if (c2 && arg2 == 0) {
        switch (cond) {
        case TCG_COND_GE:
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
            arg1 = ret;
            /* fall through */
        case TCG_COND_LT:
            tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
            return;
        default:
            break;
        }
    }

    tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
    tmp = tmpflags & ~SETCOND_FLAGS;

    /* If intermediate result is zero/non-zero: test != 0. */
    if (tmpflags & SETCOND_NEZ) {
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
        tmp = ret;
    }

    /* Produce the 0/-1 result. */
    if (tmpflags & SETCOND_INV) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
    } else {
        tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
    }
}

static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
                                   int val1, bool c_val1,
                                   int val2, bool c_val2)
{
    if (val1 == 0) {
        if (c_val2) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
            val2 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
        return;
    }

    if (val2 == 0) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
            val1 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
        return;
    }

    if (c_val2) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
        } else {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
        return;
    }

    if (c_val1) {
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
        return;
    }

    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
}
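
/*
 * Sketch of the final register/register case: with test_ne holding the
 * comparison result, "ret = test ? val1 : val2" is composed as
 *     czero.nez tmp1, val2, test_ne    # tmp1 = test ? 0 : val2
 *     czero.eqz tmp0, val1, test_ne    # tmp0 = test ? val1 : 0
 *     or        ret, tmp0, tmp1
 * Exactly one of tmp0/tmp1 is zero, so the OR selects the live value.
 */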
static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val, bool c_val)
{
    RISCVInsn op;
    int disp = 8;

    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
    op = tcg_brcond_to_riscv[cond].op;
    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
    } else {
        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
    }
    if (c_val) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
    } else {
        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
    }
}

static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val1, bool c_val1,
                                int val2, bool c_val2)
{
    TCGReg tmp;

    /* TCG optimizer reorders to prefer ret matching val2. */
    if (!c_val2 && ret == val2) {
        cond = tcg_invert_cond(cond);
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
        return;
    }

    if (!c_val1 && ret == val1) {
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
        return;
    }

    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
    if (c_val1) {
        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
    }
    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg cmp1, int cmp2, bool c_cmp2,
                            TCGReg val1, bool c_val1,
                            TCGReg val2, bool c_val2)
{
    int tmpflags;
    TCGReg t;

    if (!have_zicond && (!c_cmp2 || cmp2 == 0)) {
        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
                            val1, c_val1, val2, c_val2);
        return;
    }

    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
    t = tmpflags & ~SETCOND_FLAGS;

    if (have_zicond) {
        if (tmpflags & SETCOND_INV) {
            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
        } else {
            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
        }
    } else {
        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
                            val1, c_val1, val2, c_val2);
    }
}

static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
    tcg_out_opc_imm(s, insn, ret, src1, 0);

    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
        /*
         * The requested zero result does not match the insn, so adjust.
         * Note that constraints put 'ret' in a new register, so the
         * computation above did not clobber either 'src1' or 'src2'.
         */
        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
                        src2, c_src2, ret, false);
    }
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: offset fits in a signed 20-bit field (+-512 KiB) */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: offset fits in a signed 32-bit field */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: load the full 64-bit absolute address */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
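
/*
 * The constants above set the FENCE predecessor/successor bits:
 * pred.R = insn[25], pred.W = insn[24], succ.R = insn[21] and
 * succ.W = insn[20].  For example TCG_MO_LD_LD maps to "fence r,r",
 * i.e. (1 << 25) | (1 << 21) = 0x02200000.
 */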
/*
 * Load/store and TLB
 */

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We have three temps, we might as well expose them. */
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @pbase with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    TCGAtomAlign aa;
    unsigned a_mask;

    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1u << aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        unsigned s_mask = (1u << s_bits) - 1;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
        int compare_mask;
        TCGReg addr_adj;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                        s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address.  For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        addr_adj = addr_reg;
        if (a_mask < s_mask) {
            addr_adj = TCG_REG_TMP0;
            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
                            addr_adj, addr_reg, s_mask - a_mask);
        }
        compare_mask = s->page_mask | a_mask;
        if (compare_mask == sextreg(compare_mask, 0, 12)) {
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
        } else {
            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
        }

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        /* TLB Hit - translate address using addend. */
        if (addr_type != TCG_TYPE_I32) {
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
        } else if (have_zba) {
            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
                            addr_reg, TCG_REG_TMP2);
        } else {
            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
                            TCG_REG_TMP0, TCG_REG_TMP2);
        }
        *pbase = TCG_REG_TMP0;
    } else {
        TCGReg base;

        if (a_mask) {
            ldst = new_ldst_label(s);
            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            /* We are expecting alignment max 7, so we can always use andi. */
            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        if (guest_base != 0) {
            base = TCG_REG_TMP0;
            if (addr_type != TCG_TYPE_I32) {
                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else if (have_zba) {
                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
                                TCG_GUEST_BASE_REG);
            } else {
                tcg_out_ext32u(s, base, addr_reg);
                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
            }
        } else if (addr_type != TCG_TYPE_I32) {
            base = addr_reg;
        } else {
            base = TCG_REG_TMP0;
            tcg_out_ext32u(s, base, addr_reg);
        }
        *pbase = base;
    }

    return ldst;
}
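
/*
 * Sketch of the softmmu fast path emitted above (64-bit guest address,
 * aligned access, andi form of the compare mask):
 *     ld   tmp0, mask_ofs(env)             # TLB mask
 *     ld   tmp1, table_ofs(env)            # TLB table base
 *     srli tmp2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *     and  tmp2, tmp2, tmp0                # index into the table
 *     add  tmp2, tmp2, tmp1                # &table[index]
 *     andi tmp1, addr, page_mask | a_mask  # masked address to compare
 *     ld   tmp0, addr_read(tmp2)           # or addr_write for stores
 *     ld   tmp2, addend(tmp2)
 *     bne  tmp0, tmp1, slow_path
 *     add  tmp0, addr, tmp2                # host address for the access
 */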
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* fall through */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
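
/*
 * The 4-byte slot emitted by tcg_out_goto_tb() above thus toggles
 * between two forms:
 *     jal  zero, disp    # direct branch, target within +-512 KiB
 *     nop                # fall through to the indirect ld/jalr pair
 * qatomic_set() plus flush_idcache_range() keeps retargeting safe with
 * respect to concurrently executing threads.
 */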
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
        }
        break;
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
        }
        break;
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        break;
    case INDEX_op_bswap32_i32:
        a2 = 0;
        /* fall through */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        if (a2 & TCG_BSWAP_OZ) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
        }
        break;
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap16_i32:
        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
        if (a2 & TCG_BSWAP_OZ) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
        }
        break;

    case INDEX_op_ctpop_i32:
        tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
        break;

    case INDEX_op_clz_i32:
        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
        break;
    case INDEX_op_clz_i64:
        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
                        args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_N1_I2(r, r, rM);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, rI, rM, rM);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
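
/*
 * Illustrative arithmetic (the specific values are assumptions, not
 * definitions from this file): with REG_SIZE = 8, SAVE_SIZE = 13 * 8 =
 * 104, and assuming TCG_STATIC_CALL_ARGS_SIZE = 128, TEMP_SIZE =
 * 128 * 8 = 1024 and 16-byte stack alignment, FRAME_SIZE =
 * (128 + 1024 + 104 + 15) & -16 = 1264, well within the 0x7ff limit
 * asserted above that keeps the ADDI immediates in range.
 */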
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulty instruction */
    ucontext_t *uc = (ucontext_t *)data;
    uc->uc_mcontext.__gregs[REG_PC] += 4;

    got_sigill = 1;
}

static void tcg_target_detect_isa(void)
{
#if !defined(have_zba) || !defined(have_zbb) || !defined(have_zicond)
    /*
     * TODO: It is expected that this will be determinable via
     * linux riscv_hwprobe syscall, not yet merged.
     * In the meantime, test via sigill.
     */

    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

#ifndef have_zba
    /* Probe for Zba: add.uw zero,zero,zero. */
    got_sigill = 0;
    asm volatile(".insn r 0x3b, 0, 0x04, zero, zero, zero" : : : "memory");
    have_zba = !got_sigill;
#endif

#ifndef have_zbb
    /* Probe for Zbb: andn zero,zero,zero. */
    got_sigill = 0;
    asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero" : : : "memory");
    have_zbb = !got_sigill;
#endif

#ifndef have_zicond
    /* Probe for Zicond: czero.eqz zero,zero,zero. */
    got_sigill = 0;
    asm volatile(".insn r 0x33, 5, 0x07, zero, zero, zero" : : : "memory");
    have_zicond = !got_sigill;
#endif

    sigaction(SIGILL, &sa_old, NULL);
#endif
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}