/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp",  "gp",  "tp", "t0", "t1", "t2",
    "s0",   "s1", "a0",  "a1",  "a2", "a3", "a4", "a5",
    "a6",   "a7", "s2",  "s3",  "s4", "s5", "s6", "s7",
    "s8",   "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T6,
    TCG_REG_T5,
    TCG_REG_T4,
    TCG_REG_T3,
    TCG_REG_T2,
    TCG_REG_T1,
    TCG_REG_T0,

    /* Argument registers */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};
#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* we can use a zero immediate as a zero register argument. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}
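/*
 * Note on the constant constraints above: S12 accepts anything that fits a
 * signed 12-bit immediate (-0x800..0x7ff); N12 accepts values whose negation
 * fits, so that an ADDI of the negated constant can stand in for SUB; and
 * M12 (-0xfff..0xfff) deliberately excludes -0x800, accepting only values
 * where both the value and its negation are encodable -- this is what the
 * "rM" operands of add2/sub2 rely on, since tcg_out_addsub2() may negate
 * its constant inputs.
 */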
/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_DIVU = 0x2005033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_REMU = 0x2007033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SUB = 0x40000033,

#if TCG_TARGET_REG_BITS == 64
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_DIVUW = OPC_DIVU,
    OPC_REMUW = OPC_REMU,
    OPC_SLLIW = OPC_SLLI,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRLIW = OPC_SRLI,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;
/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}
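/*
 * S-type (store) immediates are split: imm[11:5] lives in instruction bits
 * 31:25 and imm[4:0] in bits 11:7, which is exactly what the two masked
 * shifts above produce -- (imm & 0xfe0) << 20 moves bits 5..11 up to 25..31,
 * and (imm & 0x1f) << 7 places bits 0..4 at 7..11.
 */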
static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}
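/*
 * B-type (conditional branch) immediates are a scrambled variant of the
 * S-type split: imm[12] goes to bit 31, imm[10:5] to bits 30:25, imm[4:1]
 * to bits 11:8 and imm[11] to bit 7.  Bit 0 of the offset is always zero
 * and is not encoded, giving branches a +/-4 KiB reach.
 */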
static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}
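/*
 * J-type (JAL) immediates are scrambled in the same spirit: imm[20] goes to
 * bit 31, imm[10:1] to bits 30:21, imm[11] to bit 20 and imm[19:12] stays in
 * bits 19:12.  As with branches, bit 0 is implicit, so JAL reaches +/-1 MiB
 * from the current instruction.
 */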
static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}
static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    if (offset == sextreg(offset, 1, 12) << 1) {
        code_ptr[0] |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    if (offset == sextreg(offset, 1, 20) << 1) {
        code_ptr[0] |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        code_ptr[0] |= encode_uimm20(hi);
        code_ptr[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
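/*
 * reloc_call patches an AUIPC + second-instruction pair (ADDI, JALR or LD).
 * Because the low 12 bits are sign-extended by the second instruction,
 * hi = offset - lo pre-compensates: when lo is negative, hi is rounded up
 * by 0x1000 so that AUIPC plus the sign-extended lo still sums to offset.
 * The pair therefore covers roughly +/-2 GiB around the code pointer.
 */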
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn = *code_ptr;
    intptr_t diff;
    bool short_jmp;

    tcg_debug_assert(addend == 0);

    switch (type) {
    case R_RISCV_BRANCH:
        diff = value - (uintptr_t)code_ptr;
        short_jmp = diff == sextreg(diff, 0, 12);
        if (short_jmp) {
            return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
        } else {
            /* Invert the condition */
            insn = insn ^ (1 << 12);
            /* Clear the offset */
            insn &= 0x01fff07f;
            /* Set the offset to the PC + 8 */
            insn |= encode_sbimm12(8);
            code_ptr[0] = insn;

            /* Overwrite the NOP with jal x0,value */
            diff = value - (uintptr_t)(code_ptr + 1);
            insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff);
            code_ptr[1] = insn;
        }
        break;
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }

    return true;
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
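/*
 * The constant pool fallback above emits AUIPC+LD with zero offsets and
 * records an R_RISCV_CALL entry via new_pool_label(); once the pool is laid
 * out next to the code, reloc_call() fills in the pc-relative hi/lo parts so
 * the 64-bit value is simply loaded from memory.
 */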
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}
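/*
 * The base RV64I ISA has no dedicated 8/16-bit extend instructions, so the
 * helpers above use shift pairs (SLLIW/SRLIW or SLLIW/SRAIW).  The two
 * single-instruction cases: ANDI 0xff fits the 12-bit immediate and
 * zero-extends a byte directly, and ADDIW with immediate 0 sign-extends
 * from 32 bits.
 */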
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_addi, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
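/*
 * Carry/borrow for the double-word ops is recovered with an unsigned
 * compare: for addition, (rl < bl) after the add means the low half wrapped;
 * for subtraction, (al < bl) before the subtract means a borrow is needed.
 * TMP0 holds that 0/1 value and is folded into the high half, which was
 * computed first into TMP1 so that the inputs may overlap rl.
 */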
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BLT,  false },
    [TCG_COND_GE]  = { OPC_BGE,  false },
    [TCG_COND_LE]  = { OPC_BGE,  true  },
    [TCG_COND_GT]  = { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    if (l->has_value) {
        intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr);
        if (diff == sextreg(diff, 0, 12)) {
            tcg_out_opc_branch(s, op, arg1, arg2, diff);
        } else {
            /* Invert the conditional branch. */
            tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8);
            tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4);
        }
    } else {
        tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
        tcg_out_opc_branch(s, op, arg1, arg2, 0);
        /* NOP to allow patching later */
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}
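/*
 * Branch emission strategy: a resolved label within +/-4 KiB gets a single
 * B-type branch; one further away inverts the condition to skip over an
 * unconditional JAL (+/-1 MiB reach).  Unresolved labels emit the branch
 * plus a NOP so that patch_reloc() can later rewrite the pair into either
 * form.
 */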
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    g_assert_not_reached();
}
static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target);
    tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1);
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset);
}
static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    if (offset == sextreg(offset, 1, 20) << 1) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 ||
               offset == sextreg(offset, 1, 31) << 1) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    /* FENCE predecessor bits: PR = bit 25, PW = bit 24;
       successor bits: SR = bit 21, SW = bit 20. */
    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
#if defined(CONFIG_SOFTMMU)
#include "../tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
#endif
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
               : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
    /* NOP to allow patching later */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);

    /* TLB Hit - translate address using addend. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
}
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
                     (intptr_t) s->code_ptr, 0)) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
                     (intptr_t) s->code_ptr, 0)) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);

    tcg_out_goto(s, l->raddr);
    return true;
}
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_Q:
        /* Prefer to load from offset 0 first, but allow for overlap. */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    const MemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}
static tcg_insn_unit *tb_ret_addr;
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg *args, const int *const_args)
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        if (a0 == 0) {
            tcg_out_call_int(s, s->code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r
        = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r
        = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef rZ_r
        = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef rZ_rZ
        = { .args_ct_str = { "rZ", "rZ" } };
    static const TCGTargetOpDef rZ_rZ_rZ_rZ
        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_r_ri
        = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI
        = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_rZ_rN
        = { .args_ct_str = { "r", "rZ", "rN" } };
    static const TCGTargetOpDef r_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_L
        = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef r_r_L
        = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef r_L_L
        = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef LZ_L
        = { .args_ct_str = { "LZ", "L" } };
    static const TCGTargetOpDef LZ_L_L
        = { .args_ct_str = { "LZ", "L", "L" } };
    static const TCGTargetOpDef LZ_LZ_L
        = { .args_ct_str = { "LZ", "LZ", "L" } };
    static const TCGTargetOpDef LZ_LZ_L_L
        = { .args_ct_str = { "LZ", "LZ", "L", "L" } };
    static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rI;

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return &r_rZ_rN;

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return &r_rZ_rZ;

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &rZ_rZ;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return &r_r_rZ_rZ_rM_rM;

    case INDEX_op_brcond2_i32:
        return &rZ_rZ_rZ_rZ;

    case INDEX_op_setcond2_i32:
        return &r_rZ_rZ_rZ_rZ;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? &r_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
               : &r_r_L_L;
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? &LZ_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
               : &LZ_LZ_L_L;

    default:
        return NULL;
    }
}
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    tb_ret_addr = s->code_ptr;
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}
static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}