2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
6 * Based on tcg/riscv/tcg-target.c.inc
8 * Copyright (c) 2018 SiFive, Inc
9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
11 * Copyright (c) 2008 Fabrice Bellard
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to deal
15 * in the Software without restriction, including without limitation the rights
16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 * copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
32 #include "../tcg-ldst.c.inc"
34 #ifdef CONFIG_DEBUG_TCG
35 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
57 "r21", /* reserved in the LP64* ABI, hence no ABI name */
71 static const int tcg_target_reg_alloc_order[] = {
72 /* Registers preserved across calls */
73 /* TCG_REG_S0 reserved for TCG_AREG0 */
84 /* Registers (potentially) clobbered across calls */
95 /* Argument registers, opposite order of allocation. */
106 static const int tcg_target_call_iarg_regs[] = {
117 static const int tcg_target_call_oarg_regs[] = {
122 #ifndef CONFIG_SOFTMMU
123 #define USE_GUEST_BASE (guest_base != 0)
124 #define TCG_GUEST_BASE_REG TCG_REG_S1
127 #define TCG_CT_CONST_ZERO 0x100
128 #define TCG_CT_CONST_S12 0x200
129 #define TCG_CT_CONST_N12 0x400
130 #define TCG_CT_CONST_U12 0x800
131 #define TCG_CT_CONST_C12 0x1000
132 #define TCG_CT_CONST_WSZ 0x2000
134 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
136 * For softmmu, we need to avoid conflicts with the first 5
137 * argument registers to call the helper. Some of these are
138 * also used for the tlb lookup.
140 #ifdef CONFIG_SOFTMMU
141 #define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5)
143 #define SOFTMMU_RESERVE_REGS 0
147 static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
149 return sextract64(val, pos, len);
152 /* test if a constant matches the constraint */
153 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
155 if (ct & TCG_CT_CONST) {
158 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
161 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
164 if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
167 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
170 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
173 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
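/*
 * Examples of values accepted by each constraint:
 *   S12: -0x800..0x7ff, directly usable as a sign-extended 12-bit imm;
 *   N12: -0x7ff..0x800, i.e. the negation fits in simm12, so that
 *        "sub rd, rs, C" can be emitted as "addi rd, rs, -C";
 *   U12: 0..0xfff, the zero-extended imm of andi/ori/xori;
 *   C12: -0x1000..-1, i.e. ~C fits in uimm12, letting andc/orc with a
 *        constant be emitted as andi/ori of the complement;
 *   WSZ: exactly the operation width (32 or 64), used by clz/ctz so the
 *        fallback value can be handled by the bare instruction.
 */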
184 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
185 * complicated; a whopping stack machine is needed to stuff the fields, at
186 * the very least one SOP_PUSH and one SOP_POP (of the correct format) are
189 * Hence, define our own simpler relocation types. Numbers are chosen so as
190 * not to collide with potential future additions to the true ELF relocation
194 /* Field Sk16, shifted right by 2; suitable for conditional jumps */
195 #define R_LOONGARCH_BR_SK16 256
196 /* Field Sd10k16, shifted right by 2; suitable for B and BL */
197 #define R_LOONGARCH_BR_SD10K16 257
199 static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
201 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
202 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
204 tcg_debug_assert((offset & 3) == 0);
206 if (offset == sextreg(offset, 0, 16)) {
207 *src_rw = deposit64(*src_rw, 10, 16, offset);
214 static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
215 const tcg_insn_unit *target)
217 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
218 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
220 tcg_debug_assert((offset & 3) == 0);
222 if (offset == sextreg(offset, 0, 26)) {
223 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
224 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
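/*
 * Both relocations record the branch offset in 4-byte units, as noted
 * above.  For example, a conditional branch to a target 0x2000 bytes
 * ahead stores 0x800 into the sk16 field (bits 25:10), giving a
 * +/- 128KiB reach; B and BL get their 26-bit offset split into the
 * k16 field (low 16 bits, bits 25:10) and the d10 field (high 10 bits,
 * bits 9:0), for a +/- 128MiB reach.
 */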
231 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
232 intptr_t value, intptr_t addend)
234 tcg_debug_assert(addend == 0);
236 case R_LOONGARCH_BR_SK16:
237 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
238 case R_LOONGARCH_BR_SD10K16:
239 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
241 g_assert_not_reached();
245 #include "tcg-insn-defs.c.inc"
251 static void tcg_out_mb(TCGContext *s, TCGArg a0)
253 /* Baseline LoongArch only has the full barrier, unfortunately. */
254 tcg_out_opc_dbar(s, 0);
257 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
266 * Conventional register-register move used in LoongArch is
267 * `or dst, src, zero`.
269 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
272 g_assert_not_reached();
277 static bool imm_part_needs_loading(bool high_bits_are_ones,
278 tcg_target_long part)
280 if (high_bits_are_ones) {
287 /* Loads a 32-bit immediate into rd, sign-extended. */
288 static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
290 tcg_target_long lo = sextreg(val, 0, 12);
291 tcg_target_long hi12 = sextreg(val, 12, 20);
293 /* Single-instruction cases. */
295 /* val fits in simm12: addi.w rd, zero, val */
296 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
299 if (0x800 <= val && val <= 0xfff) {
300 /* val fits in uimm12: ori rd, zero, val */
301 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
305 /* High bits must be set; load with lu12i.w + optional ori. */
306 tcg_out_opc_lu12i_w(s, rd, hi12);
308 tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
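/*
 * For example (illustrative values):
 *   0x7ff      ->  addi.w  rd, zero, 0x7ff
 *   0x800      ->  ori     rd, zero, 0x800
 *   0x12345678 ->  lu12i.w rd, 0x12345
 *                  ori     rd, rd, 0x678
 * lu12i.w sign-extends bit 31 into the upper half of the register, so
 * negative 32-bit values end up properly sign-extended as well.
 */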
312 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
316 * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
317 * with dedicated instructions for filling the respective bitfields
 *
 *   bits 63..52:  hi52  (filled by cu52i.d)
 *   bits 51..32:  hi32  (filled by cu32i.d)
 *   bits 31..12:  hi12  (filled by lu12i.w)
 *   bits 11..0:   lo    (filled by ori / addi.w)
 *
331 * Check if val belongs to one of the several fast cases before falling
332 * back to the slow path.
336 tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
337 tcg_target_long hi32, hi52;
338 bool rd_high_bits_are_ones;
340 /* Value fits in signed i32. */
341 if (type == TCG_TYPE_I32 || val == (int32_t)val) {
342 tcg_out_movi_i32(s, rd, val);
346 /* PC-relative cases. */
347 pc_offset = tcg_pcrel_diff(s, (void *)val);
348 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
349 /* Single pcaddu2i. */
350 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
354 if (pc_offset == (int32_t)pc_offset) {
355 /* Offset within 32 bits; load with pcalau12i + ori. */
356 val_lo = sextreg(val, 0, 12);
358 pc_hi = (val - pc_offset) >> 12;
359 offset_hi = val_hi - pc_hi;
361 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
362 tcg_out_opc_pcalau12i(s, rd, offset_hi);
364 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
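/*
 * pcalau12i yields the 4KiB-aligned address pc_page + (si20 << 12), so
 * with offset_hi being the difference between the target's and the
 * current pc's page numbers, rd now holds val & ~0xfff and the ori
 * above fills in the low 12 bits.
 */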
369 hi32 = sextreg(val, 32, 20);
370 hi52 = sextreg(val, 52, 12);
372 /* Single cu52i.d case. */
373 if (ctz64(val) >= 52) {
374 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
378 /* Slow path. Initialize the low 32 bits, then concat high bits. */
379 tcg_out_movi_i32(s, rd, val);
380 rd_high_bits_are_ones = (int32_t)val < 0;
382 if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) {
383 tcg_out_opc_cu32i_d(s, rd, hi32);
384 rd_high_bits_are_ones = hi32 < 0;
387 if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) {
388 tcg_out_opc_cu52i_d(s, rd, rd, hi52);
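/*
 * For example (illustrative value), 0x123456789abcdef0 takes the full
 * four-insn sequence:
 *   lu12i.w  rd, 0x9abcd      # rd = 0xffffffff9abcd000
 *   ori      rd, rd, 0xef0    # rd = 0xffffffff9abcdef0
 *   cu32i.d  rd, 0x45678      # rd = 0x000456789abcdef0
 *   cu52i.d  rd, rd, 0x123    # rd = 0x123456789abcdef0
 */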
392 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
394 tcg_out_opc_andi(s, ret, arg, 0xff);
397 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
399 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
402 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
404 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
407 static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
409 tcg_out_opc_sext_b(s, ret, arg);
412 static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
414 tcg_out_opc_sext_h(s, ret, arg);
417 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
419 tcg_out_opc_addi_w(s, ret, arg, 0);
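/*
 * There is no dedicated sign-extension instruction for the low 32 bits;
 * addi.w with a zero immediate computes the 32-bit result and writes it
 * back sign-extended, which is exactly ext32s.  The unsigned variants
 * above clear the high bits with andi/bstrpick instead.
 */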
422 static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
423 TCGReg a0, TCGReg a1, TCGReg a2,
424 bool c2, bool is_32bit)
428 * Fast path: semantics already satisfied due to constraint and
429 * insn behavior, single instruction is enough.
431 tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
432 /* all clz/ctz insns belong to DJ-format */
433 tcg_out32(s, encode_dj_insn(opc, a0, a1));
437 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
438 /* a0 = a1 ? REG_TMP0 : a2 */
439 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
440 tcg_out_opc_masknez(s, a0, a2, a1);
441 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
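/*
 * The fallback above builds the select "a0 = a1 ? TMP0 : a2" out of
 * LoongArch's mask instructions:
 *   maskeqz rd, rj, rk:  rd = (rk == 0) ? 0 : rj
 *   masknez rd, rj, rk:  rd = (rk != 0) ? 0 : rj
 * TMP0 thus keeps the clz/ctz result only for a1 != 0, a0 keeps the
 * fallback a2 only for a1 == 0, and the final OR merges the two.
 */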
444 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
445 TCGReg arg1, TCGReg arg2, bool c2)
450 tcg_debug_assert(arg2 == 0);
458 tcg_out_opc_sub_d(s, ret, arg1, arg2);
461 tcg_out_opc_sltui(s, ret, tmp, 1);
467 tcg_out_opc_sub_d(s, ret, arg1, arg2);
470 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
473 tcg_out_opc_slt(s, ret, arg1, arg2);
476 tcg_out_opc_slt(s, ret, arg1, arg2);
477 tcg_out_opc_xori(s, ret, ret, 1);
480 tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false);
483 tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false);
486 tcg_out_opc_sltu(s, ret, arg1, arg2);
489 tcg_out_opc_sltu(s, ret, arg1, arg2);
490 tcg_out_opc_xori(s, ret, ret, 1);
493 tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false);
496 tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false);
499 g_assert_not_reached();
508 static const struct {
511 } tcg_brcond_to_loongarch[] = {
512 [TCG_COND_EQ] = { OPC_BEQ, false },
513 [TCG_COND_NE] = { OPC_BNE, false },
514 [TCG_COND_LT] = { OPC_BGT, true },
515 [TCG_COND_GE] = { OPC_BLE, true },
516 [TCG_COND_LE] = { OPC_BLE, false },
517 [TCG_COND_GT] = { OPC_BGT, false },
518 [TCG_COND_LTU] = { OPC_BGTU, true },
519 [TCG_COND_GEU] = { OPC_BLEU, true },
520 [TCG_COND_LEU] = { OPC_BLEU, false },
521 [TCG_COND_GTU] = { OPC_BGTU, false }
524 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
525 TCGReg arg2, TCGLabel *l)
527 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
529 tcg_debug_assert(op != 0);
531 if (tcg_brcond_to_loongarch[cond].swap) {
537 /* all conditional branch insns belong to DJSk16-format */
538 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
539 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
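/*
 * For example, brcond(LT, arg1, arg2) is emitted as a BGT with the
 * operands exchanged by the swap above: it branches when arg2 > arg1,
 * which is exactly arg1 < arg2.
 */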
542 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
544 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
545 ptrdiff_t offset = tcg_pcrel_diff(s, arg);
547 tcg_debug_assert((offset & 3) == 0);
548 if (offset == sextreg(offset, 0, 28)) {
549 /* short jump: +/- 128MiB */
551 tcg_out_opc_b(s, offset >> 2);
553 tcg_out_opc_bl(s, offset >> 2);
555 } else if (offset == sextreg(offset, 0, 38)) {
556 /* long jump: +/- 128GiB */
557 tcg_target_long lo = sextreg(offset, 0, 18);
558 tcg_target_long hi = offset - lo;
559 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
560 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
562 /* far jump: 64-bit */
563 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
564 tcg_target_long hi = (tcg_target_long)arg - lo;
565 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
566 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
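/*
 * The long-jump case splits the offset as offset = hi + lo, where lo is
 * the sign-extended low 18 bits and hi is a multiple of 2^18:
 * pcaddu18i sets TMP0 = pc + hi, and jirl (whose 16-bit immediate is in
 * 4-byte units) adds the remaining lo, reaching pc + offset while
 * writing the link register (or zero, for tail calls).
 */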
570 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
572 tcg_out_call_int(s, arg, false);
579 static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
580 TCGReg addr, intptr_t offset)
582 intptr_t imm12 = sextreg(offset, 0, 12);
584 if (offset != imm12) {
585 intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
587 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
588 imm12 = sextreg(diff, 0, 12);
589 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
591 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
592 if (addr != TCG_REG_ZERO) {
593 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
611 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
614 g_assert_not_reached();
618 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
619 TCGReg arg1, intptr_t arg2)
621 bool is_32bit = type == TCG_TYPE_I32;
622 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
625 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
626 TCGReg arg1, intptr_t arg2)
628 bool is_32bit = type == TCG_TYPE_I32;
629 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
632 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
633 TCGReg base, intptr_t ofs)
636 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
643 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
646 #if defined(CONFIG_SOFTMMU)
648 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
649 * MemOpIdx oi, uintptr_t ra)
651 static void * const qemu_ld_helpers[4] = {
652 [MO_8] = helper_ret_ldub_mmu,
653 [MO_16] = helper_le_lduw_mmu,
654 [MO_32] = helper_le_ldul_mmu,
655 [MO_64] = helper_le_ldq_mmu,
659 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
660 * uintxx_t val, MemOpIdx oi,
663 static void * const qemu_st_helpers[4] = {
664 [MO_8] = helper_ret_stb_mmu,
665 [MO_16] = helper_le_stw_mmu,
666 [MO_32] = helper_le_stl_mmu,
667 [MO_64] = helper_le_stq_mmu,
670 /* We expect to use a 12-bit negative offset from ENV. */
671 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
672 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
674 static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
677 return reloc_br_sd10k16(s->code_ptr - 1, target);
681 * Emits common code for the TLB addend lookup, which eventually loads the
682 * addend into TCG_REG_TMP2.
684 static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
685 tcg_insn_unit **label_ptr, bool is_load)
687 MemOp opc = get_memop(oi);
688 unsigned s_bits = opc & MO_SIZE;
689 unsigned a_bits = get_alignment_bits(opc);
690 tcg_target_long compare_mask;
691 int mem_index = get_mmuidx(oi);
692 int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
693 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
694 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
696 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
697 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
699 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
700 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
701 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
702 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
704 /* Load the tlb comparator and the addend. */
705 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
706 is_load ? offsetof(CPUTLBEntry, addr_read)
707 : offsetof(CPUTLBEntry, addr_write));
708 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
709 offsetof(CPUTLBEntry, addend));
711 /* We don't support unaligned accesses. */
712 if (a_bits < s_bits) {
715 /* Clear the non-page, non-alignment bits from the address. */
716 compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
717 tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
718 tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
720 /* Compare masked address with the TLB entry. */
721 label_ptr[0] = s->code_ptr;
722 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
724 /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
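/*
 * Note that folding the alignment bits into compare_mask means a
 * misaligned address can never match the page-aligned comparator, so
 * unaligned accesses take the slow path together with TLB misses.
 */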
727 static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
729 TCGReg datalo, TCGReg addrlo,
730 void *raddr, tcg_insn_unit **label_ptr)
732 TCGLabelQemuLdst *label = new_ldst_label(s);
734 label->is_ld = is_ld;
737 label->datalo_reg = datalo;
738 label->datahi_reg = 0; /* unused */
739 label->addrlo_reg = addrlo;
740 label->addrhi_reg = 0; /* unused */
741 label->raddr = tcg_splitwx_to_rx(raddr);
742 label->label_ptr[0] = label_ptr[0];
745 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
748 MemOp opc = get_memop(oi);
749 MemOp size = opc & MO_SIZE;
750 TCGType type = l->type;
752 /* resolve label address */
753 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
757 /* call load helper */
758 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
759 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
760 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
761 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);
763 tcg_out_call(s, qemu_ld_helpers[size]);
765 switch (opc & MO_SSIZE) {
767 tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0);
770 tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
773 tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
776 if (type == TCG_TYPE_I32) {
777 /* MO_UL loads of i32 should be sign-extended too */
778 tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
783 tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
787 return tcg_out_goto(s, l->raddr);
790 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
793 MemOp opc = get_memop(oi);
794 MemOp size = opc & MO_SIZE;
796 /* resolve label address */
797 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
801 /* call store helper */
802 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
803 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
806 tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
809 tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
812 tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
815 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
818 g_assert_not_reached();
821 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
822 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);
824 tcg_out_call(s, qemu_st_helpers[size]);
826 return tcg_out_goto(s, l->raddr);
831 * Alignment helpers for user-mode emulation
834 static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
837 TCGLabelQemuLdst *l = new_ldst_label(s);
840 l->addrlo_reg = addr_reg;
843 * Without micro-architecture details, we don't know which of bstrpick or
844 * andi is faster, so use bstrpick as it's not constrained by imm field
845 * width. (Not to say alignments >= 2^12 are going to happen any time
848 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
850 l->label_ptr[0] = s->code_ptr;
851 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
853 l->raddr = tcg_splitwx_to_rx(s->code_ptr);
856 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
858 /* resolve label address */
859 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
863 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
864 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
866 /* tail call, with the return address back inline. */
867 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
868 tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
869 : helper_unaligned_st), true);
873 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
875 return tcg_out_fail_alignment(s, l);
878 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
880 return tcg_out_fail_alignment(s, l);
883 #endif /* CONFIG_SOFTMMU */
886 * `ext32u` the address register into the given temp register if the
887 * guest address is 32-bit; no-op otherwise.
889 * Returns the address register ready for use with TLB addend.
891 static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
892 TCGReg addr, TCGReg tmp)
894 if (TARGET_LONG_BITS == 32) {
895 tcg_out_ext32u(s, tmp, addr);
901 static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
902 TCGReg rk, MemOp opc, TCGType type)
904 /* Byte swapping is left to middle-end expansion. */
905 tcg_debug_assert((opc & MO_BSWAP) == 0);
907 switch (opc & MO_SSIZE) {
909 tcg_out_opc_ldx_bu(s, rd, rj, rk);
912 tcg_out_opc_ldx_b(s, rd, rj, rk);
915 tcg_out_opc_ldx_hu(s, rd, rj, rk);
918 tcg_out_opc_ldx_h(s, rd, rj, rk);
921 if (type == TCG_TYPE_I64) {
922 tcg_out_opc_ldx_wu(s, rd, rj, rk);
927 tcg_out_opc_ldx_w(s, rd, rj, rk);
930 tcg_out_opc_ldx_d(s, rd, rj, rk);
933 g_assert_not_reached();
937 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
943 #if defined(CONFIG_SOFTMMU)
944 tcg_insn_unit *label_ptr[1];
955 #if defined(CONFIG_SOFTMMU)
956 tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
957 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
958 tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
959 add_qemu_ldst_label(s, 1, oi, type,
960 data_regl, addr_regl,
961 s->code_ptr, label_ptr);
963 a_bits = get_alignment_bits(opc);
965 tcg_out_test_alignment(s, true, addr_regl, a_bits);
967 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
968 TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
969 tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
973 static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
974 TCGReg rj, TCGReg rk, MemOp opc)
976 /* Byte swapping is left to middle-end expansion. */
977 tcg_debug_assert((opc & MO_BSWAP) == 0);
979 switch (opc & MO_SIZE) {
981 tcg_out_opc_stx_b(s, data, rj, rk);
984 tcg_out_opc_stx_h(s, data, rj, rk);
987 tcg_out_opc_stx_w(s, data, rj, rk);
990 tcg_out_opc_stx_d(s, data, rj, rk);
993 g_assert_not_reached();
997 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
1003 #if defined(CONFIG_SOFTMMU)
1004 tcg_insn_unit *label_ptr[1];
1010 data_regl = *args++;
1011 addr_regl = *args++;
1013 opc = get_memop(oi);
1015 #if defined(CONFIG_SOFTMMU)
1016 tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
1017 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
1018 tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
1019 add_qemu_ldst_label(s, 0, oi,
1020 0, /* type param is unused for stores */
1021 data_regl, addr_regl,
1022 s->code_ptr, label_ptr);
1024 a_bits = get_alignment_bits(opc);
1026 tcg_out_test_alignment(s, false, addr_regl, a_bits);
1028 base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
1029 TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
1030 tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
1034 /* LoongArch uses `andi zero, zero, 0` as NOP. */
1035 #define NOP OPC_ANDI
1036 static void tcg_out_nop(TCGContext *s)
1041 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1042 uintptr_t jmp_rw, uintptr_t addr)
1044 tcg_insn_unit i1, i2;
1045 ptrdiff_t upper, lower;
1046 ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;
1048 if (offset == sextreg(offset, 0, 26)) {
1049 i1 = encode_sd10k16_insn(OPC_B, offset);
1052 tcg_debug_assert(offset == sextreg(offset, 0, 36));
1053 lower = (int16_t)offset;
1054 upper = (offset - lower) >> 16;
1056 i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper);
1057 i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower);
1059 uint64_t pair = ((uint64_t)i2 << 32) | i1;
1060 qatomic_set((uint64_t *)jmp_rw, pair);
1061 flush_idcache_range(jmp_rx, jmp_rw, 8);
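/*
 * The two instructions are rewritten with one aligned 8-byte qatomic_set
 * so that another thread concurrently executing this goto_tb slot never
 * sees a half-patched pair; the emission side guarantees the required
 * 8-byte alignment of the patch area.
 */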
1068 static const tcg_insn_unit *tb_ret_addr;
1070 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1071 const TCGArg args[TCG_MAX_OP_ARGS],
1072 const int const_args[TCG_MAX_OP_ARGS])
1074 TCGArg a0 = args[0];
1075 TCGArg a1 = args[1];
1076 TCGArg a2 = args[2];
1077 int c2 = const_args[2];
1080 case INDEX_op_exit_tb:
1081 /* Reuse the zeroing that exists for goto_ptr. */
1083 tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1085 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1086 tcg_out_call_int(s, tb_ret_addr, true);
1090 case INDEX_op_goto_tb:
1091 tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
1093 * Ensure that patch area is 8-byte aligned so that an
1094 * atomic write can be used to patch the target address.
1096 if ((uintptr_t)s->code_ptr & 7) {
1099 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1101 * actual branch destination will be patched by
1102 * tb_target_set_jmp_target later
1104 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
1105 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1106 set_jmp_reset_offset(s, a0);
1113 case INDEX_op_goto_ptr:
1114 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1118 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1120 tcg_out_opc_b(s, 0);
1123 case INDEX_op_brcond_i32:
1124 case INDEX_op_brcond_i64:
1125 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1128 case INDEX_op_ext8s_i32:
1129 case INDEX_op_ext8s_i64:
1130 tcg_out_ext8s(s, a0, a1);
1133 case INDEX_op_ext8u_i32:
1134 case INDEX_op_ext8u_i64:
1135 tcg_out_ext8u(s, a0, a1);
1138 case INDEX_op_ext16s_i32:
1139 case INDEX_op_ext16s_i64:
1140 tcg_out_ext16s(s, a0, a1);
1143 case INDEX_op_ext16u_i32:
1144 case INDEX_op_ext16u_i64:
1145 tcg_out_ext16u(s, a0, a1);
1148 case INDEX_op_ext32u_i64:
1149 case INDEX_op_extu_i32_i64:
1150 tcg_out_ext32u(s, a0, a1);
1153 case INDEX_op_ext32s_i64:
1154 case INDEX_op_extrl_i64_i32:
1155 case INDEX_op_ext_i32_i64:
1156 tcg_out_ext32s(s, a0, a1);
1159 case INDEX_op_extrh_i64_i32:
1160 tcg_out_opc_srai_d(s, a0, a1, 32);
1163 case INDEX_op_not_i32:
1164 case INDEX_op_not_i64:
1165 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
1168 case INDEX_op_nor_i32:
1169 case INDEX_op_nor_i64:
1171 tcg_out_opc_ori(s, a0, a1, a2);
1172 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
1174 tcg_out_opc_nor(s, a0, a1, a2);
1178 case INDEX_op_andc_i32:
1179 case INDEX_op_andc_i64:
1181 /* guaranteed to fit due to constraint */
1182 tcg_out_opc_andi(s, a0, a1, ~a2);
1184 tcg_out_opc_andn(s, a0, a1, a2);
1188 case INDEX_op_orc_i32:
1189 case INDEX_op_orc_i64:
1191 /* guaranteed to fit due to constraint */
1192 tcg_out_opc_ori(s, a0, a1, ~a2);
1194 tcg_out_opc_orn(s, a0, a1, a2);
1198 case INDEX_op_and_i32:
1199 case INDEX_op_and_i64:
1201 tcg_out_opc_andi(s, a0, a1, a2);
1203 tcg_out_opc_and(s, a0, a1, a2);
1207 case INDEX_op_or_i32:
1208 case INDEX_op_or_i64:
1210 tcg_out_opc_ori(s, a0, a1, a2);
1212 tcg_out_opc_or(s, a0, a1, a2);
1216 case INDEX_op_xor_i32:
1217 case INDEX_op_xor_i64:
1219 tcg_out_opc_xori(s, a0, a1, a2);
1221 tcg_out_opc_xor(s, a0, a1, a2);
1225 case INDEX_op_extract_i32:
1226 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1228 case INDEX_op_extract_i64:
1229 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1232 case INDEX_op_deposit_i32:
1233 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1235 case INDEX_op_deposit_i64:
1236 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1239 case INDEX_op_bswap16_i32:
1240 case INDEX_op_bswap16_i64:
1241 tcg_out_opc_revb_2h(s, a0, a1);
1242 if (a2 & TCG_BSWAP_OS) {
1243 tcg_out_ext16s(s, a0, a0);
1244 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1245 tcg_out_ext16u(s, a0, a0);
1249 case INDEX_op_bswap32_i32:
1250 /* All 32-bit values are computed sign-extended in the register. */
1253 case INDEX_op_bswap32_i64:
1254 tcg_out_opc_revb_2w(s, a0, a1);
1255 if (a2 & TCG_BSWAP_OS) {
1256 tcg_out_ext32s(s, a0, a0);
1257 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1258 tcg_out_ext32u(s, a0, a0);
1262 case INDEX_op_bswap64_i64:
1263 tcg_out_opc_revb_d(s, a0, a1);
1266 case INDEX_op_clz_i32:
1267 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
1269 case INDEX_op_clz_i64:
1270 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
1273 case INDEX_op_ctz_i32:
1274 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
1276 case INDEX_op_ctz_i64:
1277 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
1280 case INDEX_op_shl_i32:
1282 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1284 tcg_out_opc_sll_w(s, a0, a1, a2);
1287 case INDEX_op_shl_i64:
1289 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1291 tcg_out_opc_sll_d(s, a0, a1, a2);
1295 case INDEX_op_shr_i32:
1297 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1299 tcg_out_opc_srl_w(s, a0, a1, a2);
1302 case INDEX_op_shr_i64:
1304 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1306 tcg_out_opc_srl_d(s, a0, a1, a2);
1310 case INDEX_op_sar_i32:
1312 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1314 tcg_out_opc_sra_w(s, a0, a1, a2);
1317 case INDEX_op_sar_i64:
1319 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1321 tcg_out_opc_sra_d(s, a0, a1, a2);
1325 case INDEX_op_rotl_i32:
1326 /* transform into equivalent rotr/rotri */
1328 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
1330 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1331 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
1334 case INDEX_op_rotl_i64:
1335 /* transform into equivalent rotr/rotri */
1337 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
1339 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1340 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
1344 case INDEX_op_rotr_i32:
1346 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1348 tcg_out_opc_rotr_w(s, a0, a1, a2);
1351 case INDEX_op_rotr_i64:
1353 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1355 tcg_out_opc_rotr_d(s, a0, a1, a2);
1359 case INDEX_op_add_i32:
1361 tcg_out_opc_addi_w(s, a0, a1, a2);
1363 tcg_out_opc_add_w(s, a0, a1, a2);
1366 case INDEX_op_add_i64:
1368 tcg_out_opc_addi_d(s, a0, a1, a2);
1370 tcg_out_opc_add_d(s, a0, a1, a2);
1374 case INDEX_op_sub_i32:
1376 tcg_out_opc_addi_w(s, a0, a1, -a2);
1378 tcg_out_opc_sub_w(s, a0, a1, a2);
1381 case INDEX_op_sub_i64:
1383 tcg_out_opc_addi_d(s, a0, a1, -a2);
1385 tcg_out_opc_sub_d(s, a0, a1, a2);
1389 case INDEX_op_mul_i32:
1390 tcg_out_opc_mul_w(s, a0, a1, a2);
1392 case INDEX_op_mul_i64:
1393 tcg_out_opc_mul_d(s, a0, a1, a2);
1396 case INDEX_op_mulsh_i32:
1397 tcg_out_opc_mulh_w(s, a0, a1, a2);
1399 case INDEX_op_mulsh_i64:
1400 tcg_out_opc_mulh_d(s, a0, a1, a2);
1403 case INDEX_op_muluh_i32:
1404 tcg_out_opc_mulh_wu(s, a0, a1, a2);
1406 case INDEX_op_muluh_i64:
1407 tcg_out_opc_mulh_du(s, a0, a1, a2);
1410 case INDEX_op_div_i32:
1411 tcg_out_opc_div_w(s, a0, a1, a2);
1413 case INDEX_op_div_i64:
1414 tcg_out_opc_div_d(s, a0, a1, a2);
1417 case INDEX_op_divu_i32:
1418 tcg_out_opc_div_wu(s, a0, a1, a2);
1420 case INDEX_op_divu_i64:
1421 tcg_out_opc_div_du(s, a0, a1, a2);
1424 case INDEX_op_rem_i32:
1425 tcg_out_opc_mod_w(s, a0, a1, a2);
1427 case INDEX_op_rem_i64:
1428 tcg_out_opc_mod_d(s, a0, a1, a2);
1431 case INDEX_op_remu_i32:
1432 tcg_out_opc_mod_wu(s, a0, a1, a2);
1434 case INDEX_op_remu_i64:
1435 tcg_out_opc_mod_du(s, a0, a1, a2);
1438 case INDEX_op_setcond_i32:
1439 case INDEX_op_setcond_i64:
1440 tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1443 case INDEX_op_ld8s_i32:
1444 case INDEX_op_ld8s_i64:
1445 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1447 case INDEX_op_ld8u_i32:
1448 case INDEX_op_ld8u_i64:
1449 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1451 case INDEX_op_ld16s_i32:
1452 case INDEX_op_ld16s_i64:
1453 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1455 case INDEX_op_ld16u_i32:
1456 case INDEX_op_ld16u_i64:
1457 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1459 case INDEX_op_ld_i32:
1460 case INDEX_op_ld32s_i64:
1461 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1463 case INDEX_op_ld32u_i64:
1464 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1466 case INDEX_op_ld_i64:
1467 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1470 case INDEX_op_st8_i32:
1471 case INDEX_op_st8_i64:
1472 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1474 case INDEX_op_st16_i32:
1475 case INDEX_op_st16_i64:
1476 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1478 case INDEX_op_st_i32:
1479 case INDEX_op_st32_i64:
1480 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1482 case INDEX_op_st_i64:
1483 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1486 case INDEX_op_qemu_ld_i32:
1487 tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
1489 case INDEX_op_qemu_ld_i64:
1490 tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
1492 case INDEX_op_qemu_st_i32:
1493 tcg_out_qemu_st(s, args);
1495 case INDEX_op_qemu_st_i64:
1496 tcg_out_qemu_st(s, args);
1499 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1500 case INDEX_op_mov_i64:
1501 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1503 g_assert_not_reached();
1507 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1510 case INDEX_op_goto_ptr:
1513 case INDEX_op_st8_i32:
1514 case INDEX_op_st8_i64:
1515 case INDEX_op_st16_i32:
1516 case INDEX_op_st16_i64:
1517 case INDEX_op_st32_i64:
1518 case INDEX_op_st_i32:
1519 case INDEX_op_st_i64:
1520 return C_O0_I2(rZ, r);
1522 case INDEX_op_brcond_i32:
1523 case INDEX_op_brcond_i64:
1524 return C_O0_I2(rZ, rZ);
1526 case INDEX_op_qemu_st_i32:
1527 case INDEX_op_qemu_st_i64:
1528 return C_O0_I2(LZ, L);
1530 case INDEX_op_ext8s_i32:
1531 case INDEX_op_ext8s_i64:
1532 case INDEX_op_ext8u_i32:
1533 case INDEX_op_ext8u_i64:
1534 case INDEX_op_ext16s_i32:
1535 case INDEX_op_ext16s_i64:
1536 case INDEX_op_ext16u_i32:
1537 case INDEX_op_ext16u_i64:
1538 case INDEX_op_ext32s_i64:
1539 case INDEX_op_ext32u_i64:
1540 case INDEX_op_extu_i32_i64:
1541 case INDEX_op_extrl_i64_i32:
1542 case INDEX_op_extrh_i64_i32:
1543 case INDEX_op_ext_i32_i64:
1544 case INDEX_op_not_i32:
1545 case INDEX_op_not_i64:
1546 case INDEX_op_extract_i32:
1547 case INDEX_op_extract_i64:
1548 case INDEX_op_bswap16_i32:
1549 case INDEX_op_bswap16_i64:
1550 case INDEX_op_bswap32_i32:
1551 case INDEX_op_bswap32_i64:
1552 case INDEX_op_bswap64_i64:
1553 case INDEX_op_ld8s_i32:
1554 case INDEX_op_ld8s_i64:
1555 case INDEX_op_ld8u_i32:
1556 case INDEX_op_ld8u_i64:
1557 case INDEX_op_ld16s_i32:
1558 case INDEX_op_ld16s_i64:
1559 case INDEX_op_ld16u_i32:
1560 case INDEX_op_ld16u_i64:
1561 case INDEX_op_ld32s_i64:
1562 case INDEX_op_ld32u_i64:
1563 case INDEX_op_ld_i32:
1564 case INDEX_op_ld_i64:
1565 return C_O1_I1(r, r);
1567 case INDEX_op_qemu_ld_i32:
1568 case INDEX_op_qemu_ld_i64:
1569 return C_O1_I1(r, L);
1571 case INDEX_op_andc_i32:
1572 case INDEX_op_andc_i64:
1573 case INDEX_op_orc_i32:
1574 case INDEX_op_orc_i64:
1576 * LoongArch insns for these ops don't have reg-imm forms, but we
1577 * can express using andi/ori if ~constant satisfies
1580 return C_O1_I2(r, r, rC);
1582 case INDEX_op_shl_i32:
1583 case INDEX_op_shl_i64:
1584 case INDEX_op_shr_i32:
1585 case INDEX_op_shr_i64:
1586 case INDEX_op_sar_i32:
1587 case INDEX_op_sar_i64:
1588 case INDEX_op_rotl_i32:
1589 case INDEX_op_rotl_i64:
1590 case INDEX_op_rotr_i32:
1591 case INDEX_op_rotr_i64:
1592 return C_O1_I2(r, r, ri);
1594 case INDEX_op_add_i32:
1595 case INDEX_op_add_i64:
1596 return C_O1_I2(r, r, rI);
1598 case INDEX_op_and_i32:
1599 case INDEX_op_and_i64:
1600 case INDEX_op_nor_i32:
1601 case INDEX_op_nor_i64:
1602 case INDEX_op_or_i32:
1603 case INDEX_op_or_i64:
1604 case INDEX_op_xor_i32:
1605 case INDEX_op_xor_i64:
1606 /* LoongArch reg-imm bitops have their imms ZERO-extended */
1607 return C_O1_I2(r, r, rU);
1609 case INDEX_op_clz_i32:
1610 case INDEX_op_clz_i64:
1611 case INDEX_op_ctz_i32:
1612 case INDEX_op_ctz_i64:
1613 return C_O1_I2(r, r, rW);
1615 case INDEX_op_setcond_i32:
1616 case INDEX_op_setcond_i64:
1617 return C_O1_I2(r, r, rZ);
1619 case INDEX_op_deposit_i32:
1620 case INDEX_op_deposit_i64:
1621 /* Must deposit into the same register as input */
1622 return C_O1_I2(r, 0, rZ);
1624 case INDEX_op_sub_i32:
1625 case INDEX_op_sub_i64:
1626 return C_O1_I2(r, rZ, rN);
1628 case INDEX_op_mul_i32:
1629 case INDEX_op_mul_i64:
1630 case INDEX_op_mulsh_i32:
1631 case INDEX_op_mulsh_i64:
1632 case INDEX_op_muluh_i32:
1633 case INDEX_op_muluh_i64:
1634 case INDEX_op_div_i32:
1635 case INDEX_op_div_i64:
1636 case INDEX_op_divu_i32:
1637 case INDEX_op_divu_i64:
1638 case INDEX_op_rem_i32:
1639 case INDEX_op_rem_i64:
1640 case INDEX_op_remu_i32:
1641 case INDEX_op_remu_i64:
1642 return C_O1_I2(r, rZ, rZ);
1645 g_assert_not_reached();
1649 static const int tcg_target_callee_save_regs[] = {
1650 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
1660 TCG_REG_RA, /* should be last for ABI compliance */
1663 /* Stack frame parameters. */
1664 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
1665 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
1666 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
1667 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
1668 + TCG_TARGET_STACK_ALIGN - 1) \
1669 & -TCG_TARGET_STACK_ALIGN)
1670 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
1672 /* We're expecting to be able to use an immediate for frame allocation. */
1673 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
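/*
 * Frame layout, from the stack pointer upwards:
 *   sp + 0                          outgoing static call arguments
 *   sp + TCG_STATIC_CALL_ARGS_SIZE  TCG temporary buffer (TEMP_SIZE)
 *   sp + SAVE_OFS                   callee-saved registers (SAVE_SIZE)
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.  FRAME_SIZE must
 * fit the 12-bit signed immediate of the addi.d used in the prologue,
 * hence the build-time check above.
 */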
1675 /* Generate global QEMU prologue and epilogue code */
1676 static void tcg_target_qemu_prologue(TCGContext *s)
1680 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
1683 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
1684 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1685 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
1686 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
1689 #if !defined(CONFIG_SOFTMMU)
1690 if (USE_GUEST_BASE) {
1691 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
1692 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1696 /* Call generated code */
1697 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1698 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
1700 /* Return path for goto_ptr. Set return value to 0 */
1701 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
1702 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
1705 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
1706 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1707 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
1708 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
1711 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
1712 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
1715 static void tcg_target_init(TCGContext *s)
1717 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1718 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
1720 tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
1721 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
1722 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
1723 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
1724 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
1725 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
1726 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
1727 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
1728 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
1729 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
1730 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
1732 s->reserved_regs = 0;
1733 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
1734 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
1735 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
1736 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
1737 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
1738 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
1739 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
1744 uint8_t fde_def_cfa[4];
1745 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
1748 #define ELF_HOST_MACHINE EM_LOONGARCH
1750 static const DebugFrame debug_frame = {
1751 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
1754 .h.cie.code_align = 1,
1755 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
1756 .h.cie.return_column = TCG_REG_RA,
1758 /* Total FDE size does not include the "len" member. */
1759 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1762 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
1763 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
1767 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
1768 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
1769 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
1770 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
1771 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
1772 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
1773 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
1774 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
1775 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
1776 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
1777 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */
1781 void tcg_register_jit(const void *buf, size_t buf_size)
1783 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));