/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

/* Define some temporary registers. T3 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_G2
#define TCG_REG_T3  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
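
/*
 * A worked example of the field macros above: the three-register
 * "add %g1, %g2, %g3" is a format-3 instruction with op = 2, op3 = 0x00,
 * so it assembles as
 *     ARITH_ADD | INSN_RD(3) | INSN_RS1(1) | INSN_RS2(2)
 *       = (2 << 30) | (3 << 25) | (1 << 14) | 2 = 0x86004002.
 * The immediate forms instead set bit 13 via INSN_IMM13() and carry the
 * constant in the low 13 bits.
 */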

#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define MOVCC_ICC (1 << 18)
#define MOVCC_XCC (1 << 18 | 1 << 12)

#define BPCC_ICC 0
#define BPCC_XCC (2 << 20)
#define BPCC_PT (1 << 19)
#define BPCC_PN 0
#define BPCC_A (1 << 29)

#define BPR_PT BPCC_PT

#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)

#define LDUB  (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB  (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH  (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH  (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW  (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW  (INSN_OP(3) | INSN_OP3(0x08))
#define LDX   (INSN_OP(3) | INSN_OP3(0x0b))
#define STB   (INSN_OP(3) | INSN_OP3(0x05))
#define STH   (INSN_OP(3) | INSN_OP3(0x06))
#define STW   (INSN_OP(3) | INSN_OP3(0x04))
#define STX   (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA  (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA  (INSN_OP(3) | INSN_OP3(0x15))
#define STHA  (INSN_OP(3) | INSN_OP3(0x16))
#define STWA  (INSN_OP(3) | INSN_OP3(0x14))
#define STXA  (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE  (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE  (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE  (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE  (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
    return val == sextract64(val, 0, bits);

static bool check_fit_i32(int32_t val, unsigned int bits)
    return val == sextract32(val, 0, bits);
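
/*
 * For example, check_fit_i32(4095, 13) is true, since a signed 13-bit
 * immediate covers [-4096, 4095], while check_fit_i32(4096, 13) is
 * false: sextract32(4096, 0, 13) sign-extends bit 12 and yields -4096.
 */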

#define check_fit_tl  check_fit_i64
#define check_fit_ptr check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    }
    return 0;

static void tcg_out_nop(TCGContext *s)
    tcg_out32(s, NOP);

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));

/* A 13-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);

/* A 32-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
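
/*
 * The SETHI of ~arg plus the XOR with a sign-extended 13-bit immediate
 * reconstructs arg in all 64 bits.  A worked example, with arg the
 * negative 32-bit constant 0x80000000: SETHI loads ~arg & 0xfffffc00 =
 * 0x7ffffc00, and the XOR with (arg & 0x3ff) | -0x400 =
 * 0xfffffffffffffc00 yields 0xffffffff80000000, i.e. arg sign-extended
 * to 64 bits.
 */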

/* A 32-bit constant zero-extended to 64 bits. */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
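
/*
 * For illustration: arg = 0x123456789abcdef0 reaches the final
 * decomposition above, since lo = (int32_t)arg = 0x9abcdef0 does not
 * fit in 13 bits.  It is built as sethi+or of 0x12345678 into ret,
 * sethi+or of 0x9abcdef0 into scratch, an SLLX of ret by 32, and a
 * final OR: six instructions in total.
 */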

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
    g_assert_not_reached();

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
    g_assert_not_reached();

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
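
/*
 * Note that on SPARC v9 the 32-bit SRL/SRA forms write the zero/sign
 * extension of the low 32 bits of the source into all 64 bits of the
 * destination, so a shift count of zero is sufficient to implement
 * ext32u/ext32s above.
 */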

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_ext32s(s, rd, rs);

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
    tcg_out_ext32u(s, rd, rs);

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
    return false;

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;

static void tcg_out_sety(TCGContext *s, TCGReg rs)
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
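
/*
 * The 32-bit UDIV/SDIV divide the 64-bit quantity Y:rs1 by the 32-bit
 * divisor, which is why Y must first hold the zero/sign extension of
 * rs1.  E.g. for a signed -7 / 2, Y is set to rs1 >> 31 = 0xffffffff so
 * that the quotient is -3 rather than a huge unsigned result.
 */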

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */

        /* The result of the comparison is in the carry bit. */

        /* For equality, we can transform to inequality vs zero. */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);

        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);

        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU. There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            cond = tcg_swap_cond(cond);

        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        if (neg) {
            /* 0 - 0 - C = -C = (C ? -1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
        } else {
            /* 0 + 0 + C = C = (C ? 1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        }
    } else if (neg) {
        /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
    } else {
        /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
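
/*
 * A worked LTU case of the carry games above: for setcond_i32 with
 * TCG_COND_LTU and neg == false, the SUBCC from tcg_out_cmp() sets the
 * icc carry iff c1 < c2 unsigned, and "addc %g0, 0, ret" then
 * materializes that carry as 0 or 1 without any branch or movcc.
 */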

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
    if (use_vis3_instructions && !neg) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_GEU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
    }

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
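
/*
 * A worked example of the adjustment, assuming an add2 with constant
 * bh = 5: T2 is set to 6 (bh plus an assumed carry-in); if the ADDCC of
 * the low parts left the xcc carry clear, the GEU movcc rewrites T2
 * back to 5; the final ADD rh = ah + T2 therefore uses whichever value
 * matches the actual carry, again without a branch.
 */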

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);

static void tcg_out_mb(TCGContext *s, TCGArg a0)
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
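
/*
 * The mirroring is bit-for-bit: TCG_MO_LD_LD, TCG_MO_ST_LD,
 * TCG_MO_LD_ST and TCG_MO_ST_ST occupy the same four low bits as the
 * MEMBAR mmask #LoadLoad, #StoreLoad, #LoadStore and #StoreStore, so
 * e.g. tcg_out_mb(s, TCG_MO_LD_LD | TCG_MO_LD_ST) emits
 * "membar #LoadLoad | #LoadStore" with no translation table.
 */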

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer. Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);

bool tcg_target_has_memory_bswap(MemOp memop)
    return true;

/* We expect to use a 13-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)

/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1. */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow host SIGBUS to deliver SIGBUS to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != get_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }

    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do in the delay slot. */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
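
/*
 * Schematically, the softmmu fast path assembled above is:
 *     ldx   [%env + mask_off], %t2      ! tlb mask
 *     ldx   [%env + table_off], %t3     ! tlb table
 *     srl   %addr, page_bits - CPU_TLB_ENTRY_BITS, %t1
 *     and   %t1, %t2, %t1               ! index into the table
 *     add   %t1, %t3, %t1               ! CPUTLBEntry address
 *     ldx   [%t1 + cmp_off], %t2        ! comparator
 *     ldx   [%t1 + add_off], %t1        ! addend
 *     and   %addr, compare_mask, %t3
 *     subcc %t2, %t3, %g0
 *     bne,pn %xcc, slow_path            ! %icc for 32-bit guests
 */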

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
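
/*
 * In the final path above, the value is assembled around the RETURN:
 * the SETHI-based move targets %i0, which the window restore performed
 * by RETURN renames to %o0 in the caller's window, and the OR of the
 * low 10 bits executes in the delay slot after that restore.
 */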

static void tcg_out_goto_tb(TCGContext *s, int which)
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;

    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;

    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    default:
        g_assert_not_reached();
    }

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    default:
        g_assert_not_reached();
    }

static void tcg_target_init(TCGContext *s)
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */

#define ELF_HOST_MACHINE EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));