2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "../tcg-pool.c.inc"
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
70 #define TCG_CT_CONST_S11 0x100
71 #define TCG_CT_CONST_S13 0x200
72 #define TCG_CT_CONST_ZERO 0x400
75  * For softmmu, we must avoid conflicts with the first 3 argument
76  * registers, which are used both to perform the TLB lookup and to
77  * call the helper function.
80 #define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
82 #define SOFTMMU_RESERVE_REGS 0
86  * Note that sparcv8plus can only hold 64-bit quantities in %g and %o
87 * registers. These are saved manually by the kernel in full 64-bit
88 * slots. The %i and %l registers are saved by the register window
89 * mechanism, which only allocates space for 32 bits. Given that this
90 * window spill/fill can happen on any signal, we must consider the
91 * high bits of the %i and %l registers garbage at all times.
93 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
95 # define ALL_GENERAL_REGS64 ALL_GENERAL_REGS
97 # define ALL_GENERAL_REGS64 MAKE_64BIT_MASK(0, 16)
99 #define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
100 #define ALL_QLDST_REGS64 (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
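/*
 * Illustration of the masks above, assuming the usual register numbering
 * (%g0-%g7 = 0..7, %o0-%o7 = 8..15, %l0-%l7 = 16..23, %i0-%i7 = 24..31):
 *   ALL_GENERAL_REGS            = 0xffffffff   all 32 registers
 *   ALL_GENERAL_REGS64 (v8plus) = 0x0000ffff   only %g and %o
 *   SOFTMMU_RESERVE_REGS        = 0x00000700   %o0, %o1, %o2
 * so e.g. ALL_QLDST_REGS64 for softmmu on sparcv8plus is 0xf8ff.
 */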
102 /* Define some temporary registers. T2 is used for constant generation. */
103 #define TCG_REG_T1 TCG_REG_G1
104 #define TCG_REG_T2 TCG_REG_O7
106 #ifndef CONFIG_SOFTMMU
107 # define TCG_GUEST_BASE_REG TCG_REG_I5
110 #define TCG_REG_TB TCG_REG_I1
111 #define USE_REG_TB (sizeof(void *) > 4)
113 static const int tcg_target_reg_alloc_order[] = {
143 static const int tcg_target_call_iarg_regs[6] = {
152 static const int tcg_target_call_oarg_regs[] = {
159 #define INSN_OP(x) ((x) << 30)
160 #define INSN_OP2(x) ((x) << 22)
161 #define INSN_OP3(x) ((x) << 19)
162 #define INSN_OPF(x) ((x) << 5)
163 #define INSN_RD(x) ((x) << 25)
164 #define INSN_RS1(x) ((x) << 14)
165 #define INSN_RS2(x) (x)
166 #define INSN_ASI(x) ((x) << 5)
168 #define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
169 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
170 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
171 #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
172 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
173 #define INSN_COND(x) ((x) << 25)
191 #define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
200 #define MOVCC_ICC (1 << 18)
201 #define MOVCC_XCC (1 << 18 | 1 << 12)
204 #define BPCC_XCC (2 << 20)
205 #define BPCC_PT (1 << 19)
207 #define BPCC_A (1 << 29)
209 #define BPR_PT BPCC_PT
211 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
212 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
213 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
214 #define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
215 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
216 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
217 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
218 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
219 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
220 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
221 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
222 #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
223 #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
224 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
225 #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
226 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
227 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
228 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
229 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
230 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
231 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
232 #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
234 #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
235 #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
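/*
 * Worked example of how the field macros compose, assuming the usual
 * numbering %o0 = 8, %o1 = 9, %o2 = 10: "add %o1, %o2, %o0" is
 *   ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2)
 *   = 0x80000000 | (8 << 25) | (9 << 14) | 10 = 0x9002400a,
 * with bit 13 (the immediate selector set by INSN_IMM13) left clear.
 */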
237 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
238 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
239 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
241 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
242 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
243 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
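/* The (1 << 12) bit is the V9 "x" field: it selects the 64-bit form of the
   shift, which takes a 6-bit rather than a 5-bit shift count. */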
245 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
246 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
247 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
248 #define RETURN (INSN_OP(2) | INSN_OP3(0x39))
249 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
250 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
251 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
252 #define CALL INSN_OP(1)
253 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
254 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
255 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
256 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
257 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
258 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
259 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
260 #define STB (INSN_OP(3) | INSN_OP3(0x05))
261 #define STH (INSN_OP(3) | INSN_OP3(0x06))
262 #define STW (INSN_OP(3) | INSN_OP3(0x04))
263 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
264 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
265 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
266 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
267 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
268 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
269 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
270 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
271 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
272 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
273 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
274 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
276 #define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
278 #define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
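/* NOP is "sethi 0, %g0" (0x01000000), the canonical SPARC nop encoding. */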
280 #ifndef ASI_PRIMARY_LITTLE
281 #define ASI_PRIMARY_LITTLE 0x88
284 #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
285 #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
286 #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
287 #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
288 #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
290 #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
291 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
292 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
294 #ifndef use_vis3_instructions
295 bool use_vis3_instructions;
298 static bool check_fit_i64(int64_t val, unsigned int bits)
300 return val == sextract64(val, 0, bits);
303 static bool check_fit_i32(int32_t val, unsigned int bits)
305 return val == sextract32(val, 0, bits);
308 #define check_fit_tl check_fit_i64
310 # define check_fit_ptr check_fit_i64
312 # define check_fit_ptr check_fit_i32
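/*
 * For example, check_fit_i32(4095, 13) is true and check_fit_i32(4096, 13)
 * is false: a 13-bit signed field holds -4096..4095, exactly the range of
 * the simm13 immediates built with INSN_IMM13.
 */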
315 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
316 intptr_t value, intptr_t addend)
318 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
319 uint32_t insn = *src_rw;
323 pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
326 case R_SPARC_WDISP16:
327 if (!check_fit_ptr(pcrel >> 2, 16)) {
330 insn &= ~INSN_OFF16(-1);
331 insn |= INSN_OFF16(pcrel);
333 case R_SPARC_WDISP19:
334 if (!check_fit_ptr(pcrel >> 2, 19)) {
337 insn &= ~INSN_OFF19(-1);
338 insn |= INSN_OFF19(pcrel);
341 if (!check_fit_ptr(value, 13)) {
344 insn &= ~INSN_IMM13(-1);
345 insn |= INSN_IMM13(value);
348 g_assert_not_reached();
355 /* Test if a constant matches the constraint. */
356 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
358 if (ct & TCG_CT_CONST) {
362 if (type == TCG_TYPE_I32) {
366 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
368 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
370 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
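/*
 * In other words, S11 accepts -1024..1023 (the 11-bit immediate of MOVCC)
 * and S13 accepts -4096..4095 (the ordinary 13-bit simm); these presumably
 * back the 'I' and 'J' constraint letters used in the constraint sets below.
 */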
377 static void tcg_out_nop(TCGContext *s)
382 static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
385 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
388 static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
389 int32_t offset, int op)
391 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
394 static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
395 int32_t val2, int val2const, int op)
397 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
398 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
401 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
404 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
409 static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
412 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
418 static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
420 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
423 static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
425 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
428 static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
430 if (check_fit_i32(arg, 13)) {
431 /* A 13-bit constant sign-extended to 64 bits. */
432 tcg_out_movi_imm13(s, ret, arg);
434 /* A 32-bit constant zero-extended to 64 bits. */
435 tcg_out_sethi(s, ret, arg);
437 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
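/* For example, arg = 0x12345678: the SETHI leaves 0x12345400 in RET
   (the high 22 bits) and the OR with 0x278 (arg & 0x3ff) completes it. */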
442 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
443 tcg_target_long arg, bool in_prologue,
446 tcg_target_long hi, lo = (int32_t)arg;
447 tcg_target_long test, lsb;
449 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
450 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
451 tcg_out_movi_imm32(s, ret, arg);
455 /* A 13-bit constant sign-extended to 64 bits. */
456 if (check_fit_tl(arg, 13)) {
457 tcg_out_movi_imm13(s, ret, arg);
461 /* A 13-bit constant relative to the TB. */
462 if (!in_prologue && USE_REG_TB) {
463 test = tcg_tbrel_diff(s, (void *)arg);
464 if (check_fit_ptr(test, 13)) {
465 tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
470 /* A 32-bit constant sign-extended to 64 bits. */
472 tcg_out_sethi(s, ret, ~arg);
473 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
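/*
 * Worked example for this branch: arg = -0x12345678.  SETHI of ~arg leaves
 * 0x12345400 in RET with the upper 32 bits clear; the XOR with the
 * sign-extended immediate (arg & 0x3ff) | -0x400 = -0x278 then flips the
 * upper bits to all ones and fixes up the low bits, giving
 * 0xffffffffedcba988 = -0x12345678.
 */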
477 /* A 32-bit constant, shifted. */
479 test = (tcg_target_long)arg >> lsb;
480 if (lsb > 10 && test == extract64(test, 0, 21)) {
481 tcg_out_sethi(s, ret, test << 10);
482 tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
484 } else if (test == (uint32_t)test || test == (int32_t)test) {
485 tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
486 tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
490 /* Use the constant pool, if possible. */
491 if (!in_prologue && USE_REG_TB) {
492 new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
493 tcg_tbrel_diff(s, NULL));
494 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
498 /* A 64-bit constant decomposed into 2 32-bit pieces. */
499 if (check_fit_i32(lo, 13)) {
500 hi = (arg - lo) >> 32;
501 tcg_out_movi_imm32(s, ret, hi);
502 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
503 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
506 tcg_out_movi_imm32(s, ret, hi);
507 tcg_out_movi_imm32(s, scratch, lo);
508 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
509 tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
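/*
 * Example of the paths above: arg = 0xdeadbeef00 has 8 trailing zeros, so
 * test = 0xdeadbeef.  That does not fit the 21-bit sethi+sllx path, but it
 * does fit in 32 bits, so we recurse (sethi+or) and finish with one SLLX
 * by 8 -- three insns instead of the full two-piece decomposition.
 */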
513 static void tcg_out_movi(TCGContext *s, TCGType type,
514 TCGReg ret, tcg_target_long arg)
516 tcg_debug_assert(ret != TCG_REG_T2);
517 tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
520 static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
523 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
526 static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
527 intptr_t offset, int op)
529 if (check_fit_ptr(offset, 13)) {
530 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
533 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
534 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
538 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
539 TCGReg arg1, intptr_t arg2)
541 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
544 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
545 TCGReg arg1, intptr_t arg2)
547 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
550 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
551 TCGReg base, intptr_t ofs)
554 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
560 static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
562 intptr_t diff = tcg_tbrel_diff(s, arg);
563 if (USE_REG_TB && check_fit_ptr(diff, 13)) {
564 tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
567 tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
568 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
571 static void tcg_out_sety(TCGContext *s, TCGReg rs)
573 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
576 static void tcg_out_rdy(TCGContext *s, TCGReg rd)
578 tcg_out32(s, RDY | INSN_RD(rd));
581 static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
582 int32_t val2, int val2const, int uns)
584 /* Load Y with the sign/zero extension of RS1 to 64 bits. */
586 tcg_out_sety(s, TCG_REG_G0);
588 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
589 tcg_out_sety(s, TCG_REG_T1);
592 tcg_out_arithc(s, rd, rs1, val2, val2const,
593 uns ? ARITH_UDIV : ARITH_SDIV);
596 static const uint8_t tcg_cond_to_bcond[] = {
597 [TCG_COND_EQ] = COND_E,
598 [TCG_COND_NE] = COND_NE,
599 [TCG_COND_LT] = COND_L,
600 [TCG_COND_GE] = COND_GE,
601 [TCG_COND_LE] = COND_LE,
602 [TCG_COND_GT] = COND_G,
603 [TCG_COND_LTU] = COND_CS,
604 [TCG_COND_GEU] = COND_CC,
605 [TCG_COND_LEU] = COND_LEU,
606 [TCG_COND_GTU] = COND_GU,
609 static const uint8_t tcg_cond_to_rcond[] = {
610 [TCG_COND_EQ] = RCOND_Z,
611 [TCG_COND_NE] = RCOND_NZ,
612 [TCG_COND_LT] = RCOND_LZ,
613 [TCG_COND_GT] = RCOND_GZ,
614 [TCG_COND_LE] = RCOND_LEZ,
615 [TCG_COND_GE] = RCOND_GEZ
618 static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
620 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
623 static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
628 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
630 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
632 tcg_out_bpcc0(s, scond, flags, off19);
635 static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
637 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
640 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
641 int32_t arg2, int const_arg2, TCGLabel *l)
643 tcg_out_cmp(s, arg1, arg2, const_arg2);
644 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
648 static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
649 int32_t v1, int v1const)
651 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
652 | INSN_RS1(tcg_cond_to_bcond[cond])
653 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
656 static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
657 TCGReg c1, int32_t c2, int c2const,
658 int32_t v1, int v1const)
660 tcg_out_cmp(s, c1, c2, c2const);
661 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
664 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
665 int32_t arg2, int const_arg2, TCGLabel *l)
667 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
668 if (arg2 == 0 && !is_unsigned_cond(cond)) {
672 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
674 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
676 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
677 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
679 tcg_out_cmp(s, arg1, arg2, const_arg2);
680 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
685 static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
686 int32_t v1, int v1const)
688 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
689 | (tcg_cond_to_rcond[cond] << 10)
690 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
693 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
694 TCGReg c1, int32_t c2, int c2const,
695 int32_t v1, int v1const)
697 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
698 Note that the immediate range is one bit smaller, so we must check
700 if (c2 == 0 && !is_unsigned_cond(cond)
701 && (!v1const || check_fit_i32(v1, 10))) {
702 tcg_out_movr(s, cond, ret, c1, v1, v1const);
704 tcg_out_cmp(s, c1, c2, c2const);
705 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
709 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
710 TCGReg c1, int32_t c2, int c2const)
712 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
716 /* The result of the comparison is in the carry bit. */
721 /* For equality, we can transform to inequality vs zero. */
723 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
728 c1 = TCG_REG_G0, c2const = 0;
729 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
734 /* If we don't need to load a constant into a register, we can
735 swap the operands on GTU/LEU. There's no benefit to loading
736 the constant into a temporary register. */
737 if (!c2const || c2 == 0) {
742 cond = tcg_swap_cond(cond);
748 tcg_out_cmp(s, c1, c2, c2const);
749 tcg_out_movi_imm13(s, ret, 0);
750 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
754 tcg_out_cmp(s, c1, c2, c2const);
755 if (cond == TCG_COND_LTU) {
756 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
758 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
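/*
 * After the SUBCC above, the icc carry bit is set exactly when C1 < C2
 * unsigned.  ADDC %g0, 0 computes 0 + 0 + C, giving 1 for LTU;
 * SUBC %g0, -1 computes 0 - (-1) - C = 1 - C, giving 1 for GEU.
 */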
762 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
763 TCGReg c1, int32_t c2, int c2const)
765 if (use_vis3_instructions) {
771 c2 = c1, c2const = 0, c1 = TCG_REG_G0;
774 tcg_out_cmp(s, c1, c2, c2const);
775 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
782 /* For 64-bit signed comparisons vs zero, we can avoid the compare
783 if the input does not overlap the output. */
784 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
785 tcg_out_movi_imm13(s, ret, 0);
786 tcg_out_movr(s, cond, ret, c1, 1, 1);
788 tcg_out_cmp(s, c1, c2, c2const);
789 tcg_out_movi_imm13(s, ret, 0);
790 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
794 static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
795 TCGReg al, TCGReg ah, int32_t bl, int blconst,
796 int32_t bh, int bhconst, int opl, int oph)
798 TCGReg tmp = TCG_REG_T1;
800 /* Note that the low parts are fully consumed before tmp is set. */
801 if (rl != ah && (bhconst || rl != bh)) {
805 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
806 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
807 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
810 static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
811 TCGReg al, TCGReg ah, int32_t bl, int blconst,
812 int32_t bh, int bhconst, bool is_sub)
814 TCGReg tmp = TCG_REG_T1;
816 /* Note that the low parts are fully consumed before tmp is set. */
817 if (rl != ah && (bhconst || rl != bh)) {
821 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
823 if (use_vis3_instructions && !is_sub) {
824 /* Note that ADDXC doesn't accept immediates. */
825 if (bhconst && bh != 0) {
826 tcg_out_movi_imm13(s, TCG_REG_T2, bh);
829 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
830 } else if (bh == TCG_REG_G0) {
831 /* If we have a zero, we can perform the operation in two insns,
832 with the arithmetic first, and a conditional move into place. */
834 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
835 is_sub ? ARITH_SUB : ARITH_ADD);
836 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
838 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
839 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
843 * Otherwise adjust BH as if there is carry into T2.
844 * Note that constant BH is constrained to 11 bits for the MOVCC,
845 * so the adjustment fits 12 bits.
848 tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
850 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
851 is_sub ? ARITH_SUB : ARITH_ADD);
853 /* ... smoosh T2 back to original BH if carry is clear ... */
854 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
855 /* ... and finally perform the arithmetic with the new operand. */
856 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
859 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
862 static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
863 bool in_prologue, bool tail_call)
865 uintptr_t desti = (uintptr_t)dest;
867 /* Be careful not to clobber %o7 for a tail call. */
868 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
869 desti & ~0xfff, in_prologue,
870 tail_call ? TCG_REG_G2 : TCG_REG_O7);
871 tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
872 TCG_REG_T1, desti & 0xfff, JMPL);
875 static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
878 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
880 if (disp == (int32_t)disp) {
881 tcg_out32(s, CALL | (uint32_t)disp >> 2);
883 tcg_out_jmpl_const(s, dest, in_prologue, false);
887 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
889 tcg_out_call_nodelay(s, dest, false);
893 static void tcg_out_mb(TCGContext *s, TCGArg a0)
895 /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
896 tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
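/* Specifically, TCG_MO_LD_LD/ST_LD/LD_ST/ST_ST (1/2/4/8) line up with the
   MEMBAR mmask bits #LoadLoad/#StoreLoad/#LoadStore/#StoreStore, so the
   TCG mask can be passed straight through. */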
899 #ifdef CONFIG_SOFTMMU
900 static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
901 static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
903 static void emit_extend(TCGContext *s, TCGReg r, int op)
905     /* Emit a zero-extension of 8-, 16- or 32-bit data as
906      * required by the MO_* value op; do nothing for 64-bit data.
908 switch (op & MO_SIZE) {
910 tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
913 tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
914 tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
918 tcg_out_arith(s, r, r, 0, SHIFT_SRL);
926 static void build_trampolines(TCGContext *s)
928 static void * const qemu_ld_helpers[] = {
929 [MO_UB] = helper_ret_ldub_mmu,
930 [MO_SB] = helper_ret_ldsb_mmu,
931 [MO_LEUW] = helper_le_lduw_mmu,
932 [MO_LESW] = helper_le_ldsw_mmu,
933 [MO_LEUL] = helper_le_ldul_mmu,
934 [MO_LEUQ] = helper_le_ldq_mmu,
935 [MO_BEUW] = helper_be_lduw_mmu,
936 [MO_BESW] = helper_be_ldsw_mmu,
937 [MO_BEUL] = helper_be_ldul_mmu,
938 [MO_BEUQ] = helper_be_ldq_mmu,
940 static void * const qemu_st_helpers[] = {
941 [MO_UB] = helper_ret_stb_mmu,
942 [MO_LEUW] = helper_le_stw_mmu,
943 [MO_LEUL] = helper_le_stl_mmu,
944 [MO_LEUQ] = helper_le_stq_mmu,
945 [MO_BEUW] = helper_be_stw_mmu,
946 [MO_BEUL] = helper_be_stl_mmu,
947 [MO_BEUQ] = helper_be_stq_mmu,
953 for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
954 if (qemu_ld_helpers[i] == NULL) {
958 /* May as well align the trampoline. */
959 while ((uintptr_t)s->code_ptr & 15) {
962 qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
964 if (SPARC64 || TARGET_LONG_BITS == 32) {
967 /* Install the high part of the address. */
968 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
972 /* Set the retaddr operand. */
973 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
975 tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
976 /* delay slot -- set the env argument */
977 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
980 for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
981 if (qemu_st_helpers[i] == NULL) {
985 /* May as well align the trampoline. */
986 while ((uintptr_t)s->code_ptr & 15) {
989 qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
992 emit_extend(s, TCG_REG_O2, i);
996 if (TARGET_LONG_BITS == 64) {
997 /* Install the high part of the address. */
998 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
1003 if ((i & MO_SIZE) == MO_64) {
1004 /* Install the high part of the data. */
1005 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
1008 emit_extend(s, ra, i);
1011 /* Skip the oi argument. */
1015 /* Set the retaddr operand. */
1016 if (ra >= TCG_REG_O6) {
1017 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
1018 TCG_TARGET_CALL_STACK_OFFSET);
1020 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
1024 tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
1025 /* delay slot -- set the env argument */
1026 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
1030 static const tcg_insn_unit *qemu_unalign_ld_trampoline;
1031 static const tcg_insn_unit *qemu_unalign_st_trampoline;
1033 static void build_trampolines(TCGContext *s)
1035 for (int ld = 0; ld < 2; ++ld) {
1038 while ((uintptr_t)s->code_ptr & 15) {
1043 helper = helper_unaligned_ld;
1044 qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
1046 helper = helper_unaligned_st;
1047 qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
1050 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1051 /* Install the high part of the address. */
1052 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
1056 tcg_out_jmpl_const(s, helper, true, true);
1057 /* delay slot -- set the env argument */
1058 tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
1063 /* Generate global QEMU prologue and epilogue code */
1064 static void tcg_target_qemu_prologue(TCGContext *s)
1066 int tmp_buf_size, frame_size;
1069 * The TCG temp buffer is at the top of the frame, immediately
1070 * below the frame pointer. Use the logical (aligned) offset here;
1071 * the stack bias is applied in temp_allocate_frame().
1073 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
1074 tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);
1077 * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
1078 * otherwise the minimal frame usable by callees.
1080 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
1081 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
1082 frame_size += TCG_TARGET_STACK_ALIGN - 1;
1083 frame_size &= -TCG_TARGET_STACK_ALIGN;
1084 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
1085 INSN_IMM13(-frame_size));
1087 #ifndef CONFIG_SOFTMMU
1088 if (guest_base != 0) {
1089 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
1090 guest_base, true, TCG_REG_T1);
1091 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1095 /* We choose TCG_REG_TB such that no move is required. */
1097 QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
1098 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
1101 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
1105 /* Epilogue for goto_ptr. */
1106 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
1107 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1109 tcg_out_movi_imm13(s, TCG_REG_O0, 0);
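/* The zero loaded into %o0 in the delay slot is the value returned to the
   caller of tcg_qemu_tb_exec(), presumably indicating that no TB was found
   to chain to. */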
1111 build_trampolines(s);
1114 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1117 for (i = 0; i < count; ++i) {
1122 #if defined(CONFIG_SOFTMMU)
1124 /* We expect to use a 13-bit negative offset from ENV. */
1125 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1126 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
1128 /* Perform the TLB load and compare.
1131   ADDR contains the guest virtual address to be translated.
1133   MEM_INDEX and OPC are the memory context and MemOp of the access.
1135 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1136 This should be offsetof addr_read or addr_write.
1138 The result of the TLB comparison is in %[ix]cc. The sanitized address
1139 is in the returned register, maybe %o0. The TLB addend is in %o1. */
1141 static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
1142 MemOp opc, int which)
1144 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1145 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1146 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1147 const TCGReg r0 = TCG_REG_O0;
1148 const TCGReg r1 = TCG_REG_O1;
1149 const TCGReg r2 = TCG_REG_O2;
1150 unsigned s_bits = opc & MO_SIZE;
1151 unsigned a_bits = get_alignment_bits(opc);
1152 tcg_target_long compare_mask;
1154 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1155 tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
1156 tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
1158 /* Extract the page index, shifted into place for tlb index. */
1159 tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
1161 tcg_out_arith(s, r2, r2, r0, ARITH_AND);
1163 /* Add the tlb_table pointer, creating the CPUTLBEntry address in R2. */
1164 tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
1166 /* Load the tlb comparator and the addend. */
1167 tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
1168 tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
1170 /* Mask out the page offset, except for the required alignment.
1171 We don't support unaligned accesses. */
1172 if (a_bits < s_bits) {
1175 compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
1176 if (check_fit_tl(compare_mask, 13)) {
1177 tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
1179 tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
1180 tcg_out_arith(s, r2, addr, r2, ARITH_AND);
1182 tcg_out_cmp(s, r0, r2, 0);
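/*
 * Example: with TARGET_PAGE_BITS == 12 and a naturally aligned 4-byte access
 * (a_bits == 2), compare_mask is ...fffff003.  The AND keeps the page number
 * plus the two low alignment bits, while the comparator loaded from the TLB
 * has those bits clear, so a misaligned address fails the compare and takes
 * the slow path along with genuine TLB misses.
 */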
1184 /* If the guest address must be zero-extended, do so now. */
1185 if (SPARC64 && TARGET_LONG_BITS == 32) {
1186 tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
1191 #endif /* CONFIG_SOFTMMU */
1193 static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
1196 [MO_UB | MO_LE] = LDUB,
1197 [MO_SB | MO_LE] = LDSB,
1206 [MO_LEUW] = LDUH_LE,
1207 [MO_LESW] = LDSH_LE,
1208 [MO_LEUL] = LDUW_LE,
1209 [MO_LESL] = LDSW_LE,
1214 static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
1226 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1227 MemOpIdx oi, bool is_64)
1229 MemOp memop = get_memop(oi);
1230 tcg_insn_unit *label_ptr;
1232 #ifdef CONFIG_SOFTMMU
1233 unsigned memi = get_mmuidx(oi);
1234 TCGReg addrz, param;
1235 const tcg_insn_unit *func;
1237 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1238 offsetof(CPUTLBEntry, addr_read));
1240 /* The fast path is exactly one insn. Thus we can perform the
1241 entire TLB Hit in the (annulled) delay slot of the branch
1242 over the TLB Miss case. */
1244 /* beq,a,pt %[xi]cc, label0 */
1245 label_ptr = s->code_ptr;
1246 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1247 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
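/* The 19-bit displacement is left as zero here; it is back-patched via the
   "*label_ptr |= INSN_OFF19(...)" fixup below, once the slow path has been
   emitted. */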
1249 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1250 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1255 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1256 /* Skip the high part; we'll perform the extract in the trampoline. */
1259 tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
1261    /* We use the helpers to sign-extend SB and SW data; only the SL
1262       case needs explicit extending below. */
1263 if ((memop & MO_SSIZE) == MO_SL) {
1264 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1266 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
1268 tcg_debug_assert(func != NULL);
1269 tcg_out_call_nodelay(s, func, false);
1271 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1273    /* Recall that all of the helpers return 64-bit results,
1274       which complicates things for sparcv8plus. */
1276 /* We let the helper sign-extend SB and SW, but leave SL for here. */
1277 if (is_64 && (memop & MO_SSIZE) == MO_SL) {
1278 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1280 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1283 if ((memop & MO_SIZE) == MO_64) {
1284 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
1285 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
1286 tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
1288 /* Re-extend from 32-bit rather than reassembling when we
1289 know the high register must be an extension. */
1290 tcg_out_arithi(s, data, TCG_REG_O1, 0,
1291 memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
1293 tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
1297 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1299 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1300 unsigned a_bits = get_alignment_bits(memop);
1301 unsigned s_bits = memop & MO_SIZE;
1304 if (SPARC64 && TARGET_LONG_BITS == 32) {
1305 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1310 * Normal case: alignment equal to access size.
1312 if (a_bits == s_bits) {
1313 tcg_out_ldst_rr(s, data, addr, index,
1314 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1319 * Test for at least natural alignment, and assume most accesses
1320 * will be aligned -- perform a straight load in the delay slot.
1321 * This is required to preserve atomicity for aligned accesses.
1323 t_bits = MAX(a_bits, s_bits);
1324 tcg_debug_assert(t_bits < 13);
1325 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1327 /* beq,a,pt %icc, label */
1328 label_ptr = s->code_ptr;
1329 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1331 tcg_out_ldst_rr(s, data, addr, index,
1332 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1334 if (a_bits >= s_bits) {
1336 * Overalignment: A successful alignment test will perform the memory
1337 * operation in the delay slot, and failure need only invoke the
1338 * handler for SIGBUS.
1340 TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
1341 tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
1342 /* delay slot -- move to low part of argument reg */
1343 tcg_out_mov_delay(s, arg_low, addr);
1345 /* Underalignment: load by pieces of minimum alignment. */
1346 int ld_opc, a_size, s_size, i;
1349 * Force full address into T1 early; avoids problems with
1350 * overlap between @addr and @data.
1352 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1354 a_size = 1 << a_bits;
1355 s_size = 1 << s_bits;
1356 if ((memop & MO_BSWAP) == MO_BE) {
1357 ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
1358 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1359 ld_opc = qemu_ld_opc[a_bits | MO_BE];
1360 for (i = a_size; i < s_size; i += a_size) {
1361 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1362 tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
1363 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1365 } else if (a_bits == 0) {
1367 tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1368 for (i = a_size; i < s_size; i += a_size) {
1369 if ((memop & MO_SIGN) && i == s_size - a_size) {
1372 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1373 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1374 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1377 ld_opc = qemu_ld_opc[a_bits | MO_LE];
1378 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
1379 for (i = a_size; i < s_size; i += a_size) {
1380 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1381 if ((memop & MO_SIGN) && i == s_size - a_size) {
1382 ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
1384 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
1385 tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1386 tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1391 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1392 #endif /* CONFIG_SOFTMMU */
1395 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1398 MemOp memop = get_memop(oi);
1399 tcg_insn_unit *label_ptr;
1401 #ifdef CONFIG_SOFTMMU
1402 unsigned memi = get_mmuidx(oi);
1403 TCGReg addrz, param;
1404 const tcg_insn_unit *func;
1406 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1407 offsetof(CPUTLBEntry, addr_write));
1409 /* The fast path is exactly one insn. Thus we can perform the entire
1410 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1411 /* beq,a,pt %[xi]cc, label0 */
1412 label_ptr = s->code_ptr;
1413 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1414 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1416 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1417 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1422 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1423 /* Skip the high part; we'll perform the extract in the trampoline. */
1426 tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
1427 if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
1428 /* Skip the high part; we'll perform the extract in the trampoline. */
1431 tcg_out_mov(s, TCG_TYPE_REG, param++, data);
1433 func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1434 tcg_debug_assert(func != NULL);
1435 tcg_out_call_nodelay(s, func, false);
1437 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1439 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1441 TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1442 unsigned a_bits = get_alignment_bits(memop);
1443 unsigned s_bits = memop & MO_SIZE;
1446 if (SPARC64 && TARGET_LONG_BITS == 32) {
1447 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1452 * Normal case: alignment equal to access size.
1454 if (a_bits == s_bits) {
1455 tcg_out_ldst_rr(s, data, addr, index,
1456 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1461 * Test for at least natural alignment, and assume most accesses
1462 * will be aligned -- perform a straight store in the delay slot.
1463 * This is required to preserve atomicity for aligned accesses.
1465 t_bits = MAX(a_bits, s_bits);
1466 tcg_debug_assert(t_bits < 13);
1467 tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1469 /* beq,a,pt %icc, label */
1470 label_ptr = s->code_ptr;
1471 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1473 tcg_out_ldst_rr(s, data, addr, index,
1474 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1476 if (a_bits >= s_bits) {
1478 * Overalignment: A successful alignment test will perform the memory
1479 * operation in the delay slot, and failure need only invoke the
1480 * handler for SIGBUS.
1482 TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
1483 tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
1484 /* delay slot -- move to low part of argument reg */
1485 tcg_out_mov_delay(s, arg_low, addr);
1487 /* Underalignment: store by pieces of minimum alignment. */
1488 int st_opc, a_size, s_size, i;
1491 * Force full address into T1 early; avoids problems with
1492 * overlap between @addr and @data.
1494 tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1496 a_size = 1 << a_bits;
1497 s_size = 1 << s_bits;
1498 if ((memop & MO_BSWAP) == MO_BE) {
1499 st_opc = qemu_st_opc[a_bits | MO_BE];
1500 for (i = 0; i < s_size; i += a_size) {
1502 int shift = (s_size - a_size - i) * 8;
1505 tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
1507 tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
1509 } else if (a_bits == 0) {
1510 tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
1511 for (i = 1; i < s_size; i++) {
1512 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1513 tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
1516 /* Note that ST*A with immediate asi must use indexed address. */
1517 st_opc = qemu_st_opc[a_bits + MO_LE];
1518 tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
1519 for (i = a_size; i < s_size; i += a_size) {
1520 tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1521 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1522 tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
1527 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1528 #endif /* CONFIG_SOFTMMU */
1531 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1532 const TCGArg args[TCG_MAX_OP_ARGS],
1533 const int const_args[TCG_MAX_OP_ARGS])
1538 /* Hoist the loads of the most common arguments. */
1545 case INDEX_op_exit_tb:
1546 if (check_fit_ptr(a0, 13)) {
1547 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1548 tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1550 } else if (USE_REG_TB) {
1551 intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1552 if (check_fit_ptr(tb_diff, 13)) {
1553 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1554 /* Note that TCG_REG_TB has been unwound to O1. */
1555 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1559 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1560 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1561 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1563 case INDEX_op_goto_tb:
1564 if (s->tb_jmp_insn_offset) {
1565 /* direct jump method */
1567 /* make sure the patch is 8-byte aligned. */
1568 if ((intptr_t)s->code_ptr & 4) {
1571 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1572 tcg_out_sethi(s, TCG_REG_T1, 0);
1573 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
1574 tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
1575 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1577 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1582 /* indirect jump method */
1583 tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
1584 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1587 set_jmp_reset_offset(s, a0);
1589 /* For the unlinked path of goto_tb, we need to reset
1590 TCG_REG_TB to the beginning of this TB. */
1592 c = -tcg_current_code_size(s);
1593 if (check_fit_i32(c, 13)) {
1594 tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
1596 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
1597 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
1598 TCG_REG_T1, ARITH_ADD);
1602 case INDEX_op_goto_ptr:
1603 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1605 tcg_out_mov_delay(s, TCG_REG_TB, a0);
1611 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1615 #define OP_32_64(x) \
1616 glue(glue(case INDEX_op_, x), _i32): \
1617 glue(glue(case INDEX_op_, x), _i64)
1620 tcg_out_ldst(s, a0, a1, a2, LDUB);
1623 tcg_out_ldst(s, a0, a1, a2, LDSB);
1626 tcg_out_ldst(s, a0, a1, a2, LDUH);
1629 tcg_out_ldst(s, a0, a1, a2, LDSH);
1631 case INDEX_op_ld_i32:
1632 case INDEX_op_ld32u_i64:
1633 tcg_out_ldst(s, a0, a1, a2, LDUW);
1636 tcg_out_ldst(s, a0, a1, a2, STB);
1639 tcg_out_ldst(s, a0, a1, a2, STH);
1641 case INDEX_op_st_i32:
1642 case INDEX_op_st32_i64:
1643 tcg_out_ldst(s, a0, a1, a2, STW);
1666 case INDEX_op_shl_i32:
1669 /* Limit immediate shift count lest we create an illegal insn. */
1670 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1672 case INDEX_op_shr_i32:
1675 case INDEX_op_sar_i32:
1678 case INDEX_op_mul_i32:
1689 case INDEX_op_div_i32:
1690 tcg_out_div32(s, a0, a1, a2, c2, 0);
1692 case INDEX_op_divu_i32:
1693 tcg_out_div32(s, a0, a1, a2, c2, 1);
1696 case INDEX_op_brcond_i32:
1697 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1699 case INDEX_op_setcond_i32:
1700 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1702 case INDEX_op_movcond_i32:
1703 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1706 case INDEX_op_add2_i32:
1707 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1708 args[4], const_args[4], args[5], const_args[5],
1709 ARITH_ADDCC, ARITH_ADDC);
1711 case INDEX_op_sub2_i32:
1712 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1713 args[4], const_args[4], args[5], const_args[5],
1714 ARITH_SUBCC, ARITH_SUBC);
1716 case INDEX_op_mulu2_i32:
1719 case INDEX_op_muls2_i32:
1722 /* The 32-bit multiply insns produce a full 64-bit result. If the
1723 destination register can hold it, we can avoid the slower RDY. */
1724 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1725 if (SPARC64 || a0 <= TCG_REG_O7) {
1726 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
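/* a0 <= TCG_REG_O7 means the low-part destination is a %g or %o register,
   which holds all 64 bits even on sparcv8plus (see the note at the top of
   the file), so SRLX can extract the high half of the product directly. */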
1732 case INDEX_op_qemu_ld_i32:
1733 tcg_out_qemu_ld(s, a0, a1, a2, false);
1735 case INDEX_op_qemu_ld_i64:
1736 tcg_out_qemu_ld(s, a0, a1, a2, true);
1738 case INDEX_op_qemu_st_i32:
1739 case INDEX_op_qemu_st_i64:
1740 tcg_out_qemu_st(s, a0, a1, a2);
1743 case INDEX_op_ld32s_i64:
1744 tcg_out_ldst(s, a0, a1, a2, LDSW);
1746 case INDEX_op_ld_i64:
1747 tcg_out_ldst(s, a0, a1, a2, LDX);
1749 case INDEX_op_st_i64:
1750 tcg_out_ldst(s, a0, a1, a2, STX);
1752 case INDEX_op_shl_i64:
1755 /* Limit immediate shift count lest we create an illegal insn. */
1756 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1758 case INDEX_op_shr_i64:
1761 case INDEX_op_sar_i64:
1764 case INDEX_op_mul_i64:
1767 case INDEX_op_div_i64:
1770 case INDEX_op_divu_i64:
1773 case INDEX_op_ext_i32_i64:
1774 case INDEX_op_ext32s_i64:
1775 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
1777 case INDEX_op_extu_i32_i64:
1778 case INDEX_op_ext32u_i64:
1779 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
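/* On SPARC V9 the 32-bit SRL/SRA forms zero-/sign-extend their 32-bit
   result into the full 64-bit register, so a shift count of 0 serves as a
   free ext32u/ext32s. */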
1781 case INDEX_op_extrl_i64_i32:
1782 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1784 case INDEX_op_extrh_i64_i32:
1785 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1788 case INDEX_op_brcond_i64:
1789 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1791 case INDEX_op_setcond_i64:
1792 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1794 case INDEX_op_movcond_i64:
1795 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1797 case INDEX_op_add2_i64:
1798 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1799 const_args[4], args[5], const_args[5], false);
1801 case INDEX_op_sub2_i64:
1802 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1803 const_args[4], args[5], const_args[5], true);
1805 case INDEX_op_muluh_i64:
1806 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1810 tcg_out_arithc(s, a0, a1, a2, c2, c);
1814 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1821 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1822 case INDEX_op_mov_i64:
1823 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1829 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1832 case INDEX_op_goto_ptr:
1835 case INDEX_op_ld8u_i32:
1836 case INDEX_op_ld8s_i32:
1837 case INDEX_op_ld16u_i32:
1838 case INDEX_op_ld16s_i32:
1839 case INDEX_op_ld_i32:
1840 case INDEX_op_neg_i32:
1841 case INDEX_op_not_i32:
1842 return C_O1_I1(r, r);
1844 case INDEX_op_st8_i32:
1845 case INDEX_op_st16_i32:
1846 case INDEX_op_st_i32:
1847 return C_O0_I2(rZ, r);
1849 case INDEX_op_add_i32:
1850 case INDEX_op_mul_i32:
1851 case INDEX_op_div_i32:
1852 case INDEX_op_divu_i32:
1853 case INDEX_op_sub_i32:
1854 case INDEX_op_and_i32:
1855 case INDEX_op_andc_i32:
1856 case INDEX_op_or_i32:
1857 case INDEX_op_orc_i32:
1858 case INDEX_op_xor_i32:
1859 case INDEX_op_shl_i32:
1860 case INDEX_op_shr_i32:
1861 case INDEX_op_sar_i32:
1862 case INDEX_op_setcond_i32:
1863 return C_O1_I2(r, rZ, rJ);
1865 case INDEX_op_brcond_i32:
1866 return C_O0_I2(rZ, rJ);
1867 case INDEX_op_movcond_i32:
1868 return C_O1_I4(r, rZ, rJ, rI, 0);
1869 case INDEX_op_add2_i32:
1870 case INDEX_op_sub2_i32:
1871 return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
1872 case INDEX_op_mulu2_i32:
1873 case INDEX_op_muls2_i32:
1874 return C_O2_I2(r, r, rZ, rJ);
1876 case INDEX_op_ld8u_i64:
1877 case INDEX_op_ld8s_i64:
1878 case INDEX_op_ld16u_i64:
1879 case INDEX_op_ld16s_i64:
1880 case INDEX_op_ld32u_i64:
1881 case INDEX_op_ld32s_i64:
1882 case INDEX_op_ld_i64:
1883 case INDEX_op_ext_i32_i64:
1884 case INDEX_op_extu_i32_i64:
1885 return C_O1_I1(R, r);
1887 case INDEX_op_st8_i64:
1888 case INDEX_op_st16_i64:
1889 case INDEX_op_st32_i64:
1890 case INDEX_op_st_i64:
1891 return C_O0_I2(RZ, r);
1893 case INDEX_op_add_i64:
1894 case INDEX_op_mul_i64:
1895 case INDEX_op_div_i64:
1896 case INDEX_op_divu_i64:
1897 case INDEX_op_sub_i64:
1898 case INDEX_op_and_i64:
1899 case INDEX_op_andc_i64:
1900 case INDEX_op_or_i64:
1901 case INDEX_op_orc_i64:
1902 case INDEX_op_xor_i64:
1903 case INDEX_op_shl_i64:
1904 case INDEX_op_shr_i64:
1905 case INDEX_op_sar_i64:
1906 case INDEX_op_setcond_i64:
1907 return C_O1_I2(R, RZ, RJ);
1909 case INDEX_op_neg_i64:
1910 case INDEX_op_not_i64:
1911 case INDEX_op_ext32s_i64:
1912 case INDEX_op_ext32u_i64:
1913 return C_O1_I1(R, R);
1915 case INDEX_op_extrl_i64_i32:
1916 case INDEX_op_extrh_i64_i32:
1917 return C_O1_I1(r, R);
1919 case INDEX_op_brcond_i64:
1920 return C_O0_I2(RZ, RJ);
1921 case INDEX_op_movcond_i64:
1922 return C_O1_I4(R, RZ, RJ, RI, 0);
1923 case INDEX_op_add2_i64:
1924 case INDEX_op_sub2_i64:
1925 return C_O2_I4(R, R, RZ, RZ, RJ, RI);
1926 case INDEX_op_muluh_i64:
1927 return C_O1_I2(R, R, R);
1929 case INDEX_op_qemu_ld_i32:
1930 return C_O1_I1(r, A);
1931 case INDEX_op_qemu_ld_i64:
1932 return C_O1_I1(R, A);
1933 case INDEX_op_qemu_st_i32:
1934 return C_O0_I2(sZ, A);
1935 case INDEX_op_qemu_st_i64:
1936 return C_O0_I2(SZ, A);
1939 g_assert_not_reached();
1943 static void tcg_target_init(TCGContext *s)
1946 * Only probe for the platform and capabilities if we haven't already
1947 * determined maximum values at compile time.
1949 #ifndef use_vis3_instructions
1951 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1952 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1956 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1957 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;
1959 tcg_target_call_clobber_regs = 0;
1960 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1961 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1962 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1963 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1964 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1965 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1966 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1967 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1968 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1969 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1970 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1971 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1972 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1973 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1974 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1976 s->reserved_regs = 0;
1977 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1978 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1979 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1980 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1981 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1982 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1983 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1984 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1988 # define ELF_HOST_MACHINE EM_SPARCV9
1990 # define ELF_HOST_MACHINE EM_SPARC32PLUS
1991 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
1996 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
1997 uint8_t fde_win_save;
1998 uint8_t fde_ret_save[3];
2001 static const DebugFrame debug_frame = {
2002 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2005 .h.cie.code_align = 1,
2006 .h.cie.data_align = -sizeof(void *) & 0x7f,
2007 .h.cie.return_column = 15, /* o7 */
2009 /* Total FDE size does not include the "len" member. */
2010 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2014 12, 30, /* DW_CFA_def_cfa i6, 2047 */
2015 (2047 & 0x7f) | 0x80, (2047 >> 7)
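/* The two bytes above are 0xff, 0x0f: the unsigned LEB128 encoding of the
   2047-byte SPARC64 stack bias named in the DW_CFA_def_cfa. */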
2017 13, 30 /* DW_CFA_def_cfa_register i6 */
2020 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
2021 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
2024 void tcg_register_jit(const void *buf, size_t buf_size)
2026 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2029 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
2030 uintptr_t jmp_rw, uintptr_t addr)
2032 intptr_t tb_disp = addr - tc_ptr;
2033 intptr_t br_disp = addr - jmp_rx;
2034 tcg_insn_unit i1, i2;
2036 /* We can reach the entire address space for ILP32.
2037 For LP64, the code_gen_buffer can't be larger than 2GB. */
2038 tcg_debug_assert(tb_disp == (int32_t)tb_disp);
2039 tcg_debug_assert(br_disp == (int32_t)br_disp);
2042 qatomic_set((uint32_t *)jmp_rw,
2043 deposit32(CALL, 0, 30, br_disp >> 2));
2044 flush_idcache_range(jmp_rx, jmp_rw, 4);
2048    /* This path does not exercise the full range of the branch, but we
2049       still need to be able to load the new value of TCG_REG_TB.
2050       It does still happen quite often. */
2051 if (check_fit_ptr(tb_disp, 13)) {
2052 /* ba,pt %icc, addr */
2053 i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
2054 | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
2055 i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
2056 | INSN_IMM13(tb_disp));
2057 } else if (tb_disp >= 0) {
2058 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
2059 i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
2060 | INSN_IMM13(tb_disp & 0x3ff));
2062 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
2063 i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
2064 | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
2067 qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
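/* deposit64(i2, 32, 32, i1) places i1 in the upper word; on this big-endian
   host a single atomic 8-byte store therefore writes i1 followed by i2 in
   memory, so concurrently executing code never sees a half-updated pair. */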
2068 flush_idcache_range(jmp_rx, jmp_rw, 8);