 * Tiny Code Generator for QEMU
 * Copyright (c) 2008 Fabrice Bellard
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#include "../tcg-pool.c.inc"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200
#define TCG_CT_CONST_ZERO 0x400
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#define SOFTMMU_RESERVE_REGS 0
 * Note that sparcv8plus can only hold 64 bit quantities in %g and %o
 * registers. These are saved manually by the kernel in full 64-bit
 * slots. The %i and %l registers are saved by the register window
 * mechanism, which only allocates space for 32 bits. Given that this
 * window spill/fill can happen on any signal, we must consider the
 * high bits of the %i and %l registers garbage at all times.
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
# define ALL_GENERAL_REGS64 ALL_GENERAL_REGS
# define ALL_GENERAL_REGS64 MAKE_64BIT_MASK(0, 16)
#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
#define ALL_QLDST_REGS64 (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
/* Define some temporary registers. T2 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7
#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#define TCG_REG_TB TCG_REG_I1
#define USE_REG_TB (sizeof(void *) > 4)
static const int tcg_target_reg_alloc_order[] = {
static const int tcg_target_call_iarg_regs[6] = {
static const int tcg_target_call_oarg_regs[] = {
#define INSN_OP(x) ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x) ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
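/*
 * The INSN_* macros above assemble SPARC instruction words field by field:
 * op occupies bits [31:30], rd [29:25], op3 [24:19], rs1 [18:14] and
 * rs2 [4:0], while op2 and the branch condition sit in [24:22] and [28:25]
 * of the format-2 encodings.  Bit 13 is the "i" bit selecting the signed
 * immediate form, which is why the INSN_IMM* macros OR in (1 << 13).
 */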
#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
#define MOVCC_ICC (1 << 18)
#define MOVCC_XCC (1 << 18 | 1 << 12)
#define BPCC_XCC (2 << 20)
#define BPCC_PT (1 << 19)
#define BPCC_A (1 << 29)
#define BPR_PT BPCC_PT
#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA (INSN_OP(3) | INSN_OP3(0x15))
#define STHA (INSN_OP(3) | INSN_OP3(0x16))
#define STWA (INSN_OP(3) | INSN_OP3(0x14))
#define STXA (INSN_OP(3) | INSN_OP3(0x1e))
#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
#ifndef use_vis3_instructions
bool use_vis3_instructions;
static inline int check_fit_i64(int64_t val, unsigned int bits)
    return val == sextract64(val, 0, bits);
static inline int check_fit_i32(int32_t val, unsigned int bits)
    return val == sextract32(val, 0, bits);
#define check_fit_tl check_fit_i64
# define check_fit_ptr check_fit_i64
# define check_fit_ptr check_fit_i32
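/*
 * patch_reloc rewrites already-emitted branches once label addresses are
 * known: R_SPARC_WDISP16 covers the split 16-bit displacement of the
 * branch-on-register-condition (BPr) form, and R_SPARC_WDISP19 the 19-bit
 * displacement of the BPcc form emitted by tcg_out_bpcc0() below.
 */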
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
    case R_SPARC_WDISP16:
        assert(check_fit_ptr(pcrel >> 2, 16));
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
    case R_SPARC_WDISP19:
        assert(check_fit_ptr(pcrel >> 2, 19));
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        g_assert_not_reached();
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
    if (ct & TCG_CT_CONST) {
    if (type == TCG_TYPE_I32) {
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;
    /* Make sure we test 32-bit constants for imm13 properly. */
    if (type == TCG_TYPE_I32) {
    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
    /* A 13-bit constant relative to the TB. */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    /* A 32-bit constant sign-extended to 64-bits. */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    /* A 21-bit constant, shifted. */
    test = (tcg_target_long)arg >> lsb;
    if (check_fit_tl(test, 13)) {
        tcg_out_movi_imm13(s, ret, test);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
    tcg_out_movi_int(s, type, ret, arg, false);
static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
    tcg_out32(s, RDY | INSN_RD(rd));
static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
        tcg_out_sety(s, TCG_REG_G0);
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
static inline void tcg_out_nop(TCGContext *s)
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
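/*
 * The BPr/MOVr "register condition" encodings only exist for signed
 * comparisons against zero (Z, NZ, LZ, GZ, LEZ, GEZ), which is why the
 * table above has no entries for the unsigned TCG conditions; those
 * always go through a compare and the icc/xcc condition codes instead.
 */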
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    tcg_out_bpcc0(s, scond, flags, off19);
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
        /* The result of the comparison is in the carry bit. */
        /* For equality, we can transform to inequality vs zero. */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU. There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            cond = tcg_swap_cond(cond);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
    tcg_out_cmp(s, c1, c2, c2const);
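    /*
     * The carry flag from the SUBCC above is set exactly when c1 < c2
     * unsigned, so ADDC %g0, 0 materializes LTU directly (0 + 0 + C),
     * while SUBC %g0, -1 computes 0 - (-1) - C = 1 - C, i.e. GEU.
     */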
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
    if (use_vis3_instructions) {
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
    TCGReg tmp = TCG_REG_T1;
    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
    TCGReg tmp = TCG_REG_T1;
    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        /* Otherwise adjust BH as if there is carry into T2 ... */
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);
    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
    tcg_out_call_nodelay(s, dest, false);
static void tcg_out_mb(TCGContext *s, TCGArg a0)
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[16];
static const tcg_insn_unit *qemu_st_trampoline[16];
static void emit_extend(TCGContext *s, TCGReg r, int op)
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
    switch (op & MO_SIZE) {
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
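        /* SPARC v9's 32-bit SRL zero-extends its 32-bit result into the
           64-bit destination, so a shift count of zero (%g0) is enough
           to clear bits 63:32 here. */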
static void build_trampolines(TCGContext *s)
    static void * const qemu_ld_helpers[16] = {
        [MO_UB] = helper_ret_ldub_mmu,
        [MO_SB] = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ] = helper_be_ldq_mmu,
    static void * const qemu_st_helpers[16] = {
        [MO_UB] = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ] = helper_be_stq_mmu,
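    /*
     * Both helper tables (and the trampoline arrays they feed) are indexed
     * directly by MemOp bits of the access, which is why they are sized
     * [16] and only sparsely populated.
     */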
    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
        if (SPARC64 || TARGET_LONG_BITS == 32) {
            /* Install the high part of the address. */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
            emit_extend(s, TCG_REG_O2, i);
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address. */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data. */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                emit_extend(s, ra, i);
            /* Skip the oi argument. */
        /* Set the retaddr operand. */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
    int tmp_buf_size, frame_size;
    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));
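    /*
     * SAVE both allocates the frame (new %sp = old %sp - frame_size) and
     * rotates the register window, so from here on the values the caller
     * passed in its %o registers are addressed as %i registers.
     */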
#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    /* We choose TCG_REG_TB such that no move is required. */
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);
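    /* The movi above executes in the RETURN delay slot, so this epilogue
       returns 0 in %o0. */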
#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
    for (i = 0; i < count; ++i) {
#if defined(CONFIG_SOFTMMU)
/* We expect to use a 13-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
/* Perform the TLB load and compare.
   ADDRLO and ADDRHI contain the possible two parts of the address.
   MEM_INDEX and S_BITS are the memory context and log2 size of the load.
   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.
   The result of the TLB comparison is in %[ix]cc. The sanitized address
   is in the returned register, maybe %o0. The TLB addend is in %o1. */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);
    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses. */
    if (a_bits < s_bits) {
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    tcg_out_cmp(s, r0, r2, 0);
    /* If the guest address must be zero-extended, do so now. */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
#endif /* CONFIG_SOFTMMU */
static const int qemu_ld_opc[16] = {
    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
static const int qemu_st_opc[16] = {
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;
    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));
    /* The fast path is exactly one insn. Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
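    /* Because the branch above sets the annul bit, the load in its delay
       slot is executed only when the branch is taken, i.e. on a TLB hit. */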
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below. */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
    /* Recall that all of the helpers return 64-bit results,
       which complicates things for sparcv8plus. */
        /* We let the helper sign-extend SB and SW, but leave SL for here. */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        if ((memop & MO_SIZE) == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension. */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
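    /* Back-patch the earlier beq,a,pt with the now-known displacement of
       the code following the slow path. */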
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;
    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));
    /* The fast path is exactly one insn. Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);
    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
    /* Hoist the loads of the most common arguments. */
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1. */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
                /* make sure the patch is 8-byte aligned. */
                if ((intptr_t)s->code_ptr & 4) {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
        set_jmp_reset_offset(s, a0);
        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB. */
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32): \
        glue(glue(case INDEX_op_, x), _i64)
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        tcg_out_ldst(s, a0, a1, a2, LDSH);
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        tcg_out_ldst(s, a0, a1, a2, STB);
        tcg_out_ldst(s, a0, a1, a2, STH);
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
    case INDEX_op_shl_i32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        /* The 32-bit multiply insns produce a full 64-bit result. If the
           destination register can hold it, we can avoid the slower RDY. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
    case INDEX_op_shl_i64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
    case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call: /* Always emitted via tcg_out_call. */
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
    case INDEX_op_goto_ptr:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
        return C_O1_I1(r, r);
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(rZ, r);
    case INDEX_op_add_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, rJ);
    case INDEX_op_brcond_i32:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return C_O1_I1(R, r);
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(RZ, r);
    case INDEX_op_add_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(R, RZ, RJ);
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
        return C_O1_I1(R, R);
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, R);
    case INDEX_op_brcond_i64:
        return C_O0_I2(RZ, RJ);
    case INDEX_op_movcond_i64:
        return C_O1_I4(R, RZ, RJ, RI, 0);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return C_O2_I4(R, R, RZ, RZ, RJ, RI);
    case INDEX_op_muluh_i64:
        return C_O1_I2(R, R, R);
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, A);
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(R, A);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(sZ, A);
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(SZ, A);
        g_assert_not_reached();
static void tcg_target_init(TCGContext *s)
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time. */
#ifndef use_vis3_instructions
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
# define ELF_HOST_MACHINE EM_SPARCV9
# define ELF_HOST_MACHINE EM_SPARC32PLUS
# define ELF_HOST_FLAGS EF_SPARC_32PLUS
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15, /* o7 */
    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
        12, 30, /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
        13, 30 /* DW_CFA_def_cfa_register i6 */
    .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
void tcg_register_jit(const void *buf, size_t buf_size)
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_rx;
    tcg_insn_unit i1, i2;
    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB. */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);
        qatomic_set((uint32_t *)jmp_rw,
                    deposit32(CALL, 0, 30, br_disp >> 2));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
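        /* A CALL instruction carries a 30-bit word displacement, so the
           direct branch can be retargeted by rewriting that single word
           in place. */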
    /* This does not exercise the range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB.
       But this does still happen quite often. */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
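    /* SPARC instruction fetch is big-endian, so depositing i1 into the
       high 32 bits of the doubleword stored below places it at the lower
       address and makes it the first of the two patched instructions. */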
    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
    flush_idcache_range(jmp_rx, jmp_rw, 8);