/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif
/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64  0xffffffffu
#else
# define ALL_64  0xffffu
#endif
/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)
static const int tcg_target_reg_alloc_order[] = {
    /* (allocation-order list elided in this excerpt) */
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
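/*
 * Worked example (added for illustration, not part of the original file):
 * the instruction "add %o0, %o1, %o2" assembles from these fields as
 *     ARITH_ADD | INSN_RD(TCG_REG_O2) | INSN_RS1(TCG_REG_O0)
 *               | INSN_RS2(TCG_REG_O1)
 * i.e. op=2 in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14.
 * The INSN_IMM13() form instead sets bit 13 (the "i" bit) and replaces
 * rs2 with a sign-extended 13-bit immediate, as in "add %o0, -4, %o2".
 */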
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif
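/*
 * Added note: check_fit_i64(val, 13) asks whether val survives truncation
 * to its low 13 bits followed by sign extension, i.e. whether
 * -4096 <= val <= 4095 -- exactly the range of a SPARC simm13 operand.
 */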
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        assert(check_fit_ptr(pcrel >> 2, 16));
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        assert(check_fit_ptr(pcrel >> 2, 19));
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->regs = 0xffffffff;
        break;
    case 'R':
        ct->regs = ALL_64;
        break;
    case 'A': /* qemu_ld/st address constraint */
        ct->regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff;
    reserve_helpers:
        tcg_regset_reset_reg(ct->regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->regs, TCG_REG_O2);
        break;
    case 's': /* qemu_st data 32-bit constraint */
        ct->regs = 0xffffffff;
        goto reserve_helpers;
    case 'S': /* qemu_st data 64-bit constraint */
        ct->regs = ALL_64;
        goto reserve_helpers;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    }
    return 0;
}
static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                                 TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
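/*
 * Added illustration: loading the 32-bit constant 0x12345678 uses the
 * classic two-insn sequence
 *     sethi %hi(0x12345678), %reg    -- tcg_out_sethi(), top 22 bits
 *     or    %reg, 0x278, %reg        -- tcg_out_arithi(), low 10 bits
 * while anything in [-4096, 4095] needs only the single
 * "or %g0, imm, %reg" emitted by tcg_out_movi_imm13().
 */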
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* Make sure we test 32-bit constants for imm13 properly. */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 21-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (check_fit_tl(test, 13)) {
        tcg_out_movi_imm13(s, ret, test);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
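/*
 * Worked example (added): arg = 0x3ff8000000000 is neither a simm13 nor
 * a (zero- or sign-extended) 32-bit value, but it has 39 trailing zeros
 * and the shifted-out value 0x7ff fits in 13 bits, so the "21-bit
 * constant, shifted" case emits just
 *     or   %g0, 0x7ff, %reg
 *     sllx %reg, 39, %reg
 */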
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi_int(s, type, ret, arg, false);
}

static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
{
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
}
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
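/*
 * Added note: the 32-bit UDIV/SDIV instructions divide the 64-bit value
 * formed by {Y, rs1} by the second operand, which is why Y is seeded
 * above with zero (unsigned) or with a copy of rs1's sign bits (signed,
 * via the 31-bit arithmetic shift).
 */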
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2, c2 = t, c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
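/*
 * Worked example (added): setcond_i32(LTU, ret, c1, c2) becomes
 *     subcc %c1, %c2, %g0     ! sets the carry bit iff c1 < c2 unsigned
 *     addc  %g0, 0, %ret      ! ret = 0 + 0 + carry
 * so the boolean result is materialized without any branch or movcc.
 */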
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /* Otherwise adjust BH as if there is carry into T2 ... */
        if (bhconst) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
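/*
 * Added sketch: without VIS3, the high-part carry is folded in by hand:
 * T2 is set to bh +/- 1 (the value needed when the low part carried), a
 * movcc on the xcc carry knocks T2 back to bh when carry is clear, and
 * a plain ADD/SUB combines ah with the corrected operand.  The dance is
 * needed because plain ADDC consumes only the 32-bit (icc) carry, while
 * VIS3's ADDXC is the first add that consumes the 64-bit (xcc) carry.
 */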
static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}
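/*
 * Added note: the SPARC CALL opcode carries a 30-bit word displacement,
 * so any destination within +/-2GB of the call site fits in a single
 * insn; otherwise the address is built in T1 and reached through JMPL,
 * which likewise deposits the return address in %o7.
 */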
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[16];
static const tcg_insn_unit *qemu_st_trampoline[16];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        if (SPARC64) {
            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        }
        break;
    case MO_64:
        break;
    }
}
static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address. */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call. */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64) {
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address. */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data. */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument. */
            ra += 1;
        }

        /* Set the retaddr operand. */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call. */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
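/*
 * Added summary: each trampoline re-marshals the TB-side argument layout
 * into the C calling convention (env in %o0, then address, data and oi)
 * and tail-calls the MMU helper, so generated code can reach any helper
 * with one short call plus a delay-slot instruction.
 */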
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));
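    /*
     * Added note: SAVE both allocates the frame and rotates the register
     * window, so the caller's %o0..%o7 are visible here as %i0..%i7; in
     * particular the env argument arrives in %i0 (TCG_AREG0) and the
     * start address in %i1, which is why the prologue jumps via I1 below.
     */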
#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}
#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1. */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */
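/*
 * Added illustration: with 4KiB guest pages and a naturally-aligned
 * 4-byte access, compare_mask is 0xfffffffffffff003 -- the page-number
 * bits plus the low alignment bits -- so a single AND plus SUBCC both
 * checks the TLB tag and rejects under-aligned addresses in one go.
 */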
static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case. */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below. */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus. */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here. */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension. */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}
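/*
 * Added note: the "beq,a,pt" above is annulled, so its delay-slot load
 * executes only when the branch is taken (TLB hit); on a miss the code
 * falls through into the helper call, and the branch target is filled
 * in afterwards via the INSN_OFF19 fixup of *label_ptr.
 */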
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline. */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1. */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned. */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB. */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
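    /*
     * Added note: the sethi+or pair emitted for the USE_REG_TB direct
     * jump is only a placeholder reaching offset 0; it must land on an
     * 8-byte boundary because tb_target_set_jmp_target() later rewrites
     * the pair with a single atomic 64-bit store.
     */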
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;
    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;
    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_debug_assert(0);
    }
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
    static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
    static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
    static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
    static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
    static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
    static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
    static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
    static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
    static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
    static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
    static const TCGTargetOpDef r_rZ_rJ
        = { .args_ct_str = { "r", "rZ", "rJ" } };
    static const TCGTargetOpDef R_RZ_RJ
        = { .args_ct_str = { "R", "RZ", "RJ" } };
    static const TCGTargetOpDef r_r_rZ_rJ
        = { .args_ct_str = { "r", "r", "rZ", "rJ" } };
    static const TCGTargetOpDef movc_32
        = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
    static const TCGTargetOpDef movc_64
        = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
    static const TCGTargetOpDef add2_32
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
    static const TCGTargetOpDef add2_64
        = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };
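    /*
     * Added summary: in these constraint strings "r" is any 32-bit-safe
     * register, "R" one whose high 32 bits survive (ALL_64), "A" a
     * qemu_ld/st address, "s"/"S" qemu_st data (32/64-bit), "Z" admits
     * the constant zero (%g0), and "J"/"I" admit simm13/simm11
     * immediates -- see target_parse_constraint() above.
     */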
    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_setcond_i32:
        return &r_rZ_rJ;

    case INDEX_op_brcond_i32:
        return &rZ_rJ;
    case INDEX_op_movcond_i32:
        return &movc_32;
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return &add2_32;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_rZ_rJ;

    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return &R_r;

    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &RZ_r;

    case INDEX_op_add_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i64:
        return &R_RZ_RJ;

    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
        return &R_R;

    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return &r_R;

    case INDEX_op_brcond_i64:
        return &RZ_RJ;
    case INDEX_op_movcond_i64:
        return &movc_64;
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return &add2_64;

    case INDEX_op_muluh_i64:
        return &R_R_R;

    case INDEX_op_qemu_ld_i32:
        return &r_A;
    case INDEX_op_qemu_ld_i64:
        return &R_A;
    case INDEX_op_qemu_st_i32:
        return &sZ_A;
    case INDEX_op_qemu_st_i64:
        return &SZ_A;

    default:
        return NULL;
    }
}
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time. */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_64;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}
#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                              /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};
void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_rx;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB. */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        qatomic_set((uint32_t *)jmp_rw,
                    deposit32(CALL, 0, 30, br_disp >> 2));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /* This does not exercise the range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB.
       But this does still happen quite often. */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
    flush_idcache_range(jmp_rx, jmp_rw, 8);
}
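/*
 * Added note: on the USE_REG_TB path both instructions are rewritten with
 * a single aligned 64-bit qatomic_set (i2 in the low word, i1 in the high
 * word, matching big-endian instruction order in memory), so a concurrently
 * executing thread can never observe a half-patched pair.
 */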