/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif

/*
 * Note that sparcv8plus can only hold 64 bit quantities in %g and %o
 * registers.  These are saved manually by the kernel in full 64-bit
 * slots.  The %i and %l registers are saved by the register window
 * mechanism, which only allocates space for 32 bits.  Given that this
 * window spill/fill can happen on any signal, we must consider the
 * high bits of the %i and %l registers garbage at all times.
 */
#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
#if SPARC64
# define ALL_GENERAL_REGS64  ALL_GENERAL_REGS
#else
# define ALL_GENERAL_REGS64  MAKE_64BIT_MASK(0, 16)
#endif
#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
#define ALL_QLDST_REGS64     (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
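
/*
 * Worked example (illustrative, not part of the original source):
 * MAKE_64BIT_MASK(start, len) is ((2^len - 1) << start), so
 * MAKE_64BIT_MASK(TCG_REG_O0, 3) covers exactly %o0-%o2, and
 * ALL_QLDST_REGS is ALL_GENERAL_REGS with those three tlb-lookup/call
 * registers removed.
 */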

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
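
/*
 * Illustrative check (not in the original source): these fields compose
 * into a single 32-bit instruction word.  "add %o1, 5, %o0" is op=2,
 * rd=8 (%o0), op3=0, rs1=9 (%o1), i=1, simm13=5, i.e. 0x90026005.
 */
QEMU_BUILD_BUG_ON((uint32_t)(INSN_OP(2) | INSN_RD(8) | INSN_RS1(9)
                             | INSN_IMM13(5)) != 0x90026005u);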

#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif
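
/*
 * Worked example (illustrative): check_fit_i32(4095, 13) is true because
 * sextract32(4095, 0, 13) == 4095, while check_fit_i32(4096, 13) is
 * false: bit 12 is set, so the 13-bit field sign-extends to -4096.
 * This is exactly the simm13 encodability test used throughout.
 */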

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        assert(check_fit_ptr(pcrel >> 2, 16));
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        assert(check_fit_ptr(pcrel >> 2, 19));
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    }
    return 0;
}

static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                                 TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* Make sure we test 32-bit constants for imm13 properly.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 21-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (check_fit_tl(test, 13)) {
        tcg_out_movi_imm13(s, ret, test);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi_int(s, type, ret, arg, false);
}
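
/*
 * Worked example (illustrative): arg = 0x1234567800000005.
 * lo = (int32_t)arg = 5 fits in 13 bits, so the final case applies:
 * hi = (arg - lo) >> 32 = 0x12345678 is built with SETHI+OR, shifted
 * up with SLLX 32, and the low 13 bits are folded in with one ADD.
 */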

static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
{
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
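
/*
 * Illustrative note: the 32-bit UDIV/SDIV instructions divide the
 * 64-bit quantity Y:rs1 by the second operand, so Y must first hold
 * zero (unsigned) or a copy of rs1's sign bit (signed), which is what
 * the SHIFT_SRA by 31 above computes.
 */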

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
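
/*
 * Worked example (illustrative): for TCG_COND_LTU, SUBCC computes
 * c1 - c2 and sets the carry bit exactly when c1 < c2 unsigned;
 * "addc %g0, 0, ret" then materializes 0 + 0 + carry, i.e. the
 * boolean result, with no branch at all.
 */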

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */

        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;

        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /* Otherwise adjust BH as if there is carry into T2 ... */
        if (bhconst) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
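
/*
 * Illustrative note: ADDCC/SUBCC on the low halves leave the carry or
 * borrow in %xcc.  Lacking a usable 64-bit add-with-carry immediate,
 * the code above precomputes bh +/- 1 in T2 and uses a MOVCC_XCC
 * conditional move to select the adjusted operand only when the carry
 * is actually set.
 */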

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
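
/*
 * Illustrative correspondence (assumption, following the comment above):
 * TCG_MO_LD_LD (0x1), TCG_MO_ST_LD (0x2), TCG_MO_LD_ST (0x4) and
 * TCG_MO_ST_ST (0x8) occupy the same bit positions as the MEMBAR mmask
 * bits #LoadLoad, #StoreLoad, #LoadStore and #StoreStore, so the
 * TCG_MO_ALL subset of a0 can be OR'ed into the instruction unchanged.
 */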

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[16];
static const tcg_insn_unit *qemu_st_trampoline[16];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        if (SPARC64) {
            tcg_out_arithi(s, r, r, 0, SHIFT_SRL);
        }
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        /* delay slot */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64) {
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
        } else {
            tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        }
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        /* delay slot */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
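
/*
 * Illustrative summary (assumes the standard QEMU softmmu fast-path
 * layout): the function below emits roughly
 *     ld  [env + mask_off], %o0
 *     ld  [env + table_off], %o1
 *     srl addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o2
 *     and %o2, %o0, %o2
 *     add %o2, %o1, %o2          ! &CPUTLBEntry
 * followed by loading the comparator and addend and a SUBCC that sets
 * the condition codes for the hit/miss branch.
 */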

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[16] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

static const int qemu_st_opc[16] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}
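
/*
 * Illustrative note: BPCC_A sets the annul bit, so the delay-slot memory
 * instruction emitted after the branch executes only when the "TLB hit"
 * branch is taken.  The entire fast path is thus the compare, the
 * branch, and one load or store; the miss path falls through to the
 * trampoline call.
 */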

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;

    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);

    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return C_O1_I1(R, r);

    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(RZ, r);

    case INDEX_op_add_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(R, RZ, RJ);

    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
        return C_O1_I1(R, R);

    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, R);

    case INDEX_op_brcond_i64:
        return C_O0_I2(RZ, RJ);
    case INDEX_op_movcond_i64:
        return C_O1_I4(R, RZ, RJ, RI, 0);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return C_O2_I4(R, R, RZ, RZ, RJ, RI);
    case INDEX_op_muluh_i64:
        return C_O1_I2(R, R, R);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, A);
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(R, A);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(sZ, A);
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(SZ, A);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                              /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};
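
/*
 * Worked encoding note (illustrative): the CFA offset 2047 is the
 * SPARC64 stack bias, emitted above as the uleb128 byte pair 0xff,
 * 0x0f: (2047 & 0x7f) | 0x80 == 0xff continues into 2047 >> 7 == 15.
 */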

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_rx;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        qatomic_set((uint32_t *)jmp_rw,
                    deposit32(CALL, 0, 30, br_disp >> 2));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /* This does not exercise the range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB.
       But this does still happen quite often.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
    flush_idcache_range(jmp_rx, jmp_rw, 8);
}
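
/*
 * Illustrative note: both patched instructions are written with a single
 * naturally-aligned 8-byte store.  On this big-endian host the first
 * instruction occupies the high half of the doubleword, which is why i1
 * is deposited into bits 32..63 above; a concurrently executing thread
 * therefore observes either the old or the new pair, never a mix.
 */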