/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif

#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200
#define TCG_CT_CONST_ZERO 0x400

#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)

/* Define some temporary registers. T3 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_G2
#define TCG_REG_T3 TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
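
/*
 * Worked example (added for exposition, not from the upstream file):
 * an instruction word is built by OR-ing the field macros together.
 * "add %g1, %g2, %g3" assembles as
 *     INSN_OP(2) | INSN_OP3(0x00) | INSN_RD(3) | INSN_RS1(1) | INSN_RS2(2)
 *   = 0x86004002
 * The immediate form replaces INSN_RS2() with INSN_IMM13(), whose
 * (1 << 13) term sets the i-bit selecting the immediate encoding.
 */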

#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define MOVCC_ICC (1 << 18)
#define MOVCC_XCC (1 << 18 | 1 << 12)

#define BPCC_ICC 0
#define BPCC_XCC (2 << 20)
#define BPCC_PT (1 << 19)
#define BPCC_PN 0
#define BPCC_A (1 << 29)

#define BPR_PT BPCC_PT

#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA (INSN_OP(3) | INSN_OP3(0x15))
#define STHA (INSN_OP(3) | INSN_OP3(0x16))
#define STWA (INSN_OP(3) | INSN_OP3(0x14))
#define STXA (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}
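
/*
 * Added note: a value fits in N bits iff sign-extending its low N bits
 * reproduces it.  E.g. check_fit_i32(4095, 13) is true while
 * check_fit_i32(4096, 13) is false, matching the simm13 immediate
 * range [-4096, 4095].
 */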

#define check_fit_tl check_fit_i64
#define check_fit_ptr check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    }
    return 0;
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* A 13-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/* A 32-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
}
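
/*
 * Added note: SETHI writes bits 31..10 of its operand and zeros the
 * rest of the register, so after "sethi %hi(~arg)" we hold ~arg with
 * the low 10 bits and the high 32 bits clear.  XOR-ing with the
 * negative simm13 ((arg & 0x3ff) | -0x400), whose bits 63..10 are all
 * ones, flips bits 31..10 back to arg, inserts the low 10 bits, and
 * sets bits 63..32, which is exactly the sign extension of a negative
 * 32-bit arg.  (Non-negative values take the tcg_out_movi_u32 path.)
 */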

/* A 32-bit constant zero-extended to 64 bits. */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64 bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64 bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64 bits. */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}
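
/*
 * Added note: a single AND with 0xffff is not possible here because
 * 0xffff does not fit in the signed 13-bit immediate field, so the
 * shift-left/shift-right pair is the usual zero-extension idiom.
 */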

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[16] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_TSTEQ] = COND_E,
    [TCG_COND_TSTNE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[16] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGCond cond,
                        TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
                   is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, cond, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    int rcond = tcg_cond_to_rcond[cond];
    if (arg2 == 0 && rcond) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(rcond) | off16);
    } else {
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    int rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, rcond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Transform to inequality vs zero. */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
        c1 = TCG_REG_G0;
        c2 = TCG_REG_T1, c2const = 0;
        cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        if (neg) {
            /* 0 - 0 - C = -C = (C ? -1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
        } else {
            /* 0 + 0 + C = C = (C ? 1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        }
    } else {
        if (neg) {
            /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
        } else {
            /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
        }
    }
}
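
/*
 * Added example: for "setcond_i32 ltu ret, c1, c2" the code above
 * emits just two branch-free instructions:
 *     subcc  c1, c2, %g0      ! icc.C is set iff c1 < c2 unsigned
 *     addc   %g0, 0, ret      ! ret = 0 + 0 + C
 * EQ/NE and TSTEQ/TSTNE first reduce to GEU/LTU vs zero via XOR or
 * AND, then reuse the same carry trick.
 */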

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const, bool neg)
{
    int rcond;

    if (use_vis3_instructions && !neg) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, cond, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
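
/*
 * Added example: with a register high part (the final else above),
 * add2_i64 rl,rh = al,ah + bl,bh emits:
 *     addcc  al, bl, t1       ! low parts; sets xcc.C
 *     add    bh, 1, t2        ! speculate carry into the high part
 *     movgeu %xcc, bh, t2     ! carry clear: keep the original bh
 *     add    ah, t2, rh
 *     mov    t1, rl
 */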

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
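
/*
 * Added note: TCG_MO_LD_LD, TCG_MO_ST_LD, TCG_MO_LD_ST and TCG_MO_ST_ST
 * are 1, 2, 4 and 8, the same encoding as the MEMBAR mmask bits
 * #LoadLoad, #StoreLoad, #LoadStore and #StoreStore, so the TCGBar
 * argument can be used directly as the mmask field.
 */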

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* We expect to use a 13-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)
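
/*
 * Added note: the TLB fast path below loads CPUTLBDescFast.mask and
 * .table with simm13 displacements, so the offset from env must stay
 * within [-4096, 4095]; hence the -(1 << 12) floor above.
 */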

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1. */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow host SIGBUS to deliver SIGBUS to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != get_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }
    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do so in the delay slot. */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB] = LDUB,
        [MO_SB] = LDSB,
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_BEUW] = LDUH,
        [MO_BESW] = LDSH,
        [MO_BEUL] = LDUW,
        [MO_BESL] = LDSW,
        [MO_BEUQ] = LDX,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,
        [MO_LEUQ] = LDX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
        [MO_UB] = STB,

        [MO_BEUW] = STH,
        [MO_BEUL] = STW,
        [MO_BEUQ] = STX,

        [MO_LEUW] = STH_LE,
        [MO_LEUL] = STW_LE,
        [MO_LEUQ] = STX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments. */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;

    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
        break;
    case INDEX_op_negsetcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,          /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
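    /*
     * Added note: 2047 is the SPARC v9 stack bias; it is emitted as the
     * ULEB128 pair 0xff, 0x0f: (2047 & 0x7f) | 0x80 = 0xff with the
     * continuation bit set, then 2047 >> 7 = 15.
     */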
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}