/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif
/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
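/* A worked example of how these fields combine: "add %o1, %o2, %o0" is
   op=2, rd=8 (%o0), op3=0x00, rs1=9 (%o1), i=0, rs2=10 (%o2), so
       INSN_OP(2) | INSN_RD(8) | INSN_OP3(0x00) | INSN_RS1(9) | INSN_RS2(10)
   assembles to 0x9002400a, exactly the encoding a native assembler
   produces for that instruction.  */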
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
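/* The *_LE forms are load/store-alternate instructions using the
   ASI_PRIMARY_LITTLE address space, which performs the access
   byte-swapped with respect to the primary (big-endian) space.  This
   lets a big-endian SPARC host access little-endian guest memory in a
   single instruction instead of a load followed by a byte swap.  */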
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << (sizeof(tcg_target_long) * 8 - bits)
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
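/* These helpers test whether VAL fits in a BITS-bit signed immediate by
   shifting the value up so that only BITS bits survive, then shifting
   back down.  E.g. with a 64-bit tcg_target_long, check_fit_tl(val, 13)
   accepts -4096..4095: 4095 survives the round trip unchanged, while
   (4096 << 51) >> 51 == -4096 != 4096, so 4096 is rejected.  */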
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP16:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 16)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}
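/* Note the split displacement field: INSN_OFF16 stores the low 14 bits
   of the word offset in insn[13:0] and the high 2 bits in insn[21:20],
   matching the d16lo/d16hi fields of the BPr format, while INSN_OFF19
   is a single contiguous field.  Either way, patching reduces to the
   mask-and-or sequences above.  */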
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* Helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}
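/* For example, tcg_out_movi_imm32(s, ret, 0x12345678) emits
       sethi  %hi(0x12345678), ret   ! ret = 0x12345400
       or     ret, 0x278, ret        ! ret = 0x12345678
   while any constant in -4096..4095 is a single "or %g0, imm13, ret".  */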
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32.  */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits.  */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
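/* The sethi+xor case above handles negative 32-bit constants that have
   been sign-extended to 64 bits.  sethi zero-extends, so we load %hi of
   the complement and flip everything with one sign-extended 13-bit xor.
   E.g. for arg = 0xffffffffedcba988 (-0x12345678):
       sethi %hi(~arg), ret       ! ret = 0x0000000012345400
       xor   ret, -0x278, ret     ! -0x278 == (arg & 0x3ff) | -0x400
   The xor immediate sign-extends to 0xfffffffffffffd88: it inverts
   bits 63..10 of ret (recreating the sign extension and %hi bits of
   arg) while its low 10 bits supply arg's low bits directly.  */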
static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        } else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}
static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
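/* "sethi %hi(0), %g0" is the canonical SPARC nop, encoding 0x01000000.  */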
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
        } else {
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
                         TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_tl(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int scond, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare.  */
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
        break;

    case TCG_COND_NE:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        break;

    default:
        scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_nop(s);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGArg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
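/* Worked example of the ADDX/SUBX trick: after "subcc c1, c2, %g0" the
   carry flag is set exactly when c1 < c2 unsigned, so
       addx %g0, 0, ret     ! ret = 0 + 0 + C = (c1 <  c2)   i.e. LTU
       subx %g0, -1, ret    ! ret = 0 + 1 - C = (c1 >= c2)   i.e. GEU
   produce the setcond result in a single branch-free instruction.  */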
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (ret != ah && (bhconst || ret != bh)) {
        tmp = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bl == 0 && bh == 0) {
            if (cond == TCG_COND_EQ) {
                tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
                tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
            } else {
                tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
            }
        } else {
            tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
            tcg_out_cmp(s, ah, bh, bhconst);
            tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        }
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
        break;

    default:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
        tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
        break;
    }
}
#endif
static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
                            TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                            TCGArg bh, int bhconst, int opl, int oph)
{
    TCGArg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
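/* E.g. a 64-bit addition on a 32-bit host becomes
       addcc al, bl, rl     ! low halves; sets the carry flag
       addx  ah, bh, rh     ! high halves plus carry
   with TMP substituted for RL whenever writing RL directly would
   clobber AH or BH before they are consumed.  */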
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */
}
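/* Sketch of the resulting frame (stack grows down): the register window
   save area and stack bias sit at %sp, the TCG_STATIC_CALL_ARGS_SIZE
   outgoing-argument area above that, and the CPU_TEMP_BUF_NLONGS temp
   buffer immediately below %fp - TCG_TARGET_STACK_BIAS.  SAVE both
   rotates the register window and allocates this whole frame in one
   instruction; the JMPL through %i1 (tcg_qemu_tb_exec's tb_ptr
   argument, which was %o1 before SAVE) then enters the translated
   block.  */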
#if defined(CONFIG_SOFTMMU)

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
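/* For a 64-bit host and a 32-bit target, the sequence emitted below is
   roughly:
       srl    addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o1
       and    addr, TARGET_PAGE_MASK | alignment_mask, %o0
       and    %o1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, %o1
       add    %o1, %env, %o1
       lduw   [%o1 + comparator_ofs], %o2
       ldx    [%o1 + addend_ofs], %o1
       subcc  %o0, %o2, %g0
       srl    addr, 0, %o0            ! zero-extend the guest address
   leaving the compare result in the condition codes as described.  */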
static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_NE, BPCC_PN
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);

        /* TLB Hit.  */
        /* Load all 64-bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}
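/* Note the slow-path sign extensions above: the helpers return
   zero-extended data, so a signed byte load re-extends with
   "sll %o0, 24" followed by "sra, 24", and the 32-bit signed case is
   just "sra %o0, 0", which sign-extends from bit 31 into bits 32..63.  */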
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n, datafull;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    datafull = datalo;
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datafull = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation.  */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;
#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
#endif

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;
#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
#endif

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "rZ", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
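/* tb_set_jmp_target1 below retargets the CALL emitted by goto_tb in
   place.  CALL's low 30 bits hold a signed word displacement, so e.g.
   redirecting a jump 0x1000 bytes forward stores
   0x40000000 | (0x1000 >> 2); the icache flush then makes the patched
   instruction visible to execution.  */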
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB.  */
    if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
        tcg_abort();
    }

    *ptr = CALL | (disp & 0x3fffffff);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}