softmmu: commonize helper definitions
qemu-kvm.git: tcg/sparc/tcg-target.c (blob 40f2ec10274f7abf4693b6c06872e32b0e500b4f)

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-null.h"

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64  0xffffffffu
#else
# define ALL_64  0xffffu
#endif
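
/* Added note: in the register masks used here, bit N corresponds to register
   N in the enumeration above -- bits 0-7 are %g0-%g7, 8-15 are %o0-%o7,
   16-23 are %l0-%l7 and 24-31 are %i0-%i7.  Thus 0xffff restricts 64-bit
   values to the %g and %o registers on sparcv8plus, matching the comment
   above, while sparc64 may use all 32 registers.  */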

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
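
/* For illustration: a SPARC format-3 instruction word is assembled by
   OR-ing these fields together.  E.g. "add %o1, %o2, %o0" is
   INSN_OP(2) | INSN_OP3(0x00) (ARITH_ADD below) | INSN_RD(TCG_REG_O0)
   | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2), and the immediate form
   "add %o1, 42, %o0" replaces INSN_RS2() with INSN_IMM13(42), whose
   bit 13 selects the immediate addressing mode.  */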

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif
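
/* Example: check_fit_i32(4095, 13) is true and check_fit_i32(4096, 13) is
   false, since a signed 13-bit immediate covers -4096..4095; this matches
   the simm13 field produced by INSN_IMM13 above.  */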

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;

    assert(addend == 0);
    value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(value >> 2, 16)) {
            tcg_abort();
        }
        insn = *code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'R':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        break;
    case 'A': /* qemu_ld/st address constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0,
                         TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
    reserve_helpers:
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 's': /* qemu_st data 32-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        goto reserve_helpers;
    case 'S': /* qemu_st data 64-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        goto reserve_helpers;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                                 TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
}

static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long hi, lo = (int32_t)arg;

    /* Make sure we test 32-bit constants for imm13 properly.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
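
/* Worked example of the decomposition above: moving 0x123456789a into a
   64-bit register cannot use the imm13, sethi/or or sethi/xor forms, and
   the low 32 bits (0x3456789a) do not fit in 13 bits, so the final branch
   is taken: 0x12 is loaded into RET with a single or-immediate, 0x3456789a
   is built in %o7 (TCG_REG_T2) with sethi+or, RET is shifted left by 32,
   and the two halves are OR-ed together -- five instructions in total.  */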

static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
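
/* Note: "sethi 0, %g0" is the canonical SPARC nop encoding, which is why
   tcg_out_nop is expressed in terms of tcg_out_sethi.  */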

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
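
/* The ADDX/SUBX trick above: after "subcc c1, c2, %g0" the carry flag is
   set exactly when c1 < c2 unsigned.  "addx %g0, 0, ret" then yields
   0 + 0 + C, i.e. 1 for LTU and 0 otherwise, while "subx %g0, -1, ret"
   yields 0 - (-1) - C = 1 - C, i.e. 1 for GEU.  EQ/NE and the register
   forms of GTU/LEU are first rewritten into one of those two cases.  */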

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah, int32_t bl, int blconst,
                            int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
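
/* This is the double-word path used by add2_i32/sub2_i32 below: OPL is
   ADDCC/SUBCC, which produces the carry/borrow from the low halves, and
   OPH is ADDX/SUBX, which consumes it for the high halves.  */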

static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest);
    tcg_out_nop(s);
}

#ifdef CONFIG_SOFTMMU
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];
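
/* Each trampoline built below re-shuffles the call arguments into the C
   helper's ABI slots (merging split 32-bit address/data halves on
   sparcv8plus), copies the TB return address from %o7 into the helper's
   retaddr argument, and then tail-calls the helper while restoring the
   original %o7 in the call's delay slot, so the helper returns directly
   to the generated code that invoked the trampoline.  */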

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            /* Skip the mem_index argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}

#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
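
/* Illustrative sketch only (mask materialization abbreviated); the fast
   path emitted below is approximately:
       srl    addr, TARGET_PAGE_BITS, %o1       ! page number
       set    TARGET_PAGE_MASK|align_bits, %g1  ! via tcg_out_movi
       and    %o1, CPU_TLB_SIZE - 1, %o1        ! TLB index
       and    addr, %g1, %o0                    ! page part of the address
       sll    %o1, CPU_TLB_ENTRY_BITS, %o1      ! scale to an entry offset
       add    env, %o1, %o1                     ! &env->tlb_table[mem_index][idx]
       ld/ldx [%o1 + which], %o2                ! tlb comparator
       ldx    [%o1 + addend], %o1               ! host addend
       subcc  %o0, %o2, %g0                     ! compare; result in %icc/%xcc
   with an extra set/add first when the tlb_table offset does not fit in a
   13-bit displacement.  */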

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    int tlb_ofs;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi, bool is_64)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & ~MO_BSWAP) == MO_SL) {
        func = qemu_ld_trampoline[memop & ~MO_SIGN];
    } else {
        func = qemu_ld_trampoline[memop];
    }
    assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, memi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & ~MO_BSWAP) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if (s_bits == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
    if (!SPARC64 && s_bits == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop];
    assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, param, memi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Make sure to preserve links during retranslation.  */
            tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
        }
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, a0);
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
                        args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
                        args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;
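    /* Note: UMUL/SMUL place the high 32 bits of the product in the %y
       register (and, on a 64-bit CPU, the full 64-bit product in rd); the
       SRLX path above reads the high half directly when the destination
       register can hold 64 bits, otherwise RDY fetches it from %y.  */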

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, args[3]);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_trunc_shr_i32:
        if (a2 == 0) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        } else {
            tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },

    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "r" } },
    { INDEX_op_ext32u_i64, { "R", "r" } },
    { INDEX_op_trunc_shr_i32, { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
    { INDEX_op_qemu_st_i64, { "SZ", "A" } },

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);

    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}

#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    uintptr_t disp = addr - jmp_addr;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB.  */
    assert(disp == (int32_t)disp);

    *ptr = CALL | (uint32_t)disp >> 2;
    flush_icache_range(jmp_addr, jmp_addr + 4);
}