[qemu/ar7.git] tcg/sparc/tcg-target.inc.c
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "tcg-pool.inc.c"
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
62 #endif
64 #ifdef __arch64__
65 # define SPARC64 1
66 #else
67 # define SPARC64 0
68 #endif
70 /* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
71 registers. These are saved manually by the kernel in full 64-bit
72 slots. The %i and %l registers are saved by the register window
73 mechanism, which only allocates space for 32 bits. Given that this
74 window spill/fill can happen on any signal, we must consider the
75 high bits of the %i and %l registers garbage at all times. */
76 #if SPARC64
77 # define ALL_64 0xffffffffu
78 #else
79 # define ALL_64 0xffffu
80 #endif
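/* Register numbers 0-15 are %g0-%g7 and %o0-%o7, and 16-31 are %l0-%l7 and
   %i0-%i7, so the 0xffffu mask above restricts 64-bit values to the %g and
   %o registers on sparcv8plus, exactly as the preceding comment requires. */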
82 /* Define some temporary registers. T2 is used for constant generation. */
83 #define TCG_REG_T1 TCG_REG_G1
84 #define TCG_REG_T2 TCG_REG_O7
86 #ifndef CONFIG_SOFTMMU
87 # define TCG_GUEST_BASE_REG TCG_REG_I5
88 #endif
90 #define TCG_REG_TB TCG_REG_I1
91 #define USE_REG_TB (sizeof(void *) > 4)
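/* In other words, only a 64-bit host dedicates %i1 to hold the address of
   the current TB; the prologue reserves it, and later code uses it for
   TB-relative constants and for goto_tb.  On a 32-bit host it remains an
   ordinary allocatable register. */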
93 static const int tcg_target_reg_alloc_order[] = {
94 TCG_REG_L0,
95 TCG_REG_L1,
96 TCG_REG_L2,
97 TCG_REG_L3,
98 TCG_REG_L4,
99 TCG_REG_L5,
100 TCG_REG_L6,
101 TCG_REG_L7,
103 TCG_REG_I0,
104 TCG_REG_I1,
105 TCG_REG_I2,
106 TCG_REG_I3,
107 TCG_REG_I4,
108 TCG_REG_I5,
110 TCG_REG_G2,
111 TCG_REG_G3,
112 TCG_REG_G4,
113 TCG_REG_G5,
115 TCG_REG_O0,
116 TCG_REG_O1,
117 TCG_REG_O2,
118 TCG_REG_O3,
119 TCG_REG_O4,
120 TCG_REG_O5,
123 static const int tcg_target_call_iarg_regs[6] = {
124 TCG_REG_O0,
125 TCG_REG_O1,
126 TCG_REG_O2,
127 TCG_REG_O3,
128 TCG_REG_O4,
129 TCG_REG_O5,
132 static const int tcg_target_call_oarg_regs[] = {
133 TCG_REG_O0,
134 TCG_REG_O1,
135 TCG_REG_O2,
136 TCG_REG_O3,
139 #define INSN_OP(x) ((x) << 30)
140 #define INSN_OP2(x) ((x) << 22)
141 #define INSN_OP3(x) ((x) << 19)
142 #define INSN_OPF(x) ((x) << 5)
143 #define INSN_RD(x) ((x) << 25)
144 #define INSN_RS1(x) ((x) << 14)
145 #define INSN_RS2(x) (x)
146 #define INSN_ASI(x) ((x) << 5)
148 #define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
149 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
150 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
151 #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
152 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
153 #define INSN_COND(x) ((x) << 25)
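/* These macros assemble the fixed SPARC instruction fields: op in bits
   31:30, rd (or the branch condition) in 29:25, op3 in 24:19, rs1 in
   18:14, and either rs2 in 4:0 or, with bit 13 set, a signed 13-bit
   immediate in 12:0.  For example, ARITH_ADD | INSN_RD(rd) | INSN_RS1(rs1)
   | INSN_RS2(rs2) is "add %rs1, %rs2, %rd"; swapping INSN_RS2(rs2) for
   INSN_IMM13(imm) gives the immediate form of the same instruction. */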
155 #define COND_N 0x0
156 #define COND_E 0x1
157 #define COND_LE 0x2
158 #define COND_L 0x3
159 #define COND_LEU 0x4
160 #define COND_CS 0x5
161 #define COND_NEG 0x6
162 #define COND_VS 0x7
163 #define COND_A 0x8
164 #define COND_NE 0x9
165 #define COND_G 0xa
166 #define COND_GE 0xb
167 #define COND_GU 0xc
168 #define COND_CC 0xd
169 #define COND_POS 0xe
170 #define COND_VC 0xf
171 #define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
173 #define RCOND_Z 1
174 #define RCOND_LEZ 2
175 #define RCOND_LZ 3
176 #define RCOND_NZ 5
177 #define RCOND_GZ 6
178 #define RCOND_GEZ 7
180 #define MOVCC_ICC (1 << 18)
181 #define MOVCC_XCC (1 << 18 | 1 << 12)
183 #define BPCC_ICC 0
184 #define BPCC_XCC (2 << 20)
185 #define BPCC_PT (1 << 19)
186 #define BPCC_PN 0
187 #define BPCC_A (1 << 29)
189 #define BPR_PT BPCC_PT
191 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
192 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
193 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
194 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
195 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
196 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
197 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
198 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
199 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
200 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
201 #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
202 #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
203 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
204 #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
205 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
206 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
207 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
208 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
209 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
210 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
211 #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
213 #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
214 #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
216 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
217 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
218 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
220 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
221 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
222 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
224 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
225 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
226 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
227 #define RETURN (INSN_OP(2) | INSN_OP3(0x39))
228 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
229 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
230 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
231 #define CALL INSN_OP(1)
232 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
233 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
234 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
235 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
236 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
237 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
238 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
239 #define STB (INSN_OP(3) | INSN_OP3(0x05))
240 #define STH (INSN_OP(3) | INSN_OP3(0x06))
241 #define STW (INSN_OP(3) | INSN_OP3(0x04))
242 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
243 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
244 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
245 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
246 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
247 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
248 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
249 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
250 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
251 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
252 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
253 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
255 #define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
257 #define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
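/* i.e. "sethi 0, %g0", the canonical SPARC nop encoding 0x01000000. */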
259 #ifndef ASI_PRIMARY_LITTLE
260 #define ASI_PRIMARY_LITTLE 0x88
261 #endif
263 #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
264 #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
265 #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
266 #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
267 #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
269 #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
270 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
271 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
273 #ifndef use_vis3_instructions
274 bool use_vis3_instructions;
275 #endif
277 static inline int check_fit_i64(int64_t val, unsigned int bits)
279 return val == sextract64(val, 0, bits);
282 static inline int check_fit_i32(int32_t val, unsigned int bits)
284 return val == sextract32(val, 0, bits);
287 #define check_fit_tl check_fit_i64
288 #if SPARC64
289 # define check_fit_ptr check_fit_i64
290 #else
291 # define check_fit_ptr check_fit_i32
292 #endif
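/* check_fit_i32(v, 13), for instance, asks whether v survives being
   truncated to 13 bits and sign-extended back, i.e. whether it fits a
   signed 13-bit immediate: 4095 does, 4096 does not. */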
294 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
295 intptr_t value, intptr_t addend)
297 uint32_t insn = *code_ptr;
298 intptr_t pcrel;
300 value += addend;
301 pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
303 switch (type) {
304 case R_SPARC_WDISP16:
305 assert(check_fit_ptr(pcrel >> 2, 16));
306 insn &= ~INSN_OFF16(-1);
307 insn |= INSN_OFF16(pcrel);
308 break;
309 case R_SPARC_WDISP19:
310 assert(check_fit_ptr(pcrel >> 2, 19));
311 insn &= ~INSN_OFF19(-1);
312 insn |= INSN_OFF19(pcrel);
313 break;
314 default:
315 g_assert_not_reached();
318 *code_ptr = insn;
319 return true;
322 /* parse target specific constraints */
323 static const char *target_parse_constraint(TCGArgConstraint *ct,
324 const char *ct_str, TCGType type)
326 switch (*ct_str++) {
327 case 'r':
328 ct->ct |= TCG_CT_REG;
329 ct->u.regs = 0xffffffff;
330 break;
331 case 'R':
332 ct->ct |= TCG_CT_REG;
333 ct->u.regs = ALL_64;
334 break;
335 case 'A': /* qemu_ld/st address constraint */
336 ct->ct |= TCG_CT_REG;
337 ct->u.regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff;
338 reserve_helpers:
339 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
340 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
341 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
342 break;
343 case 's': /* qemu_st data 32-bit constraint */
344 ct->ct |= TCG_CT_REG;
345 ct->u.regs = 0xffffffff;
346 goto reserve_helpers;
347 case 'S': /* qemu_st data 64-bit constraint */
348 ct->ct |= TCG_CT_REG;
349 ct->u.regs = ALL_64;
350 goto reserve_helpers;
351 case 'I':
352 ct->ct |= TCG_CT_CONST_S11;
353 break;
354 case 'J':
355 ct->ct |= TCG_CT_CONST_S13;
356 break;
357 case 'Z':
358 ct->ct |= TCG_CT_CONST_ZERO;
359 break;
360 default:
361 return NULL;
363 return ct_str;
366 /* test if a constant matches the constraint */
367 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
368 const TCGArgConstraint *arg_ct)
370 int ct = arg_ct->ct;
372 if (ct & TCG_CT_CONST) {
373 return 1;
376 if (type == TCG_TYPE_I32) {
377 val = (int32_t)val;
380 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
381 return 1;
382 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
383 return 1;
384 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
385 return 1;
386 } else {
387 return 0;
391 static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
392 TCGReg rs2, int op)
394 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
397 static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
398 int32_t offset, int op)
400 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
403 static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
404 int32_t val2, int val2const, int op)
406 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
407 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
410 static inline bool tcg_out_mov(TCGContext *s, TCGType type,
411 TCGReg ret, TCGReg arg)
413 if (ret != arg) {
414 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
416 return true;
419 static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
421 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
424 static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
426 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
429 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
430 tcg_target_long arg, bool in_prologue)
432 tcg_target_long hi, lo = (int32_t)arg;
433 tcg_target_long test, lsb;
435 /* Make sure we test 32-bit constants for imm13 properly. */
436 if (type == TCG_TYPE_I32) {
437 arg = lo;
440 /* A 13-bit constant sign-extended to 64-bits. */
441 if (check_fit_tl(arg, 13)) {
442 tcg_out_movi_imm13(s, ret, arg);
443 return;
446 /* A 13-bit constant relative to the TB. */
447 if (!in_prologue && USE_REG_TB) {
448 test = arg - (uintptr_t)s->code_gen_ptr;
449 if (check_fit_ptr(test, 13)) {
450 tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
451 return;
455 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
456 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
457 tcg_out_sethi(s, ret, arg);
458 if (arg & 0x3ff) {
459 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
461 return;
464 /* A 32-bit constant sign-extended to 64-bits. */
465 if (arg == lo) {
466 tcg_out_sethi(s, ret, ~arg);
467 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
468 return;
471 /* A 21-bit constant, shifted. */
472 lsb = ctz64(arg);
473 test = (tcg_target_long)arg >> lsb;
474 if (check_fit_tl(test, 13)) {
475 tcg_out_movi_imm13(s, ret, test);
476 tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
477 return;
478 } else if (lsb > 10 && test == extract64(test, 0, 21)) {
479 tcg_out_sethi(s, ret, test << 10);
480 tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
481 return;
484 /* A 64-bit constant decomposed into 2 32-bit pieces. */
485 if (check_fit_i32(lo, 13)) {
486 hi = (arg - lo) >> 32;
487 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
488 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
489 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
490 } else {
491 hi = arg >> 32;
492 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
493 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
494 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
495 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
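/* To summarize the ladder above: try a bare simm13; then a simm13 offset
   from TCG_REG_TB; then sethi+or for values that are 32-bit or zero-extend
   to 64 bits; sethi+xor for values that sign-extend from 32 bits; then a
   13- or 21-bit pattern shifted into place with sllx; and finally two
   32-bit halves combined with sllx plus or/add.  E.g. 0x12345678 becomes
   "sethi %hi(0x12345678), ret; or ret, 0x278, ret". */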
499 static inline void tcg_out_movi(TCGContext *s, TCGType type,
500 TCGReg ret, tcg_target_long arg)
502 tcg_out_movi_int(s, type, ret, arg, false);
505 static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
506 TCGReg a2, int op)
508 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
511 static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
512 intptr_t offset, int op)
514 if (check_fit_ptr(offset, 13)) {
515 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
516 INSN_IMM13(offset));
517 } else {
518 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
519 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
523 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
524 TCGReg arg1, intptr_t arg2)
526 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
529 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
530 TCGReg arg1, intptr_t arg2)
532 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
535 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
536 TCGReg base, intptr_t ofs)
538 if (val == 0) {
539 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
540 return true;
542 return false;
545 static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
547 intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
548 if (USE_REG_TB && check_fit_ptr(diff, 13)) {
549 tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
550 return;
552 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
553 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
556 static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
558 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
561 static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
563 tcg_out32(s, RDY | INSN_RD(rd));
566 static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
567 int32_t val2, int val2const, int uns)
569 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
570 if (uns) {
571 tcg_out_sety(s, TCG_REG_G0);
572 } else {
573 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
574 tcg_out_sety(s, TCG_REG_T1);
577 tcg_out_arithc(s, rd, rs1, val2, val2const,
578 uns ? ARITH_UDIV : ARITH_SDIV);
581 static inline void tcg_out_nop(TCGContext *s)
583 tcg_out32(s, NOP);
586 static const uint8_t tcg_cond_to_bcond[] = {
587 [TCG_COND_EQ] = COND_E,
588 [TCG_COND_NE] = COND_NE,
589 [TCG_COND_LT] = COND_L,
590 [TCG_COND_GE] = COND_GE,
591 [TCG_COND_LE] = COND_LE,
592 [TCG_COND_GT] = COND_G,
593 [TCG_COND_LTU] = COND_CS,
594 [TCG_COND_GEU] = COND_CC,
595 [TCG_COND_LEU] = COND_LEU,
596 [TCG_COND_GTU] = COND_GU,
599 static const uint8_t tcg_cond_to_rcond[] = {
600 [TCG_COND_EQ] = RCOND_Z,
601 [TCG_COND_NE] = RCOND_NZ,
602 [TCG_COND_LT] = RCOND_LZ,
603 [TCG_COND_GT] = RCOND_GZ,
604 [TCG_COND_LE] = RCOND_LEZ,
605 [TCG_COND_GE] = RCOND_GEZ
608 static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
610 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
613 static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
615 int off19 = 0;
617 if (l->has_value) {
618 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
619 } else {
620 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
622 tcg_out_bpcc0(s, scond, flags, off19);
625 static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
627 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
630 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
631 int32_t arg2, int const_arg2, TCGLabel *l)
633 tcg_out_cmp(s, arg1, arg2, const_arg2);
634 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
635 tcg_out_nop(s);
638 static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
639 int32_t v1, int v1const)
641 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
642 | INSN_RS1(tcg_cond_to_bcond[cond])
643 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
646 static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
647 TCGReg c1, int32_t c2, int c2const,
648 int32_t v1, int v1const)
650 tcg_out_cmp(s, c1, c2, c2const);
651 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
654 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
655 int32_t arg2, int const_arg2, TCGLabel *l)
657 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
658 if (arg2 == 0 && !is_unsigned_cond(cond)) {
659 int off16 = 0;
661 if (l->has_value) {
662 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
663 } else {
664 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
666 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
667 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
668 } else {
669 tcg_out_cmp(s, arg1, arg2, const_arg2);
670 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
672 tcg_out_nop(s);
675 static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
676 int32_t v1, int v1const)
678 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
679 | (tcg_cond_to_rcond[cond] << 10)
680 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
683 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
684 TCGReg c1, int32_t c2, int c2const,
685 int32_t v1, int v1const)
687 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
688 Note that the immediate range is one bit smaller, so we must check
689 for that as well. */
690 if (c2 == 0 && !is_unsigned_cond(cond)
691 && (!v1const || check_fit_i32(v1, 10))) {
692 tcg_out_movr(s, cond, ret, c1, v1, v1const);
693 } else {
694 tcg_out_cmp(s, c1, c2, c2const);
695 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
699 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
700 TCGReg c1, int32_t c2, int c2const)
702 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
703 switch (cond) {
704 case TCG_COND_LTU:
705 case TCG_COND_GEU:
706 /* The result of the comparison is in the carry bit. */
707 break;
709 case TCG_COND_EQ:
710 case TCG_COND_NE:
711 /* For equality, we can transform to inequality vs zero. */
712 if (c2 != 0) {
713 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
714 c2 = TCG_REG_T1;
715 } else {
716 c2 = c1;
718 c1 = TCG_REG_G0, c2const = 0;
719 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
720 break;
722 case TCG_COND_GTU:
723 case TCG_COND_LEU:
724 /* If we don't need to load a constant into a register, we can
725 swap the operands on GTU/LEU. There's no benefit to loading
726 the constant into a temporary register. */
727 if (!c2const || c2 == 0) {
728 TCGReg t = c1;
729 c1 = c2;
730 c2 = t;
731 c2const = 0;
732 cond = tcg_swap_cond(cond);
733 break;
735 /* FALLTHRU */
737 default:
738 tcg_out_cmp(s, c1, c2, c2const);
739 tcg_out_movi_imm13(s, ret, 0);
740 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
741 return;
744 tcg_out_cmp(s, c1, c2, c2const);
745 if (cond == TCG_COND_LTU) {
746 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
747 } else {
748 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
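/* The tail above relies on SUBCC leaving the unsigned borrow in the icc
   carry bit: "addc %g0, 0" then yields C itself (1 iff c1 < c2, i.e. LTU),
   while "subc %g0, -1" computes 0 - (-1) - C = 1 - C (1 iff the carry is
   clear, i.e. GEU). */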
752 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
753 TCGReg c1, int32_t c2, int c2const)
755 if (use_vis3_instructions) {
756 switch (cond) {
757 case TCG_COND_NE:
758 if (c2 != 0) {
759 break;
761 c2 = c1, c2const = 0, c1 = TCG_REG_G0;
762 /* FALLTHRU */
763 case TCG_COND_LTU:
764 tcg_out_cmp(s, c1, c2, c2const);
765 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
766 return;
767 default:
768 break;
772 /* For 64-bit signed comparisons vs zero, we can avoid the compare
773 if the input does not overlap the output. */
774 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
775 tcg_out_movi_imm13(s, ret, 0);
776 tcg_out_movr(s, cond, ret, c1, 1, 1);
777 } else {
778 tcg_out_cmp(s, c1, c2, c2const);
779 tcg_out_movi_imm13(s, ret, 0);
780 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
784 static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
785 TCGReg al, TCGReg ah, int32_t bl, int blconst,
786 int32_t bh, int bhconst, int opl, int oph)
788 TCGReg tmp = TCG_REG_T1;
790 /* Note that the low parts are fully consumed before tmp is set. */
791 if (rl != ah && (bhconst || rl != bh)) {
792 tmp = rl;
795 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
796 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
797 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
800 static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
801 TCGReg al, TCGReg ah, int32_t bl, int blconst,
802 int32_t bh, int bhconst, bool is_sub)
804 TCGReg tmp = TCG_REG_T1;
806 /* Note that the low parts are fully consumed before tmp is set. */
807 if (rl != ah && (bhconst || rl != bh)) {
808 tmp = rl;
811 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
813 if (use_vis3_instructions && !is_sub) {
814 /* Note that ADDXC doesn't accept immediates. */
815 if (bhconst && bh != 0) {
816 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
817 bh = TCG_REG_T2;
819 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
820 } else if (bh == TCG_REG_G0) {
821 /* If we have a zero, we can perform the operation in two insns,
822 with the arithmetic first, and a conditional move into place. */
823 if (rh == ah) {
824 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
825 is_sub ? ARITH_SUB : ARITH_ADD);
826 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
827 } else {
828 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
829 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
831 } else {
832 /* Otherwise adjust BH as if there is carry into T2 ... */
833 if (bhconst) {
834 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
835 } else {
836 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
837 is_sub ? ARITH_SUB : ARITH_ADD);
839 /* ... smoosh T2 back to original BH if carry is clear ... */
840 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
841 /* ... and finally perform the arithmetic with the new operand. */
842 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
845 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
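/* The dance for the high word exists because plain ADDC/SUBC consume the
   32-bit icc carry, not the xcc carry produced by the 64-bit ADDCC/SUBCC;
   only the VIS3 ADDXC can use xcc directly.  Hence the fallback computes
   both bh and bh+/-1 and uses a movcc on xcc to pick the right one before
   the final add/sub. */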
848 static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
849 bool in_prologue)
851 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
853 if (disp == (int32_t)disp) {
854 tcg_out32(s, CALL | (uint32_t)disp >> 2);
855 } else {
856 uintptr_t desti = (uintptr_t)dest;
857 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
858 desti & ~0xfff, in_prologue);
859 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
863 static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
865 tcg_out_call_nodelay(s, dest, false);
866 tcg_out_nop(s);
869 static void tcg_out_mb(TCGContext *s, TCGArg a0)
871 /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
872 tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
875 #ifdef CONFIG_SOFTMMU
876 static tcg_insn_unit *qemu_ld_trampoline[16];
877 static tcg_insn_unit *qemu_st_trampoline[16];
879 static void emit_extend(TCGContext *s, TCGReg r, int op)
881 /* Emit zero extend of 8, 16 or 32 bit data as
882 * required by the MO_* value op; do nothing for 64 bit. */
884 switch (op & MO_SIZE) {
885 case MO_8:
886 tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
887 break;
888 case MO_16:
889 tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
890 tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
891 break;
892 case MO_32:
893 if (SPARC64) {
894 tcg_out_arith(s, r, r, 0, SHIFT_SRL);
896 break;
897 case MO_64:
898 break;
902 static void build_trampolines(TCGContext *s)
904 static void * const qemu_ld_helpers[16] = {
905 [MO_UB] = helper_ret_ldub_mmu,
906 [MO_SB] = helper_ret_ldsb_mmu,
907 [MO_LEUW] = helper_le_lduw_mmu,
908 [MO_LESW] = helper_le_ldsw_mmu,
909 [MO_LEUL] = helper_le_ldul_mmu,
910 [MO_LEQ] = helper_le_ldq_mmu,
911 [MO_BEUW] = helper_be_lduw_mmu,
912 [MO_BESW] = helper_be_ldsw_mmu,
913 [MO_BEUL] = helper_be_ldul_mmu,
914 [MO_BEQ] = helper_be_ldq_mmu,
916 static void * const qemu_st_helpers[16] = {
917 [MO_UB] = helper_ret_stb_mmu,
918 [MO_LEUW] = helper_le_stw_mmu,
919 [MO_LEUL] = helper_le_stl_mmu,
920 [MO_LEQ] = helper_le_stq_mmu,
921 [MO_BEUW] = helper_be_stw_mmu,
922 [MO_BEUL] = helper_be_stl_mmu,
923 [MO_BEQ] = helper_be_stq_mmu,
926 int i;
927 TCGReg ra;
929 for (i = 0; i < 16; ++i) {
930 if (qemu_ld_helpers[i] == NULL) {
931 continue;
934 /* May as well align the trampoline. */
935 while ((uintptr_t)s->code_ptr & 15) {
936 tcg_out_nop(s);
938 qemu_ld_trampoline[i] = s->code_ptr;
940 if (SPARC64 || TARGET_LONG_BITS == 32) {
941 ra = TCG_REG_O3;
942 } else {
943 /* Install the high part of the address. */
944 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
945 ra = TCG_REG_O4;
948 /* Set the retaddr operand. */
949 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
950 /* Set the env operand. */
951 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
952 /* Tail call. */
953 tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
954 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
957 for (i = 0; i < 16; ++i) {
958 if (qemu_st_helpers[i] == NULL) {
959 continue;
962 /* May as well align the trampoline. */
963 while ((uintptr_t)s->code_ptr & 15) {
964 tcg_out_nop(s);
966 qemu_st_trampoline[i] = s->code_ptr;
968 if (SPARC64) {
969 emit_extend(s, TCG_REG_O2, i);
970 ra = TCG_REG_O4;
971 } else {
972 ra = TCG_REG_O1;
973 if (TARGET_LONG_BITS == 64) {
974 /* Install the high part of the address. */
975 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
976 ra += 2;
977 } else {
978 ra += 1;
980 if ((i & MO_SIZE) == MO_64) {
981 /* Install the high part of the data. */
982 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
983 ra += 2;
984 } else {
985 emit_extend(s, ra, i);
986 ra += 1;
988 /* Skip the oi argument. */
989 ra += 1;
992 /* Set the retaddr operand. */
993 if (ra >= TCG_REG_O6) {
994 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
995 TCG_TARGET_CALL_STACK_OFFSET);
996 ra = TCG_REG_G1;
998 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
999 /* Set the env operand. */
1000 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
1001 /* Tail call. */
1002 tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
1003 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
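/* Each trampoline is, in effect, a tail call: the fast path has already
   placed the address, data and oi in the %o registers; the trampoline
   extends the data where needed, copies the caller's %o7 into the helper's
   retaddr argument, loads env into %o0, and then branches to the C helper
   while putting that original %o7 back in place in the delay slot -- so
   the helper returns straight to the generated code, bypassing the
   trampoline. */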
1006 #endif
1008 /* Generate global QEMU prologue and epilogue code */
1009 static void tcg_target_qemu_prologue(TCGContext *s)
1011 int tmp_buf_size, frame_size;
1013 /* The TCG temp buffer is at the top of the frame, immediately
1014 below the frame pointer. */
1015 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
1016 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
1017 tmp_buf_size);
1019 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
1020 otherwise the minimal frame usable by callees. */
1021 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
1022 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
1023 frame_size += TCG_TARGET_STACK_ALIGN - 1;
1024 frame_size &= -TCG_TARGET_STACK_ALIGN;
1025 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
1026 INSN_IMM13(-frame_size));
1028 #ifndef CONFIG_SOFTMMU
1029 if (guest_base != 0) {
1030 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
1031 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1033 #endif
1035 /* We choose TCG_REG_TB such that no move is required. */
1036 if (USE_REG_TB) {
1037 QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
1038 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
1041 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
1042 /* delay slot */
1043 tcg_out_nop(s);
1045 /* Epilogue for goto_ptr. */
1046 s->code_gen_epilogue = s->code_ptr;
1047 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1048 /* delay slot */
1049 tcg_out_movi_imm13(s, TCG_REG_O0, 0);
1051 #ifdef CONFIG_SOFTMMU
1052 build_trampolines(s);
1053 #endif
1056 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1058 int i;
1059 for (i = 0; i < count; ++i) {
1060 p[i] = NOP;
1064 #if defined(CONFIG_SOFTMMU)
1065 /* Perform the TLB load and compare.
1067 Inputs:
1068 ADDRLO and ADDRHI contain the possible two parts of the address.
1070 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1072 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1073 This should be offsetof addr_read or addr_write.
1075 The result of the TLB comparison is in %[ix]cc. The sanitized address
1076 is in the returned register, maybe %o0. The TLB addend is in %o1. */
1078 /* We expect tlb_mask to be before tlb_table. */
1079 QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
1080 offsetof(CPUArchState, tlb_mask));
1082 /* We expect tlb_mask to be "near" tlb_table. */
1083 QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
1084 offsetof(CPUArchState, tlb_mask) >= (1 << 13));
1086 static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
1087 TCGMemOp opc, int which)
1089 int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
1090 int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
1091 TCGReg base = TCG_AREG0;
1092 const TCGReg r0 = TCG_REG_O0;
1093 const TCGReg r1 = TCG_REG_O1;
1094 const TCGReg r2 = TCG_REG_O2;
1095 unsigned s_bits = opc & MO_SIZE;
1096 unsigned a_bits = get_alignment_bits(opc);
1097 tcg_target_long compare_mask;
1099 if (!check_fit_i32(table_off, 13)) {
1100 int table_hi;
1102 base = r1;
1103 if (table_off <= 2 * 0xfff) {
1104 table_hi = 0xfff;
1105 tcg_out_arithi(s, base, TCG_AREG0, table_hi, ARITH_ADD);
1106 } else {
1107 table_hi = table_off & ~0x3ff;
1108 tcg_out_sethi(s, base, table_hi);
1109 tcg_out_arith(s, base, TCG_AREG0, base, ARITH_ADD);
1111 mask_off -= table_hi;
1112 table_off -= table_hi;
1113 tcg_debug_assert(check_fit_i32(mask_off, 13));
1116 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1117 tcg_out_ld(s, TCG_TYPE_PTR, r0, base, mask_off);
1118 tcg_out_ld(s, TCG_TYPE_PTR, r1, base, table_off);
1120 /* Extract the page index, shifted into place for tlb index. */
1121 tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
1122 SHIFT_SRL);
1123 tcg_out_arith(s, r2, r2, r0, ARITH_AND);
1125 /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
1126 tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
1128 /* Load the tlb comparator and the addend. */
1129 tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
1130 tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
1132 /* Mask out the page offset, except for the required alignment.
1133 We don't support unaligned accesses. */
1134 if (a_bits < s_bits) {
1135 a_bits = s_bits;
1137 compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
1138 if (check_fit_tl(compare_mask, 13)) {
1139 tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
1140 } else {
1141 tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
1142 tcg_out_arith(s, r2, addr, r2, ARITH_AND);
1144 tcg_out_cmp(s, r0, r2, 0);
1146 /* If the guest address must be zero-extended, do so now. */
1147 if (SPARC64 && TARGET_LONG_BITS == 32) {
1148 tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
1149 return r0;
1151 return addr;
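/* Roughly, the lookup above computes
       entry = tlb_table[mmu_idx]
               + ((addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                  & tlb_mask[mmu_idx]);
   where the mask is pre-scaled so the AND directly yields a byte offset
   into the table.  The comparator at "which" is then checked against the
   address with the page-offset bits (except the required alignment bits)
   masked off, leaving the result of the compare in %icc/%xcc and the
   addend for the hit path in %o1. */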
1153 #endif /* CONFIG_SOFTMMU */
1155 static const int qemu_ld_opc[16] = {
1156 [MO_UB] = LDUB,
1157 [MO_SB] = LDSB,
1159 [MO_BEUW] = LDUH,
1160 [MO_BESW] = LDSH,
1161 [MO_BEUL] = LDUW,
1162 [MO_BESL] = LDSW,
1163 [MO_BEQ] = LDX,
1165 [MO_LEUW] = LDUH_LE,
1166 [MO_LESW] = LDSH_LE,
1167 [MO_LEUL] = LDUW_LE,
1168 [MO_LESL] = LDSW_LE,
1169 [MO_LEQ] = LDX_LE,
1172 static const int qemu_st_opc[16] = {
1173 [MO_UB] = STB,
1175 [MO_BEUW] = STH,
1176 [MO_BEUL] = STW,
1177 [MO_BEQ] = STX,
1179 [MO_LEUW] = STH_LE,
1180 [MO_LEUL] = STW_LE,
1181 [MO_LEQ] = STX_LE,
1184 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1185 TCGMemOpIdx oi, bool is_64)
1187 TCGMemOp memop = get_memop(oi);
1188 #ifdef CONFIG_SOFTMMU
1189 unsigned memi = get_mmuidx(oi);
1190 TCGReg addrz, param;
1191 tcg_insn_unit *func;
1192 tcg_insn_unit *label_ptr;
1194 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1195 offsetof(CPUTLBEntry, addr_read));
1197 /* The fast path is exactly one insn. Thus we can perform the
1198 entire TLB Hit in the (annulled) delay slot of the branch
1199 over the TLB Miss case. */
1201 /* beq,a,pt %[xi]cc, label0 */
1202 label_ptr = s->code_ptr;
1203 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1204 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1205 /* delay slot */
1206 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1207 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1209 /* TLB Miss. */
1211 param = TCG_REG_O1;
1212 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1213 /* Skip the high-part; we'll perform the extract in the trampoline. */
1214 param++;
1216 tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
1218 /* We use the helpers to extend SB and SW data, leaving the case
1219 of SL needing explicit extending below. */
1220 if ((memop & MO_SSIZE) == MO_SL) {
1221 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1222 } else {
1223 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
1225 tcg_debug_assert(func != NULL);
1226 tcg_out_call_nodelay(s, func, false);
1227 /* delay slot */
1228 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1230 /* Recall that all of the helpers return 64-bit results,
1231 which complicates things for sparcv8plus. */
1232 if (SPARC64) {
1233 /* We let the helper sign-extend SB and SW, but leave SL for here. */
1234 if (is_64 && (memop & MO_SSIZE) == MO_SL) {
1235 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1236 } else {
1237 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1239 } else {
1240 if ((memop & MO_SIZE) == MO_64) {
1241 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
1242 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
1243 tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
1244 } else if (is_64) {
1245 /* Re-extend from 32-bit rather than reassembling when we
1246 know the high register must be an extension. */
1247 tcg_out_arithi(s, data, TCG_REG_O1, 0,
1248 memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
1249 } else {
1250 tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
1254 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1255 #else
1256 if (SPARC64 && TARGET_LONG_BITS == 32) {
1257 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1258 addr = TCG_REG_T1;
1260 tcg_out_ldst_rr(s, data, addr,
1261 (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1262 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1263 #endif /* CONFIG_SOFTMMU */
1266 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1267 TCGMemOpIdx oi)
1269 TCGMemOp memop = get_memop(oi);
1270 #ifdef CONFIG_SOFTMMU
1271 unsigned memi = get_mmuidx(oi);
1272 TCGReg addrz, param;
1273 tcg_insn_unit *func;
1274 tcg_insn_unit *label_ptr;
1276 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1277 offsetof(CPUTLBEntry, addr_write));
1279 /* The fast path is exactly one insn. Thus we can perform the entire
1280 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1281 /* beq,a,pt %[xi]cc, label0 */
1282 label_ptr = s->code_ptr;
1283 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1284 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1285 /* delay slot */
1286 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1287 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1289 /* TLB Miss. */
1291 param = TCG_REG_O1;
1292 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1293 /* Skip the high-part; we'll perform the extract in the trampoline. */
1294 param++;
1296 tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
1297 if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
1298 /* Skip the high-part; we'll perform the extract in the trampoline. */
1299 param++;
1301 tcg_out_mov(s, TCG_TYPE_REG, param++, data);
1303 func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1304 tcg_debug_assert(func != NULL);
1305 tcg_out_call_nodelay(s, func, false);
1306 /* delay slot */
1307 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1309 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1310 #else
1311 if (SPARC64 && TARGET_LONG_BITS == 32) {
1312 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1313 addr = TCG_REG_T1;
1315 tcg_out_ldst_rr(s, data, addr,
1316 (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1317 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1318 #endif /* CONFIG_SOFTMMU */
1321 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1322 const TCGArg args[TCG_MAX_OP_ARGS],
1323 const int const_args[TCG_MAX_OP_ARGS])
1325 TCGArg a0, a1, a2;
1326 int c, c2;
1328 /* Hoist the loads of the most common arguments. */
1329 a0 = args[0];
1330 a1 = args[1];
1331 a2 = args[2];
1332 c2 = const_args[2];
1334 switch (opc) {
1335 case INDEX_op_exit_tb:
1336 if (check_fit_ptr(a0, 13)) {
1337 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1338 tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1339 break;
1340 } else if (USE_REG_TB) {
1341 intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr;
1342 if (check_fit_ptr(tb_diff, 13)) {
1343 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1344 /* Note that TCG_REG_TB has been unwound to O1. */
1345 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1346 break;
1349 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1350 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1351 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1352 break;
1353 case INDEX_op_goto_tb:
1354 if (s->tb_jmp_insn_offset) {
1355 /* direct jump method */
1356 if (USE_REG_TB) {
1357 /* make sure the patch is 8-byte aligned. */
1358 if ((intptr_t)s->code_ptr & 4) {
1359 tcg_out_nop(s);
1361 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1362 tcg_out_sethi(s, TCG_REG_T1, 0);
1363 tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
1364 tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
1365 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1366 } else {
1367 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1368 tcg_out32(s, CALL);
1369 tcg_out_nop(s);
1371 } else {
1372 /* indirect jump method */
1373 tcg_out_ld_ptr(s, TCG_REG_TB,
1374 (uintptr_t)(s->tb_jmp_target_addr + a0));
1375 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1376 tcg_out_nop(s);
1378 set_jmp_reset_offset(s, a0);
1380 /* For the unlinked path of goto_tb, we need to reset
1381 TCG_REG_TB to the beginning of this TB. */
1382 if (USE_REG_TB) {
1383 c = -tcg_current_code_size(s);
1384 if (check_fit_i32(c, 13)) {
1385 tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
1386 } else {
1387 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
1388 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
1389 TCG_REG_T1, ARITH_ADD);
1392 break;
1393 case INDEX_op_goto_ptr:
1394 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1395 if (USE_REG_TB) {
1396 tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
1397 } else {
1398 tcg_out_nop(s);
1400 break;
1401 case INDEX_op_br:
1402 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1403 tcg_out_nop(s);
1404 break;
1406 #define OP_32_64(x) \
1407 glue(glue(case INDEX_op_, x), _i32): \
1408 glue(glue(case INDEX_op_, x), _i64)
1410 OP_32_64(ld8u):
1411 tcg_out_ldst(s, a0, a1, a2, LDUB);
1412 break;
1413 OP_32_64(ld8s):
1414 tcg_out_ldst(s, a0, a1, a2, LDSB);
1415 break;
1416 OP_32_64(ld16u):
1417 tcg_out_ldst(s, a0, a1, a2, LDUH);
1418 break;
1419 OP_32_64(ld16s):
1420 tcg_out_ldst(s, a0, a1, a2, LDSH);
1421 break;
1422 case INDEX_op_ld_i32:
1423 case INDEX_op_ld32u_i64:
1424 tcg_out_ldst(s, a0, a1, a2, LDUW);
1425 break;
1426 OP_32_64(st8):
1427 tcg_out_ldst(s, a0, a1, a2, STB);
1428 break;
1429 OP_32_64(st16):
1430 tcg_out_ldst(s, a0, a1, a2, STH);
1431 break;
1432 case INDEX_op_st_i32:
1433 case INDEX_op_st32_i64:
1434 tcg_out_ldst(s, a0, a1, a2, STW);
1435 break;
1436 OP_32_64(add):
1437 c = ARITH_ADD;
1438 goto gen_arith;
1439 OP_32_64(sub):
1440 c = ARITH_SUB;
1441 goto gen_arith;
1442 OP_32_64(and):
1443 c = ARITH_AND;
1444 goto gen_arith;
1445 OP_32_64(andc):
1446 c = ARITH_ANDN;
1447 goto gen_arith;
1448 OP_32_64(or):
1449 c = ARITH_OR;
1450 goto gen_arith;
1451 OP_32_64(orc):
1452 c = ARITH_ORN;
1453 goto gen_arith;
1454 OP_32_64(xor):
1455 c = ARITH_XOR;
1456 goto gen_arith;
1457 case INDEX_op_shl_i32:
1458 c = SHIFT_SLL;
1459 do_shift32:
1460 /* Limit immediate shift count lest we create an illegal insn. */
1461 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1462 break;
1463 case INDEX_op_shr_i32:
1464 c = SHIFT_SRL;
1465 goto do_shift32;
1466 case INDEX_op_sar_i32:
1467 c = SHIFT_SRA;
1468 goto do_shift32;
1469 case INDEX_op_mul_i32:
1470 c = ARITH_UMUL;
1471 goto gen_arith;
1473 OP_32_64(neg):
1474 c = ARITH_SUB;
1475 goto gen_arith1;
1476 OP_32_64(not):
1477 c = ARITH_ORN;
1478 goto gen_arith1;
1480 case INDEX_op_div_i32:
1481 tcg_out_div32(s, a0, a1, a2, c2, 0);
1482 break;
1483 case INDEX_op_divu_i32:
1484 tcg_out_div32(s, a0, a1, a2, c2, 1);
1485 break;
1487 case INDEX_op_brcond_i32:
1488 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1489 break;
1490 case INDEX_op_setcond_i32:
1491 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1492 break;
1493 case INDEX_op_movcond_i32:
1494 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1495 break;
1497 case INDEX_op_add2_i32:
1498 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1499 args[4], const_args[4], args[5], const_args[5],
1500 ARITH_ADDCC, ARITH_ADDC);
1501 break;
1502 case INDEX_op_sub2_i32:
1503 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1504 args[4], const_args[4], args[5], const_args[5],
1505 ARITH_SUBCC, ARITH_SUBC);
1506 break;
1507 case INDEX_op_mulu2_i32:
1508 c = ARITH_UMUL;
1509 goto do_mul2;
1510 case INDEX_op_muls2_i32:
1511 c = ARITH_SMUL;
1512 do_mul2:
1513 /* The 32-bit multiply insns produce a full 64-bit result. If the
1514 destination register can hold it, we can avoid the slower RDY. */
1515 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1516 if (SPARC64 || a0 <= TCG_REG_O7) {
1517 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1518 } else {
1519 tcg_out_rdy(s, a1);
1521 break;
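/* "a0 <= TCG_REG_O7" is the %g/%o-only restriction from the top of the
   file: on sparcv8plus only those registers reliably keep all 64 bits, so
   only then can the high half be recovered with srlx instead of rdy. */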
1523 case INDEX_op_qemu_ld_i32:
1524 tcg_out_qemu_ld(s, a0, a1, a2, false);
1525 break;
1526 case INDEX_op_qemu_ld_i64:
1527 tcg_out_qemu_ld(s, a0, a1, a2, true);
1528 break;
1529 case INDEX_op_qemu_st_i32:
1530 case INDEX_op_qemu_st_i64:
1531 tcg_out_qemu_st(s, a0, a1, a2);
1532 break;
1534 case INDEX_op_ld32s_i64:
1535 tcg_out_ldst(s, a0, a1, a2, LDSW);
1536 break;
1537 case INDEX_op_ld_i64:
1538 tcg_out_ldst(s, a0, a1, a2, LDX);
1539 break;
1540 case INDEX_op_st_i64:
1541 tcg_out_ldst(s, a0, a1, a2, STX);
1542 break;
1543 case INDEX_op_shl_i64:
1544 c = SHIFT_SLLX;
1545 do_shift64:
1546 /* Limit immediate shift count lest we create an illegal insn. */
1547 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1548 break;
1549 case INDEX_op_shr_i64:
1550 c = SHIFT_SRLX;
1551 goto do_shift64;
1552 case INDEX_op_sar_i64:
1553 c = SHIFT_SRAX;
1554 goto do_shift64;
1555 case INDEX_op_mul_i64:
1556 c = ARITH_MULX;
1557 goto gen_arith;
1558 case INDEX_op_div_i64:
1559 c = ARITH_SDIVX;
1560 goto gen_arith;
1561 case INDEX_op_divu_i64:
1562 c = ARITH_UDIVX;
1563 goto gen_arith;
1564 case INDEX_op_ext_i32_i64:
1565 case INDEX_op_ext32s_i64:
1566 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
1567 break;
1568 case INDEX_op_extu_i32_i64:
1569 case INDEX_op_ext32u_i64:
1570 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
1571 break;
1572 case INDEX_op_extrl_i64_i32:
1573 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1574 break;
1575 case INDEX_op_extrh_i64_i32:
1576 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1577 break;
1579 case INDEX_op_brcond_i64:
1580 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1581 break;
1582 case INDEX_op_setcond_i64:
1583 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1584 break;
1585 case INDEX_op_movcond_i64:
1586 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1587 break;
1588 case INDEX_op_add2_i64:
1589 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1590 const_args[4], args[5], const_args[5], false);
1591 break;
1592 case INDEX_op_sub2_i64:
1593 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1594 const_args[4], args[5], const_args[5], true);
1595 break;
1596 case INDEX_op_muluh_i64:
1597 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1598 break;
1600 gen_arith:
1601 tcg_out_arithc(s, a0, a1, a2, c2, c);
1602 break;
1604 gen_arith1:
1605 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1606 break;
1608 case INDEX_op_mb:
1609 tcg_out_mb(s, a0);
1610 break;
1612 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1613 case INDEX_op_mov_i64:
1614 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
1615 case INDEX_op_movi_i64:
1616 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1617 default:
1618 tcg_abort();
1622 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
1624 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
1625 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
1626 static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
1627 static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
1628 static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
1629 static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
1630 static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
1631 static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
1632 static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
1633 static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
1634 static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
1635 static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
1636 static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
1637 static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
1638 static const TCGTargetOpDef r_rZ_rJ
1639 = { .args_ct_str = { "r", "rZ", "rJ" } };
1640 static const TCGTargetOpDef R_RZ_RJ
1641 = { .args_ct_str = { "R", "RZ", "RJ" } };
1642 static const TCGTargetOpDef r_r_rZ_rJ
1643 = { .args_ct_str = { "r", "r", "rZ", "rJ" } };
1644 static const TCGTargetOpDef movc_32
1645 = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
1646 static const TCGTargetOpDef movc_64
1647 = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
1648 static const TCGTargetOpDef add2_32
1649 = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
1650 static const TCGTargetOpDef add2_64
1651 = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };
1653 switch (op) {
1654 case INDEX_op_goto_ptr:
1655 return &r;
1657 case INDEX_op_ld8u_i32:
1658 case INDEX_op_ld8s_i32:
1659 case INDEX_op_ld16u_i32:
1660 case INDEX_op_ld16s_i32:
1661 case INDEX_op_ld_i32:
1662 case INDEX_op_neg_i32:
1663 case INDEX_op_not_i32:
1664 return &r_r;
1666 case INDEX_op_st8_i32:
1667 case INDEX_op_st16_i32:
1668 case INDEX_op_st_i32:
1669 return &rZ_r;
1671 case INDEX_op_add_i32:
1672 case INDEX_op_mul_i32:
1673 case INDEX_op_div_i32:
1674 case INDEX_op_divu_i32:
1675 case INDEX_op_sub_i32:
1676 case INDEX_op_and_i32:
1677 case INDEX_op_andc_i32:
1678 case INDEX_op_or_i32:
1679 case INDEX_op_orc_i32:
1680 case INDEX_op_xor_i32:
1681 case INDEX_op_shl_i32:
1682 case INDEX_op_shr_i32:
1683 case INDEX_op_sar_i32:
1684 case INDEX_op_setcond_i32:
1685 return &r_rZ_rJ;
1687 case INDEX_op_brcond_i32:
1688 return &rZ_rJ;
1689 case INDEX_op_movcond_i32:
1690 return &movc_32;
1691 case INDEX_op_add2_i32:
1692 case INDEX_op_sub2_i32:
1693 return &add2_32;
1694 case INDEX_op_mulu2_i32:
1695 case INDEX_op_muls2_i32:
1696 return &r_r_rZ_rJ;
1698 case INDEX_op_ld8u_i64:
1699 case INDEX_op_ld8s_i64:
1700 case INDEX_op_ld16u_i64:
1701 case INDEX_op_ld16s_i64:
1702 case INDEX_op_ld32u_i64:
1703 case INDEX_op_ld32s_i64:
1704 case INDEX_op_ld_i64:
1705 case INDEX_op_ext_i32_i64:
1706 case INDEX_op_extu_i32_i64:
1707 return &R_r;
1709 case INDEX_op_st8_i64:
1710 case INDEX_op_st16_i64:
1711 case INDEX_op_st32_i64:
1712 case INDEX_op_st_i64:
1713 return &RZ_r;
1715 case INDEX_op_add_i64:
1716 case INDEX_op_mul_i64:
1717 case INDEX_op_div_i64:
1718 case INDEX_op_divu_i64:
1719 case INDEX_op_sub_i64:
1720 case INDEX_op_and_i64:
1721 case INDEX_op_andc_i64:
1722 case INDEX_op_or_i64:
1723 case INDEX_op_orc_i64:
1724 case INDEX_op_xor_i64:
1725 case INDEX_op_shl_i64:
1726 case INDEX_op_shr_i64:
1727 case INDEX_op_sar_i64:
1728 case INDEX_op_setcond_i64:
1729 return &R_RZ_RJ;
1731 case INDEX_op_neg_i64:
1732 case INDEX_op_not_i64:
1733 case INDEX_op_ext32s_i64:
1734 case INDEX_op_ext32u_i64:
1735 return &R_R;
1737 case INDEX_op_extrl_i64_i32:
1738 case INDEX_op_extrh_i64_i32:
1739 return &r_R;
1741 case INDEX_op_brcond_i64:
1742 return &RZ_RJ;
1743 case INDEX_op_movcond_i64:
1744 return &movc_64;
1745 case INDEX_op_add2_i64:
1746 case INDEX_op_sub2_i64:
1747 return &add2_64;
1748 case INDEX_op_muluh_i64:
1749 return &R_R_R;
1751 case INDEX_op_qemu_ld_i32:
1752 return &r_A;
1753 case INDEX_op_qemu_ld_i64:
1754 return &R_A;
1755 case INDEX_op_qemu_st_i32:
1756 return &sZ_A;
1757 case INDEX_op_qemu_st_i64:
1758 return &SZ_A;
1760 default:
1761 return NULL;
1765 static void tcg_target_init(TCGContext *s)
1767 /* Only probe for the platform and capabilities if we haven't already
1768 determined maximum values at compile time. */
1769 #ifndef use_vis3_instructions
1771 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1772 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1774 #endif
1776 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
1777 tcg_target_available_regs[TCG_TYPE_I64] = ALL_64;
1779 tcg_target_call_clobber_regs = 0;
1780 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1781 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1782 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1783 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1784 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1785 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1786 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1787 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1788 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1789 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1790 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1791 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1792 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1793 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1794 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1796 s->reserved_regs = 0;
1797 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1798 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1799 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1800 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1801 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1802 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1803 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1804 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1807 #if SPARC64
1808 # define ELF_HOST_MACHINE EM_SPARCV9
1809 #else
1810 # define ELF_HOST_MACHINE EM_SPARC32PLUS
1811 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
1812 #endif
1814 typedef struct {
1815 DebugFrameHeader h;
1816 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
1817 uint8_t fde_win_save;
1818 uint8_t fde_ret_save[3];
1819 } DebugFrame;
1821 static const DebugFrame debug_frame = {
1822 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1823 .h.cie.id = -1,
1824 .h.cie.version = 1,
1825 .h.cie.code_align = 1,
1826 .h.cie.data_align = -sizeof(void *) & 0x7f,
1827 .h.cie.return_column = 15, /* o7 */
1829 /* Total FDE size does not include the "len" member. */
1830 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1832 .fde_def_cfa = {
1833 #if SPARC64
1834 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1835 (2047 & 0x7f) | 0x80, (2047 >> 7)
1836 #else
1837 13, 30 /* DW_CFA_def_cfa_register i6 */
1838 #endif
1840 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1841 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
1844 void tcg_register_jit(void *buf, size_t buf_size)
1846 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1849 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
1850 uintptr_t addr)
1852 intptr_t tb_disp = addr - tc_ptr;
1853 intptr_t br_disp = addr - jmp_addr;
1854 tcg_insn_unit i1, i2;
1856 /* We can reach the entire address space for ILP32.
1857 For LP64, the code_gen_buffer can't be larger than 2GB. */
1858 tcg_debug_assert(tb_disp == (int32_t)tb_disp);
1859 tcg_debug_assert(br_disp == (int32_t)br_disp);
1861 if (!USE_REG_TB) {
1862 atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2));
1863 flush_icache_range(jmp_addr, jmp_addr + 4);
1864 return;
1867 /* Patching this way does not exercise the full range of the branch,
1868 but we still need to be able to load the new value of TCG_REG_TB,
1869 and this short-displacement case does happen quite often. */
1870 if (check_fit_ptr(tb_disp, 13)) {
1871 /* ba,pt %icc, addr */
1872 i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
1873 | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
1874 i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
1875 | INSN_IMM13(tb_disp));
1876 } else if (tb_disp >= 0) {
1877 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
1878 i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1879 | INSN_IMM13(tb_disp & 0x3ff));
1880 } else {
1881 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
1882 i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1883 | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
1886 atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
1887 flush_icache_range(jmp_addr, jmp_addr + 8);
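/* Note that the two instructions are committed with one aligned 64-bit
   store (deposit64 puts i1 in the high half, which is the lower address on
   this big-endian host), so a racing vCPU observes either the old pair or
   the new pair, never a mix; the goto_tb path aligned the patch site to
   8 bytes for exactly this reason. */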