/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-null.h"

static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64  0xffffffffu
#else
# define ALL_64  0xffffu
#endif
/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
static const int tcg_target_reg_alloc_order[] = {

static const int tcg_target_call_iarg_regs[6] = {

static const int tcg_target_call_oarg_regs[] = {
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
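/* For illustration: a three-register "add %o1, %o2, %o0" is assembled by
   OR-ing these fields together, roughly
       INSN_OP(2) | INSN_OP3(0x00) | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2),
   while the immediate form replaces INSN_RS2(rs2) with INSN_IMM13(simm13),
   whose bit 13 marks the second operand as an immediate.  */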
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
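/* As an example of the _LE forms above: LDUW_LE is simply the LDUWA
   ("load word from alternate space") opcode with ASI_PRIMARY_LITTLE in the
   ASI field, so a single instruction performs a little-endian access
   without a separate byte swap.  */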
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;

static inline int check_fit_i32(uint32_t val, unsigned int bits)
    return ((val << (32 - bits)) >> (32 - bits)) == val;
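/* A couple of worked examples of the checks above: check_fit_tl(0x7ff, 12)
   is true, since 0x7ff survives the sign-extending shift pair unchanged,
   while check_fit_tl(0xfff, 12) is false because the value comes back as -1.
   In other words, the helpers test whether VAL is representable as a signed
   BITS-wide immediate.  */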
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
        if (value != (uint32_t)value) {
        *(uint32_t *)code_ptr = value;
    case R_SPARC_WDISP16:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 16)) {
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
    case R_SPARC_WDISP19:
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 19)) {
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
    case 'A': /* qemu_ld/st address constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0,
                         TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
    case 's': /* qemu_st data 32-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        goto reserve_helpers;
    case 'S': /* qemu_st data 64-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        goto reserve_helpers;
        ct->ct |= TCG_CT_CONST_S11;
        ct->ct |= TCG_CT_CONST_S13;
        ct->ct |= TCG_CT_CONST_ZERO;
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
    if (ct & TCG_CT_CONST) {
    if (type == TCG_TYPE_I32) {
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
    tcg_target_long hi, lo;

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 32)) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_tl(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
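/* Worked example for the 64-bit case: for arg = 0x123400000042 the low part
   lo = 0x42 fits in 13 bits, so the sequence is roughly "set 0x1234 into ret"
   (sethi/or), "sllx ret, 32, ret", "add ret, 0x42, ret".  When the low part
   does not fit, the last four lines above instead build it separately in
   TCG_REG_T2 and OR the two halves together.  */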
static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
static inline void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
    TCGReg base = TCG_REG_G0;
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, base, arg & 0x3ff);

static inline void tcg_out_sety(TCGContext *s, int rs)
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));

static inline void tcg_out_rdy(TCGContext *s, int rd)
    tcg_out32(s, RDY | INSN_RD(rd));
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
        tcg_out_sety(s, TCG_REG_G0);
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);

static inline void tcg_out_nop(TCGContext *s)
    tcg_out_sethi(s, TCG_REG_G0, 0);
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
    TCGLabel *l = &s->labels[label];
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    tcg_out_bpcc0(s, scond, flags, off19);

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
                         TCGArg v1, int v1const)
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_tl(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
        /* The result of the comparison is in the carry bit.  */
        /* For equality, we can transform to inequality vs zero.  */
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            cond = tcg_swap_cond(cond);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
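/* Example of the carry-bit trick above: for a setcond_i32 LTU, the
   "subcc c1, c2, %g0" comparison leaves the unsigned borrow in the carry
   flag, so "addx %g0, 0, ret" (ret = 0 + 0 + C) materializes exactly the
   0/1 result; the SUBX form computes 1 - C and is used for the opposite
   (GEU) sense.  */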
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
                            TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                            TCGArg bh, int bhconst, int opl, int oph)
    TCGArg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
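/* In effect, an add2_i32 (rl,rh = al,ah + bl,bh) comes out as
   "addcc al, bl -> tmp" followed by "addx ah, bh -> rh" and a move of tmp
   into rl; tmp is only aliased to rl when doing so cannot clobber an input
   that the high-part operation still needs.  */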
static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
    intptr_t disp = dest - (uintptr_t)s->code_ptr;

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, dest & ~0xfff);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, dest & 0xfff, JMPL);
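/* The direct CALL form encodes a word displacement, so it reaches any
   destination within +/-2GB of the call site; e.g. a helper 0x1000 bytes
   ahead is emitted as CALL | (0x1000 >> 2).  Farther targets fall back to
   the movi + jmpl pair above.  */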
#ifdef CONFIG_SOFTMMU
static uintptr_t qemu_ld_trampoline[16];
static uintptr_t qemu_st_trampoline[16];

static void build_trampolines(TCGContext *s)
    static uintptr_t const qemu_ld_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_ldub_mmu,
        [MO_SB]   = (uintptr_t)helper_ret_ldsb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_lduw_mmu,
        [MO_LESW] = (uintptr_t)helper_le_ldsw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_ldul_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_ldq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_lduw_mmu,
        [MO_BESW] = (uintptr_t)helper_be_ldsw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_ldul_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_ldq_mmu,
    static uintptr_t const qemu_st_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_stb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_stw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_stl_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_stq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_stw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_stl_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_stq_mmu,
    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == 0) {

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        qemu_ld_trampoline[i] = tramp;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        tcg_out_calli(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == 0) {

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        qemu_st_trampoline[i] = tramp;

            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
            /* Skip the mem_index argument.  */

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        tcg_out_calli(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));
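    /* The two adjustments above are the usual round-up idiom: adding
       TCG_TARGET_STACK_ALIGN - 1 and masking with -TCG_TARGET_STACK_ALIGN
       rounds frame_size up to an alignment multiple (e.g. 200 -> 208 for a
       16-byte alignment); SAVE then allocates that frame by adding
       -frame_size to %sp while shifting the register window.  */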
#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
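/* Roughly, the sequence emitted below computes
       index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       entry = &env->tlb_table[mem_index][index];
       cmp   = addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1));
   then compares CMP with the addr_read/addr_write field selected by WHICH
   and loads entry->addend into %o1 for use on the fast path.  */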
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp s_bits, int which)
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs & ~0x3ff);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);

#endif /* CONFIG_SOFTMMU */
static const int qemu_ld_opc[16] = {

static const int qemu_st_opc[16] = {
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi, bool is_64)
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    uint32_t *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
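    /* Sketch of the resulting shape (the branch target is patched in at the
       end of this function via label_ptr):
           subcc   %o0, %o2, %g0        ! TLB comparison
           be,a,pt %[xi]cc, label0      ! annulled: delay slot runs on hit only
            ld...  [addrz + %o1], data  ! the one-insn fast path
           ...TLB miss: marshal arguments, call the helper trampoline...
       label0:                                                              */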
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & ~MO_BSWAP) == MO_SL) {
        func = qemu_ld_trampoline[memop & ~MO_SIGN];
        func = qemu_ld_trampoline[memop];
    tcg_out_calli(s, func);
    tcg_out_movi(s, TCG_TYPE_I32, param, memi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & ~MO_BSWAP) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        if (s_bits == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);

    *label_ptr |= INSN_OFF19((uintptr_t)s->code_ptr - (uintptr_t)label_ptr);
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop]);
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi)
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    uint32_t *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);

    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
    if (!SPARC64 && s_bits == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop];
    tcg_out_calli(s, func);
    tcg_out_movi(s, TCG_TYPE_REG, param, memi);

    *label_ptr |= INSN_OFF19((uintptr_t)s->code_ptr - (uintptr_t)label_ptr);
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop]);
#endif /* CONFIG_SOFTMMU */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, JMPL);
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation. */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + args[0]));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
            tcg_out_arithi(s, TCG_REG_O7, args[0], 0, JMPL);
        tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
    case INDEX_op_shl_i32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_ADDCC, ARITH_ADDX);
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_SUBCC, ARITH_SUBX);
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
        tcg_out_rdy(s, args[1]);
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], false);
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], true);
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
    case INDEX_op_shl_i64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
    case INDEX_op_trunc_shr_i32:
            tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
            tcg_out_arithi(s, args[0], args[1], args[2], SHIFT_SRLX);
    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },

    { INDEX_op_mov_i64, { "R", "R" } },
    { INDEX_op_movi_i64, { "R" } },
    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "r" } },
    { INDEX_op_ext32u_i64, { "R", "r" } },
    { INDEX_op_trunc_shr_i32, { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "s", "A" } },
    { INDEX_op_qemu_st_i64, { "S", "A" } },
*s
)
1472 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I32
], 0, 0xffffffff);
1473 tcg_regset_set32(tcg_target_available_regs
[TCG_TYPE_I64
], 0, ALL_64
);
1475 tcg_regset_set32(tcg_target_call_clobber_regs
, 0,
1491 tcg_regset_clear(s
->reserved_regs
);
1492 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_G0
); /* zero */
1493 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_G6
); /* reserved for os */
1494 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_G7
); /* thread pointer */
1495 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_I6
); /* frame pointer */
1496 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_I7
); /* return address */
1497 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_O6
); /* stack pointer */
1498 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_T1
); /* for internal use */
1499 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_T2
); /* for internal use */
1501 tcg_add_target_add_op_defs(sparc_op_defs
);
#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
        13, 30                          /* DW_CFA_def_cfa_register i6 */

    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
void tcg_register_jit(void *buf, size_t buf_size)
    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
    uint32_t *ptr = (uint32_t *)jmp_addr;
    uintptr_t disp = addr - jmp_addr;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB.  */
    assert(disp == (int32_t)disp);

    *ptr = CALL | (uint32_t)disp >> 2;
    flush_icache_range(jmp_addr, jmp_addr + 4);
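/* Example of the patching above: if a translation block's goto_tb at
   jmp_addr must now jump to code 0x2000 bytes ahead, the word at jmp_addr is
   rewritten to CALL | (0x2000 >> 2) and the icache line is flushed, so the
   next execution of the block branches to the new target.  */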