/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
static const int tcg_target_reg_alloc_order[] = {
static const int tcg_target_call_iarg_regs[6] = {
static const int tcg_target_call_oarg_regs[] = {
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
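
/*
 * Both helpers test a value by shifting it up so that only the low BITS
 * bits survive and then shifting back down.  check_fit_tl works on the
 * signed tcg_target_long, so the right shift sign-extends and the
 * round-trip succeeds exactly for values representable as a BITS-bit
 * sign-extended immediate: e.g. check_fit_tl(4095, 13) holds, while
 * check_fit_tl(4096, 13) does not, matching the simm13 fields used
 * below.  check_fit_i32 does the same on an unsigned 32-bit value.
 */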
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP22:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 22))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 19))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
        break;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
        return 1;
    else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
        return 1;
    else
        return 0;
}
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
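
/*
 * Instruction words are assembled by OR-ing these field macros with the
 * opcode patterns defined below.  For example, a register-register add
 * ("add %rs1, %rs2, %rd") is emitted as
 *     tcg_out32(s, ARITH_ADD | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
 * and the immediate form replaces INSN_RS2() with INSN_IMM13(), whose
 * bit 13 selects the immediate addressing mode.
 */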
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
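
/*
 * The *_LE variants use the load/store-alternate opcodes (LDUHA, STWA,
 * ...) with ASI_PRIMARY_LITTLE, so little-endian guest data can be
 * accessed directly on the big-endian host without extra byte-swap
 * instructions.
 */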
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}
static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}
static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
}
static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13))
        tcg_out_movi_imm13(s, ret, arg);
    else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff)
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32.  */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits.  */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
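
/*
 * Summary of the sequences emitted above: a 13-bit signed constant is a
 * single "or %g0, simm13, ret"; a 32-bit constant is sethi plus an or of
 * the low 10 bits; a negative constant that fits in 32 signed bits is
 * built from sethi of its complement followed by an xor with a negative
 * immediate; any other 64-bit constant is built as two 32-bit halves,
 * the high half shifted left by 32 with sllx and or-ed with the low half
 * staged in TCG_REG_T2.
 */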
static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}
static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}
static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}
static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
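
/*
 * The 32-bit udiv/sdiv instructions divide the 64-bit quantity {Y, rs1}
 * by the divisor, so Y must first hold the zero extension (unsigned) or
 * the sign bits of rs1 (signed, produced by "sra rs1, 31") before the
 * divide itself is emitted.
 */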
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t off22;

    if (l->has_value) {
        off22 = INSN_OFF22(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off22 = *(uint32_t *)s->code_ptr & INSN_OFF22(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
    }
    tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | off22);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
    }
    tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
                  (0x5 << 19) | off19));
}
#endif
static const uint8_t tcg_cond_to_bcond[10] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};
static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int cc, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare.  */
    switch (cond) {
    case TCG_COND_EQ:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    case TCG_COND_NE:
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    default:
        /* ??? One could fairly easily special-case 64-bit unsigned
           compares against 32-bit zero-extended constants.  For instance,
           we know that (unsigned)AH < 0 is false and need not emit it.
           Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
           second branch will never be taken.  */
        cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_nop(s);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    TCGArg t;

    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_LEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        if (c2const && c2 != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T1, c2);
            c2 = TCG_REG_T1;
        }
        t = c1, c1 = c2, c2 = t, c2const = 0;
        cond = tcg_swap_cond(cond);
        break;

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out32(s, ARITH_MOVCC | INSN_RD(ret)
                  | INSN_RS1(tcg_cond_to_bcond[cond])
                  | MOVCC_ICC | INSN_IMM11(1));
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
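
/*
 * The ADDX/SUBX trick: after the subcc emitted by tcg_out_cmp, the carry
 * flag holds the unsigned less-than result, so "addx %g0, 0, ret"
 * materializes LTU directly as 0 or 1 and "subx %g0, -1, ret" yields its
 * complement (GEU).  EQ/NE are first reduced to a comparison with zero
 * via the xor above so the same trick applies.
 */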
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movi_imm13(s, ret, 0);
    tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
               | INSN_RS1(tcg_cond_to_bcond[cond])
               | MOVCC_XCC | INSN_IMM11(1));
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int lab;

    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_T1, al, bl, blconst);
        tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_AND);
        break;

    case TCG_COND_NE:
        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_T1, al, bl, blconst);
        tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
        tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_OR);
        break;

    default:
        lab = gen_new_label();

        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
        tcg_out_movi_imm13(s, ret, 1);
        tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
        tcg_out_movi_imm13(s, ret, 0);

        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);

        tcg_out_label(s, lab, s->code_ptr);
        break;
    }
}
#endif
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */
}
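
/*
 * The whole prologue is therefore a single "save %sp, -frame_size, %sp"
 * followed by a jump to the translated-code pointer passed in %i1; the
 * matching "ret; restore" is emitted by the exit_tb opcode, which is why
 * no separate epilogue is generated.  frame_size covers the minimal ABI
 * frame (including the stack bias on 64-bit hosts), the static helper
 * argument area and the TCG temporary buffer.
 */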
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
/* Perform the TLB load and compare.

   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDR_LOW_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
        addr = r0;
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}

#endif /* CONFIG_SOFTMMU */
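
/*
 * Roughly, the lookup emitted by tcg_out_tlb_load is:
 *     srl    addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o1
 *     and    addr, TARGET_PAGE_MASK | alignment, %o0
 *     and    %o1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, %o1
 *     add    %o1, env, %o1
 *     ld[x]  [%o1 + comparator offset], %o2
 *     ld[x]  [%o1 + addend offset], %o1
 *     subcc  %o0, %o2, %g0
 * leaving the hit/miss decision in the condition codes for the caller.
 */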
static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)));

        /* TLB Hit.  */
        /* Load all 64-bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
                      | (1 << 29) | (1 << 19)));
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)
                      | (1 << 29) | (1 << 19)));
        /* TLB Hit.  */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    default:
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}
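
/*
 * Note the branch trick used in the common case above: because the TLB
 * hit path is a single load, the branch over the miss path is emitted as
 * an annulled "beq,a,pt" with the load in its delay slot, so a hit costs
 * only the compare, the branch and the load; a miss falls through to the
 * helper call.
 */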
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                  | ((TARGET_LONG_BITS == 64) << 21)
                  | (1 << 29) | (1 << 19)));
    /* TLB Hit.  */
    tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation.  */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;

    case INDEX_op_jmp:
    case INDEX_op_br:
        tcg_out_branch_i32(s, COND_A, args[0]);
        tcg_out_nop(s);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;
#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_ADDCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_SUBCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
#endif
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
#endif

    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div_i32, { "r", "r", "rJ" } },
    { INDEX_op_divu_i32, { "r", "r", "rJ" } },
    { INDEX_op_rem_i32, { "r", "r", "rJ" } },
    { INDEX_op_remu_i32, { "r", "r", "rJ" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_andc_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_orc_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "r", "rJ" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div_i64, { "r", "r", "rJ" } },
    { INDEX_op_divu_i64, { "r", "r", "rJ" } },
    { INDEX_op_rem_i64, { "r", "r", "rJ" } },
    { INDEX_op_remu_i64, { "r", "r", "rJ" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_andc_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_orc_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "r", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB.  */
    if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
        tcg_abort();
    }

    *ptr = CALL | (disp & 0x3fffffff);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}
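
/*
 * Direct jump patching: goto_tb reserves a CALL instruction, and once
 * the destination TB is known tb_set_jmp_target1 rewrites that word with
 * CALL | (displacement >> 2), the displacement being measured from the
 * patched instruction itself, then flushes the icache for the word.
 */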
+ 4);