/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-ldst.h"

#ifdef HOST_WORDS_BIGENDIAN
# define MIPS_BE  1
#else
# define MIPS_BE  0
#endif

#define LO_OFF    (MIPS_BE * 4)
#define HI_OFF    (4 - LO_OFF)
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "at",   "v0",   "v1",
    "a0",   "a1",   "a2",   "a3",
    "t0",   "t1",   "t2",   "t3",
    "t4",   "t5",   "t6",   "t7",
    "s0",   "s1",   "s2",   "s3",
    "s4",   "s5",   "s6",   "s7",
    "t8",   "t9",   "k0",   "k1",
    "gp",   "sp",   "s8",   "ra",
};
#define TCG_TMP0  TCG_REG_AT
#define TCG_TMP1  TCG_REG_T9
/* check if we really need so many registers :P */
static const TCGReg tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_S0, TCG_REG_S1, TCG_REG_S2, TCG_REG_S3,
    TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7,
    TCG_REG_S8,

    /* Call clobbered registers.  */
    TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3,
    TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, TCG_REG_T7,
    TCG_REG_T8, TCG_REG_V1, TCG_REG_V0,

    /* Argument registers, opposite order of allocation.  */
    TCG_REG_A3, TCG_REG_A2, TCG_REG_A1, TCG_REG_A0,
};

static const TCGReg tcg_target_call_iarg_regs[4] = {
    TCG_REG_A0, TCG_REG_A1, TCG_REG_A2, TCG_REG_A3
};

static const TCGReg tcg_target_call_oarg_regs[2] = {
    TCG_REG_V0, TCG_REG_V1
};
static tcg_insn_unit *tb_ret_addr;
static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    /* Let the compiler perform the right-shift as part of the arithmetic.  */
    ptrdiff_t disp = target - (pc + 1);
    assert(disp == (int16_t)disp);
    return disp & 0xffff;
}

static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
}

static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
    return ((uintptr_t)target >> 2) & 0x3ffffff;
}

static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    assert(type == R_MIPS_PC16);
    assert(addend == 0);
    reloc_pc16(code_ptr, (tcg_insn_unit *)value);
}
#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff.  */
#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767.  */
#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1.  */
#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767.  */
static inline bool is_p2m1(tcg_target_long val)
{
    return val && ((val + 1) & val) == 0;
}
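/*
 * Example: is_p2m1(0x00ff) is true since 0x0100 & 0x00ff == 0, while
 * is_p2m1(0x00fe) is false.  Such masks can be implemented with a single
 * EXT instruction on MIPS32R2 (see the and_i32 case in tcg_out_op).
 */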
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld output arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
        break;
    case 'l': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 64) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 32) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        }
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_P2M1;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N16;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0.  Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
        return 1;
    } else if ((ct & TCG_CT_CONST_P2M1)
               && use_mips32r2_instructions && is_p2m1(val)) {
        return 1;
    }
    return 0;
}
/* instruction opcodes */
typedef enum {
    OPC_J        = 0x02 << 26,
    OPC_JAL      = 0x03 << 26,
    OPC_BEQ      = 0x04 << 26,
    OPC_BNE      = 0x05 << 26,
    OPC_BLEZ     = 0x06 << 26,
    OPC_BGTZ     = 0x07 << 26,
    OPC_ADDIU    = 0x09 << 26,
    OPC_SLTI     = 0x0A << 26,
    OPC_SLTIU    = 0x0B << 26,
    OPC_ANDI     = 0x0C << 26,
    OPC_ORI      = 0x0D << 26,
    OPC_XORI     = 0x0E << 26,
    OPC_LUI      = 0x0F << 26,
    OPC_LB       = 0x20 << 26,
    OPC_LH       = 0x21 << 26,
    OPC_LW       = 0x23 << 26,
    OPC_LBU      = 0x24 << 26,
    OPC_LHU      = 0x25 << 26,
    OPC_LWU      = 0x27 << 26,
    OPC_SB       = 0x28 << 26,
    OPC_SH       = 0x29 << 26,
    OPC_SW       = 0x2B << 26,

    OPC_SPECIAL  = 0x00 << 26,
    OPC_SLL      = OPC_SPECIAL | 0x00,
    OPC_SRL      = OPC_SPECIAL | 0x02,
    OPC_ROTR     = OPC_SPECIAL | (0x01 << 21) | 0x02,
    OPC_SRA      = OPC_SPECIAL | 0x03,
    OPC_SLLV     = OPC_SPECIAL | 0x04,
    OPC_SRLV     = OPC_SPECIAL | 0x06,
    OPC_ROTRV    = OPC_SPECIAL | (0x01 << 6) | 0x06,
    OPC_SRAV     = OPC_SPECIAL | 0x07,
    OPC_JR       = OPC_SPECIAL | 0x08,
    OPC_JALR     = OPC_SPECIAL | 0x09,
    OPC_MOVZ     = OPC_SPECIAL | 0x0A,
    OPC_MOVN     = OPC_SPECIAL | 0x0B,
    OPC_MFHI     = OPC_SPECIAL | 0x10,
    OPC_MFLO     = OPC_SPECIAL | 0x12,
    OPC_MULT     = OPC_SPECIAL | 0x18,
    OPC_MULTU    = OPC_SPECIAL | 0x19,
    OPC_DIV      = OPC_SPECIAL | 0x1A,
    OPC_DIVU     = OPC_SPECIAL | 0x1B,
    OPC_ADDU     = OPC_SPECIAL | 0x21,
    OPC_SUBU     = OPC_SPECIAL | 0x23,
    OPC_AND      = OPC_SPECIAL | 0x24,
    OPC_OR       = OPC_SPECIAL | 0x25,
    OPC_XOR      = OPC_SPECIAL | 0x26,
    OPC_NOR      = OPC_SPECIAL | 0x27,
    OPC_SLT      = OPC_SPECIAL | 0x2A,
    OPC_SLTU     = OPC_SPECIAL | 0x2B,

    OPC_REGIMM   = 0x01 << 26,
    OPC_BLTZ     = OPC_REGIMM | (0x00 << 16),
    OPC_BGEZ     = OPC_REGIMM | (0x01 << 16),

    OPC_SPECIAL2 = 0x1c << 26,
    OPC_MUL      = OPC_SPECIAL2 | 0x002,

    OPC_SPECIAL3 = 0x1f << 26,
    OPC_EXT      = OPC_SPECIAL3 | 0x000,
    OPC_INS      = OPC_SPECIAL3 | 0x004,
    OPC_WSBH     = OPC_SPECIAL3 | 0x0a0,
    OPC_SEB      = OPC_SPECIAL3 | 0x420,
    OPC_SEH      = OPC_SPECIAL3 | 0x620,
} MIPSInsn;
/*
 * Type reg
 */
static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
                                   TCGReg rd, TCGReg rs, TCGReg rt)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    tcg_out32(s, inst);
}
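/*
 * Example encoding: ADDU $v0, $a0, $a1 is rd=2, rs=4, rt=5 with function
 * code 0x21, i.e. (4 << 21) | (5 << 16) | (2 << 11) | 0x21 = 0x00851021.
 */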
/*
 * Type immediate
 */
static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
                                   TCGReg rt, TCGReg rs, TCGArg imm)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (imm & 0xffff);
    tcg_out32(s, inst);
}
/*
 * Type bitfield
 */
static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
                                  TCGReg rs, int msb, int lsb)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (msb & 0x1F) << 11;
    inst |= (lsb & 0x1F) << 6;
    tcg_out32(s, inst);
}
/*
 * Type branch
 */
static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
                                  TCGReg rt, TCGReg rs)
{
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again.  This ensures that caches and
       memory are kept coherent during retranslation. */
    uint16_t offset = (uint16_t)*s->code_ptr;

    tcg_out_opc_imm(s, opc, rt, rs, offset);
}
/*
 * Type sa
 */
static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
                                  TCGReg rd, TCGReg rt, TCGArg sa)
{
    int32_t inst;

    inst = opc;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    inst |= (sa & 0x1F) << 6;
    tcg_out32(s, inst);
}
/*
 * Type jump.
 * Returns true if the branch was in range and the insn was emitted.
 */
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
{
    uintptr_t dest = (uintptr_t)target;
    uintptr_t from = (uintptr_t)s->code_ptr + 4;
    int32_t inst;

    /* The pc-region branch happens within the 256MB region of
       the delay slot (thus the +4).  */
    if ((from ^ dest) & -(1 << 28)) {
        return false;
    }
    assert((dest & 3) == 0);

    inst = opc;
    inst |= (dest >> 2) & 0x3ffffff;
    tcg_out32(s, inst);
    return true;
}
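/*
 * Example: a jump whose delay slot is at 0x08001234 can reach any word in
 * 0x08000000..0x0bffffff; a target of 0x10000000 differs in bits 31:28,
 * so the region test above fails and the caller falls back to JR/JALR.
 */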
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, 0);
}
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (ret != arg) {
        tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
    }
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg reg, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
    } else if (arg == (uint16_t)arg) {
        tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
    } else {
        tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
        if (arg & 0xffff) {
            tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
        }
    }
}
static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
    } else {
        /* ret and arg can't be register AT (TCG_TMP0).  */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
    } else {
        /* ret and arg can't be register AT (TCG_TMP0).  */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
    } else {
        /* ret and arg must be different, and neither may be AT (TCG_TMP0). */
        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
        tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
    }
}
static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
    }
}
static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
                         TCGReg addr, intptr_t ofs)
{
    int16_t lo = ofs;

    if (ofs != lo) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
        if (addr != TCG_REG_ZERO) {
            tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
        }
        addr = TCG_TMP0;
    }
    tcg_out_opc_imm(s, opc, data, addr, lo);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
}
static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
{
    if (val == (int16_t)val) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
        tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
    }
}
/* Bit 0 set if inversion required; bit 1 set if swapping required.  */
#define MIPS_CMP_INV  1
#define MIPS_CMP_SWAP 2

static const uint8_t mips_cmp_map[16] = {
    [TCG_COND_LT]  = 0,
    [TCG_COND_LTU] = 0,
    [TCG_COND_GE]  = MIPS_CMP_INV,
    [TCG_COND_GEU] = MIPS_CMP_INV,
    [TCG_COND_LE]  = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_GT]  = MIPS_CMP_SWAP,
    [TCG_COND_GTU] = MIPS_CMP_SWAP,
};
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    MIPSInsn s_opc = OPC_SLTU;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        break;

    case TCG_COND_NE:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        break;

    case TCG_COND_LT: case TCG_COND_GE: case TCG_COND_LE: case TCG_COND_GT:
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU: case TCG_COND_GEU: case TCG_COND_LEU: case TCG_COND_GTU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
        if (cmp_map & MIPS_CMP_INV) {
            tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        }
        break;

    default:
        tcg_abort();
        break;
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    static const MIPSInsn b_zero[16] = {
        [TCG_COND_LT] = OPC_BLTZ,
        [TCG_COND_GT] = OPC_BGTZ,
        [TCG_COND_LE] = OPC_BLEZ,
        [TCG_COND_GE] = OPC_BGEZ,
    };

    MIPSInsn s_opc = OPC_SLTU;
    MIPSInsn b_opc;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        b_opc = OPC_BEQ;
        break;
    case TCG_COND_NE:
        b_opc = OPC_BNE;
        break;

    case TCG_COND_LT: case TCG_COND_GT: case TCG_COND_LE: case TCG_COND_GE:
        if (arg2 == 0) {
            b_opc = b_zero[cond];
            arg2 = arg1;
            arg1 = 0;
            break;
        }
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU: case TCG_COND_GTU: case TCG_COND_LEU: case TCG_COND_GEU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
        b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
        arg1 = TCG_TMP0;
        arg2 = TCG_REG_ZERO;
        break;

    default:
        tcg_abort();
        break;
    }

    tcg_out_opc_br(s, b_opc, arg1, arg2);
    if (l->has_value) {
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
    }
    tcg_out_nop(s);
}
static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
                                 TCGReg al, TCGReg ah,
                                 TCGReg bl, TCGReg bh)
{
    /* Merge highpart comparison into AH.  */
    if (bh != 0) {
        if (ah != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
            ah = tmp0;
        } else {
            ah = bh;
        }
    }
    /* Merge lowpart comparison into AL.  */
    if (bl != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
            al = tmp1;
        } else {
            al = bl;
        }
    }
    /* Merge high and low part comparisons into AL.  */
    if (ah != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
            al = tmp0;
        } else {
            al = ah;
        }
    }
    return al;
}
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    TCGReg tmp0 = TCG_TMP0;
    TCGReg tmp1 = ret;

    assert(ret != TCG_TMP0);
    if (ret == ah || ret == bh) {
        assert(ret != TCG_TMP1);
        tmp1 = TCG_TMP1;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
        tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
        break;

    default:
        tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
        tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
        tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
        tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
        tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
        break;
    }
}
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    TCGCond b_cond = TCG_COND_NE;
    TCGReg tmp = TCG_TMP1;

    /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
       With setcond, we emit between 3 and 10 insns and only 1 branch,
       which ought to get better branch prediction.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        b_cond = cond;
        tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            b_cond = TCG_COND_EQ;
        }
        tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
        break;
    }

    tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
}
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, TCGReg c2, TCGReg v)
{
    MIPSInsn m_opc = OPC_MOVN;

    switch (cond) {
    case TCG_COND_EQ:
        m_opc = OPC_MOVZ;
        /* FALLTHRU */
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
            c1 = TCG_TMP0;
        }
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            m_opc = OPC_MOVZ;
        }
        tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
        c1 = TCG_TMP0;
        break;
    }

    tcg_out_opc_reg(s, m_opc, ret, v, c1);
}
static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    /* Note that the ABI requires the called function's address to be
       loaded into T9, even if a direct branch is in range.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);

    /* But do try a direct branch, allowing the cpu better insn prefetch.  */
    if (tail) {
        if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
        }
    } else {
        if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
            tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
        }
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
    tcg_out_nop(s);
}
#if defined(CONFIG_SOFTMMU)
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * I is where we want to put this argument, and is updated and returned
 * for the next call. ARG is the argument itself.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
{
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
    } else {
        tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
    }
    return i + 1;
}
static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (arg == 0) {
        tmp = TCG_REG_ZERO;
    } else {
        if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
            tmp = tcg_target_call_iarg_regs[i];
        }
        tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
    }
    return tcg_out_call_iarg_reg(s, i, tmp);
}
static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
    i = (i + 1) & ~1;
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
    return i;
}
/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers AT, T0, A0.  */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, int mem_index, TCGMemOp s_bits,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Load the tlb comparator.  */
    tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF);
    }

    /* Mask the page bits, keeping the alignment bits to compare against.
       In between, load the tlb addend for the fast path.  */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
    tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);

    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

    if (TARGET_LONG_BITS == 64) {
        /* delay slot */
        tcg_out_nop(s);

        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, base);
    }

    /* delay slot */
    tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
}
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS == 64) {
        label->label_ptr[1] = label_ptr[1];
    }
}
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg v0;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, oi);
    i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
    tcg_out_call_int(s, qemu_ld_helpers[opc], false);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);

    v0 = l->datalo_reg;
    if ((opc & MO_SIZE) == MO_64) {
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here.  So we must move V1 first.  */
        if (MIPS_BE) {
            tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
            v0 = l->datahi_reg;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
        }
    }

    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
}
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, oi);

    /* Tail call to the store helper.  Thus force the return address
       computation to take place in the return address register.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
    i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
    tcg_out_call_int(s, qemu_st_helpers[opc], true);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
}
#endif
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16(s, datalo, TCG_TMP1);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16s(s, datalo, TCG_TMP1);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        break;
    case MO_UL:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
        tcg_out_bswap32(s, datahi, TCG_TMP1);
        break;
    case MO_Q:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
        break;
    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
    int mem_index;
    TCGMemOp s_bits;
#endif
    /* Note that we've eliminated V0 from the output registers,
       so we won't overwrite the base register during loading.  */
    TCGReg base = TCG_REG_V0;

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                     s_bits, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (GUEST_BASE == 0 && data_regl != addr_regl) {
        base = addr_regl;
    } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
        tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
#endif
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc) {
    case MO_8:
        tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
        break;

    case MO_16 | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
        tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_16:
        tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
        break;

    case MO_32 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_32:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
        break;

    case MO_64 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, TCG_TMP1, datahi);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
        break;
    case MO_64:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
                            bool cbh, bool is_sub)
{
    TCGReg th = TCG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
        }
        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
        } else {
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh, base;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
    int mem_index;
    TCGMemOp s_bits;
#endif

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);
    s_bits = opc & MO_SIZE;

    /* Note that we eliminated the helper's address argument,
       so we can reuse that for the base.  */
    base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                     s_bits, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (GUEST_BASE == 0) {
        base = addr_regl;
    } else {
        base = TCG_REG_A0;
        if (GUEST_BASE == (int16_t)GUEST_BASE) {
            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
            tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
        }
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    MIPSInsn i1, i2;
    TCGArg a0, a1, a2;
    int c2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            TCGReg b0 = TCG_REG_ZERO;

            if (a0 & ~0xffff) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
                b0 = TCG_REG_V0;
            }
            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
                             (uintptr_t)tb_ret_addr);
                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
            }
            /* delay slot */
            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Avoid clobbering the address during retranslation.  */
            tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
        } else {
            /* indirect jump method */
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                       (uintptr_t)(s->tb_next + a0));
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
        }
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
                       arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
        i1 = OPC_LBU;
        goto do_ldst;
    case INDEX_op_ld8s_i32:
        i1 = OPC_LB;
        goto do_ldst;
    case INDEX_op_ld16u_i32:
        i1 = OPC_LHU;
        goto do_ldst;
    case INDEX_op_ld16s_i32:
        i1 = OPC_LH;
        goto do_ldst;
    case INDEX_op_ld_i32:
        i1 = OPC_LW;
        goto do_ldst;
    case INDEX_op_st8_i32:
        i1 = OPC_SB;
        goto do_ldst;
    case INDEX_op_st16_i32:
        i1 = OPC_SH;
        goto do_ldst;
    case INDEX_op_st_i32:
        i1 = OPC_SW;
    do_ldst:
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        i1 = OPC_ADDU, i2 = OPC_ADDIU;
        goto do_binary;
    case INDEX_op_or_i32:
        i1 = OPC_OR, i2 = OPC_ORI;
        goto do_binary;
    case INDEX_op_xor_i32:
        i1 = OPC_XOR, i2 = OPC_XORI;
    do_binary:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, a2);
            break;
        }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
            break;
        }
        i1 = OPC_SUBU;
        goto do_binaryv;
    case INDEX_op_and_i32:
        if (c2 && a2 != (uint16_t)a2) {
            int msb = ctz32(~a2) - 1;
            assert(use_mips32r2_instructions);
            assert(is_p2m1(a2));
            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_nor_i32:
        i1 = OPC_NOR;
        goto do_binaryv;

    case INDEX_op_mul_i32:
        if (use_mips32_instructions) {
            tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_mulsh_i32:
        i1 = OPC_MULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i32:
        i1 = OPC_MULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i32:
        i1 = OPC_DIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i32:
        i1 = OPC_DIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFHI;
    do_hilo1:
        tcg_out_opc_reg(s, i1, 0, a1, a2);
        tcg_out_opc_reg(s, i2, a0, 0, 0);
        break;

    case INDEX_op_muls2_i32:
        i1 = OPC_MULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i32:
        i1 = OPC_MULTU;
    do_hilo2:
        tcg_out_opc_reg(s, i1, 0, a2, args[3]);
        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
        tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
        break;

    case INDEX_op_not_i32:
        i1 = OPC_NOR;
        goto do_unary;
    case INDEX_op_bswap16_i32:
        i1 = OPC_WSBH;
        goto do_unary;
    case INDEX_op_ext8s_i32:
        i1 = OPC_SEB;
        goto do_unary;
    case INDEX_op_ext16s_i32:
        i1 = OPC_SEH;
    do_unary:
        tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_sar_i32:
        i1 = OPC_SRAV, i2 = OPC_SRA;
        goto do_shift;
    case INDEX_op_shl_i32:
        i1 = OPC_SLLV, i2 = OPC_SLL;
        goto do_shift;
    case INDEX_op_shr_i32:
        i1 = OPC_SRLV, i2 = OPC_SRL;
        goto do_shift;
    case INDEX_op_rotr_i32:
        i1 = OPC_ROTRV, i2 = OPC_ROTR;
    do_shift:
        if (c2) {
            tcg_out_opc_sa(s, i2, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, i1, a0, a2, a1);
        }
        break;
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
        tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], a0, a1, a2, args[3]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef mips_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rN" } },

    { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_not_i32, { "r", "rZ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "rZ" } },
    { INDEX_op_ext16s_i32, { "r", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
#else
    { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
#endif
    { -1 },
};
static int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_RA,       /* should be last for ABI compliance */
};
/* The Linux kernel doesn't provide any information about the available
   instruction set.  Probe it using a signal handler. */

#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulty instruction */
    ucontext_t *uc = (ucontext_t *)data;
    uc->uc_mcontext.pc += 4;

    got_sigill = 1;
}
static void tcg_target_detect_isa(void)
{
    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* Probe for movn/movz, necessary to implement movcond. */
#ifndef use_movnz_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "movn $zero, $zero, $zero\n"
                 "movz $zero, $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_movnz_instructions = !got_sigill;
#endif

    /* Probe for MIPS32 instructions.  As no subsetting is allowed
       by the specification, it is only necessary to probe for one
       of the instructions. */
#ifndef use_mips32_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "mul $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_mips32_instructions = !got_sigill;
#endif

    /* Probe for MIPS32r2 instructions if MIPS32 instructions are
       available.  As no subsetting is allowed by the specification,
       it is only necessary to probe for one of the instructions. */
#ifndef use_mips32r2_instructions
    if (use_mips32_instructions) {
        got_sigill = 0;
        asm volatile(".set push\n"
                     ".set mips32r2\n"
                     "seb $zero, $zero\n"
                     ".set pop\n"
                     : : : );
        use_mips32r2_instructions = !got_sigill;
    }
#endif

    sigaction(SIGILL, &sa_old, NULL);
}
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size;

    /* reserve some stack space, also for TCG temps. */
    frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                 + TCG_STATIC_CALL_ARGS_SIZE
                 + CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                 ~(TCG_TARGET_STACK_ALIGN - 1);
    tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                  + TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* TB prologue */
    tcg_out_addi(s, TCG_REG_SP, -frame_size);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    /* Call generated code */
    tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tb_ret_addr = s->code_ptr;

    /* TB epilogue */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    /* delay slot */
    tcg_out_addi(s, TCG_REG_SP, frame_size);
}
static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();
    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
    tcg_regset_set(tcg_target_call_clobber_regs,
                   (1 << TCG_REG_V0) |
                   (1 << TCG_REG_V1) |
                   (1 << TCG_REG_A0) |
                   (1 << TCG_REG_A1) |
                   (1 << TCG_REG_A2) |
                   (1 << TCG_REG_A3) |
                   (1 << TCG_REG_T0) |
                   (1 << TCG_REG_T1) |
                   (1 << TCG_REG_T2) |
                   (1 << TCG_REG_T3) |
                   (1 << TCG_REG_T4) |
                   (1 << TCG_REG_T5) |
                   (1 << TCG_REG_T6) |
                   (1 << TCG_REG_T7) |
                   (1 << TCG_REG_T8) |
                   (1 << TCG_REG_T9));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP1);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */

    tcg_add_target_add_op_defs(mips_op_defs);
}
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    *ptr = deposit32(*ptr, 0, 26, addr >> 2);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}