2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
39 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
75 /* check if we really need so many registers :P */
76 static const TCGReg tcg_target_reg_alloc_order
[] = {
102 static const TCGReg tcg_target_call_iarg_regs
[4] = {
109 static const TCGReg tcg_target_call_oarg_regs
[2] = {
114 static tcg_insn_unit
*tb_ret_addr
;
116 static inline uint32_t reloc_pc16_val(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
118 /* Let the compiler perform the right-shift as part of the arithmetic. */
119 ptrdiff_t disp
= target
- (pc
+ 1);
120 assert(disp
== (int16_t)disp
);
121 return disp
& 0xffff;
124 static inline void reloc_pc16(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
126 *pc
= deposit32(*pc
, 0, 16, reloc_pc16_val(pc
, target
));
129 static inline uint32_t reloc_26_val(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
131 assert((((uintptr_t)pc
^ (uintptr_t)target
) & 0xf0000000) == 0);
132 return ((uintptr_t)target
>> 2) & 0x3ffffff;
135 static inline void reloc_26(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
137 *pc
= deposit32(*pc
, 0, 26, reloc_26_val(pc
, target
));
/* Relocation hook called by the common TCG code: apply relocation TYPE
   at CODE_PTR so that it refers to VALUE.  Only the 16-bit pc-relative
   branch relocation (R_MIPS_PC16) is supported by this backend, as the
   assert below enforces.
   NOTE(review): the extraction is missing original line 144 between the
   two visible statements — presumably a check on ADDEND (other TCG
   backends assert addend == 0 here); confirm against the full file. */
140 static void patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
141 intptr_t value
, intptr_t addend
)
143 assert(type
== R_MIPS_PC16
);
/* VALUE is the absolute target address; reloc_pc16 converts it to a
   16-bit word displacement and patches it into the insn. */
145 reloc_pc16(code_ptr
, (tcg_insn_unit
*)value
);
148 /* parse target specific constraints */
149 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
156 ct
->ct
|= TCG_CT_REG
;
157 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
159 case 'L': /* qemu_ld output arg constraint */
160 ct
->ct
|= TCG_CT_REG
;
161 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
162 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_V0
);
164 case 'l': /* qemu_ld input arg constraint */
165 ct
->ct
|= TCG_CT_REG
;
166 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
167 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A0
);
168 #if defined(CONFIG_SOFTMMU)
169 if (TARGET_LONG_BITS
== 64) {
170 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A2
);
174 case 'S': /* qemu_st constraint */
175 ct
->ct
|= TCG_CT_REG
;
176 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
177 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A0
);
178 #if defined(CONFIG_SOFTMMU)
179 if (TARGET_LONG_BITS
== 32) {
180 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A1
);
182 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A2
);
183 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A3
);
188 ct
->ct
|= TCG_CT_CONST_U16
;
191 ct
->ct
|= TCG_CT_CONST_S16
;
194 /* We are cheating a bit here, using the fact that the register
195 ZERO is also the register number 0. Hence there is no need
196 to check for const_args in each instruction. */
197 ct
->ct
|= TCG_CT_CONST_ZERO
;
207 /* test if a constant matches the constraint */
208 static inline int tcg_target_const_match(tcg_target_long val
, TCGType type
,
209 const TCGArgConstraint
*arg_ct
)
213 if (ct
& TCG_CT_CONST
)
215 else if ((ct
& TCG_CT_CONST_ZERO
) && val
== 0)
217 else if ((ct
& TCG_CT_CONST_U16
) && val
== (uint16_t)val
)
219 else if ((ct
& TCG_CT_CONST_S16
) && val
== (int16_t)val
)
225 /* instruction opcodes */
228 OPC_JAL
= 0x03 << 26,
229 OPC_BEQ
= 0x04 << 26,
230 OPC_BNE
= 0x05 << 26,
231 OPC_BLEZ
= 0x06 << 26,
232 OPC_BGTZ
= 0x07 << 26,
233 OPC_ADDIU
= 0x09 << 26,
234 OPC_SLTI
= 0x0A << 26,
235 OPC_SLTIU
= 0x0B << 26,
236 OPC_ANDI
= 0x0C << 26,
237 OPC_ORI
= 0x0D << 26,
238 OPC_XORI
= 0x0E << 26,
239 OPC_LUI
= 0x0F << 26,
243 OPC_LBU
= 0x24 << 26,
244 OPC_LHU
= 0x25 << 26,
245 OPC_LWU
= 0x27 << 26,
250 OPC_SPECIAL
= 0x00 << 26,
251 OPC_SLL
= OPC_SPECIAL
| 0x00,
252 OPC_SRL
= OPC_SPECIAL
| 0x02,
253 OPC_ROTR
= OPC_SPECIAL
| (0x01 << 21) | 0x02,
254 OPC_SRA
= OPC_SPECIAL
| 0x03,
255 OPC_SLLV
= OPC_SPECIAL
| 0x04,
256 OPC_SRLV
= OPC_SPECIAL
| 0x06,
257 OPC_ROTRV
= OPC_SPECIAL
| (0x01 << 6) | 0x06,
258 OPC_SRAV
= OPC_SPECIAL
| 0x07,
259 OPC_JR
= OPC_SPECIAL
| 0x08,
260 OPC_JALR
= OPC_SPECIAL
| 0x09,
261 OPC_MOVZ
= OPC_SPECIAL
| 0x0A,
262 OPC_MOVN
= OPC_SPECIAL
| 0x0B,
263 OPC_MFHI
= OPC_SPECIAL
| 0x10,
264 OPC_MFLO
= OPC_SPECIAL
| 0x12,
265 OPC_MULT
= OPC_SPECIAL
| 0x18,
266 OPC_MULTU
= OPC_SPECIAL
| 0x19,
267 OPC_DIV
= OPC_SPECIAL
| 0x1A,
268 OPC_DIVU
= OPC_SPECIAL
| 0x1B,
269 OPC_ADDU
= OPC_SPECIAL
| 0x21,
270 OPC_SUBU
= OPC_SPECIAL
| 0x23,
271 OPC_AND
= OPC_SPECIAL
| 0x24,
272 OPC_OR
= OPC_SPECIAL
| 0x25,
273 OPC_XOR
= OPC_SPECIAL
| 0x26,
274 OPC_NOR
= OPC_SPECIAL
| 0x27,
275 OPC_SLT
= OPC_SPECIAL
| 0x2A,
276 OPC_SLTU
= OPC_SPECIAL
| 0x2B,
278 OPC_REGIMM
= 0x01 << 26,
279 OPC_BLTZ
= OPC_REGIMM
| (0x00 << 16),
280 OPC_BGEZ
= OPC_REGIMM
| (0x01 << 16),
282 OPC_SPECIAL2
= 0x1c << 26,
283 OPC_MUL
= OPC_SPECIAL2
| 0x002,
285 OPC_SPECIAL3
= 0x1f << 26,
286 OPC_INS
= OPC_SPECIAL3
| 0x004,
287 OPC_WSBH
= OPC_SPECIAL3
| 0x0a0,
288 OPC_SEB
= OPC_SPECIAL3
| 0x420,
289 OPC_SEH
= OPC_SPECIAL3
| 0x620,
295 static inline void tcg_out_opc_reg(TCGContext
*s
, int opc
,
296 TCGReg rd
, TCGReg rs
, TCGReg rt
)
301 inst
|= (rs
& 0x1F) << 21;
302 inst
|= (rt
& 0x1F) << 16;
303 inst
|= (rd
& 0x1F) << 11;
310 static inline void tcg_out_opc_imm(TCGContext
*s
, int opc
,
311 TCGReg rt
, TCGReg rs
, TCGArg imm
)
316 inst
|= (rs
& 0x1F) << 21;
317 inst
|= (rt
& 0x1F) << 16;
318 inst
|= (imm
& 0xffff);
/* Emit an I-type branch insn (opcode OPC, registers RT/RS).  The 16-bit
   offset field is deliberately seeded from whatever is already at the
   current output position rather than a constant: on retranslation this
   re-emits the previously relocated displacement unchanged, so the
   encoded branch target is not clobbered (see the original comment
   below).  The final value is fixed up later via reloc_pc16. */
325 static inline void tcg_out_opc_br(TCGContext
*s
, int opc
,
326 TCGReg rt
, TCGReg rs
)
328 /* We pay attention here to not modify the branch target by reading
329 the existing value and using it again. This ensure that caches and
330 memory are kept coherent during retranslation. */
331 uint16_t offset
= (uint16_t)*s
->code_ptr
;
333 tcg_out_opc_imm(s
, opc
, rt
, rs
, offset
);
339 static inline void tcg_out_opc_sa(TCGContext
*s
, int opc
,
340 TCGReg rd
, TCGReg rt
, TCGArg sa
)
345 inst
|= (rt
& 0x1F) << 16;
346 inst
|= (rd
& 0x1F) << 11;
347 inst
|= (sa
& 0x1F) << 6;
354 * Returns true if the branch was in range and the insn was emitted.
356 static bool tcg_out_opc_jmp(TCGContext
*s
, int opc
, void *target
)
358 uintptr_t dest
= (uintptr_t)target
;
359 uintptr_t from
= (uintptr_t)s
->code_ptr
+ 4;
362 /* The pc-region branch happens within the 256MB region of
363 the delay slot (thus the +4). */
364 if ((from
^ dest
) & -(1 << 28)) {
367 assert((dest
& 3) == 0);
370 inst
|= (dest
>> 2) & 0x3ffffff;
375 static inline void tcg_out_nop(TCGContext
*s
)
380 static inline void tcg_out_mov(TCGContext
*s
, TCGType type
,
381 TCGReg ret
, TCGReg arg
)
383 /* Simple reg-reg move, optimising out the 'do nothing' case */
385 tcg_out_opc_reg(s
, OPC_ADDU
, ret
, arg
, TCG_REG_ZERO
);
389 static inline void tcg_out_movi(TCGContext
*s
, TCGType type
,
390 TCGReg reg
, tcg_target_long arg
)
392 if (arg
== (int16_t)arg
) {
393 tcg_out_opc_imm(s
, OPC_ADDIU
, reg
, TCG_REG_ZERO
, arg
);
394 } else if (arg
== (uint16_t)arg
) {
395 tcg_out_opc_imm(s
, OPC_ORI
, reg
, TCG_REG_ZERO
, arg
);
397 tcg_out_opc_imm(s
, OPC_LUI
, reg
, TCG_REG_ZERO
, arg
>> 16);
399 tcg_out_opc_imm(s
, OPC_ORI
, reg
, reg
, arg
& 0xffff);
404 static inline void tcg_out_bswap16(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
406 if (use_mips32r2_instructions
) {
407 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
409 /* ret and arg can't be register at */
410 if (ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
414 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
415 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 8);
416 tcg_out_opc_imm(s
, OPC_ANDI
, ret
, ret
, 0xff00);
417 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
421 static inline void tcg_out_bswap16s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
423 if (use_mips32r2_instructions
) {
424 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
425 tcg_out_opc_reg(s
, OPC_SEH
, ret
, 0, ret
);
427 /* ret and arg can't be register at */
428 if (ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
432 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
433 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
434 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 16);
435 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
439 static inline void tcg_out_bswap32(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
441 if (use_mips32r2_instructions
) {
442 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
443 tcg_out_opc_sa(s
, OPC_ROTR
, ret
, ret
, 16);
445 /* ret and arg must be different and can't be register at */
446 if (ret
== arg
|| ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
450 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
452 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 24);
453 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
455 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_AT
, arg
, 0xff00);
456 tcg_out_opc_sa(s
, OPC_SLL
, TCG_REG_AT
, TCG_REG_AT
, 8);
457 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
459 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
460 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_AT
, TCG_REG_AT
, 0xff00);
461 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
465 static inline void tcg_out_ext8s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
467 if (use_mips32r2_instructions
) {
468 tcg_out_opc_reg(s
, OPC_SEB
, ret
, 0, arg
);
470 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
471 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 24);
475 static inline void tcg_out_ext16s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
477 if (use_mips32r2_instructions
) {
478 tcg_out_opc_reg(s
, OPC_SEH
, ret
, 0, arg
);
480 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 16);
481 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 16);
485 static void tcg_out_ldst(TCGContext
*s
, int opc
, TCGReg data
,
486 TCGReg addr
, intptr_t ofs
)
490 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
, ofs
- lo
);
491 if (addr
!= TCG_REG_ZERO
) {
492 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_AT
, TCG_REG_AT
, addr
);
496 tcg_out_opc_imm(s
, opc
, data
, addr
, lo
);
499 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg arg
,
500 TCGReg arg1
, intptr_t arg2
)
502 tcg_out_ldst(s
, OPC_LW
, arg
, arg1
, arg2
);
505 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
506 TCGReg arg1
, intptr_t arg2
)
508 tcg_out_ldst(s
, OPC_SW
, arg
, arg1
, arg2
);
511 static inline void tcg_out_addi(TCGContext
*s
, TCGReg reg
, TCGArg val
)
513 if (val
== (int16_t)val
) {
514 tcg_out_opc_imm(s
, OPC_ADDIU
, reg
, reg
, val
);
516 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
, val
);
517 tcg_out_opc_reg(s
, OPC_ADDU
, reg
, reg
, TCG_REG_AT
);
521 static void tcg_out_brcond(TCGContext
*s
, TCGCond cond
, TCGArg arg1
,
522 TCGArg arg2
, int label_index
)
524 TCGLabel
*l
= &s
->labels
[label_index
];
528 tcg_out_opc_br(s
, OPC_BEQ
, arg1
, arg2
);
531 tcg_out_opc_br(s
, OPC_BNE
, arg1
, arg2
);
535 tcg_out_opc_br(s
, OPC_BLTZ
, 0, arg1
);
537 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg1
, arg2
);
538 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
542 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg1
, arg2
);
543 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
547 tcg_out_opc_br(s
, OPC_BGEZ
, 0, arg1
);
549 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg1
, arg2
);
550 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
554 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg1
, arg2
);
555 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
559 tcg_out_opc_br(s
, OPC_BLEZ
, 0, arg1
);
561 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg2
, arg1
);
562 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
566 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg2
, arg1
);
567 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
571 tcg_out_opc_br(s
, OPC_BGTZ
, 0, arg1
);
573 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg2
, arg1
);
574 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
578 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg2
, arg1
);
579 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
586 reloc_pc16(s
->code_ptr
- 1, l
->u
.value_ptr
);
588 tcg_out_reloc(s
, s
->code_ptr
- 1, R_MIPS_PC16
, label_index
, 0);
593 /* XXX: we implement it at the target level to avoid having to
594 handle cross basic blocks temporaries */
595 static void tcg_out_brcond2(TCGContext
*s
, TCGCond cond
, TCGArg arg1
,
596 TCGArg arg2
, TCGArg arg3
, TCGArg arg4
,
599 tcg_insn_unit
*label_ptr
;
603 tcg_out_brcond(s
, TCG_COND_NE
, arg2
, arg4
, label_index
);
604 tcg_out_brcond(s
, TCG_COND_NE
, arg1
, arg3
, label_index
);
610 tcg_out_brcond(s
, TCG_COND_LT
, arg2
, arg4
, label_index
);
614 tcg_out_brcond(s
, TCG_COND_GT
, arg2
, arg4
, label_index
);
618 tcg_out_brcond(s
, TCG_COND_LTU
, arg2
, arg4
, label_index
);
622 tcg_out_brcond(s
, TCG_COND_GTU
, arg2
, arg4
, label_index
);
628 label_ptr
= s
->code_ptr
;
629 tcg_out_opc_br(s
, OPC_BNE
, arg2
, arg4
);
634 tcg_out_brcond(s
, TCG_COND_EQ
, arg1
, arg3
, label_index
);
638 tcg_out_brcond(s
, TCG_COND_LTU
, arg1
, arg3
, label_index
);
642 tcg_out_brcond(s
, TCG_COND_LEU
, arg1
, arg3
, label_index
);
646 tcg_out_brcond(s
, TCG_COND_GTU
, arg1
, arg3
, label_index
);
650 tcg_out_brcond(s
, TCG_COND_GEU
, arg1
, arg3
, label_index
);
656 reloc_pc16(label_ptr
, s
->code_ptr
);
659 static void tcg_out_movcond(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
660 TCGArg c1
, TCGArg c2
, TCGArg v
)
665 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, c2
);
666 } else if (c2
== 0) {
667 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, c1
);
669 tcg_out_opc_reg(s
, OPC_XOR
, TCG_REG_AT
, c1
, c2
);
670 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
675 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, c2
);
676 } else if (c2
== 0) {
677 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, c1
);
679 tcg_out_opc_reg(s
, OPC_XOR
, TCG_REG_AT
, c1
, c2
);
680 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
684 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c1
, c2
);
685 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
688 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c1
, c2
);
689 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
692 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c1
, c2
);
693 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
696 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c1
, c2
);
697 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
700 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c2
, c1
);
701 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
704 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c2
, c1
);
705 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
708 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c2
, c1
);
709 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
712 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c2
, c1
);
713 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
721 static void tcg_out_setcond(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
722 TCGArg arg1
, TCGArg arg2
)
727 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, arg2
, 1);
728 } else if (arg2
== 0) {
729 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, arg1
, 1);
731 tcg_out_opc_reg(s
, OPC_XOR
, ret
, arg1
, arg2
);
732 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, ret
, 1);
737 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, arg2
);
738 } else if (arg2
== 0) {
739 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, arg1
);
741 tcg_out_opc_reg(s
, OPC_XOR
, ret
, arg1
, arg2
);
742 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, ret
);
746 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg1
, arg2
);
749 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg1
, arg2
);
752 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg1
, arg2
);
753 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
756 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg1
, arg2
);
757 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
760 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg2
, arg1
);
761 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
764 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg2
, arg1
);
765 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
768 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg2
, arg1
);
771 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg2
, arg1
);
779 /* XXX: we implement it at the target level to avoid having to
780 handle cross basic blocks temporaries */
781 static void tcg_out_setcond2(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
782 TCGArg arg1
, TCGArg arg2
, TCGArg arg3
, TCGArg arg4
)
786 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_AT
, arg2
, arg4
);
787 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_T0
, arg1
, arg3
);
788 tcg_out_opc_reg(s
, OPC_AND
, ret
, TCG_REG_AT
, TCG_REG_T0
);
791 tcg_out_setcond(s
, TCG_COND_NE
, TCG_REG_AT
, arg2
, arg4
);
792 tcg_out_setcond(s
, TCG_COND_NE
, TCG_REG_T0
, arg1
, arg3
);
793 tcg_out_opc_reg(s
, OPC_OR
, ret
, TCG_REG_AT
, TCG_REG_T0
);
797 tcg_out_setcond(s
, TCG_COND_LT
, TCG_REG_AT
, arg2
, arg4
);
801 tcg_out_setcond(s
, TCG_COND_GT
, TCG_REG_AT
, arg2
, arg4
);
805 tcg_out_setcond(s
, TCG_COND_LTU
, TCG_REG_AT
, arg2
, arg4
);
809 tcg_out_setcond(s
, TCG_COND_GTU
, TCG_REG_AT
, arg2
, arg4
);
816 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_T0
, arg2
, arg4
);
821 tcg_out_setcond(s
, TCG_COND_LTU
, ret
, arg1
, arg3
);
825 tcg_out_setcond(s
, TCG_COND_LEU
, ret
, arg1
, arg3
);
829 tcg_out_setcond(s
, TCG_COND_GTU
, ret
, arg1
, arg3
);
833 tcg_out_setcond(s
, TCG_COND_GEU
, ret
, arg1
, arg3
);
839 tcg_out_opc_reg(s
, OPC_AND
, ret
, ret
, TCG_REG_T0
);
840 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
843 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*arg
)
845 /* Note that the ABI requires the called function's address to be
846 loaded into T9, even if a direct branch is in range. */
847 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_T9
, (uintptr_t)arg
);
849 /* But do try a direct branch, allowing the cpu better insn prefetch. */
850 if (!tcg_out_opc_jmp(s
, OPC_JAL
, arg
)) {
851 tcg_out_opc_reg(s
, OPC_JALR
, TCG_REG_RA
, TCG_REG_T9
, 0);
857 #if defined(CONFIG_SOFTMMU)
858 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
860 static void * const qemu_ld_helpers
[4] = {
867 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
868 uintxx_t val, int mmu_idx) */
869 static void * const qemu_st_helpers
[4] = {
876 /* Helper routines for marshalling helper function arguments into
877 * the correct registers and stack.
878 * I is where we want to put this argument, and is updated and returned
879 * for the next call. ARG is the argument itself.
881 * We provide routines for arguments which are: immediate, 32 bit
882 * value in register, 16 and 8 bit values in register (which must be zero
883 * extended before use) and 64 bit value in a lo:hi register pair.
886 static int tcg_out_call_iarg_reg(TCGContext
*s
, int i
, TCGReg arg
)
888 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
889 tcg_out_mov(s
, TCG_TYPE_REG
, tcg_target_call_iarg_regs
[i
], arg
);
891 tcg_out_st(s
, TCG_TYPE_REG
, arg
, TCG_REG_SP
, 4 * i
);
896 static int tcg_out_call_iarg_reg8(TCGContext
*s
, int i
, TCGReg arg
)
898 TCGReg tmp
= TCG_REG_AT
;
899 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
900 tmp
= tcg_target_call_iarg_regs
[i
];
902 tcg_out_opc_imm(s
, OPC_ANDI
, tmp
, arg
, 0xff);
903 return tcg_out_call_iarg_reg(s
, i
, tmp
);
906 static int tcg_out_call_iarg_reg16(TCGContext
*s
, int i
, TCGReg arg
)
908 TCGReg tmp
= TCG_REG_AT
;
909 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
910 tmp
= tcg_target_call_iarg_regs
[i
];
912 tcg_out_opc_imm(s
, OPC_ANDI
, tmp
, arg
, 0xffff);
913 return tcg_out_call_iarg_reg(s
, i
, tmp
);
916 static int tcg_out_call_iarg_imm(TCGContext
*s
, int i
, TCGArg arg
)
918 TCGReg tmp
= TCG_REG_AT
;
922 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
923 tmp
= tcg_target_call_iarg_regs
[i
];
925 tcg_out_movi(s
, TCG_TYPE_REG
, tmp
, arg
);
927 return tcg_out_call_iarg_reg(s
, i
, tmp
);
930 static int tcg_out_call_iarg_reg2(TCGContext
*s
, int i
, TCGReg al
, TCGReg ah
)
933 i
= tcg_out_call_iarg_reg(s
, i
, (MIPS_BE
? ah
: al
));
934 i
= tcg_out_call_iarg_reg(s
, i
, (MIPS_BE
? al
: ah
));
938 /* Perform the tlb comparison operation. The complete host address is
939 placed in BASE. Clobbers AT, T0, A0. */
940 static void tcg_out_tlb_load(TCGContext
*s
, TCGReg base
, TCGReg addrl
,
941 TCGReg addrh
, int mem_index
, TCGMemOp s_bits
,
942 tcg_insn_unit
*label_ptr
[2], bool is_load
)
946 ? offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
)
947 : offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
));
948 int add_off
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
950 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_A0
, addrl
,
951 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
952 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_A0
, TCG_REG_A0
,
953 (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
954 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_A0
, TCG_REG_A0
, TCG_AREG0
);
956 /* Compensate for very large offsets. */
957 if (add_off
>= 0x8000) {
958 /* Most target env are smaller than 32k; none are larger than 64k.
959 Simplify the logic here merely to offset by 0x7ff0, giving us a
960 range just shy of 64k. Check this assumption. */
961 QEMU_BUILD_BUG_ON(offsetof(CPUArchState
,
962 tlb_table
[NB_MMU_MODES
- 1][1])
964 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_A0
, TCG_REG_A0
, 0x7ff0);
969 /* Load the tlb comparator. */
970 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_AT
, TCG_REG_A0
, cmp_off
+ LO_OFF
);
971 if (TARGET_LONG_BITS
== 64) {
972 tcg_out_opc_imm(s
, OPC_LW
, base
, TCG_REG_A0
, cmp_off
+ HI_OFF
);
975 /* Mask the page bits, keeping the alignment bits to compare against.
976 In between, load the tlb addend for the fast path. */
977 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_T0
,
978 TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
979 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_A0
, TCG_REG_A0
, add_off
);
980 tcg_out_opc_reg(s
, OPC_AND
, TCG_REG_T0
, TCG_REG_T0
, addrl
);
982 label_ptr
[0] = s
->code_ptr
;
983 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_T0
, TCG_REG_AT
);
985 if (TARGET_LONG_BITS
== 64) {
989 label_ptr
[1] = s
->code_ptr
;
990 tcg_out_opc_br(s
, OPC_BNE
, addrh
, base
);
994 tcg_out_opc_reg(s
, OPC_ADDU
, base
, TCG_REG_A0
, addrl
);
997 static void add_qemu_ldst_label(TCGContext
*s
, int is_ld
, TCGMemOp opc
,
998 TCGReg datalo
, TCGReg datahi
,
999 TCGReg addrlo
, TCGReg addrhi
,
1000 int mem_index
, void *raddr
,
1001 tcg_insn_unit
*label_ptr
[2])
1003 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1005 label
->is_ld
= is_ld
;
1007 label
->datalo_reg
= datalo
;
1008 label
->datahi_reg
= datahi
;
1009 label
->addrlo_reg
= addrlo
;
1010 label
->addrhi_reg
= addrhi
;
1011 label
->mem_index
= mem_index
;
1012 label
->raddr
= raddr
;
1013 label
->label_ptr
[0] = label_ptr
[0];
1014 if (TARGET_LONG_BITS
== 64) {
1015 label
->label_ptr
[1] = label_ptr
[1];
1019 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1021 TCGMemOp opc
= l
->opc
;
1024 /* resolve label address */
1025 reloc_pc16(l
->label_ptr
[0], s
->code_ptr
);
1026 if (TARGET_LONG_BITS
== 64) {
1027 reloc_pc16(l
->label_ptr
[1], s
->code_ptr
);
1031 i
= tcg_out_call_iarg_reg(s
, i
, TCG_AREG0
);
1032 if (TARGET_LONG_BITS
== 64) {
1033 i
= tcg_out_call_iarg_reg2(s
, i
, l
->addrlo_reg
, l
->addrhi_reg
);
1035 i
= tcg_out_call_iarg_reg(s
, i
, l
->addrlo_reg
);
1037 i
= tcg_out_call_iarg_imm(s
, i
, l
->mem_index
);
1038 tcg_out_call(s
, qemu_ld_helpers
[opc
& MO_SIZE
]);
1040 switch (opc
& MO_SSIZE
) {
1042 tcg_out_opc_imm(s
, OPC_ANDI
, l
->datalo_reg
, TCG_REG_V0
, 0xff);
1045 tcg_out_ext8s(s
, l
->datalo_reg
, TCG_REG_V0
);
1048 tcg_out_opc_imm(s
, OPC_ANDI
, l
->datalo_reg
, TCG_REG_V0
, 0xffff);
1051 tcg_out_ext16s(s
, l
->datalo_reg
, TCG_REG_V0
);
1054 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datalo_reg
, TCG_REG_V0
);
1057 /* We eliminated V0 from the possible output registers, so it
1058 cannot be clobbered here. So we must move V1 first. */
1059 tcg_out_mov(s
, TCG_TYPE_I32
, MIPS_BE
? l
->datalo_reg
: l
->datahi_reg
,
1061 tcg_out_mov(s
, TCG_TYPE_I32
, MIPS_BE
? l
->datahi_reg
: l
->datalo_reg
,
1068 reloc_pc16(s
->code_ptr
, l
->raddr
);
1069 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_ZERO
, TCG_REG_ZERO
);
1073 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1075 TCGMemOp opc
= l
->opc
;
1076 TCGMemOp s_bits
= opc
& MO_SIZE
;
1079 /* resolve label address */
1080 reloc_pc16(l
->label_ptr
[0], s
->code_ptr
);
1081 if (TARGET_LONG_BITS
== 64) {
1082 reloc_pc16(l
->label_ptr
[1], s
->code_ptr
);
1086 i
= tcg_out_call_iarg_reg(s
, i
, TCG_AREG0
);
1087 if (TARGET_LONG_BITS
== 64) {
1088 i
= tcg_out_call_iarg_reg2(s
, i
, l
->addrlo_reg
, l
->addrhi_reg
);
1090 i
= tcg_out_call_iarg_reg(s
, i
, l
->addrlo_reg
);
1094 i
= tcg_out_call_iarg_reg8(s
, i
, l
->datalo_reg
);
1097 i
= tcg_out_call_iarg_reg16(s
, i
, l
->datalo_reg
);
1100 i
= tcg_out_call_iarg_reg(s
, i
, l
->datalo_reg
);
1103 i
= tcg_out_call_iarg_reg2(s
, i
, l
->datalo_reg
, l
->datahi_reg
);
1108 i
= tcg_out_call_iarg_imm(s
, i
, l
->mem_index
);
1109 tcg_out_call(s
, qemu_st_helpers
[s_bits
]);
1111 reloc_pc16(s
->code_ptr
, l
->raddr
);
1112 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_ZERO
, TCG_REG_ZERO
);
1117 static void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1118 TCGReg base
, TCGMemOp opc
)
1122 tcg_out_opc_imm(s
, OPC_LBU
, datalo
, base
, 0);
1125 tcg_out_opc_imm(s
, OPC_LB
, datalo
, base
, 0);
1127 case MO_UW
| MO_BSWAP
:
1128 tcg_out_opc_imm(s
, OPC_LHU
, TCG_REG_T0
, base
, 0);
1129 tcg_out_bswap16(s
, datalo
, TCG_REG_T0
);
1132 tcg_out_opc_imm(s
, OPC_LHU
, datalo
, base
, 0);
1134 case MO_SW
| MO_BSWAP
:
1135 tcg_out_opc_imm(s
, OPC_LHU
, TCG_REG_T0
, base
, 0);
1136 tcg_out_bswap16s(s
, datalo
, TCG_REG_T0
);
1139 tcg_out_opc_imm(s
, OPC_LH
, datalo
, base
, 0);
1141 case MO_UL
| MO_BSWAP
:
1142 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, 0);
1143 tcg_out_bswap32(s
, datalo
, TCG_REG_T0
);
1146 tcg_out_opc_imm(s
, OPC_LW
, datalo
, base
, 0);
1148 case MO_Q
| MO_BSWAP
:
1149 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, HI_OFF
);
1150 tcg_out_bswap32(s
, datalo
, TCG_REG_T0
);
1151 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, LO_OFF
);
1152 tcg_out_bswap32(s
, datahi
, TCG_REG_T0
);
1155 tcg_out_opc_imm(s
, OPC_LW
, datalo
, base
, LO_OFF
);
1156 tcg_out_opc_imm(s
, OPC_LW
, datahi
, base
, HI_OFF
);
1163 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, TCGMemOp opc
)
1165 TCGReg addr_regl
, addr_regh
__attribute__((unused
));
1166 TCGReg data_regl
, data_regh
;
1167 #if defined(CONFIG_SOFTMMU)
1168 tcg_insn_unit
*label_ptr
[2];
1172 /* Note that we've eliminated V0 from the output registers,
1173 so we won't overwrite the base register during loading. */
1174 TCGReg base
= TCG_REG_V0
;
1176 data_regl
= *args
++;
1177 data_regh
= ((opc
& MO_SIZE
) == MO_64
? *args
++ : 0);
1178 addr_regl
= *args
++;
1179 addr_regh
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1181 #if defined(CONFIG_SOFTMMU)
1183 s_bits
= opc
& MO_SIZE
;
1185 tcg_out_tlb_load(s
, base
, addr_regl
, addr_regh
, mem_index
,
1186 s_bits
, label_ptr
, 1);
1187 tcg_out_qemu_ld_direct(s
, data_regl
, data_regh
, base
, opc
);
1188 add_qemu_ldst_label(s
, 1, opc
, data_regl
, data_regh
, addr_regl
, addr_regh
,
1189 mem_index
, s
->code_ptr
, label_ptr
);
1191 if (GUEST_BASE
== 0 && data_regl
!= addr_regl
) {
1193 } else if (GUEST_BASE
== (int16_t)GUEST_BASE
) {
1194 tcg_out_opc_imm(s
, OPC_ADDIU
, base
, addr_regl
, GUEST_BASE
);
1196 tcg_out_movi(s
, TCG_TYPE_PTR
, base
, GUEST_BASE
);
1197 tcg_out_opc_reg(s
, OPC_ADDU
, base
, base
, addr_regl
);
1199 tcg_out_qemu_ld_direct(s
, data_regl
, data_regh
, base
, opc
);
1203 static void tcg_out_qemu_st_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1204 TCGReg base
, TCGMemOp opc
)
1208 tcg_out_opc_imm(s
, OPC_SB
, datalo
, base
, 0);
1211 case MO_16
| MO_BSWAP
:
1212 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_T0
, datalo
, 0xffff);
1213 tcg_out_bswap16(s
, TCG_REG_T0
, TCG_REG_T0
);
1214 datalo
= TCG_REG_T0
;
1217 tcg_out_opc_imm(s
, OPC_SH
, datalo
, base
, 0);
1220 case MO_32
| MO_BSWAP
:
1221 tcg_out_bswap32(s
, TCG_REG_T0
, datalo
);
1222 datalo
= TCG_REG_T0
;
1225 tcg_out_opc_imm(s
, OPC_SW
, datalo
, base
, 0);
1228 case MO_64
| MO_BSWAP
:
1229 tcg_out_bswap32(s
, TCG_REG_T0
, datalo
);
1230 tcg_out_opc_imm(s
, OPC_SW
, TCG_REG_T0
, base
, HI_OFF
);
1231 tcg_out_bswap32(s
, TCG_REG_T0
, datahi
);
1232 tcg_out_opc_imm(s
, OPC_SW
, TCG_REG_T0
, base
, LO_OFF
);
1235 tcg_out_opc_imm(s
, OPC_SW
, datalo
, base
, LO_OFF
);
1236 tcg_out_opc_imm(s
, OPC_SW
, datahi
, base
, HI_OFF
);
/* Emit a guest memory store.  ARGS carries data-low[, data-high],
   addr-low[, addr-high] (the high halves only for 64-bit data /
   64-bit guest addresses), followed by the mem_index in the softmmu
   case.  OPC describes size/sign/endianness of the access.
   NOTE(review): the extraction dropped the lines declaring/loading
   mem_index and s_bits (original lines 1250-1261) — confirm against
   the full file. */
1244 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, TCGMemOp opc
)
1246 TCGReg addr_regl
, addr_regh
__attribute__((unused
));
1247 TCGReg data_regl
, data_regh
, base
;
1248 #if defined(CONFIG_SOFTMMU)
/* Branch patch points for the two TLB-miss compares (second used only
   when TARGET_LONG_BITS == 64). */
1249 tcg_insn_unit
*label_ptr
[2];
1254 data_regl
= *args
++;
1255 data_regh
= ((opc
& MO_SIZE
) == MO_64
? *args
++ : 0);
1256 addr_regl
= *args
++;
1257 addr_regh
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1259 #if defined(CONFIG_SOFTMMU)
1263 /* Note that we eliminated the helper's address argument,
1264 so we can reuse that for the base. */
1265 base
= (TARGET_LONG_BITS
== 32 ? TCG_REG_A1
: TCG_REG_A2
)
;
/* NOTE(review): this is the *store* path, yet tcg_out_tlb_load is
   called with its final is_load argument as 1, which selects the
   tlb_table addr_read comparator (see tcg_out_tlb_load's cmp_off
   selection).  A store should compare against addr_write, i.e. pass 0
   here — this looks like a genuine bug; confirm against upstream. */
1266 tcg_out_tlb_load(s
, base
, addr_regl
, addr_regh
, mem_index
,
1267 s_bits
, label_ptr
, 1);
/* Fast path: TLB hit, store directly through the translated BASE. */
1268 tcg_out_qemu_st_direct(s
, data_regl
, data_regh
, base
, opc
);
/* Record the slow path; 0 = this is a store label. */
1269 add_qemu_ldst_label(s
, 0, opc
, data_regl
, data_regh
, addr_regl
, addr_regh
,
1270 mem_index
, s
->code_ptr
, label_ptr
);
/* User-mode path: offset the guest address by GUEST_BASE, using an
   immediate add when it fits in 16 signed bits. */
1272 if (GUEST_BASE
== 0) {
1276 if (GUEST_BASE
== (int16_t)GUEST_BASE
) {
1277 tcg_out_opc_imm(s
, OPC_ADDIU
, base
, addr_regl
, GUEST_BASE
);
1279 tcg_out_movi(s
, TCG_TYPE_PTR
, base
, GUEST_BASE
);
1280 tcg_out_opc_reg(s
, OPC_ADDU
, base
, base
, addr_regl
);
1283 tcg_out_qemu_st_direct(s
, data_regl
, data_regh
, base
, opc
);
1287 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1288 const TCGArg
*args
, const int *const_args
)
1291 case INDEX_op_exit_tb
:
1293 uintptr_t a0
= args
[0];
1294 TCGReg b0
= TCG_REG_ZERO
;
1297 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_V0
, a0
& ~0xffff);
1300 if (!tcg_out_opc_jmp(s
, OPC_J
, tb_ret_addr
)) {
1301 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
,
1302 (uintptr_t)tb_ret_addr
);
1303 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_AT
, 0);
1305 tcg_out_opc_imm(s
, OPC_ORI
, TCG_REG_V0
, b0
, a0
& 0xffff);
1308 case INDEX_op_goto_tb
:
1309 if (s
->tb_jmp_offset
) {
1310 /* direct jump method */
1313 /* indirect jump method */
1314 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_AT
, TCG_REG_ZERO
,
1315 (uintptr_t)(s
->tb_next
+ args
[0]));
1316 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_AT
, 0);
1319 s
->tb_next_offset
[args
[0]] = tcg_current_code_size(s
);
1322 tcg_out_brcond(s
, TCG_COND_EQ
, TCG_REG_ZERO
, TCG_REG_ZERO
, args
[0]);
1325 case INDEX_op_ld8u_i32
:
1326 tcg_out_ldst(s
, OPC_LBU
, args
[0], args
[1], args
[2]);
1328 case INDEX_op_ld8s_i32
:
1329 tcg_out_ldst(s
, OPC_LB
, args
[0], args
[1], args
[2]);
1331 case INDEX_op_ld16u_i32
:
1332 tcg_out_ldst(s
, OPC_LHU
, args
[0], args
[1], args
[2]);
1334 case INDEX_op_ld16s_i32
:
1335 tcg_out_ldst(s
, OPC_LH
, args
[0], args
[1], args
[2]);
1337 case INDEX_op_ld_i32
:
1338 tcg_out_ldst(s
, OPC_LW
, args
[0], args
[1], args
[2]);
1340 case INDEX_op_st8_i32
:
1341 tcg_out_ldst(s
, OPC_SB
, args
[0], args
[1], args
[2]);
1343 case INDEX_op_st16_i32
:
1344 tcg_out_ldst(s
, OPC_SH
, args
[0], args
[1], args
[2]);
1346 case INDEX_op_st_i32
:
1347 tcg_out_ldst(s
, OPC_SW
, args
[0], args
[1], args
[2]);
1350 case INDEX_op_add_i32
:
1351 if (const_args
[2]) {
1352 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[0], args
[1], args
[2]);
1354 tcg_out_opc_reg(s
, OPC_ADDU
, args
[0], args
[1], args
[2]);
1357 case INDEX_op_add2_i32
:
1358 if (const_args
[4]) {
1359 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_AT
, args
[2], args
[4]);
1361 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_AT
, args
[2], args
[4]);
1363 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_T0
, TCG_REG_AT
, args
[2]);
1364 if (const_args
[5]) {
1365 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[1], args
[3], args
[5]);
1367 tcg_out_opc_reg(s
, OPC_ADDU
, args
[1], args
[3], args
[5]);
1369 tcg_out_opc_reg(s
, OPC_ADDU
, args
[1], args
[1], TCG_REG_T0
);
1370 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], TCG_REG_AT
);
1372 case INDEX_op_sub_i32
:
1373 if (const_args
[2]) {
1374 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[0], args
[1], -args
[2]);
1376 tcg_out_opc_reg(s
, OPC_SUBU
, args
[0], args
[1], args
[2]);
1379 case INDEX_op_sub2_i32
:
1380 if (const_args
[4]) {
1381 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_AT
, args
[2], -args
[4]);
1383 tcg_out_opc_reg(s
, OPC_SUBU
, TCG_REG_AT
, args
[2], args
[4]);
1385 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_T0
, args
[2], TCG_REG_AT
);
1386 if (const_args
[5]) {
1387 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[1], args
[3], -args
[5]);
1389 tcg_out_opc_reg(s
, OPC_SUBU
, args
[1], args
[3], args
[5]);
1391 tcg_out_opc_reg(s
, OPC_SUBU
, args
[1], args
[1], TCG_REG_T0
);
1392 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], TCG_REG_AT
);
1394 case INDEX_op_mul_i32
:
1395 if (use_mips32_instructions
) {
1396 tcg_out_opc_reg(s
, OPC_MUL
, args
[0], args
[1], args
[2]);
1398 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[1], args
[2]);
1399 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1402 case INDEX_op_muls2_i32
:
1403 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[2], args
[3]);
1404 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1405 tcg_out_opc_reg(s
, OPC_MFHI
, args
[1], 0, 0);
1407 case INDEX_op_mulu2_i32
:
1408 tcg_out_opc_reg(s
, OPC_MULTU
, 0, args
[2], args
[3]);
1409 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1410 tcg_out_opc_reg(s
, OPC_MFHI
, args
[1], 0, 0);
1412 case INDEX_op_mulsh_i32
:
1413 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[1], args
[2]);
1414 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1416 case INDEX_op_muluh_i32
:
1417 tcg_out_opc_reg(s
, OPC_MULTU
, 0, args
[1], args
[2]);
1418 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1420 case INDEX_op_div_i32
:
1421 tcg_out_opc_reg(s
, OPC_DIV
, 0, args
[1], args
[2]);
1422 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1424 case INDEX_op_divu_i32
:
1425 tcg_out_opc_reg(s
, OPC_DIVU
, 0, args
[1], args
[2]);
1426 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1428 case INDEX_op_rem_i32
:
1429 tcg_out_opc_reg(s
, OPC_DIV
, 0, args
[1], args
[2]);
1430 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1432 case INDEX_op_remu_i32
:
1433 tcg_out_opc_reg(s
, OPC_DIVU
, 0, args
[1], args
[2]);
1434 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1437 case INDEX_op_and_i32
:
1438 if (const_args
[2]) {
1439 tcg_out_opc_imm(s
, OPC_ANDI
, args
[0], args
[1], args
[2]);
1441 tcg_out_opc_reg(s
, OPC_AND
, args
[0], args
[1], args
[2]);
1444 case INDEX_op_or_i32
:
1445 if (const_args
[2]) {
1446 tcg_out_opc_imm(s
, OPC_ORI
, args
[0], args
[1], args
[2]);
1448 tcg_out_opc_reg(s
, OPC_OR
, args
[0], args
[1], args
[2]);
1451 case INDEX_op_nor_i32
:
1452 tcg_out_opc_reg(s
, OPC_NOR
, args
[0], args
[1], args
[2]);
1454 case INDEX_op_not_i32
:
1455 tcg_out_opc_reg(s
, OPC_NOR
, args
[0], TCG_REG_ZERO
, args
[1]);
1457 case INDEX_op_xor_i32
:
1458 if (const_args
[2]) {
1459 tcg_out_opc_imm(s
, OPC_XORI
, args
[0], args
[1], args
[2]);
1461 tcg_out_opc_reg(s
, OPC_XOR
, args
[0], args
[1], args
[2]);
1465 case INDEX_op_sar_i32
:
1466 if (const_args
[2]) {
1467 tcg_out_opc_sa(s
, OPC_SRA
, args
[0], args
[1], args
[2]);
1469 tcg_out_opc_reg(s
, OPC_SRAV
, args
[0], args
[2], args
[1]);
1472 case INDEX_op_shl_i32
:
1473 if (const_args
[2]) {
1474 tcg_out_opc_sa(s
, OPC_SLL
, args
[0], args
[1], args
[2]);
1476 tcg_out_opc_reg(s
, OPC_SLLV
, args
[0], args
[2], args
[1]);
1479 case INDEX_op_shr_i32
:
1480 if (const_args
[2]) {
1481 tcg_out_opc_sa(s
, OPC_SRL
, args
[0], args
[1], args
[2]);
1483 tcg_out_opc_reg(s
, OPC_SRLV
, args
[0], args
[2], args
[1]);
1486 case INDEX_op_rotl_i32
:
1487 if (const_args
[2]) {
1488 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[1], 0x20 - args
[2]);
1490 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_AT
, 32);
1491 tcg_out_opc_reg(s
, OPC_SUBU
, TCG_REG_AT
, TCG_REG_AT
, args
[2]);
1492 tcg_out_opc_reg(s
, OPC_ROTRV
, args
[0], TCG_REG_AT
, args
[1]);
1495 case INDEX_op_rotr_i32
:
1496 if (const_args
[2]) {
1497 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[1], args
[2]);
1499 tcg_out_opc_reg(s
, OPC_ROTRV
, args
[0], args
[2], args
[1]);
1503 case INDEX_op_bswap16_i32
:
1504 tcg_out_opc_reg(s
, OPC_WSBH
, args
[0], 0, args
[1]);
1506 case INDEX_op_bswap32_i32
:
1507 tcg_out_opc_reg(s
, OPC_WSBH
, args
[0], 0, args
[1]);
1508 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[0], 16);
1511 case INDEX_op_ext8s_i32
:
1512 tcg_out_opc_reg(s
, OPC_SEB
, args
[0], 0, args
[1]);
1514 case INDEX_op_ext16s_i32
:
1515 tcg_out_opc_reg(s
, OPC_SEH
, args
[0], 0, args
[1]);
1518 case INDEX_op_deposit_i32
:
1519 tcg_out_opc_imm(s
, OPC_INS
, args
[0], args
[2],
1520 ((args
[3] + args
[4] - 1) << 11) | (args
[3] << 6));
1523 case INDEX_op_brcond_i32
:
1524 tcg_out_brcond(s
, args
[2], args
[0], args
[1], args
[3]);
1526 case INDEX_op_brcond2_i32
:
1527 tcg_out_brcond2(s
, args
[4], args
[0], args
[1], args
[2], args
[3], args
[5]);
1530 case INDEX_op_movcond_i32
:
1531 tcg_out_movcond(s
, args
[5], args
[0], args
[1], args
[2], args
[3]);
1534 case INDEX_op_setcond_i32
:
1535 tcg_out_setcond(s
, args
[3], args
[0], args
[1], args
[2]);
1537 case INDEX_op_setcond2_i32
:
1538 tcg_out_setcond2(s
, args
[5], args
[0], args
[1], args
[2], args
[3], args
[4]);
1541 case INDEX_op_qemu_ld8u
:
1542 tcg_out_qemu_ld(s
, args
, MO_UB
);
1544 case INDEX_op_qemu_ld8s
:
1545 tcg_out_qemu_ld(s
, args
, MO_SB
);
1547 case INDEX_op_qemu_ld16u
:
1548 tcg_out_qemu_ld(s
, args
, MO_TEUW
);
1550 case INDEX_op_qemu_ld16s
:
1551 tcg_out_qemu_ld(s
, args
, MO_TESW
);
1553 case INDEX_op_qemu_ld32
:
1554 tcg_out_qemu_ld(s
, args
, MO_TEUL
);
1556 case INDEX_op_qemu_ld64
:
1557 tcg_out_qemu_ld(s
, args
, MO_TEQ
);
1559 case INDEX_op_qemu_st8
:
1560 tcg_out_qemu_st(s
, args
, MO_UB
);
1562 case INDEX_op_qemu_st16
:
1563 tcg_out_qemu_st(s
, args
, MO_TEUW
);
1565 case INDEX_op_qemu_st32
:
1566 tcg_out_qemu_st(s
, args
, MO_TEUL
);
1568 case INDEX_op_qemu_st64
:
1569 tcg_out_qemu_st(s
, args
, MO_TEQ
);
1572 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
1573 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
1574 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
/* Operand-constraint table for the MIPS backend, consumed by the TCG
   register allocator.  Letters: "r" any register, "Z" the zero register
   may satisfy a zero constant, "I"/"J"/"i" immediate classes, "0" must
   match output 0, "L"/"l"/"S" qemu_ld/st register classes that avoid
   the softmmu helper argument registers.
   NOTE(review): mangled extraction — original line numbers are fused
   into the text and the table's terminator/closing lines are elided. */
1580 static const TCGTargetOpDef mips_op_defs
[] = {
1581 { INDEX_op_exit_tb
, { } },
1582 { INDEX_op_goto_tb
, { } },
1583 { INDEX_op_br
, { } },
1585 { INDEX_op_ld8u_i32
, { "r", "r" } },
1586 { INDEX_op_ld8s_i32
, { "r", "r" } },
1587 { INDEX_op_ld16u_i32
, { "r", "r" } },
1588 { INDEX_op_ld16s_i32
, { "r", "r" } },
1589 { INDEX_op_ld_i32
, { "r", "r" } },
1590 { INDEX_op_st8_i32
, { "rZ", "r" } },
1591 { INDEX_op_st16_i32
, { "rZ", "r" } },
1592 { INDEX_op_st_i32
, { "rZ", "r" } },
1594 { INDEX_op_add_i32
, { "r", "rZ", "rJ" } },
1595 { INDEX_op_mul_i32
, { "r", "rZ", "rZ" } },
1596 { INDEX_op_muls2_i32
, { "r", "r", "rZ", "rZ" } },
1597 { INDEX_op_mulu2_i32
, { "r", "r", "rZ", "rZ" } },
1598 { INDEX_op_mulsh_i32
, { "r", "rZ", "rZ" } },
1599 { INDEX_op_muluh_i32
, { "r", "rZ", "rZ" } },
1600 { INDEX_op_div_i32
, { "r", "rZ", "rZ" } },
1601 { INDEX_op_divu_i32
, { "r", "rZ", "rZ" } },
1602 { INDEX_op_rem_i32
, { "r", "rZ", "rZ" } },
1603 { INDEX_op_remu_i32
, { "r", "rZ", "rZ" } },
1604 { INDEX_op_sub_i32
, { "r", "rZ", "rJ" } },
1606 { INDEX_op_and_i32
, { "r", "rZ", "rI" } },
1607 { INDEX_op_nor_i32
, { "r", "rZ", "rZ" } },
1608 { INDEX_op_not_i32
, { "r", "rZ" } },
1609 { INDEX_op_or_i32
, { "r", "rZ", "rIZ" } },
1610 { INDEX_op_xor_i32
, { "r", "rZ", "rIZ" } },
1612 { INDEX_op_shl_i32
, { "r", "rZ", "ri" } },
1613 { INDEX_op_shr_i32
, { "r", "rZ", "ri" } },
1614 { INDEX_op_sar_i32
, { "r", "rZ", "ri" } },
1615 { INDEX_op_rotr_i32
, { "r", "rZ", "ri" } },
1616 { INDEX_op_rotl_i32
, { "r", "rZ", "ri" } },
1618 { INDEX_op_bswap16_i32
, { "r", "r" } },
1619 { INDEX_op_bswap32_i32
, { "r", "r" } },
1621 { INDEX_op_ext8s_i32
, { "r", "rZ" } },
1622 { INDEX_op_ext16s_i32
, { "r", "rZ" } },
1624 { INDEX_op_deposit_i32
, { "r", "0", "rZ" } },
1626 { INDEX_op_brcond_i32
, { "rZ", "rZ" } },
1627 { INDEX_op_movcond_i32
, { "r", "rZ", "rZ", "rZ", "0" } },
1628 { INDEX_op_setcond_i32
, { "r", "rZ", "rZ" } },
1629 { INDEX_op_setcond2_i32
, { "r", "rZ", "rZ", "rZ", "rZ" } },
1631 { INDEX_op_add2_i32
, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1632 { INDEX_op_sub2_i32
, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1633 { INDEX_op_brcond2_i32
, { "rZ", "rZ", "rZ", "rZ" } },
/* 32-bit guest addresses need one address operand; the #else branch
   below (64-bit guest) needs an extra operand for the address high part. */
1635 #if TARGET_LONG_BITS == 32
1636 { INDEX_op_qemu_ld8u
, { "L", "lZ" } },
1637 { INDEX_op_qemu_ld8s
, { "L", "lZ" } },
1638 { INDEX_op_qemu_ld16u
, { "L", "lZ" } },
1639 { INDEX_op_qemu_ld16s
, { "L", "lZ" } },
1640 { INDEX_op_qemu_ld32
, { "L", "lZ" } },
1641 { INDEX_op_qemu_ld64
, { "L", "L", "lZ" } },
1643 { INDEX_op_qemu_st8
, { "SZ", "SZ" } },
1644 { INDEX_op_qemu_st16
, { "SZ", "SZ" } },
1645 { INDEX_op_qemu_st32
, { "SZ", "SZ" } },
1646 { INDEX_op_qemu_st64
, { "SZ", "SZ", "SZ" } },
/* 64-bit guest address variants (the #else line itself is elided). */
1648 { INDEX_op_qemu_ld8u
, { "L", "lZ", "lZ" } },
1649 { INDEX_op_qemu_ld8s
, { "L", "lZ", "lZ" } },
1650 { INDEX_op_qemu_ld16u
, { "L", "lZ", "lZ" } },
1651 { INDEX_op_qemu_ld16s
, { "L", "lZ", "lZ" } },
1652 { INDEX_op_qemu_ld32
, { "L", "lZ", "lZ" } },
1653 { INDEX_op_qemu_ld64
, { "L", "L", "lZ", "lZ" } },
1655 { INDEX_op_qemu_st8
, { "SZ", "SZ", "SZ" } },
1656 { INDEX_op_qemu_st16
, { "SZ", "SZ", "SZ" } },
1657 { INDEX_op_qemu_st32
, { "SZ", "SZ", "SZ" } },
1658 { INDEX_op_qemu_st64
, { "SZ", "SZ", "SZ", "SZ" } },
/* Callee-saved registers that the prologue/epilogue spill and reload
   around generated code.  NOTE(review): the extraction elides most
   entries (original lines 1665-1672, presumably S1..S8/FP — confirm
   against the full file); only the first and last are visible. */
1663 static int tcg_target_callee_save_regs
[] = {
1664 TCG_REG_S0
, /* used for the global env (TCG_AREG0) */
1673 TCG_REG_RA
, /* should be last for ABI compliance */
1676 /* The Linux kernel doesn't provide any information about the available
1677 instruction set. Probe it using a signal handler. */
/* Each flag may be pre-defined as a constant-folding macro by the
   tcg-target header; the runtime variable exists only when it is not
   (the matching #endif lines are elided by the extraction). */
1681 #ifndef use_movnz_instructions
1682 bool use_movnz_instructions
= false;
1685 #ifndef use_mips32_instructions
1686 bool use_mips32_instructions
= false;
1689 #ifndef use_mips32r2_instructions
1690 bool use_mips32r2_instructions
= false;
/* Set from the SIGILL handler during probing; sig_atomic_t + volatile
   because it is written in a signal handler and read after. */
1693 static volatile sig_atomic_t got_sigill
;
/* SA_SIGINFO handler used while probing the ISA: advance the PC past
   the 4-byte faulting instruction so execution resumes after it.
   NOTE(review): the line recording the SIGILL (presumably
   got_sigill = 1) is elided by the extraction — confirm in the full file. */
1695 static void sigill_handler(int signo
, siginfo_t
*si
, void *data
)
1697 /* Skip the faulty instruction */
1698 ucontext_t
*uc
= (ucontext_t
*)data
;
1699 uc
->uc_mcontext
.pc
+= 4;
/* Probe the host CPU's instruction set by executing candidate
   instructions under a temporary SIGILL handler: each probe clears
   got_sigill (clearing lines elided), runs the instruction, and records
   availability as !got_sigill.  The original handler is restored at the
   end.  NOTE(review): mangled extraction; asm ".set pop" and
   got_sigill = 0 lines are elided. */
1704 static void tcg_target_detect_isa(void)
1706 struct sigaction sa_old
, sa_new
;
1708 memset(&sa_new
, 0, sizeof(sa_new
));
1709 sa_new
.sa_flags
= SA_SIGINFO
;
1710 sa_new
.sa_sigaction
= sigill_handler
;
1711 sigaction(SIGILL
, &sa_new
, &sa_old
);
1713 /* Probe for movn/movz, necessary to implement movcond. */
1714 #ifndef use_movnz_instructions
1716 asm volatile(".set push\n"
1718 "movn $zero, $zero, $zero\n"
1719 "movz $zero, $zero, $zero\n"
1722 use_movnz_instructions
= !got_sigill
;
1725 /* Probe for MIPS32 instructions. As no subsetting is allowed
1726 by the specification, it is only necessary to probe for one
1727 of the instructions. */
1728 #ifndef use_mips32_instructions
1730 asm volatile(".set push\n"
1732 "mul $zero, $zero\n"
1735 use_mips32_instructions
= !got_sigill
;
1738 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
1739 available. As no subsetting is allowed by the specification,
1740 it is only necessary to probe for one of the instructions. */
1741 #ifndef use_mips32r2_instructions
1742 if (use_mips32_instructions
) {
1744 asm volatile(".set push\n"
1746 "seb $zero, $zero\n"
1749 use_mips32r2_instructions
= !got_sigill
;
/* Restore the previous SIGILL disposition. */
1753 sigaction(SIGILL
, &sa_old
, NULL
);
1756 /* Generate global QEMU prologue and epilogue code */
/* Prologue: allocate an aligned stack frame (callee-saved spill area +
   outgoing call-arg area + TCG temp buffer), save callee-saved regs,
   load env into TCG_AREG0 and jump into the generated code (second
   argument register).  Epilogue (reached via tb_ret_addr): restore
   regs, jump to RA, release the frame.
   NOTE(review): mangled extraction — declarations of i/frame_size and
   closing braces are elided. */
1757 static void tcg_target_qemu_prologue(TCGContext
*s
)
1761 /* reserve some stack space, also for TCG temps. */
1762 frame_size
= ARRAY_SIZE(tcg_target_callee_save_regs
) * 4
1763 + TCG_STATIC_CALL_ARGS_SIZE
1764 + CPU_TEMP_BUF_NLONGS
* sizeof(long);
/* Round the frame up to the required stack alignment. */
1765 frame_size
= (frame_size
+ TCG_TARGET_STACK_ALIGN
- 1) &
1766 ~(TCG_TARGET_STACK_ALIGN
- 1);
/* Tell TCG where its temp buffer lives relative to SP. */
1767 tcg_set_frame(s
, TCG_REG_SP
, ARRAY_SIZE(tcg_target_callee_save_regs
) * 4
1768 + TCG_STATIC_CALL_ARGS_SIZE
,
1769 CPU_TEMP_BUF_NLONGS
* sizeof(long));
1772 tcg_out_addi(s
, TCG_REG_SP
, -frame_size
);
1773 for(i
= 0 ; i
< ARRAY_SIZE(tcg_target_callee_save_regs
) ; i
++) {
1774 tcg_out_st(s
, TCG_TYPE_I32
, tcg_target_callee_save_regs
[i
],
1775 TCG_REG_SP
, TCG_STATIC_CALL_ARGS_SIZE
+ i
* 4);
1778 /* Call generated code */
/* The env->AREG0 move is emitted after the JR, i.e. it executes in the
   jump's branch delay slot — presumably intentional; confirm against
   the full file. */
1779 tcg_out_opc_reg(s
, OPC_JR
, 0, tcg_target_call_iarg_regs
[1], 0);
1780 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
/* Everything from here on is the epilogue; exit_tb jumps to this point. */
1781 tb_ret_addr
= s
->code_ptr
;
1784 for(i
= 0 ; i
< ARRAY_SIZE(tcg_target_callee_save_regs
) ; i
++) {
1785 tcg_out_ld(s
, TCG_TYPE_I32
, tcg_target_callee_save_regs
[i
],
1786 TCG_REG_SP
, TCG_STATIC_CALL_ARGS_SIZE
+ i
* 4);
/* Return to the caller; the stack restore fills the delay slot. */
1789 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_RA
, 0);
1790 tcg_out_addi(s
, TCG_REG_SP
, frame_size
);
1793 static void tcg_target_init(TCGContext
*s
)
1795 tcg_target_detect_isa();
1796 tcg_regset_set(tcg_target_available_regs
[TCG_TYPE_I32
], 0xffffffff);
1797 tcg_regset_set(tcg_target_call_clobber_regs
,
1814 tcg_regset_clear(s
->reserved_regs
);
1815 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_ZERO
); /* zero register */
1816 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_K0
); /* kernel use only */
1817 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_K1
); /* kernel use only */
1818 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_AT
); /* internal use */
1819 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_T0
); /* internal use */
1820 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_RA
); /* return address */
1821 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_SP
); /* stack pointer */
1822 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_GP
); /* global pointer */
1824 tcg_add_target_add_op_defs(mips_op_defs
);