2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
39 static const char * const tcg_target_reg_names
[TCG_TARGET_NB_REGS
] = {
75 /* check if we really need so many registers :P */
76 static const TCGReg tcg_target_reg_alloc_order
[] = {
102 static const TCGReg tcg_target_call_iarg_regs
[4] = {
109 static const TCGReg tcg_target_call_oarg_regs
[2] = {
114 static tcg_insn_unit
*tb_ret_addr
;
116 static inline uint32_t reloc_pc16_val(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
118 /* Let the compiler perform the right-shift as part of the arithmetic. */
119 ptrdiff_t disp
= target
- (pc
+ 1);
120 assert(disp
== (int16_t)disp
);
121 return disp
& 0xffff;
124 static inline void reloc_pc16(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
126 *pc
= deposit32(*pc
, 0, 16, reloc_pc16_val(pc
, target
));
129 static inline uint32_t reloc_26_val(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
131 assert((((uintptr_t)pc
^ (uintptr_t)target
) & 0xf0000000) == 0);
132 return ((uintptr_t)target
>> 2) & 0x3ffffff;
135 static inline void reloc_26(tcg_insn_unit
*pc
, tcg_insn_unit
*target
)
137 *pc
= deposit32(*pc
, 0, 26, reloc_26_val(pc
, target
));
140 static void patch_reloc(tcg_insn_unit
*code_ptr
, int type
,
141 intptr_t value
, intptr_t addend
)
143 assert(type
== R_MIPS_PC16
);
145 reloc_pc16(code_ptr
, (tcg_insn_unit
*)value
);
148 /* parse target specific constraints */
149 static int target_parse_constraint(TCGArgConstraint
*ct
, const char **pct_str
)
156 ct
->ct
|= TCG_CT_REG
;
157 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
159 case 'L': /* qemu_ld output arg constraint */
160 ct
->ct
|= TCG_CT_REG
;
161 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
162 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_V0
);
164 case 'l': /* qemu_ld input arg constraint */
165 ct
->ct
|= TCG_CT_REG
;
166 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
167 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A0
);
168 #if defined(CONFIG_SOFTMMU)
169 if (TARGET_LONG_BITS
== 64) {
170 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A2
);
174 case 'S': /* qemu_st constraint */
175 ct
->ct
|= TCG_CT_REG
;
176 tcg_regset_set(ct
->u
.regs
, 0xffffffff);
177 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A0
);
178 #if defined(CONFIG_SOFTMMU)
179 if (TARGET_LONG_BITS
== 32) {
180 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A1
);
182 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A2
);
183 tcg_regset_reset_reg(ct
->u
.regs
, TCG_REG_A3
);
188 ct
->ct
|= TCG_CT_CONST_U16
;
191 ct
->ct
|= TCG_CT_CONST_S16
;
194 /* We are cheating a bit here, using the fact that the register
195 ZERO is also the register number 0. Hence there is no need
196 to check for const_args in each instruction. */
197 ct
->ct
|= TCG_CT_CONST_ZERO
;
207 /* test if a constant matches the constraint */
208 static inline int tcg_target_const_match(tcg_target_long val
, TCGType type
,
209 const TCGArgConstraint
*arg_ct
)
213 if (ct
& TCG_CT_CONST
)
215 else if ((ct
& TCG_CT_CONST_ZERO
) && val
== 0)
217 else if ((ct
& TCG_CT_CONST_U16
) && val
== (uint16_t)val
)
219 else if ((ct
& TCG_CT_CONST_S16
) && val
== (int16_t)val
)
225 /* instruction opcodes */
228 OPC_JAL
= 0x03 << 26,
229 OPC_BEQ
= 0x04 << 26,
230 OPC_BNE
= 0x05 << 26,
231 OPC_BLEZ
= 0x06 << 26,
232 OPC_BGTZ
= 0x07 << 26,
233 OPC_ADDIU
= 0x09 << 26,
234 OPC_SLTI
= 0x0A << 26,
235 OPC_SLTIU
= 0x0B << 26,
236 OPC_ANDI
= 0x0C << 26,
237 OPC_ORI
= 0x0D << 26,
238 OPC_XORI
= 0x0E << 26,
239 OPC_LUI
= 0x0F << 26,
243 OPC_LBU
= 0x24 << 26,
244 OPC_LHU
= 0x25 << 26,
245 OPC_LWU
= 0x27 << 26,
250 OPC_SPECIAL
= 0x00 << 26,
251 OPC_SLL
= OPC_SPECIAL
| 0x00,
252 OPC_SRL
= OPC_SPECIAL
| 0x02,
253 OPC_ROTR
= OPC_SPECIAL
| (0x01 << 21) | 0x02,
254 OPC_SRA
= OPC_SPECIAL
| 0x03,
255 OPC_SLLV
= OPC_SPECIAL
| 0x04,
256 OPC_SRLV
= OPC_SPECIAL
| 0x06,
257 OPC_ROTRV
= OPC_SPECIAL
| (0x01 << 6) | 0x06,
258 OPC_SRAV
= OPC_SPECIAL
| 0x07,
259 OPC_JR
= OPC_SPECIAL
| 0x08,
260 OPC_JALR
= OPC_SPECIAL
| 0x09,
261 OPC_MOVZ
= OPC_SPECIAL
| 0x0A,
262 OPC_MOVN
= OPC_SPECIAL
| 0x0B,
263 OPC_MFHI
= OPC_SPECIAL
| 0x10,
264 OPC_MFLO
= OPC_SPECIAL
| 0x12,
265 OPC_MULT
= OPC_SPECIAL
| 0x18,
266 OPC_MULTU
= OPC_SPECIAL
| 0x19,
267 OPC_DIV
= OPC_SPECIAL
| 0x1A,
268 OPC_DIVU
= OPC_SPECIAL
| 0x1B,
269 OPC_ADDU
= OPC_SPECIAL
| 0x21,
270 OPC_SUBU
= OPC_SPECIAL
| 0x23,
271 OPC_AND
= OPC_SPECIAL
| 0x24,
272 OPC_OR
= OPC_SPECIAL
| 0x25,
273 OPC_XOR
= OPC_SPECIAL
| 0x26,
274 OPC_NOR
= OPC_SPECIAL
| 0x27,
275 OPC_SLT
= OPC_SPECIAL
| 0x2A,
276 OPC_SLTU
= OPC_SPECIAL
| 0x2B,
278 OPC_REGIMM
= 0x01 << 26,
279 OPC_BLTZ
= OPC_REGIMM
| (0x00 << 16),
280 OPC_BGEZ
= OPC_REGIMM
| (0x01 << 16),
282 OPC_SPECIAL2
= 0x1c << 26,
283 OPC_MUL
= OPC_SPECIAL2
| 0x002,
285 OPC_SPECIAL3
= 0x1f << 26,
286 OPC_INS
= OPC_SPECIAL3
| 0x004,
287 OPC_WSBH
= OPC_SPECIAL3
| 0x0a0,
288 OPC_SEB
= OPC_SPECIAL3
| 0x420,
289 OPC_SEH
= OPC_SPECIAL3
| 0x620,
295 static inline void tcg_out_opc_reg(TCGContext
*s
, int opc
,
296 TCGReg rd
, TCGReg rs
, TCGReg rt
)
301 inst
|= (rs
& 0x1F) << 21;
302 inst
|= (rt
& 0x1F) << 16;
303 inst
|= (rd
& 0x1F) << 11;
310 static inline void tcg_out_opc_imm(TCGContext
*s
, int opc
,
311 TCGReg rt
, TCGReg rs
, TCGArg imm
)
316 inst
|= (rs
& 0x1F) << 21;
317 inst
|= (rt
& 0x1F) << 16;
318 inst
|= (imm
& 0xffff);
325 static inline void tcg_out_opc_br(TCGContext
*s
, int opc
,
326 TCGReg rt
, TCGReg rs
)
328 /* We pay attention here to not modify the branch target by reading
329 the existing value and using it again. This ensure that caches and
330 memory are kept coherent during retranslation. */
331 uint16_t offset
= (uint16_t)*s
->code_ptr
;
333 tcg_out_opc_imm(s
, opc
, rt
, rs
, offset
);
339 static inline void tcg_out_opc_sa(TCGContext
*s
, int opc
,
340 TCGReg rd
, TCGReg rt
, TCGArg sa
)
345 inst
|= (rt
& 0x1F) << 16;
346 inst
|= (rd
& 0x1F) << 11;
347 inst
|= (sa
& 0x1F) << 6;
354 * Returns true if the branch was in range and the insn was emitted.
356 static bool tcg_out_opc_jmp(TCGContext
*s
, int opc
, void *target
)
358 uintptr_t dest
= (uintptr_t)target
;
359 uintptr_t from
= (uintptr_t)s
->code_ptr
+ 4;
362 /* The pc-region branch happens within the 256MB region of
363 the delay slot (thus the +4). */
364 if ((from
^ dest
) & -(1 << 28)) {
367 assert((dest
& 3) == 0);
370 inst
|= (dest
>> 2) & 0x3ffffff;
375 static inline void tcg_out_nop(TCGContext
*s
)
380 static inline void tcg_out_mov(TCGContext
*s
, TCGType type
,
381 TCGReg ret
, TCGReg arg
)
383 /* Simple reg-reg move, optimising out the 'do nothing' case */
385 tcg_out_opc_reg(s
, OPC_ADDU
, ret
, arg
, TCG_REG_ZERO
);
389 static inline void tcg_out_movi(TCGContext
*s
, TCGType type
,
390 TCGReg reg
, tcg_target_long arg
)
392 if (arg
== (int16_t)arg
) {
393 tcg_out_opc_imm(s
, OPC_ADDIU
, reg
, TCG_REG_ZERO
, arg
);
394 } else if (arg
== (uint16_t)arg
) {
395 tcg_out_opc_imm(s
, OPC_ORI
, reg
, TCG_REG_ZERO
, arg
);
397 tcg_out_opc_imm(s
, OPC_LUI
, reg
, TCG_REG_ZERO
, arg
>> 16);
399 tcg_out_opc_imm(s
, OPC_ORI
, reg
, reg
, arg
& 0xffff);
404 static inline void tcg_out_bswap16(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
406 if (use_mips32r2_instructions
) {
407 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
409 /* ret and arg can't be register at */
410 if (ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
414 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
415 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 8);
416 tcg_out_opc_imm(s
, OPC_ANDI
, ret
, ret
, 0xff00);
417 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
421 static inline void tcg_out_bswap16s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
423 if (use_mips32r2_instructions
) {
424 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
425 tcg_out_opc_reg(s
, OPC_SEH
, ret
, 0, ret
);
427 /* ret and arg can't be register at */
428 if (ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
432 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
433 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
434 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 16);
435 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
439 static inline void tcg_out_bswap32(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
441 if (use_mips32r2_instructions
) {
442 tcg_out_opc_reg(s
, OPC_WSBH
, ret
, 0, arg
);
443 tcg_out_opc_sa(s
, OPC_ROTR
, ret
, ret
, 16);
445 /* ret and arg must be different and can't be register at */
446 if (ret
== arg
|| ret
== TCG_REG_AT
|| arg
== TCG_REG_AT
) {
450 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
452 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 24);
453 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
455 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_AT
, arg
, 0xff00);
456 tcg_out_opc_sa(s
, OPC_SLL
, TCG_REG_AT
, TCG_REG_AT
, 8);
457 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
459 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_AT
, arg
, 8);
460 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_AT
, TCG_REG_AT
, 0xff00);
461 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
465 static inline void tcg_out_ext8s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
467 if (use_mips32r2_instructions
) {
468 tcg_out_opc_reg(s
, OPC_SEB
, ret
, 0, arg
);
470 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 24);
471 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 24);
475 static inline void tcg_out_ext16s(TCGContext
*s
, TCGReg ret
, TCGReg arg
)
477 if (use_mips32r2_instructions
) {
478 tcg_out_opc_reg(s
, OPC_SEH
, ret
, 0, arg
);
480 tcg_out_opc_sa(s
, OPC_SLL
, ret
, arg
, 16);
481 tcg_out_opc_sa(s
, OPC_SRA
, ret
, ret
, 16);
485 static void tcg_out_ldst(TCGContext
*s
, int opc
, TCGReg data
,
486 TCGReg addr
, intptr_t ofs
)
490 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
, ofs
- lo
);
491 if (addr
!= TCG_REG_ZERO
) {
492 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_AT
, TCG_REG_AT
, addr
);
496 tcg_out_opc_imm(s
, opc
, data
, addr
, lo
);
499 static inline void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg arg
,
500 TCGReg arg1
, intptr_t arg2
)
502 tcg_out_ldst(s
, OPC_LW
, arg
, arg1
, arg2
);
505 static inline void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
506 TCGReg arg1
, intptr_t arg2
)
508 tcg_out_ldst(s
, OPC_SW
, arg
, arg1
, arg2
);
511 static inline void tcg_out_addi(TCGContext
*s
, TCGReg reg
, TCGArg val
)
513 if (val
== (int16_t)val
) {
514 tcg_out_opc_imm(s
, OPC_ADDIU
, reg
, reg
, val
);
516 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
, val
);
517 tcg_out_opc_reg(s
, OPC_ADDU
, reg
, reg
, TCG_REG_AT
);
521 static void tcg_out_brcond(TCGContext
*s
, TCGCond cond
, TCGArg arg1
,
522 TCGArg arg2
, int label_index
)
524 TCGLabel
*l
= &s
->labels
[label_index
];
528 tcg_out_opc_br(s
, OPC_BEQ
, arg1
, arg2
);
531 tcg_out_opc_br(s
, OPC_BNE
, arg1
, arg2
);
535 tcg_out_opc_br(s
, OPC_BLTZ
, 0, arg1
);
537 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg1
, arg2
);
538 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
542 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg1
, arg2
);
543 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
547 tcg_out_opc_br(s
, OPC_BGEZ
, 0, arg1
);
549 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg1
, arg2
);
550 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
554 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg1
, arg2
);
555 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
559 tcg_out_opc_br(s
, OPC_BLEZ
, 0, arg1
);
561 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg2
, arg1
);
562 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
566 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg2
, arg1
);
567 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_AT
, TCG_REG_ZERO
);
571 tcg_out_opc_br(s
, OPC_BGTZ
, 0, arg1
);
573 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, arg2
, arg1
);
574 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
578 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, arg2
, arg1
);
579 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_AT
, TCG_REG_ZERO
);
586 reloc_pc16(s
->code_ptr
- 1, l
->u
.value_ptr
);
588 tcg_out_reloc(s
, s
->code_ptr
- 1, R_MIPS_PC16
, label_index
, 0);
593 /* XXX: we implement it at the target level to avoid having to
594 handle cross basic blocks temporaries */
595 static void tcg_out_brcond2(TCGContext
*s
, TCGCond cond
, TCGArg arg1
,
596 TCGArg arg2
, TCGArg arg3
, TCGArg arg4
,
599 tcg_insn_unit
*label_ptr
;
603 tcg_out_brcond(s
, TCG_COND_NE
, arg2
, arg4
, label_index
);
604 tcg_out_brcond(s
, TCG_COND_NE
, arg1
, arg3
, label_index
);
610 tcg_out_brcond(s
, TCG_COND_LT
, arg2
, arg4
, label_index
);
614 tcg_out_brcond(s
, TCG_COND_GT
, arg2
, arg4
, label_index
);
618 tcg_out_brcond(s
, TCG_COND_LTU
, arg2
, arg4
, label_index
);
622 tcg_out_brcond(s
, TCG_COND_GTU
, arg2
, arg4
, label_index
);
628 label_ptr
= s
->code_ptr
;
629 tcg_out_opc_br(s
, OPC_BNE
, arg2
, arg4
);
634 tcg_out_brcond(s
, TCG_COND_EQ
, arg1
, arg3
, label_index
);
638 tcg_out_brcond(s
, TCG_COND_LTU
, arg1
, arg3
, label_index
);
642 tcg_out_brcond(s
, TCG_COND_LEU
, arg1
, arg3
, label_index
);
646 tcg_out_brcond(s
, TCG_COND_GTU
, arg1
, arg3
, label_index
);
650 tcg_out_brcond(s
, TCG_COND_GEU
, arg1
, arg3
, label_index
);
656 reloc_pc16(label_ptr
, s
->code_ptr
);
659 static void tcg_out_movcond(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
660 TCGArg c1
, TCGArg c2
, TCGArg v
)
665 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, c2
);
666 } else if (c2
== 0) {
667 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, c1
);
669 tcg_out_opc_reg(s
, OPC_XOR
, TCG_REG_AT
, c1
, c2
);
670 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
675 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, c2
);
676 } else if (c2
== 0) {
677 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, c1
);
679 tcg_out_opc_reg(s
, OPC_XOR
, TCG_REG_AT
, c1
, c2
);
680 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
684 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c1
, c2
);
685 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
688 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c1
, c2
);
689 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
692 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c1
, c2
);
693 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
696 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c1
, c2
);
697 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
700 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c2
, c1
);
701 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
704 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c2
, c1
);
705 tcg_out_opc_reg(s
, OPC_MOVZ
, ret
, v
, TCG_REG_AT
);
708 tcg_out_opc_reg(s
, OPC_SLT
, TCG_REG_AT
, c2
, c1
);
709 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
712 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_AT
, c2
, c1
);
713 tcg_out_opc_reg(s
, OPC_MOVN
, ret
, v
, TCG_REG_AT
);
721 static void tcg_out_setcond(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
722 TCGArg arg1
, TCGArg arg2
)
727 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, arg2
, 1);
728 } else if (arg2
== 0) {
729 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, arg1
, 1);
731 tcg_out_opc_reg(s
, OPC_XOR
, ret
, arg1
, arg2
);
732 tcg_out_opc_imm(s
, OPC_SLTIU
, ret
, ret
, 1);
737 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, arg2
);
738 } else if (arg2
== 0) {
739 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, arg1
);
741 tcg_out_opc_reg(s
, OPC_XOR
, ret
, arg1
, arg2
);
742 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, TCG_REG_ZERO
, ret
);
746 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg1
, arg2
);
749 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg1
, arg2
);
752 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg1
, arg2
);
753 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
756 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg1
, arg2
);
757 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
760 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg2
, arg1
);
761 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
764 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg2
, arg1
);
765 tcg_out_opc_imm(s
, OPC_XORI
, ret
, ret
, 1);
768 tcg_out_opc_reg(s
, OPC_SLT
, ret
, arg2
, arg1
);
771 tcg_out_opc_reg(s
, OPC_SLTU
, ret
, arg2
, arg1
);
779 /* XXX: we implement it at the target level to avoid having to
780 handle cross basic blocks temporaries */
781 static void tcg_out_setcond2(TCGContext
*s
, TCGCond cond
, TCGReg ret
,
782 TCGArg arg1
, TCGArg arg2
, TCGArg arg3
, TCGArg arg4
)
786 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_AT
, arg2
, arg4
);
787 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_T0
, arg1
, arg3
);
788 tcg_out_opc_reg(s
, OPC_AND
, ret
, TCG_REG_AT
, TCG_REG_T0
);
791 tcg_out_setcond(s
, TCG_COND_NE
, TCG_REG_AT
, arg2
, arg4
);
792 tcg_out_setcond(s
, TCG_COND_NE
, TCG_REG_T0
, arg1
, arg3
);
793 tcg_out_opc_reg(s
, OPC_OR
, ret
, TCG_REG_AT
, TCG_REG_T0
);
797 tcg_out_setcond(s
, TCG_COND_LT
, TCG_REG_AT
, arg2
, arg4
);
801 tcg_out_setcond(s
, TCG_COND_GT
, TCG_REG_AT
, arg2
, arg4
);
805 tcg_out_setcond(s
, TCG_COND_LTU
, TCG_REG_AT
, arg2
, arg4
);
809 tcg_out_setcond(s
, TCG_COND_GTU
, TCG_REG_AT
, arg2
, arg4
);
816 tcg_out_setcond(s
, TCG_COND_EQ
, TCG_REG_T0
, arg2
, arg4
);
821 tcg_out_setcond(s
, TCG_COND_LTU
, ret
, arg1
, arg3
);
825 tcg_out_setcond(s
, TCG_COND_LEU
, ret
, arg1
, arg3
);
829 tcg_out_setcond(s
, TCG_COND_GTU
, ret
, arg1
, arg3
);
833 tcg_out_setcond(s
, TCG_COND_GEU
, ret
, arg1
, arg3
);
839 tcg_out_opc_reg(s
, OPC_AND
, ret
, ret
, TCG_REG_T0
);
840 tcg_out_opc_reg(s
, OPC_OR
, ret
, ret
, TCG_REG_AT
);
843 static void tcg_out_call_int(TCGContext
*s
, tcg_insn_unit
*arg
, bool tail
)
845 /* Note that the ABI requires the called function's address to be
846 loaded into T9, even if a direct branch is in range. */
847 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_T9
, (uintptr_t)arg
);
849 /* But do try a direct branch, allowing the cpu better insn prefetch. */
851 if (!tcg_out_opc_jmp(s
, OPC_J
, arg
)) {
852 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_T9
, 0);
855 if (!tcg_out_opc_jmp(s
, OPC_JAL
, arg
)) {
856 tcg_out_opc_reg(s
, OPC_JALR
, TCG_REG_RA
, TCG_REG_T9
, 0);
861 static void tcg_out_call(TCGContext
*s
, tcg_insn_unit
*arg
)
863 tcg_out_call_int(s
, arg
, false);
867 #if defined(CONFIG_SOFTMMU)
868 static void * const qemu_ld_helpers
[16] = {
869 [MO_UB
] = helper_ret_ldub_mmu
,
870 [MO_SB
] = helper_ret_ldsb_mmu
,
871 [MO_LEUW
] = helper_le_lduw_mmu
,
872 [MO_LESW
] = helper_le_ldsw_mmu
,
873 [MO_LEUL
] = helper_le_ldul_mmu
,
874 [MO_LEQ
] = helper_le_ldq_mmu
,
875 [MO_BEUW
] = helper_be_lduw_mmu
,
876 [MO_BESW
] = helper_be_ldsw_mmu
,
877 [MO_BEUL
] = helper_be_ldul_mmu
,
878 [MO_BEQ
] = helper_be_ldq_mmu
,
881 static void * const qemu_st_helpers
[16] = {
882 [MO_UB
] = helper_ret_stb_mmu
,
883 [MO_LEUW
] = helper_le_stw_mmu
,
884 [MO_LEUL
] = helper_le_stl_mmu
,
885 [MO_LEQ
] = helper_le_stq_mmu
,
886 [MO_BEUW
] = helper_be_stw_mmu
,
887 [MO_BEUL
] = helper_be_stl_mmu
,
888 [MO_BEQ
] = helper_be_stq_mmu
,
891 /* Helper routines for marshalling helper function arguments into
892 * the correct registers and stack.
893 * I is where we want to put this argument, and is updated and returned
894 * for the next call. ARG is the argument itself.
896 * We provide routines for arguments which are: immediate, 32 bit
897 * value in register, 16 and 8 bit values in register (which must be zero
898 * extended before use) and 64 bit value in a lo:hi register pair.
901 static int tcg_out_call_iarg_reg(TCGContext
*s
, int i
, TCGReg arg
)
903 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
904 tcg_out_mov(s
, TCG_TYPE_REG
, tcg_target_call_iarg_regs
[i
], arg
);
906 tcg_out_st(s
, TCG_TYPE_REG
, arg
, TCG_REG_SP
, 4 * i
);
911 static int tcg_out_call_iarg_reg8(TCGContext
*s
, int i
, TCGReg arg
)
913 TCGReg tmp
= TCG_REG_AT
;
914 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
915 tmp
= tcg_target_call_iarg_regs
[i
];
917 tcg_out_opc_imm(s
, OPC_ANDI
, tmp
, arg
, 0xff);
918 return tcg_out_call_iarg_reg(s
, i
, tmp
);
921 static int tcg_out_call_iarg_reg16(TCGContext
*s
, int i
, TCGReg arg
)
923 TCGReg tmp
= TCG_REG_AT
;
924 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
925 tmp
= tcg_target_call_iarg_regs
[i
];
927 tcg_out_opc_imm(s
, OPC_ANDI
, tmp
, arg
, 0xffff);
928 return tcg_out_call_iarg_reg(s
, i
, tmp
);
931 static int tcg_out_call_iarg_imm(TCGContext
*s
, int i
, TCGArg arg
)
933 TCGReg tmp
= TCG_REG_AT
;
937 if (i
< ARRAY_SIZE(tcg_target_call_iarg_regs
)) {
938 tmp
= tcg_target_call_iarg_regs
[i
];
940 tcg_out_movi(s
, TCG_TYPE_REG
, tmp
, arg
);
942 return tcg_out_call_iarg_reg(s
, i
, tmp
);
945 static int tcg_out_call_iarg_reg2(TCGContext
*s
, int i
, TCGReg al
, TCGReg ah
)
948 i
= tcg_out_call_iarg_reg(s
, i
, (MIPS_BE
? ah
: al
));
949 i
= tcg_out_call_iarg_reg(s
, i
, (MIPS_BE
? al
: ah
));
953 /* Perform the tlb comparison operation. The complete host address is
954 placed in BASE. Clobbers AT, T0, A0. */
955 static void tcg_out_tlb_load(TCGContext
*s
, TCGReg base
, TCGReg addrl
,
956 TCGReg addrh
, int mem_index
, TCGMemOp s_bits
,
957 tcg_insn_unit
*label_ptr
[2], bool is_load
)
961 ? offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_read
)
962 : offsetof(CPUArchState
, tlb_table
[mem_index
][0].addr_write
));
963 int add_off
= offsetof(CPUArchState
, tlb_table
[mem_index
][0].addend
);
965 tcg_out_opc_sa(s
, OPC_SRL
, TCG_REG_A0
, addrl
,
966 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
967 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_A0
, TCG_REG_A0
,
968 (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
);
969 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_A0
, TCG_REG_A0
, TCG_AREG0
);
971 /* Compensate for very large offsets. */
972 if (add_off
>= 0x8000) {
973 /* Most target env are smaller than 32k; none are larger than 64k.
974 Simplify the logic here merely to offset by 0x7ff0, giving us a
975 range just shy of 64k. Check this assumption. */
976 QEMU_BUILD_BUG_ON(offsetof(CPUArchState
,
977 tlb_table
[NB_MMU_MODES
- 1][1])
979 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_A0
, TCG_REG_A0
, 0x7ff0);
984 /* Load the tlb comparator. */
985 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_AT
, TCG_REG_A0
, cmp_off
+ LO_OFF
);
986 if (TARGET_LONG_BITS
== 64) {
987 tcg_out_opc_imm(s
, OPC_LW
, base
, TCG_REG_A0
, cmp_off
+ HI_OFF
);
990 /* Mask the page bits, keeping the alignment bits to compare against.
991 In between, load the tlb addend for the fast path. */
992 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_T0
,
993 TARGET_PAGE_MASK
| ((1 << s_bits
) - 1));
994 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_A0
, TCG_REG_A0
, add_off
);
995 tcg_out_opc_reg(s
, OPC_AND
, TCG_REG_T0
, TCG_REG_T0
, addrl
);
997 label_ptr
[0] = s
->code_ptr
;
998 tcg_out_opc_br(s
, OPC_BNE
, TCG_REG_T0
, TCG_REG_AT
);
1000 if (TARGET_LONG_BITS
== 64) {
1004 label_ptr
[1] = s
->code_ptr
;
1005 tcg_out_opc_br(s
, OPC_BNE
, addrh
, base
);
1009 tcg_out_opc_reg(s
, OPC_ADDU
, base
, TCG_REG_A0
, addrl
);
1012 static void add_qemu_ldst_label(TCGContext
*s
, int is_ld
, TCGMemOp opc
,
1013 TCGReg datalo
, TCGReg datahi
,
1014 TCGReg addrlo
, TCGReg addrhi
,
1015 int mem_index
, void *raddr
,
1016 tcg_insn_unit
*label_ptr
[2])
1018 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1020 label
->is_ld
= is_ld
;
1022 label
->datalo_reg
= datalo
;
1023 label
->datahi_reg
= datahi
;
1024 label
->addrlo_reg
= addrlo
;
1025 label
->addrhi_reg
= addrhi
;
1026 label
->mem_index
= mem_index
;
1027 label
->raddr
= raddr
;
1028 label
->label_ptr
[0] = label_ptr
[0];
1029 if (TARGET_LONG_BITS
== 64) {
1030 label
->label_ptr
[1] = label_ptr
[1];
1034 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1036 TCGMemOp opc
= l
->opc
;
1040 /* resolve label address */
1041 reloc_pc16(l
->label_ptr
[0], s
->code_ptr
);
1042 if (TARGET_LONG_BITS
== 64) {
1043 reloc_pc16(l
->label_ptr
[1], s
->code_ptr
);
1047 if (TARGET_LONG_BITS
== 64) {
1048 i
= tcg_out_call_iarg_reg2(s
, i
, l
->addrlo_reg
, l
->addrhi_reg
);
1050 i
= tcg_out_call_iarg_reg(s
, i
, l
->addrlo_reg
);
1052 i
= tcg_out_call_iarg_imm(s
, i
, l
->mem_index
);
1053 i
= tcg_out_call_iarg_imm(s
, i
, (intptr_t)l
->raddr
);
1054 tcg_out_call_int(s
, qemu_ld_helpers
[opc
], false);
1056 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1059 if ((opc
& MO_SIZE
) == MO_64
) {
1060 /* We eliminated V0 from the possible output registers, so it
1061 cannot be clobbered here. So we must move V1 first. */
1063 tcg_out_mov(s
, TCG_TYPE_I32
, v0
, TCG_REG_V1
);
1066 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_V1
);
1070 reloc_pc16(s
->code_ptr
, l
->raddr
);
1071 tcg_out_opc_br(s
, OPC_BEQ
, TCG_REG_ZERO
, TCG_REG_ZERO
);
1073 tcg_out_mov(s
, TCG_TYPE_REG
, v0
, TCG_REG_V0
);
1076 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1078 TCGMemOp opc
= l
->opc
;
1079 TCGMemOp s_bits
= opc
& MO_SIZE
;
1082 /* resolve label address */
1083 reloc_pc16(l
->label_ptr
[0], s
->code_ptr
);
1084 if (TARGET_LONG_BITS
== 64) {
1085 reloc_pc16(l
->label_ptr
[1], s
->code_ptr
);
1089 if (TARGET_LONG_BITS
== 64) {
1090 i
= tcg_out_call_iarg_reg2(s
, i
, l
->addrlo_reg
, l
->addrhi_reg
);
1092 i
= tcg_out_call_iarg_reg(s
, i
, l
->addrlo_reg
);
1096 i
= tcg_out_call_iarg_reg8(s
, i
, l
->datalo_reg
);
1099 i
= tcg_out_call_iarg_reg16(s
, i
, l
->datalo_reg
);
1102 i
= tcg_out_call_iarg_reg(s
, i
, l
->datalo_reg
);
1105 i
= tcg_out_call_iarg_reg2(s
, i
, l
->datalo_reg
, l
->datahi_reg
);
1110 i
= tcg_out_call_iarg_imm(s
, i
, l
->mem_index
);
1112 /* Tail call to the store helper. Thus force the return address
1113 computation to take place in the return address register. */
1114 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_RA
, (intptr_t)l
->raddr
);
1115 i
= tcg_out_call_iarg_reg(s
, i
, TCG_REG_RA
);
1116 tcg_out_call_int(s
, qemu_st_helpers
[opc
], true);
1118 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1122 static void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1123 TCGReg base
, TCGMemOp opc
)
1127 tcg_out_opc_imm(s
, OPC_LBU
, datalo
, base
, 0);
1130 tcg_out_opc_imm(s
, OPC_LB
, datalo
, base
, 0);
1132 case MO_UW
| MO_BSWAP
:
1133 tcg_out_opc_imm(s
, OPC_LHU
, TCG_REG_T0
, base
, 0);
1134 tcg_out_bswap16(s
, datalo
, TCG_REG_T0
);
1137 tcg_out_opc_imm(s
, OPC_LHU
, datalo
, base
, 0);
1139 case MO_SW
| MO_BSWAP
:
1140 tcg_out_opc_imm(s
, OPC_LHU
, TCG_REG_T0
, base
, 0);
1141 tcg_out_bswap16s(s
, datalo
, TCG_REG_T0
);
1144 tcg_out_opc_imm(s
, OPC_LH
, datalo
, base
, 0);
1146 case MO_UL
| MO_BSWAP
:
1147 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, 0);
1148 tcg_out_bswap32(s
, datalo
, TCG_REG_T0
);
1151 tcg_out_opc_imm(s
, OPC_LW
, datalo
, base
, 0);
1153 case MO_Q
| MO_BSWAP
:
1154 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, HI_OFF
);
1155 tcg_out_bswap32(s
, datalo
, TCG_REG_T0
);
1156 tcg_out_opc_imm(s
, OPC_LW
, TCG_REG_T0
, base
, LO_OFF
);
1157 tcg_out_bswap32(s
, datahi
, TCG_REG_T0
);
1160 tcg_out_opc_imm(s
, OPC_LW
, datalo
, base
, LO_OFF
);
1161 tcg_out_opc_imm(s
, OPC_LW
, datahi
, base
, HI_OFF
);
1168 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1170 TCGReg addr_regl
, addr_regh
__attribute__((unused
));
1171 TCGReg data_regl
, data_regh
;
1173 #if defined(CONFIG_SOFTMMU)
1174 tcg_insn_unit
*label_ptr
[2];
1178 /* Note that we've eliminated V0 from the output registers,
1179 so we won't overwrite the base register during loading. */
1180 TCGReg base
= TCG_REG_V0
;
1182 data_regl
= *args
++;
1183 data_regh
= (is_64
? *args
++ : 0);
1184 addr_regl
= *args
++;
1185 addr_regh
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1188 #if defined(CONFIG_SOFTMMU)
1190 s_bits
= opc
& MO_SIZE
;
1192 tcg_out_tlb_load(s
, base
, addr_regl
, addr_regh
, mem_index
,
1193 s_bits
, label_ptr
, 1);
1194 tcg_out_qemu_ld_direct(s
, data_regl
, data_regh
, base
, opc
);
1195 add_qemu_ldst_label(s
, 1, opc
, data_regl
, data_regh
, addr_regl
, addr_regh
,
1196 mem_index
, s
->code_ptr
, label_ptr
);
1198 if (GUEST_BASE
== 0 && data_regl
!= addr_regl
) {
1200 } else if (GUEST_BASE
== (int16_t)GUEST_BASE
) {
1201 tcg_out_opc_imm(s
, OPC_ADDIU
, base
, addr_regl
, GUEST_BASE
);
1203 tcg_out_movi(s
, TCG_TYPE_PTR
, base
, GUEST_BASE
);
1204 tcg_out_opc_reg(s
, OPC_ADDU
, base
, base
, addr_regl
);
1206 tcg_out_qemu_ld_direct(s
, data_regl
, data_regh
, base
, opc
);
1210 static void tcg_out_qemu_st_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1211 TCGReg base
, TCGMemOp opc
)
1215 tcg_out_opc_imm(s
, OPC_SB
, datalo
, base
, 0);
1218 case MO_16
| MO_BSWAP
:
1219 tcg_out_opc_imm(s
, OPC_ANDI
, TCG_REG_T0
, datalo
, 0xffff);
1220 tcg_out_bswap16(s
, TCG_REG_T0
, TCG_REG_T0
);
1221 datalo
= TCG_REG_T0
;
1224 tcg_out_opc_imm(s
, OPC_SH
, datalo
, base
, 0);
1227 case MO_32
| MO_BSWAP
:
1228 tcg_out_bswap32(s
, TCG_REG_T0
, datalo
);
1229 datalo
= TCG_REG_T0
;
1232 tcg_out_opc_imm(s
, OPC_SW
, datalo
, base
, 0);
1235 case MO_64
| MO_BSWAP
:
1236 tcg_out_bswap32(s
, TCG_REG_T0
, datalo
);
1237 tcg_out_opc_imm(s
, OPC_SW
, TCG_REG_T0
, base
, HI_OFF
);
1238 tcg_out_bswap32(s
, TCG_REG_T0
, datahi
);
1239 tcg_out_opc_imm(s
, OPC_SW
, TCG_REG_T0
, base
, LO_OFF
);
1242 tcg_out_opc_imm(s
, OPC_SW
, datalo
, base
, LO_OFF
);
1243 tcg_out_opc_imm(s
, OPC_SW
, datahi
, base
, HI_OFF
);
1251 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is_64
)
1253 TCGReg addr_regl
, addr_regh
__attribute__((unused
));
1254 TCGReg data_regl
, data_regh
, base
;
1256 #if defined(CONFIG_SOFTMMU)
1257 tcg_insn_unit
*label_ptr
[2];
1262 data_regl
= *args
++;
1263 data_regh
= (is_64
? *args
++ : 0);
1264 addr_regl
= *args
++;
1265 addr_regh
= (TARGET_LONG_BITS
== 64 ? *args
++ : 0);
1268 #if defined(CONFIG_SOFTMMU)
1272 /* Note that we eliminated the helper's address argument,
1273 so we can reuse that for the base. */
1274 base
= (TARGET_LONG_BITS
== 32 ? TCG_REG_A1
: TCG_REG_A2
);
1275 tcg_out_tlb_load(s
, base
, addr_regl
, addr_regh
, mem_index
,
1276 s_bits
, label_ptr
, 1);
1277 tcg_out_qemu_st_direct(s
, data_regl
, data_regh
, base
, opc
);
1278 add_qemu_ldst_label(s
, 0, opc
, data_regl
, data_regh
, addr_regl
, addr_regh
,
1279 mem_index
, s
->code_ptr
, label_ptr
);
1281 if (GUEST_BASE
== 0) {
1285 if (GUEST_BASE
== (int16_t)GUEST_BASE
) {
1286 tcg_out_opc_imm(s
, OPC_ADDIU
, base
, addr_regl
, GUEST_BASE
);
1288 tcg_out_movi(s
, TCG_TYPE_PTR
, base
, GUEST_BASE
);
1289 tcg_out_opc_reg(s
, OPC_ADDU
, base
, base
, addr_regl
);
1292 tcg_out_qemu_st_direct(s
, data_regl
, data_regh
, base
, opc
);
1296 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
1297 const TCGArg
*args
, const int *const_args
)
1300 case INDEX_op_exit_tb
:
1302 uintptr_t a0
= args
[0];
1303 TCGReg b0
= TCG_REG_ZERO
;
1306 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_V0
, a0
& ~0xffff);
1309 if (!tcg_out_opc_jmp(s
, OPC_J
, tb_ret_addr
)) {
1310 tcg_out_movi(s
, TCG_TYPE_PTR
, TCG_REG_AT
,
1311 (uintptr_t)tb_ret_addr
);
1312 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_AT
, 0);
1314 tcg_out_opc_imm(s
, OPC_ORI
, TCG_REG_V0
, b0
, a0
& 0xffff);
1317 case INDEX_op_goto_tb
:
1318 if (s
->tb_jmp_offset
) {
1319 /* direct jump method */
1322 /* indirect jump method */
1323 tcg_out_ld(s
, TCG_TYPE_PTR
, TCG_REG_AT
, TCG_REG_ZERO
,
1324 (uintptr_t)(s
->tb_next
+ args
[0]));
1325 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_AT
, 0);
1328 s
->tb_next_offset
[args
[0]] = tcg_current_code_size(s
);
1331 tcg_out_brcond(s
, TCG_COND_EQ
, TCG_REG_ZERO
, TCG_REG_ZERO
, args
[0]);
1334 case INDEX_op_ld8u_i32
:
1335 tcg_out_ldst(s
, OPC_LBU
, args
[0], args
[1], args
[2]);
1337 case INDEX_op_ld8s_i32
:
1338 tcg_out_ldst(s
, OPC_LB
, args
[0], args
[1], args
[2]);
1340 case INDEX_op_ld16u_i32
:
1341 tcg_out_ldst(s
, OPC_LHU
, args
[0], args
[1], args
[2]);
1343 case INDEX_op_ld16s_i32
:
1344 tcg_out_ldst(s
, OPC_LH
, args
[0], args
[1], args
[2]);
1346 case INDEX_op_ld_i32
:
1347 tcg_out_ldst(s
, OPC_LW
, args
[0], args
[1], args
[2]);
1349 case INDEX_op_st8_i32
:
1350 tcg_out_ldst(s
, OPC_SB
, args
[0], args
[1], args
[2]);
1352 case INDEX_op_st16_i32
:
1353 tcg_out_ldst(s
, OPC_SH
, args
[0], args
[1], args
[2]);
1355 case INDEX_op_st_i32
:
1356 tcg_out_ldst(s
, OPC_SW
, args
[0], args
[1], args
[2]);
1359 case INDEX_op_add_i32
:
1360 if (const_args
[2]) {
1361 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[0], args
[1], args
[2]);
1363 tcg_out_opc_reg(s
, OPC_ADDU
, args
[0], args
[1], args
[2]);
1366 case INDEX_op_add2_i32
:
1367 if (const_args
[4]) {
1368 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_AT
, args
[2], args
[4]);
1370 tcg_out_opc_reg(s
, OPC_ADDU
, TCG_REG_AT
, args
[2], args
[4]);
1372 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_T0
, TCG_REG_AT
, args
[2]);
1373 if (const_args
[5]) {
1374 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[1], args
[3], args
[5]);
1376 tcg_out_opc_reg(s
, OPC_ADDU
, args
[1], args
[3], args
[5]);
1378 tcg_out_opc_reg(s
, OPC_ADDU
, args
[1], args
[1], TCG_REG_T0
);
1379 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], TCG_REG_AT
);
1381 case INDEX_op_sub_i32
:
1382 if (const_args
[2]) {
1383 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[0], args
[1], -args
[2]);
1385 tcg_out_opc_reg(s
, OPC_SUBU
, args
[0], args
[1], args
[2]);
1388 case INDEX_op_sub2_i32
:
1389 if (const_args
[4]) {
1390 tcg_out_opc_imm(s
, OPC_ADDIU
, TCG_REG_AT
, args
[2], -args
[4]);
1392 tcg_out_opc_reg(s
, OPC_SUBU
, TCG_REG_AT
, args
[2], args
[4]);
1394 tcg_out_opc_reg(s
, OPC_SLTU
, TCG_REG_T0
, args
[2], TCG_REG_AT
);
1395 if (const_args
[5]) {
1396 tcg_out_opc_imm(s
, OPC_ADDIU
, args
[1], args
[3], -args
[5]);
1398 tcg_out_opc_reg(s
, OPC_SUBU
, args
[1], args
[3], args
[5]);
1400 tcg_out_opc_reg(s
, OPC_SUBU
, args
[1], args
[1], TCG_REG_T0
);
1401 tcg_out_mov(s
, TCG_TYPE_I32
, args
[0], TCG_REG_AT
);
1403 case INDEX_op_mul_i32
:
1404 if (use_mips32_instructions
) {
1405 tcg_out_opc_reg(s
, OPC_MUL
, args
[0], args
[1], args
[2]);
1407 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[1], args
[2]);
1408 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1411 case INDEX_op_muls2_i32
:
1412 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[2], args
[3]);
1413 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1414 tcg_out_opc_reg(s
, OPC_MFHI
, args
[1], 0, 0);
1416 case INDEX_op_mulu2_i32
:
1417 tcg_out_opc_reg(s
, OPC_MULTU
, 0, args
[2], args
[3]);
1418 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1419 tcg_out_opc_reg(s
, OPC_MFHI
, args
[1], 0, 0);
1421 case INDEX_op_mulsh_i32
:
1422 tcg_out_opc_reg(s
, OPC_MULT
, 0, args
[1], args
[2]);
1423 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1425 case INDEX_op_muluh_i32
:
1426 tcg_out_opc_reg(s
, OPC_MULTU
, 0, args
[1], args
[2]);
1427 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1429 case INDEX_op_div_i32
:
1430 tcg_out_opc_reg(s
, OPC_DIV
, 0, args
[1], args
[2]);
1431 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1433 case INDEX_op_divu_i32
:
1434 tcg_out_opc_reg(s
, OPC_DIVU
, 0, args
[1], args
[2]);
1435 tcg_out_opc_reg(s
, OPC_MFLO
, args
[0], 0, 0);
1437 case INDEX_op_rem_i32
:
1438 tcg_out_opc_reg(s
, OPC_DIV
, 0, args
[1], args
[2]);
1439 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1441 case INDEX_op_remu_i32
:
1442 tcg_out_opc_reg(s
, OPC_DIVU
, 0, args
[1], args
[2]);
1443 tcg_out_opc_reg(s
, OPC_MFHI
, args
[0], 0, 0);
1446 case INDEX_op_and_i32
:
1447 if (const_args
[2]) {
1448 tcg_out_opc_imm(s
, OPC_ANDI
, args
[0], args
[1], args
[2]);
1450 tcg_out_opc_reg(s
, OPC_AND
, args
[0], args
[1], args
[2]);
1453 case INDEX_op_or_i32
:
1454 if (const_args
[2]) {
1455 tcg_out_opc_imm(s
, OPC_ORI
, args
[0], args
[1], args
[2]);
1457 tcg_out_opc_reg(s
, OPC_OR
, args
[0], args
[1], args
[2]);
1460 case INDEX_op_nor_i32
:
1461 tcg_out_opc_reg(s
, OPC_NOR
, args
[0], args
[1], args
[2]);
1463 case INDEX_op_not_i32
:
1464 tcg_out_opc_reg(s
, OPC_NOR
, args
[0], TCG_REG_ZERO
, args
[1]);
1466 case INDEX_op_xor_i32
:
1467 if (const_args
[2]) {
1468 tcg_out_opc_imm(s
, OPC_XORI
, args
[0], args
[1], args
[2]);
1470 tcg_out_opc_reg(s
, OPC_XOR
, args
[0], args
[1], args
[2]);
1474 case INDEX_op_sar_i32
:
1475 if (const_args
[2]) {
1476 tcg_out_opc_sa(s
, OPC_SRA
, args
[0], args
[1], args
[2]);
1478 tcg_out_opc_reg(s
, OPC_SRAV
, args
[0], args
[2], args
[1]);
1481 case INDEX_op_shl_i32
:
1482 if (const_args
[2]) {
1483 tcg_out_opc_sa(s
, OPC_SLL
, args
[0], args
[1], args
[2]);
1485 tcg_out_opc_reg(s
, OPC_SLLV
, args
[0], args
[2], args
[1]);
1488 case INDEX_op_shr_i32
:
1489 if (const_args
[2]) {
1490 tcg_out_opc_sa(s
, OPC_SRL
, args
[0], args
[1], args
[2]);
1492 tcg_out_opc_reg(s
, OPC_SRLV
, args
[0], args
[2], args
[1]);
1495 case INDEX_op_rotl_i32
:
1496 if (const_args
[2]) {
1497 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[1], 0x20 - args
[2]);
1499 tcg_out_movi(s
, TCG_TYPE_I32
, TCG_REG_AT
, 32);
1500 tcg_out_opc_reg(s
, OPC_SUBU
, TCG_REG_AT
, TCG_REG_AT
, args
[2]);
1501 tcg_out_opc_reg(s
, OPC_ROTRV
, args
[0], TCG_REG_AT
, args
[1]);
1504 case INDEX_op_rotr_i32
:
1505 if (const_args
[2]) {
1506 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[1], args
[2]);
1508 tcg_out_opc_reg(s
, OPC_ROTRV
, args
[0], args
[2], args
[1]);
1512 case INDEX_op_bswap16_i32
:
1513 tcg_out_opc_reg(s
, OPC_WSBH
, args
[0], 0, args
[1]);
1515 case INDEX_op_bswap32_i32
:
1516 tcg_out_opc_reg(s
, OPC_WSBH
, args
[0], 0, args
[1]);
1517 tcg_out_opc_sa(s
, OPC_ROTR
, args
[0], args
[0], 16);
1520 case INDEX_op_ext8s_i32
:
1521 tcg_out_opc_reg(s
, OPC_SEB
, args
[0], 0, args
[1]);
1523 case INDEX_op_ext16s_i32
:
1524 tcg_out_opc_reg(s
, OPC_SEH
, args
[0], 0, args
[1]);
1527 case INDEX_op_deposit_i32
:
1528 tcg_out_opc_imm(s
, OPC_INS
, args
[0], args
[2],
1529 ((args
[3] + args
[4] - 1) << 11) | (args
[3] << 6));
1532 case INDEX_op_brcond_i32
:
1533 tcg_out_brcond(s
, args
[2], args
[0], args
[1], args
[3]);
1535 case INDEX_op_brcond2_i32
:
1536 tcg_out_brcond2(s
, args
[4], args
[0], args
[1], args
[2], args
[3], args
[5]);
1539 case INDEX_op_movcond_i32
:
1540 tcg_out_movcond(s
, args
[5], args
[0], args
[1], args
[2], args
[3]);
1543 case INDEX_op_setcond_i32
:
1544 tcg_out_setcond(s
, args
[3], args
[0], args
[1], args
[2]);
1546 case INDEX_op_setcond2_i32
:
1547 tcg_out_setcond2(s
, args
[5], args
[0], args
[1], args
[2], args
[3], args
[4]);
1550 case INDEX_op_qemu_ld_i32
:
1551 tcg_out_qemu_ld(s
, args
, false);
1553 case INDEX_op_qemu_ld_i64
:
1554 tcg_out_qemu_ld(s
, args
, true);
1556 case INDEX_op_qemu_st_i32
:
1557 tcg_out_qemu_st(s
, args
, false);
1559 case INDEX_op_qemu_st_i64
:
1560 tcg_out_qemu_st(s
, args
, true);
1563 case INDEX_op_mov_i32
: /* Always emitted via tcg_out_mov. */
1564 case INDEX_op_movi_i32
: /* Always emitted via tcg_out_movi. */
1565 case INDEX_op_call
: /* Always emitted via tcg_out_call. */
1571 static const TCGTargetOpDef mips_op_defs
[] = {
1572 { INDEX_op_exit_tb
, { } },
1573 { INDEX_op_goto_tb
, { } },
1574 { INDEX_op_br
, { } },
1576 { INDEX_op_ld8u_i32
, { "r", "r" } },
1577 { INDEX_op_ld8s_i32
, { "r", "r" } },
1578 { INDEX_op_ld16u_i32
, { "r", "r" } },
1579 { INDEX_op_ld16s_i32
, { "r", "r" } },
1580 { INDEX_op_ld_i32
, { "r", "r" } },
1581 { INDEX_op_st8_i32
, { "rZ", "r" } },
1582 { INDEX_op_st16_i32
, { "rZ", "r" } },
1583 { INDEX_op_st_i32
, { "rZ", "r" } },
1585 { INDEX_op_add_i32
, { "r", "rZ", "rJ" } },
1586 { INDEX_op_mul_i32
, { "r", "rZ", "rZ" } },
1587 { INDEX_op_muls2_i32
, { "r", "r", "rZ", "rZ" } },
1588 { INDEX_op_mulu2_i32
, { "r", "r", "rZ", "rZ" } },
1589 { INDEX_op_mulsh_i32
, { "r", "rZ", "rZ" } },
1590 { INDEX_op_muluh_i32
, { "r", "rZ", "rZ" } },
1591 { INDEX_op_div_i32
, { "r", "rZ", "rZ" } },
1592 { INDEX_op_divu_i32
, { "r", "rZ", "rZ" } },
1593 { INDEX_op_rem_i32
, { "r", "rZ", "rZ" } },
1594 { INDEX_op_remu_i32
, { "r", "rZ", "rZ" } },
1595 { INDEX_op_sub_i32
, { "r", "rZ", "rJ" } },
1597 { INDEX_op_and_i32
, { "r", "rZ", "rI" } },
1598 { INDEX_op_nor_i32
, { "r", "rZ", "rZ" } },
1599 { INDEX_op_not_i32
, { "r", "rZ" } },
1600 { INDEX_op_or_i32
, { "r", "rZ", "rIZ" } },
1601 { INDEX_op_xor_i32
, { "r", "rZ", "rIZ" } },
1603 { INDEX_op_shl_i32
, { "r", "rZ", "ri" } },
1604 { INDEX_op_shr_i32
, { "r", "rZ", "ri" } },
1605 { INDEX_op_sar_i32
, { "r", "rZ", "ri" } },
1606 { INDEX_op_rotr_i32
, { "r", "rZ", "ri" } },
1607 { INDEX_op_rotl_i32
, { "r", "rZ", "ri" } },
1609 { INDEX_op_bswap16_i32
, { "r", "r" } },
1610 { INDEX_op_bswap32_i32
, { "r", "r" } },
1612 { INDEX_op_ext8s_i32
, { "r", "rZ" } },
1613 { INDEX_op_ext16s_i32
, { "r", "rZ" } },
1615 { INDEX_op_deposit_i32
, { "r", "0", "rZ" } },
1617 { INDEX_op_brcond_i32
, { "rZ", "rZ" } },
1618 { INDEX_op_movcond_i32
, { "r", "rZ", "rZ", "rZ", "0" } },
1619 { INDEX_op_setcond_i32
, { "r", "rZ", "rZ" } },
1620 { INDEX_op_setcond2_i32
, { "r", "rZ", "rZ", "rZ", "rZ" } },
1622 { INDEX_op_add2_i32
, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1623 { INDEX_op_sub2_i32
, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1624 { INDEX_op_brcond2_i32
, { "rZ", "rZ", "rZ", "rZ" } },
1626 #if TARGET_LONG_BITS == 32
1627 { INDEX_op_qemu_ld_i32
, { "L", "lZ" } },
1628 { INDEX_op_qemu_st_i32
, { "SZ", "SZ" } },
1629 { INDEX_op_qemu_ld_i64
, { "L", "L", "lZ" } },
1630 { INDEX_op_qemu_st_i64
, { "SZ", "SZ", "SZ" } },
1632 { INDEX_op_qemu_ld_i32
, { "L", "lZ", "lZ" } },
1633 { INDEX_op_qemu_st_i32
, { "SZ", "SZ", "SZ" } },
1634 { INDEX_op_qemu_ld_i64
, { "L", "L", "lZ", "lZ" } },
1635 { INDEX_op_qemu_st_i64
, { "SZ", "SZ", "SZ", "SZ" } },
1640 static int tcg_target_callee_save_regs
[] = {
1641 TCG_REG_S0
, /* used for the global env (TCG_AREG0) */
1650 TCG_REG_RA
, /* should be last for ABI compliance */
/* The Linux kernel doesn't provide any information about the available
   instruction set.  Probe it using a signal handler: execute a candidate
   instruction and see whether SIGILL fires.  */

#include <signal.h>

/* Each flag may be fixed at compile time by a same-named macro; the
   #ifndef guards leave the runtime probe only when it is not.  */
#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

/* Set by sigill_handler when a probed instruction traps.  */
static volatile sig_atomic_t got_sigill;
1672 static void sigill_handler(int signo
, siginfo_t
*si
, void *data
)
1674 /* Skip the faulty instruction */
1675 ucontext_t
*uc
= (ucontext_t
*)data
;
1676 uc
->uc_mcontext
.pc
+= 4;
1681 static void tcg_target_detect_isa(void)
1683 struct sigaction sa_old
, sa_new
;
1685 memset(&sa_new
, 0, sizeof(sa_new
));
1686 sa_new
.sa_flags
= SA_SIGINFO
;
1687 sa_new
.sa_sigaction
= sigill_handler
;
1688 sigaction(SIGILL
, &sa_new
, &sa_old
);
1690 /* Probe for movn/movz, necessary to implement movcond. */
1691 #ifndef use_movnz_instructions
1693 asm volatile(".set push\n"
1695 "movn $zero, $zero, $zero\n"
1696 "movz $zero, $zero, $zero\n"
1699 use_movnz_instructions
= !got_sigill
;
1702 /* Probe for MIPS32 instructions. As no subsetting is allowed
1703 by the specification, it is only necessary to probe for one
1704 of the instructions. */
1705 #ifndef use_mips32_instructions
1707 asm volatile(".set push\n"
1709 "mul $zero, $zero\n"
1712 use_mips32_instructions
= !got_sigill
;
1715 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
1716 available. As no subsetting is allowed by the specification,
1717 it is only necessary to probe for one of the instructions. */
1718 #ifndef use_mips32r2_instructions
1719 if (use_mips32_instructions
) {
1721 asm volatile(".set push\n"
1723 "seb $zero, $zero\n"
1726 use_mips32r2_instructions
= !got_sigill
;
1730 sigaction(SIGILL
, &sa_old
, NULL
);
1733 /* Generate global QEMU prologue and epilogue code */
1734 static void tcg_target_qemu_prologue(TCGContext
*s
)
1738 /* reserve some stack space, also for TCG temps. */
1739 frame_size
= ARRAY_SIZE(tcg_target_callee_save_regs
) * 4
1740 + TCG_STATIC_CALL_ARGS_SIZE
1741 + CPU_TEMP_BUF_NLONGS
* sizeof(long);
1742 frame_size
= (frame_size
+ TCG_TARGET_STACK_ALIGN
- 1) &
1743 ~(TCG_TARGET_STACK_ALIGN
- 1);
1744 tcg_set_frame(s
, TCG_REG_SP
, ARRAY_SIZE(tcg_target_callee_save_regs
) * 4
1745 + TCG_STATIC_CALL_ARGS_SIZE
,
1746 CPU_TEMP_BUF_NLONGS
* sizeof(long));
1749 tcg_out_addi(s
, TCG_REG_SP
, -frame_size
);
1750 for(i
= 0 ; i
< ARRAY_SIZE(tcg_target_callee_save_regs
) ; i
++) {
1751 tcg_out_st(s
, TCG_TYPE_I32
, tcg_target_callee_save_regs
[i
],
1752 TCG_REG_SP
, TCG_STATIC_CALL_ARGS_SIZE
+ i
* 4);
1755 /* Call generated code */
1756 tcg_out_opc_reg(s
, OPC_JR
, 0, tcg_target_call_iarg_regs
[1], 0);
1757 tcg_out_mov(s
, TCG_TYPE_PTR
, TCG_AREG0
, tcg_target_call_iarg_regs
[0]);
1758 tb_ret_addr
= s
->code_ptr
;
1761 for(i
= 0 ; i
< ARRAY_SIZE(tcg_target_callee_save_regs
) ; i
++) {
1762 tcg_out_ld(s
, TCG_TYPE_I32
, tcg_target_callee_save_regs
[i
],
1763 TCG_REG_SP
, TCG_STATIC_CALL_ARGS_SIZE
+ i
* 4);
1766 tcg_out_opc_reg(s
, OPC_JR
, 0, TCG_REG_RA
, 0);
1767 tcg_out_addi(s
, TCG_REG_SP
, frame_size
);
1770 static void tcg_target_init(TCGContext
*s
)
1772 tcg_target_detect_isa();
1773 tcg_regset_set(tcg_target_available_regs
[TCG_TYPE_I32
], 0xffffffff);
1774 tcg_regset_set(tcg_target_call_clobber_regs
,
1791 tcg_regset_clear(s
->reserved_regs
);
1792 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_ZERO
); /* zero register */
1793 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_K0
); /* kernel use only */
1794 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_K1
); /* kernel use only */
1795 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_AT
); /* internal use */
1796 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_T0
); /* internal use */
1797 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_RA
); /* return address */
1798 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_SP
); /* stack pointer */
1799 tcg_regset_set_reg(s
->reserved_regs
, TCG_REG_GP
); /* global pointer */
1801 tcg_add_target_add_op_defs(mips_op_defs
);