/*
 * tcg-mips: Convert to new_ldst
 * [qemu.git] / tcg / mips / tcg-target.c
 */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
30 # define MIPS_BE 1
31 #else
32 # define MIPS_BE 0
33 #endif
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
38 #ifndef NDEBUG
39 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
40 "zero",
41 "at",
42 "v0",
43 "v1",
44 "a0",
45 "a1",
46 "a2",
47 "a3",
48 "t0",
49 "t1",
50 "t2",
51 "t3",
52 "t4",
53 "t5",
54 "t6",
55 "t7",
56 "s0",
57 "s1",
58 "s2",
59 "s3",
60 "s4",
61 "s5",
62 "s6",
63 "s7",
64 "t8",
65 "t9",
66 "k0",
67 "k1",
68 "gp",
69 "sp",
70 "fp",
71 "ra",
73 #endif
75 /* check if we really need so many registers :P */
76 static const TCGReg tcg_target_reg_alloc_order[] = {
77 TCG_REG_S0,
78 TCG_REG_S1,
79 TCG_REG_S2,
80 TCG_REG_S3,
81 TCG_REG_S4,
82 TCG_REG_S5,
83 TCG_REG_S6,
84 TCG_REG_S7,
85 TCG_REG_T1,
86 TCG_REG_T2,
87 TCG_REG_T3,
88 TCG_REG_T4,
89 TCG_REG_T5,
90 TCG_REG_T6,
91 TCG_REG_T7,
92 TCG_REG_T8,
93 TCG_REG_T9,
94 TCG_REG_A0,
95 TCG_REG_A1,
96 TCG_REG_A2,
97 TCG_REG_A3,
98 TCG_REG_V0,
99 TCG_REG_V1
102 static const TCGReg tcg_target_call_iarg_regs[4] = {
103 TCG_REG_A0,
104 TCG_REG_A1,
105 TCG_REG_A2,
106 TCG_REG_A3
109 static const TCGReg tcg_target_call_oarg_regs[2] = {
110 TCG_REG_V0,
111 TCG_REG_V1
114 static tcg_insn_unit *tb_ret_addr;
116 static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
118 /* Let the compiler perform the right-shift as part of the arithmetic. */
119 ptrdiff_t disp = target - (pc + 1);
120 assert(disp == (int16_t)disp);
121 return disp & 0xffff;
124 static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
126 *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
129 static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
131 assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
132 return ((uintptr_t)target >> 2) & 0x3ffffff;
135 static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
137 *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
140 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
141 intptr_t value, intptr_t addend)
143 assert(type == R_MIPS_PC16);
144 assert(addend == 0);
145 reloc_pc16(code_ptr, (tcg_insn_unit *)value);
148 /* parse target specific constraints */
149 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
151 const char *ct_str;
153 ct_str = *pct_str;
154 switch(ct_str[0]) {
155 case 'r':
156 ct->ct |= TCG_CT_REG;
157 tcg_regset_set(ct->u.regs, 0xffffffff);
158 break;
159 case 'L': /* qemu_ld output arg constraint */
160 ct->ct |= TCG_CT_REG;
161 tcg_regset_set(ct->u.regs, 0xffffffff);
162 tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
163 break;
164 case 'l': /* qemu_ld input arg constraint */
165 ct->ct |= TCG_CT_REG;
166 tcg_regset_set(ct->u.regs, 0xffffffff);
167 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
168 #if defined(CONFIG_SOFTMMU)
169 if (TARGET_LONG_BITS == 64) {
170 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
172 #endif
173 break;
174 case 'S': /* qemu_st constraint */
175 ct->ct |= TCG_CT_REG;
176 tcg_regset_set(ct->u.regs, 0xffffffff);
177 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
178 #if defined(CONFIG_SOFTMMU)
179 if (TARGET_LONG_BITS == 32) {
180 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
181 } else {
182 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
183 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
185 #endif
186 break;
187 case 'I':
188 ct->ct |= TCG_CT_CONST_U16;
189 break;
190 case 'J':
191 ct->ct |= TCG_CT_CONST_S16;
192 break;
193 case 'Z':
194 /* We are cheating a bit here, using the fact that the register
195 ZERO is also the register number 0. Hence there is no need
196 to check for const_args in each instruction. */
197 ct->ct |= TCG_CT_CONST_ZERO;
198 break;
199 default:
200 return -1;
202 ct_str++;
203 *pct_str = ct_str;
204 return 0;
207 /* test if a constant matches the constraint */
208 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
209 const TCGArgConstraint *arg_ct)
211 int ct;
212 ct = arg_ct->ct;
213 if (ct & TCG_CT_CONST)
214 return 1;
215 else if ((ct & TCG_CT_CONST_ZERO) && val == 0)
216 return 1;
217 else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val)
218 return 1;
219 else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val)
220 return 1;
221 else
222 return 0;
225 /* instruction opcodes */
226 enum {
227 OPC_J = 0x02 << 26,
228 OPC_JAL = 0x03 << 26,
229 OPC_BEQ = 0x04 << 26,
230 OPC_BNE = 0x05 << 26,
231 OPC_BLEZ = 0x06 << 26,
232 OPC_BGTZ = 0x07 << 26,
233 OPC_ADDIU = 0x09 << 26,
234 OPC_SLTI = 0x0A << 26,
235 OPC_SLTIU = 0x0B << 26,
236 OPC_ANDI = 0x0C << 26,
237 OPC_ORI = 0x0D << 26,
238 OPC_XORI = 0x0E << 26,
239 OPC_LUI = 0x0F << 26,
240 OPC_LB = 0x20 << 26,
241 OPC_LH = 0x21 << 26,
242 OPC_LW = 0x23 << 26,
243 OPC_LBU = 0x24 << 26,
244 OPC_LHU = 0x25 << 26,
245 OPC_LWU = 0x27 << 26,
246 OPC_SB = 0x28 << 26,
247 OPC_SH = 0x29 << 26,
248 OPC_SW = 0x2B << 26,
250 OPC_SPECIAL = 0x00 << 26,
251 OPC_SLL = OPC_SPECIAL | 0x00,
252 OPC_SRL = OPC_SPECIAL | 0x02,
253 OPC_ROTR = OPC_SPECIAL | (0x01 << 21) | 0x02,
254 OPC_SRA = OPC_SPECIAL | 0x03,
255 OPC_SLLV = OPC_SPECIAL | 0x04,
256 OPC_SRLV = OPC_SPECIAL | 0x06,
257 OPC_ROTRV = OPC_SPECIAL | (0x01 << 6) | 0x06,
258 OPC_SRAV = OPC_SPECIAL | 0x07,
259 OPC_JR = OPC_SPECIAL | 0x08,
260 OPC_JALR = OPC_SPECIAL | 0x09,
261 OPC_MOVZ = OPC_SPECIAL | 0x0A,
262 OPC_MOVN = OPC_SPECIAL | 0x0B,
263 OPC_MFHI = OPC_SPECIAL | 0x10,
264 OPC_MFLO = OPC_SPECIAL | 0x12,
265 OPC_MULT = OPC_SPECIAL | 0x18,
266 OPC_MULTU = OPC_SPECIAL | 0x19,
267 OPC_DIV = OPC_SPECIAL | 0x1A,
268 OPC_DIVU = OPC_SPECIAL | 0x1B,
269 OPC_ADDU = OPC_SPECIAL | 0x21,
270 OPC_SUBU = OPC_SPECIAL | 0x23,
271 OPC_AND = OPC_SPECIAL | 0x24,
272 OPC_OR = OPC_SPECIAL | 0x25,
273 OPC_XOR = OPC_SPECIAL | 0x26,
274 OPC_NOR = OPC_SPECIAL | 0x27,
275 OPC_SLT = OPC_SPECIAL | 0x2A,
276 OPC_SLTU = OPC_SPECIAL | 0x2B,
278 OPC_REGIMM = 0x01 << 26,
279 OPC_BLTZ = OPC_REGIMM | (0x00 << 16),
280 OPC_BGEZ = OPC_REGIMM | (0x01 << 16),
282 OPC_SPECIAL2 = 0x1c << 26,
283 OPC_MUL = OPC_SPECIAL2 | 0x002,
285 OPC_SPECIAL3 = 0x1f << 26,
286 OPC_INS = OPC_SPECIAL3 | 0x004,
287 OPC_WSBH = OPC_SPECIAL3 | 0x0a0,
288 OPC_SEB = OPC_SPECIAL3 | 0x420,
289 OPC_SEH = OPC_SPECIAL3 | 0x620,
293 * Type reg
295 static inline void tcg_out_opc_reg(TCGContext *s, int opc,
296 TCGReg rd, TCGReg rs, TCGReg rt)
298 int32_t inst;
300 inst = opc;
301 inst |= (rs & 0x1F) << 21;
302 inst |= (rt & 0x1F) << 16;
303 inst |= (rd & 0x1F) << 11;
304 tcg_out32(s, inst);
308 * Type immediate
310 static inline void tcg_out_opc_imm(TCGContext *s, int opc,
311 TCGReg rt, TCGReg rs, TCGArg imm)
313 int32_t inst;
315 inst = opc;
316 inst |= (rs & 0x1F) << 21;
317 inst |= (rt & 0x1F) << 16;
318 inst |= (imm & 0xffff);
319 tcg_out32(s, inst);
323 * Type branch
325 static inline void tcg_out_opc_br(TCGContext *s, int opc,
326 TCGReg rt, TCGReg rs)
328 /* We pay attention here to not modify the branch target by reading
329 the existing value and using it again. This ensure that caches and
330 memory are kept coherent during retranslation. */
331 uint16_t offset = (uint16_t)*s->code_ptr;
333 tcg_out_opc_imm(s, opc, rt, rs, offset);
337 * Type sa
339 static inline void tcg_out_opc_sa(TCGContext *s, int opc,
340 TCGReg rd, TCGReg rt, TCGArg sa)
342 int32_t inst;
344 inst = opc;
345 inst |= (rt & 0x1F) << 16;
346 inst |= (rd & 0x1F) << 11;
347 inst |= (sa & 0x1F) << 6;
348 tcg_out32(s, inst);
353 * Type jump.
354 * Returns true if the branch was in range and the insn was emitted.
356 static bool tcg_out_opc_jmp(TCGContext *s, int opc, void *target)
358 uintptr_t dest = (uintptr_t)target;
359 uintptr_t from = (uintptr_t)s->code_ptr + 4;
360 int32_t inst;
362 /* The pc-region branch happens within the 256MB region of
363 the delay slot (thus the +4). */
364 if ((from ^ dest) & -(1 << 28)) {
365 return false;
367 assert((dest & 3) == 0);
369 inst = opc;
370 inst |= (dest >> 2) & 0x3ffffff;
371 tcg_out32(s, inst);
372 return true;
375 static inline void tcg_out_nop(TCGContext *s)
377 tcg_out32(s, 0);
380 static inline void tcg_out_mov(TCGContext *s, TCGType type,
381 TCGReg ret, TCGReg arg)
383 /* Simple reg-reg move, optimising out the 'do nothing' case */
384 if (ret != arg) {
385 tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
389 static inline void tcg_out_movi(TCGContext *s, TCGType type,
390 TCGReg reg, tcg_target_long arg)
392 if (arg == (int16_t)arg) {
393 tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
394 } else if (arg == (uint16_t)arg) {
395 tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
396 } else {
397 tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
398 if (arg & 0xffff) {
399 tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
404 static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
406 if (use_mips32r2_instructions) {
407 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
408 } else {
409 /* ret and arg can't be register at */
410 if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
411 tcg_abort();
414 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
415 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
416 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
417 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
421 static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
423 if (use_mips32r2_instructions) {
424 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
425 tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
426 } else {
427 /* ret and arg can't be register at */
428 if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
429 tcg_abort();
432 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
433 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
434 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
435 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
439 static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
441 if (use_mips32r2_instructions) {
442 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
443 tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
444 } else {
445 /* ret and arg must be different and can't be register at */
446 if (ret == arg || ret == TCG_REG_AT || arg == TCG_REG_AT) {
447 tcg_abort();
450 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
452 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 24);
453 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
455 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, arg, 0xff00);
456 tcg_out_opc_sa(s, OPC_SLL, TCG_REG_AT, TCG_REG_AT, 8);
457 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
459 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
460 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, TCG_REG_AT, 0xff00);
461 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
465 static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
467 if (use_mips32r2_instructions) {
468 tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
469 } else {
470 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
471 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
475 static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
477 if (use_mips32r2_instructions) {
478 tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
479 } else {
480 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
481 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
485 static void tcg_out_ldst(TCGContext *s, int opc, TCGReg data,
486 TCGReg addr, intptr_t ofs)
488 int16_t lo = ofs;
489 if (ofs != lo) {
490 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT, ofs - lo);
491 if (addr != TCG_REG_ZERO) {
492 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_AT, TCG_REG_AT, addr);
494 addr = TCG_REG_AT;
496 tcg_out_opc_imm(s, opc, data, addr, lo);
499 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
500 TCGReg arg1, intptr_t arg2)
502 tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
505 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
506 TCGReg arg1, intptr_t arg2)
508 tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
511 static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
513 if (val == (int16_t)val) {
514 tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
515 } else {
516 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT, val);
517 tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_REG_AT);
521 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1,
522 TCGArg arg2, int label_index)
524 TCGLabel *l = &s->labels[label_index];
526 switch (cond) {
527 case TCG_COND_EQ:
528 tcg_out_opc_br(s, OPC_BEQ, arg1, arg2);
529 break;
530 case TCG_COND_NE:
531 tcg_out_opc_br(s, OPC_BNE, arg1, arg2);
532 break;
533 case TCG_COND_LT:
534 if (arg2 == 0) {
535 tcg_out_opc_br(s, OPC_BLTZ, 0, arg1);
536 } else {
537 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg1, arg2);
538 tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
540 break;
541 case TCG_COND_LTU:
542 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg1, arg2);
543 tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
544 break;
545 case TCG_COND_GE:
546 if (arg2 == 0) {
547 tcg_out_opc_br(s, OPC_BGEZ, 0, arg1);
548 } else {
549 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg1, arg2);
550 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
552 break;
553 case TCG_COND_GEU:
554 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg1, arg2);
555 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
556 break;
557 case TCG_COND_LE:
558 if (arg2 == 0) {
559 tcg_out_opc_br(s, OPC_BLEZ, 0, arg1);
560 } else {
561 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg2, arg1);
562 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
564 break;
565 case TCG_COND_LEU:
566 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg2, arg1);
567 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
568 break;
569 case TCG_COND_GT:
570 if (arg2 == 0) {
571 tcg_out_opc_br(s, OPC_BGTZ, 0, arg1);
572 } else {
573 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg2, arg1);
574 tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
576 break;
577 case TCG_COND_GTU:
578 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg2, arg1);
579 tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
580 break;
581 default:
582 tcg_abort();
583 break;
585 if (l->has_value) {
586 reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
587 } else {
588 tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, label_index, 0);
590 tcg_out_nop(s);
593 /* XXX: we implement it at the target level to avoid having to
594 handle cross basic blocks temporaries */
595 static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGArg arg1,
596 TCGArg arg2, TCGArg arg3, TCGArg arg4,
597 int label_index)
599 tcg_insn_unit *label_ptr;
601 switch(cond) {
602 case TCG_COND_NE:
603 tcg_out_brcond(s, TCG_COND_NE, arg2, arg4, label_index);
604 tcg_out_brcond(s, TCG_COND_NE, arg1, arg3, label_index);
605 return;
606 case TCG_COND_EQ:
607 break;
608 case TCG_COND_LT:
609 case TCG_COND_LE:
610 tcg_out_brcond(s, TCG_COND_LT, arg2, arg4, label_index);
611 break;
612 case TCG_COND_GT:
613 case TCG_COND_GE:
614 tcg_out_brcond(s, TCG_COND_GT, arg2, arg4, label_index);
615 break;
616 case TCG_COND_LTU:
617 case TCG_COND_LEU:
618 tcg_out_brcond(s, TCG_COND_LTU, arg2, arg4, label_index);
619 break;
620 case TCG_COND_GTU:
621 case TCG_COND_GEU:
622 tcg_out_brcond(s, TCG_COND_GTU, arg2, arg4, label_index);
623 break;
624 default:
625 tcg_abort();
628 label_ptr = s->code_ptr;
629 tcg_out_opc_br(s, OPC_BNE, arg2, arg4);
630 tcg_out_nop(s);
632 switch(cond) {
633 case TCG_COND_EQ:
634 tcg_out_brcond(s, TCG_COND_EQ, arg1, arg3, label_index);
635 break;
636 case TCG_COND_LT:
637 case TCG_COND_LTU:
638 tcg_out_brcond(s, TCG_COND_LTU, arg1, arg3, label_index);
639 break;
640 case TCG_COND_LE:
641 case TCG_COND_LEU:
642 tcg_out_brcond(s, TCG_COND_LEU, arg1, arg3, label_index);
643 break;
644 case TCG_COND_GT:
645 case TCG_COND_GTU:
646 tcg_out_brcond(s, TCG_COND_GTU, arg1, arg3, label_index);
647 break;
648 case TCG_COND_GE:
649 case TCG_COND_GEU:
650 tcg_out_brcond(s, TCG_COND_GEU, arg1, arg3, label_index);
651 break;
652 default:
653 tcg_abort();
656 reloc_pc16(label_ptr, s->code_ptr);
659 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
660 TCGArg c1, TCGArg c2, TCGArg v)
662 switch (cond) {
663 case TCG_COND_EQ:
664 if (c1 == 0) {
665 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, c2);
666 } else if (c2 == 0) {
667 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, c1);
668 } else {
669 tcg_out_opc_reg(s, OPC_XOR, TCG_REG_AT, c1, c2);
670 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
672 break;
673 case TCG_COND_NE:
674 if (c1 == 0) {
675 tcg_out_opc_reg(s, OPC_MOVN, ret, v, c2);
676 } else if (c2 == 0) {
677 tcg_out_opc_reg(s, OPC_MOVN, ret, v, c1);
678 } else {
679 tcg_out_opc_reg(s, OPC_XOR, TCG_REG_AT, c1, c2);
680 tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
682 break;
683 case TCG_COND_LT:
684 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c1, c2);
685 tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
686 break;
687 case TCG_COND_LTU:
688 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c1, c2);
689 tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
690 break;
691 case TCG_COND_GE:
692 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c1, c2);
693 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
694 break;
695 case TCG_COND_GEU:
696 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c1, c2);
697 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
698 break;
699 case TCG_COND_LE:
700 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c2, c1);
701 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
702 break;
703 case TCG_COND_LEU:
704 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c2, c1);
705 tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
706 break;
707 case TCG_COND_GT:
708 tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c2, c1);
709 tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
710 break;
711 case TCG_COND_GTU:
712 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c2, c1);
713 tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
714 break;
715 default:
716 tcg_abort();
717 break;
721 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
722 TCGArg arg1, TCGArg arg2)
724 switch (cond) {
725 case TCG_COND_EQ:
726 if (arg1 == 0) {
727 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg2, 1);
728 } else if (arg2 == 0) {
729 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
730 } else {
731 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
732 tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
734 break;
735 case TCG_COND_NE:
736 if (arg1 == 0) {
737 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg2);
738 } else if (arg2 == 0) {
739 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
740 } else {
741 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
742 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
744 break;
745 case TCG_COND_LT:
746 tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
747 break;
748 case TCG_COND_LTU:
749 tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
750 break;
751 case TCG_COND_GE:
752 tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
753 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
754 break;
755 case TCG_COND_GEU:
756 tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
757 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
758 break;
759 case TCG_COND_LE:
760 tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
761 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
762 break;
763 case TCG_COND_LEU:
764 tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
765 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
766 break;
767 case TCG_COND_GT:
768 tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
769 break;
770 case TCG_COND_GTU:
771 tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
772 break;
773 default:
774 tcg_abort();
775 break;
779 /* XXX: we implement it at the target level to avoid having to
780 handle cross basic blocks temporaries */
781 static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
782 TCGArg arg1, TCGArg arg2, TCGArg arg3, TCGArg arg4)
784 switch (cond) {
785 case TCG_COND_EQ:
786 tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_AT, arg2, arg4);
787 tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_T0, arg1, arg3);
788 tcg_out_opc_reg(s, OPC_AND, ret, TCG_REG_AT, TCG_REG_T0);
789 return;
790 case TCG_COND_NE:
791 tcg_out_setcond(s, TCG_COND_NE, TCG_REG_AT, arg2, arg4);
792 tcg_out_setcond(s, TCG_COND_NE, TCG_REG_T0, arg1, arg3);
793 tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_AT, TCG_REG_T0);
794 return;
795 case TCG_COND_LT:
796 case TCG_COND_LE:
797 tcg_out_setcond(s, TCG_COND_LT, TCG_REG_AT, arg2, arg4);
798 break;
799 case TCG_COND_GT:
800 case TCG_COND_GE:
801 tcg_out_setcond(s, TCG_COND_GT, TCG_REG_AT, arg2, arg4);
802 break;
803 case TCG_COND_LTU:
804 case TCG_COND_LEU:
805 tcg_out_setcond(s, TCG_COND_LTU, TCG_REG_AT, arg2, arg4);
806 break;
807 case TCG_COND_GTU:
808 case TCG_COND_GEU:
809 tcg_out_setcond(s, TCG_COND_GTU, TCG_REG_AT, arg2, arg4);
810 break;
811 default:
812 tcg_abort();
813 break;
816 tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_T0, arg2, arg4);
818 switch(cond) {
819 case TCG_COND_LT:
820 case TCG_COND_LTU:
821 tcg_out_setcond(s, TCG_COND_LTU, ret, arg1, arg3);
822 break;
823 case TCG_COND_LE:
824 case TCG_COND_LEU:
825 tcg_out_setcond(s, TCG_COND_LEU, ret, arg1, arg3);
826 break;
827 case TCG_COND_GT:
828 case TCG_COND_GTU:
829 tcg_out_setcond(s, TCG_COND_GTU, ret, arg1, arg3);
830 break;
831 case TCG_COND_GE:
832 case TCG_COND_GEU:
833 tcg_out_setcond(s, TCG_COND_GEU, ret, arg1, arg3);
834 break;
835 default:
836 tcg_abort();
839 tcg_out_opc_reg(s, OPC_AND, ret, ret, TCG_REG_T0);
840 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
843 static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
845 /* Note that the ABI requires the called function's address to be
846 loaded into T9, even if a direct branch is in range. */
847 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
849 /* But do try a direct branch, allowing the cpu better insn prefetch. */
850 if (tail) {
851 if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
852 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
854 } else {
855 if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
856 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
861 static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
863 tcg_out_call_int(s, arg, false);
864 tcg_out_nop(s);
867 #if defined(CONFIG_SOFTMMU)
868 static void * const qemu_ld_helpers[16] = {
869 [MO_UB] = helper_ret_ldub_mmu,
870 [MO_SB] = helper_ret_ldsb_mmu,
871 [MO_LEUW] = helper_le_lduw_mmu,
872 [MO_LESW] = helper_le_ldsw_mmu,
873 [MO_LEUL] = helper_le_ldul_mmu,
874 [MO_LEQ] = helper_le_ldq_mmu,
875 [MO_BEUW] = helper_be_lduw_mmu,
876 [MO_BESW] = helper_be_ldsw_mmu,
877 [MO_BEUL] = helper_be_ldul_mmu,
878 [MO_BEQ] = helper_be_ldq_mmu,
881 static void * const qemu_st_helpers[16] = {
882 [MO_UB] = helper_ret_stb_mmu,
883 [MO_LEUW] = helper_le_stw_mmu,
884 [MO_LEUL] = helper_le_stl_mmu,
885 [MO_LEQ] = helper_le_stq_mmu,
886 [MO_BEUW] = helper_be_stw_mmu,
887 [MO_BEUL] = helper_be_stl_mmu,
888 [MO_BEQ] = helper_be_stq_mmu,
891 /* Helper routines for marshalling helper function arguments into
892 * the correct registers and stack.
893 * I is where we want to put this argument, and is updated and returned
894 * for the next call. ARG is the argument itself.
896 * We provide routines for arguments which are: immediate, 32 bit
897 * value in register, 16 and 8 bit values in register (which must be zero
898 * extended before use) and 64 bit value in a lo:hi register pair.
901 static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
903 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
904 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
905 } else {
906 tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
908 return i + 1;
911 static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
913 TCGReg tmp = TCG_REG_AT;
914 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
915 tmp = tcg_target_call_iarg_regs[i];
917 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
918 return tcg_out_call_iarg_reg(s, i, tmp);
921 static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
923 TCGReg tmp = TCG_REG_AT;
924 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
925 tmp = tcg_target_call_iarg_regs[i];
927 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
928 return tcg_out_call_iarg_reg(s, i, tmp);
931 static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
933 TCGReg tmp = TCG_REG_AT;
934 if (arg == 0) {
935 tmp = TCG_REG_ZERO;
936 } else {
937 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
938 tmp = tcg_target_call_iarg_regs[i];
940 tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
942 return tcg_out_call_iarg_reg(s, i, tmp);
945 static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
947 i = (i + 1) & ~1;
948 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
949 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
950 return i;
953 /* Perform the tlb comparison operation. The complete host address is
954 placed in BASE. Clobbers AT, T0, A0. */
955 static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
956 TCGReg addrh, int mem_index, TCGMemOp s_bits,
957 tcg_insn_unit *label_ptr[2], bool is_load)
959 int cmp_off
960 = (is_load
961 ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
962 : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
963 int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
965 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
966 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
967 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
968 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
969 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
971 /* Compensate for very large offsets. */
972 if (add_off >= 0x8000) {
973 /* Most target env are smaller than 32k; none are larger than 64k.
974 Simplify the logic here merely to offset by 0x7ff0, giving us a
975 range just shy of 64k. Check this assumption. */
976 QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
977 tlb_table[NB_MMU_MODES - 1][1])
978 > 0x7ff0 + 0x7fff);
979 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
980 cmp_off -= 0x7ff0;
981 add_off -= 0x7ff0;
984 /* Load the tlb comparator. */
985 tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, cmp_off + LO_OFF);
986 if (TARGET_LONG_BITS == 64) {
987 tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF);
990 /* Mask the page bits, keeping the alignment bits to compare against.
991 In between, load the tlb addend for the fast path. */
992 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0,
993 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
994 tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
995 tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addrl);
997 label_ptr[0] = s->code_ptr;
998 tcg_out_opc_br(s, OPC_BNE, TCG_REG_T0, TCG_REG_AT);
1000 if (TARGET_LONG_BITS == 64) {
1001 /* delay slot */
1002 tcg_out_nop(s);
1004 label_ptr[1] = s->code_ptr;
1005 tcg_out_opc_br(s, OPC_BNE, addrh, base);
1008 /* delay slot */
1009 tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
1012 static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
1013 TCGReg datalo, TCGReg datahi,
1014 TCGReg addrlo, TCGReg addrhi,
1015 int mem_index, void *raddr,
1016 tcg_insn_unit *label_ptr[2])
1018 TCGLabelQemuLdst *label = new_ldst_label(s);
1020 label->is_ld = is_ld;
1021 label->opc = opc;
1022 label->datalo_reg = datalo;
1023 label->datahi_reg = datahi;
1024 label->addrlo_reg = addrlo;
1025 label->addrhi_reg = addrhi;
1026 label->mem_index = mem_index;
1027 label->raddr = raddr;
1028 label->label_ptr[0] = label_ptr[0];
1029 if (TARGET_LONG_BITS == 64) {
1030 label->label_ptr[1] = label_ptr[1];
1034 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1036 TCGMemOp opc = l->opc;
1037 TCGReg v0;
1038 int i;
1040 /* resolve label address */
1041 reloc_pc16(l->label_ptr[0], s->code_ptr);
1042 if (TARGET_LONG_BITS == 64) {
1043 reloc_pc16(l->label_ptr[1], s->code_ptr);
1046 i = 1;
1047 if (TARGET_LONG_BITS == 64) {
1048 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1049 } else {
1050 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1052 i = tcg_out_call_iarg_imm(s, i, l->mem_index);
1053 i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
1054 tcg_out_call_int(s, qemu_ld_helpers[opc], false);
1055 /* delay slot */
1056 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1058 v0 = l->datalo_reg;
1059 if ((opc & MO_SIZE) == MO_64) {
1060 /* We eliminated V0 from the possible output registers, so it
1061 cannot be clobbered here. So we must move V1 first. */
1062 if (MIPS_BE) {
1063 tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
1064 v0 = l->datahi_reg;
1065 } else {
1066 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
1070 reloc_pc16(s->code_ptr, l->raddr);
1071 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
1072 /* delay slot */
1073 tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
1076 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1078 TCGMemOp opc = l->opc;
1079 TCGMemOp s_bits = opc & MO_SIZE;
1080 int i;
1082 /* resolve label address */
1083 reloc_pc16(l->label_ptr[0], s->code_ptr);
1084 if (TARGET_LONG_BITS == 64) {
1085 reloc_pc16(l->label_ptr[1], s->code_ptr);
1088 i = 1;
1089 if (TARGET_LONG_BITS == 64) {
1090 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1091 } else {
1092 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1094 switch (s_bits) {
1095 case MO_8:
1096 i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
1097 break;
1098 case MO_16:
1099 i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
1100 break;
1101 case MO_32:
1102 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1103 break;
1104 case MO_64:
1105 i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
1106 break;
1107 default:
1108 tcg_abort();
1110 i = tcg_out_call_iarg_imm(s, i, l->mem_index);
1112 /* Tail call to the store helper. Thus force the return address
1113 computation to take place in the return address register. */
1114 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
1115 i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
1116 tcg_out_call_int(s, qemu_st_helpers[opc], true);
1117 /* delay slot */
1118 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1120 #endif
1122 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1123 TCGReg base, TCGMemOp opc)
1125 switch (opc) {
1126 case MO_UB:
1127 tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
1128 break;
1129 case MO_SB:
1130 tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
1131 break;
1132 case MO_UW | MO_BSWAP:
1133 tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, base, 0);
1134 tcg_out_bswap16(s, datalo, TCG_REG_T0);
1135 break;
1136 case MO_UW:
1137 tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
1138 break;
1139 case MO_SW | MO_BSWAP:
1140 tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, base, 0);
1141 tcg_out_bswap16s(s, datalo, TCG_REG_T0);
1142 break;
1143 case MO_SW:
1144 tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
1145 break;
1146 case MO_UL | MO_BSWAP:
1147 tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, 0);
1148 tcg_out_bswap32(s, datalo, TCG_REG_T0);
1149 break;
1150 case MO_UL:
1151 tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
1152 break;
1153 case MO_Q | MO_BSWAP:
1154 tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, HI_OFF);
1155 tcg_out_bswap32(s, datalo, TCG_REG_T0);
1156 tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, LO_OFF);
1157 tcg_out_bswap32(s, datahi, TCG_REG_T0);
1158 break;
1159 case MO_Q:
1160 tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
1161 tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
1162 break;
1163 default:
1164 tcg_abort();
1168 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1170 TCGReg addr_regl, addr_regh __attribute__((unused));
1171 TCGReg data_regl, data_regh;
1172 TCGMemOp opc;
1173 #if defined(CONFIG_SOFTMMU)
1174 tcg_insn_unit *label_ptr[2];
1175 int mem_index;
1176 TCGMemOp s_bits;
1177 #endif
1178 /* Note that we've eliminated V0 from the output registers,
1179 so we won't overwrite the base register during loading. */
1180 TCGReg base = TCG_REG_V0;
1182 data_regl = *args++;
1183 data_regh = (is_64 ? *args++ : 0);
1184 addr_regl = *args++;
1185 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1186 opc = *args++;
1188 #if defined(CONFIG_SOFTMMU)
1189 mem_index = *args;
1190 s_bits = opc & MO_SIZE;
1192 tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
1193 s_bits, label_ptr, 1);
1194 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1195 add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh,
1196 mem_index, s->code_ptr, label_ptr);
1197 #else
1198 if (GUEST_BASE == 0 && data_regl != addr_regl) {
1199 base = addr_regl;
1200 } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
1201 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
1202 } else {
1203 tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
1204 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1206 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1207 #endif
1210 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1211 TCGReg base, TCGMemOp opc)
1213 switch (opc) {
1214 case MO_8:
1215 tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
1216 break;
1218 case MO_16 | MO_BSWAP:
1219 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_T0, datalo, 0xffff);
1220 tcg_out_bswap16(s, TCG_REG_T0, TCG_REG_T0);
1221 datalo = TCG_REG_T0;
1222 /* FALLTHRU */
1223 case MO_16:
1224 tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
1225 break;
1227 case MO_32 | MO_BSWAP:
1228 tcg_out_bswap32(s, TCG_REG_T0, datalo);
1229 datalo = TCG_REG_T0;
1230 /* FALLTHRU */
1231 case MO_32:
1232 tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
1233 break;
1235 case MO_64 | MO_BSWAP:
1236 tcg_out_bswap32(s, TCG_REG_T0, datalo);
1237 tcg_out_opc_imm(s, OPC_SW, TCG_REG_T0, base, HI_OFF);
1238 tcg_out_bswap32(s, TCG_REG_T0, datahi);
1239 tcg_out_opc_imm(s, OPC_SW, TCG_REG_T0, base, LO_OFF);
1240 break;
1241 case MO_64:
1242 tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
1243 tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
1244 break;
1246 default:
1247 tcg_abort();
1251 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1253 TCGReg addr_regl, addr_regh __attribute__((unused));
1254 TCGReg data_regl, data_regh, base;
1255 TCGMemOp opc;
1256 #if defined(CONFIG_SOFTMMU)
1257 tcg_insn_unit *label_ptr[2];
1258 int mem_index;
1259 TCGMemOp s_bits;
1260 #endif
1262 data_regl = *args++;
1263 data_regh = (is_64 ? *args++ : 0);
1264 addr_regl = *args++;
1265 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1266 opc = *args++;
1268 #if defined(CONFIG_SOFTMMU)
1269 mem_index = *args;
1270 s_bits = opc & 3;
1272 /* Note that we eliminated the helper's address argument,
1273 so we can reuse that for the base. */
1274 base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
1275 tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
1276 s_bits, label_ptr, 1);
1277 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1278 add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh,
1279 mem_index, s->code_ptr, label_ptr);
1280 #else
1281 if (GUEST_BASE == 0) {
1282 base = addr_regl;
1283 } else {
1284 base = TCG_REG_A0;
1285 if (GUEST_BASE == (int16_t)GUEST_BASE) {
1286 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
1287 } else {
1288 tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
1289 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1292 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1293 #endif
1296 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1297 const TCGArg *args, const int *const_args)
1299 switch(opc) {
1300 case INDEX_op_exit_tb:
1302 uintptr_t a0 = args[0];
1303 TCGReg b0 = TCG_REG_ZERO;
1305 if (a0 & ~0xffff) {
1306 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
1307 b0 = TCG_REG_V0;
1309 if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
1310 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT,
1311 (uintptr_t)tb_ret_addr);
1312 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_AT, 0);
1314 tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
1316 break;
1317 case INDEX_op_goto_tb:
1318 if (s->tb_jmp_offset) {
1319 /* direct jump method */
1320 tcg_abort();
1321 } else {
1322 /* indirect jump method */
1323 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_AT, TCG_REG_ZERO,
1324 (uintptr_t)(s->tb_next + args[0]));
1325 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_AT, 0);
1327 tcg_out_nop(s);
1328 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1329 break;
1330 case INDEX_op_br:
1331 tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, args[0]);
1332 break;
1334 case INDEX_op_ld8u_i32:
1335 tcg_out_ldst(s, OPC_LBU, args[0], args[1], args[2]);
1336 break;
1337 case INDEX_op_ld8s_i32:
1338 tcg_out_ldst(s, OPC_LB, args[0], args[1], args[2]);
1339 break;
1340 case INDEX_op_ld16u_i32:
1341 tcg_out_ldst(s, OPC_LHU, args[0], args[1], args[2]);
1342 break;
1343 case INDEX_op_ld16s_i32:
1344 tcg_out_ldst(s, OPC_LH, args[0], args[1], args[2]);
1345 break;
1346 case INDEX_op_ld_i32:
1347 tcg_out_ldst(s, OPC_LW, args[0], args[1], args[2]);
1348 break;
1349 case INDEX_op_st8_i32:
1350 tcg_out_ldst(s, OPC_SB, args[0], args[1], args[2]);
1351 break;
1352 case INDEX_op_st16_i32:
1353 tcg_out_ldst(s, OPC_SH, args[0], args[1], args[2]);
1354 break;
1355 case INDEX_op_st_i32:
1356 tcg_out_ldst(s, OPC_SW, args[0], args[1], args[2]);
1357 break;
1359 case INDEX_op_add_i32:
1360 if (const_args[2]) {
1361 tcg_out_opc_imm(s, OPC_ADDIU, args[0], args[1], args[2]);
1362 } else {
1363 tcg_out_opc_reg(s, OPC_ADDU, args[0], args[1], args[2]);
1365 break;
1366 case INDEX_op_add2_i32:
1367 if (const_args[4]) {
1368 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_AT, args[2], args[4]);
1369 } else {
1370 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_AT, args[2], args[4]);
1372 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_T0, TCG_REG_AT, args[2]);
1373 if (const_args[5]) {
1374 tcg_out_opc_imm(s, OPC_ADDIU, args[1], args[3], args[5]);
1375 } else {
1376 tcg_out_opc_reg(s, OPC_ADDU, args[1], args[3], args[5]);
1378 tcg_out_opc_reg(s, OPC_ADDU, args[1], args[1], TCG_REG_T0);
1379 tcg_out_mov(s, TCG_TYPE_I32, args[0], TCG_REG_AT);
1380 break;
1381 case INDEX_op_sub_i32:
1382 if (const_args[2]) {
1383 tcg_out_opc_imm(s, OPC_ADDIU, args[0], args[1], -args[2]);
1384 } else {
1385 tcg_out_opc_reg(s, OPC_SUBU, args[0], args[1], args[2]);
1387 break;
1388 case INDEX_op_sub2_i32:
1389 if (const_args[4]) {
1390 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_AT, args[2], -args[4]);
1391 } else {
1392 tcg_out_opc_reg(s, OPC_SUBU, TCG_REG_AT, args[2], args[4]);
1394 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_T0, args[2], TCG_REG_AT);
1395 if (const_args[5]) {
1396 tcg_out_opc_imm(s, OPC_ADDIU, args[1], args[3], -args[5]);
1397 } else {
1398 tcg_out_opc_reg(s, OPC_SUBU, args[1], args[3], args[5]);
1400 tcg_out_opc_reg(s, OPC_SUBU, args[1], args[1], TCG_REG_T0);
1401 tcg_out_mov(s, TCG_TYPE_I32, args[0], TCG_REG_AT);
1402 break;
1403 case INDEX_op_mul_i32:
1404 if (use_mips32_instructions) {
1405 tcg_out_opc_reg(s, OPC_MUL, args[0], args[1], args[2]);
1406 } else {
1407 tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
1408 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1410 break;
1411 case INDEX_op_muls2_i32:
1412 tcg_out_opc_reg(s, OPC_MULT, 0, args[2], args[3]);
1413 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1414 tcg_out_opc_reg(s, OPC_MFHI, args[1], 0, 0);
1415 break;
1416 case INDEX_op_mulu2_i32:
1417 tcg_out_opc_reg(s, OPC_MULTU, 0, args[2], args[3]);
1418 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1419 tcg_out_opc_reg(s, OPC_MFHI, args[1], 0, 0);
1420 break;
1421 case INDEX_op_mulsh_i32:
1422 tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
1423 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1424 break;
1425 case INDEX_op_muluh_i32:
1426 tcg_out_opc_reg(s, OPC_MULTU, 0, args[1], args[2]);
1427 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1428 break;
1429 case INDEX_op_div_i32:
1430 tcg_out_opc_reg(s, OPC_DIV, 0, args[1], args[2]);
1431 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1432 break;
1433 case INDEX_op_divu_i32:
1434 tcg_out_opc_reg(s, OPC_DIVU, 0, args[1], args[2]);
1435 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1436 break;
1437 case INDEX_op_rem_i32:
1438 tcg_out_opc_reg(s, OPC_DIV, 0, args[1], args[2]);
1439 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1440 break;
1441 case INDEX_op_remu_i32:
1442 tcg_out_opc_reg(s, OPC_DIVU, 0, args[1], args[2]);
1443 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1444 break;
1446 case INDEX_op_and_i32:
1447 if (const_args[2]) {
1448 tcg_out_opc_imm(s, OPC_ANDI, args[0], args[1], args[2]);
1449 } else {
1450 tcg_out_opc_reg(s, OPC_AND, args[0], args[1], args[2]);
1452 break;
1453 case INDEX_op_or_i32:
1454 if (const_args[2]) {
1455 tcg_out_opc_imm(s, OPC_ORI, args[0], args[1], args[2]);
1456 } else {
1457 tcg_out_opc_reg(s, OPC_OR, args[0], args[1], args[2]);
1459 break;
1460 case INDEX_op_nor_i32:
1461 tcg_out_opc_reg(s, OPC_NOR, args[0], args[1], args[2]);
1462 break;
1463 case INDEX_op_not_i32:
1464 tcg_out_opc_reg(s, OPC_NOR, args[0], TCG_REG_ZERO, args[1]);
1465 break;
1466 case INDEX_op_xor_i32:
1467 if (const_args[2]) {
1468 tcg_out_opc_imm(s, OPC_XORI, args[0], args[1], args[2]);
1469 } else {
1470 tcg_out_opc_reg(s, OPC_XOR, args[0], args[1], args[2]);
1472 break;
1474 case INDEX_op_sar_i32:
1475 if (const_args[2]) {
1476 tcg_out_opc_sa(s, OPC_SRA, args[0], args[1], args[2]);
1477 } else {
1478 tcg_out_opc_reg(s, OPC_SRAV, args[0], args[2], args[1]);
1480 break;
1481 case INDEX_op_shl_i32:
1482 if (const_args[2]) {
1483 tcg_out_opc_sa(s, OPC_SLL, args[0], args[1], args[2]);
1484 } else {
1485 tcg_out_opc_reg(s, OPC_SLLV, args[0], args[2], args[1]);
1487 break;
1488 case INDEX_op_shr_i32:
1489 if (const_args[2]) {
1490 tcg_out_opc_sa(s, OPC_SRL, args[0], args[1], args[2]);
1491 } else {
1492 tcg_out_opc_reg(s, OPC_SRLV, args[0], args[2], args[1]);
1494 break;
1495 case INDEX_op_rotl_i32:
1496 if (const_args[2]) {
1497 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[1], 0x20 - args[2]);
1498 } else {
1499 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_AT, 32);
1500 tcg_out_opc_reg(s, OPC_SUBU, TCG_REG_AT, TCG_REG_AT, args[2]);
1501 tcg_out_opc_reg(s, OPC_ROTRV, args[0], TCG_REG_AT, args[1]);
1503 break;
1504 case INDEX_op_rotr_i32:
1505 if (const_args[2]) {
1506 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[1], args[2]);
1507 } else {
1508 tcg_out_opc_reg(s, OPC_ROTRV, args[0], args[2], args[1]);
1510 break;
1512 case INDEX_op_bswap16_i32:
1513 tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
1514 break;
1515 case INDEX_op_bswap32_i32:
1516 tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
1517 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[0], 16);
1518 break;
1520 case INDEX_op_ext8s_i32:
1521 tcg_out_opc_reg(s, OPC_SEB, args[0], 0, args[1]);
1522 break;
1523 case INDEX_op_ext16s_i32:
1524 tcg_out_opc_reg(s, OPC_SEH, args[0], 0, args[1]);
1525 break;
1527 case INDEX_op_deposit_i32:
1528 tcg_out_opc_imm(s, OPC_INS, args[0], args[2],
1529 ((args[3] + args[4] - 1) << 11) | (args[3] << 6));
1530 break;
1532 case INDEX_op_brcond_i32:
1533 tcg_out_brcond(s, args[2], args[0], args[1], args[3]);
1534 break;
1535 case INDEX_op_brcond2_i32:
1536 tcg_out_brcond2(s, args[4], args[0], args[1], args[2], args[3], args[5]);
1537 break;
1539 case INDEX_op_movcond_i32:
1540 tcg_out_movcond(s, args[5], args[0], args[1], args[2], args[3]);
1541 break;
1543 case INDEX_op_setcond_i32:
1544 tcg_out_setcond(s, args[3], args[0], args[1], args[2]);
1545 break;
1546 case INDEX_op_setcond2_i32:
1547 tcg_out_setcond2(s, args[5], args[0], args[1], args[2], args[3], args[4]);
1548 break;
1550 case INDEX_op_qemu_ld_i32:
1551 tcg_out_qemu_ld(s, args, false);
1552 break;
1553 case INDEX_op_qemu_ld_i64:
1554 tcg_out_qemu_ld(s, args, true);
1555 break;
1556 case INDEX_op_qemu_st_i32:
1557 tcg_out_qemu_st(s, args, false);
1558 break;
1559 case INDEX_op_qemu_st_i64:
1560 tcg_out_qemu_st(s, args, true);
1561 break;
1563 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1564 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
1565 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1566 default:
1567 tcg_abort();
1571 static const TCGTargetOpDef mips_op_defs[] = {
1572 { INDEX_op_exit_tb, { } },
1573 { INDEX_op_goto_tb, { } },
1574 { INDEX_op_br, { } },
1576 { INDEX_op_ld8u_i32, { "r", "r" } },
1577 { INDEX_op_ld8s_i32, { "r", "r" } },
1578 { INDEX_op_ld16u_i32, { "r", "r" } },
1579 { INDEX_op_ld16s_i32, { "r", "r" } },
1580 { INDEX_op_ld_i32, { "r", "r" } },
1581 { INDEX_op_st8_i32, { "rZ", "r" } },
1582 { INDEX_op_st16_i32, { "rZ", "r" } },
1583 { INDEX_op_st_i32, { "rZ", "r" } },
1585 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1586 { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
1587 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
1588 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
1589 { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
1590 { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
1591 { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
1592 { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
1593 { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
1594 { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
1595 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1597 { INDEX_op_and_i32, { "r", "rZ", "rI" } },
1598 { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
1599 { INDEX_op_not_i32, { "r", "rZ" } },
1600 { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
1601 { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
1603 { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
1604 { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
1605 { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
1606 { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
1607 { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
1609 { INDEX_op_bswap16_i32, { "r", "r" } },
1610 { INDEX_op_bswap32_i32, { "r", "r" } },
1612 { INDEX_op_ext8s_i32, { "r", "rZ" } },
1613 { INDEX_op_ext16s_i32, { "r", "rZ" } },
1615 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
1617 { INDEX_op_brcond_i32, { "rZ", "rZ" } },
1618 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
1619 { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
1620 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
1622 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1623 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1624 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
1626 #if TARGET_LONG_BITS == 32
1627 { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
1628 { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
1629 { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
1630 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
1631 #else
1632 { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
1633 { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
1634 { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
1635 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
1636 #endif
1637 { -1 },
1640 static int tcg_target_callee_save_regs[] = {
1641 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
1642 TCG_REG_S1,
1643 TCG_REG_S2,
1644 TCG_REG_S3,
1645 TCG_REG_S4,
1646 TCG_REG_S5,
1647 TCG_REG_S6,
1648 TCG_REG_S7,
1649 TCG_REG_FP,
1650 TCG_REG_RA, /* should be last for ABI compliance */
1653 /* The Linux kernel doesn't provide any information about the available
1654 instruction set. Probe it using a signal handler. */
1656 #include <signal.h>
/* ISA feature flags filled in by tcg_target_detect_isa.  Each may be
   predefined as a constant macro by the target header, in which case
   the runtime variable (and its probe) is compiled out.  */
#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

/* Set by sigill_handler when a probed instruction is unsupported.  */
static volatile sig_atomic_t got_sigill;
1672 static void sigill_handler(int signo, siginfo_t *si, void *data)
1674 /* Skip the faulty instruction */
1675 ucontext_t *uc = (ucontext_t *)data;
1676 uc->uc_mcontext.pc += 4;
1678 got_sigill = 1;
1681 static void tcg_target_detect_isa(void)
1683 struct sigaction sa_old, sa_new;
1685 memset(&sa_new, 0, sizeof(sa_new));
1686 sa_new.sa_flags = SA_SIGINFO;
1687 sa_new.sa_sigaction = sigill_handler;
1688 sigaction(SIGILL, &sa_new, &sa_old);
1690 /* Probe for movn/movz, necessary to implement movcond. */
1691 #ifndef use_movnz_instructions
1692 got_sigill = 0;
1693 asm volatile(".set push\n"
1694 ".set mips32\n"
1695 "movn $zero, $zero, $zero\n"
1696 "movz $zero, $zero, $zero\n"
1697 ".set pop\n"
1698 : : : );
1699 use_movnz_instructions = !got_sigill;
1700 #endif
1702 /* Probe for MIPS32 instructions. As no subsetting is allowed
1703 by the specification, it is only necessary to probe for one
1704 of the instructions. */
1705 #ifndef use_mips32_instructions
1706 got_sigill = 0;
1707 asm volatile(".set push\n"
1708 ".set mips32\n"
1709 "mul $zero, $zero\n"
1710 ".set pop\n"
1711 : : : );
1712 use_mips32_instructions = !got_sigill;
1713 #endif
1715 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
1716 available. As no subsetting is allowed by the specification,
1717 it is only necessary to probe for one of the instructions. */
1718 #ifndef use_mips32r2_instructions
1719 if (use_mips32_instructions) {
1720 got_sigill = 0;
1721 asm volatile(".set push\n"
1722 ".set mips32r2\n"
1723 "seb $zero, $zero\n"
1724 ".set pop\n"
1725 : : : );
1726 use_mips32r2_instructions = !got_sigill;
1728 #endif
1730 sigaction(SIGILL, &sa_old, NULL);
1733 /* Generate global QEMU prologue and epilogue code */
1734 static void tcg_target_qemu_prologue(TCGContext *s)
1736 int i, frame_size;
1738 /* reserve some stack space, also for TCG temps. */
1739 frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1740 + TCG_STATIC_CALL_ARGS_SIZE
1741 + CPU_TEMP_BUF_NLONGS * sizeof(long);
1742 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1743 ~(TCG_TARGET_STACK_ALIGN - 1);
1744 tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1745 + TCG_STATIC_CALL_ARGS_SIZE,
1746 CPU_TEMP_BUF_NLONGS * sizeof(long));
1748 /* TB prologue */
1749 tcg_out_addi(s, TCG_REG_SP, -frame_size);
1750 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1751 tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1752 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
1755 /* Call generated code */
1756 tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
1757 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1758 tb_ret_addr = s->code_ptr;
1760 /* TB epilogue */
1761 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1762 tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1763 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
1766 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
1767 tcg_out_addi(s, TCG_REG_SP, frame_size);
1770 static void tcg_target_init(TCGContext *s)
1772 tcg_target_detect_isa();
1773 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
1774 tcg_regset_set(tcg_target_call_clobber_regs,
1775 (1 << TCG_REG_V0) |
1776 (1 << TCG_REG_V1) |
1777 (1 << TCG_REG_A0) |
1778 (1 << TCG_REG_A1) |
1779 (1 << TCG_REG_A2) |
1780 (1 << TCG_REG_A3) |
1781 (1 << TCG_REG_T1) |
1782 (1 << TCG_REG_T2) |
1783 (1 << TCG_REG_T3) |
1784 (1 << TCG_REG_T4) |
1785 (1 << TCG_REG_T5) |
1786 (1 << TCG_REG_T6) |
1787 (1 << TCG_REG_T7) |
1788 (1 << TCG_REG_T8) |
1789 (1 << TCG_REG_T9));
1791 tcg_regset_clear(s->reserved_regs);
1792 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
1793 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
1794 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
1795 tcg_regset_set_reg(s->reserved_regs, TCG_REG_AT); /* internal use */
1796 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T0); /* internal use */
1797 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
1798 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
1799 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
1801 tcg_add_target_add_op_defs(mips_op_defs);