hw/virtio: group virtio flags into an enum
[qemu/kevin.git] / tcg / mips / tcg-target.inc.c
blob297bd00910b766c44a420bad3fa506ec32c67d4b
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
30 # define MIPS_BE 1
31 #else
32 # define MIPS_BE 0
33 #endif
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
38 #ifndef NDEBUG
39 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
40 "zero",
41 "at",
42 "v0",
43 "v1",
44 "a0",
45 "a1",
46 "a2",
47 "a3",
48 "t0",
49 "t1",
50 "t2",
51 "t3",
52 "t4",
53 "t5",
54 "t6",
55 "t7",
56 "s0",
57 "s1",
58 "s2",
59 "s3",
60 "s4",
61 "s5",
62 "s6",
63 "s7",
64 "t8",
65 "t9",
66 "k0",
67 "k1",
68 "gp",
69 "sp",
70 "s8",
71 "ra",
73 #endif
/* Scratch registers reserved for internal use by the backend. */
#define TCG_TMP0  TCG_REG_AT
#define TCG_TMP1  TCG_REG_T9
78 /* check if we really need so many registers :P */
79 static const TCGReg tcg_target_reg_alloc_order[] = {
80 /* Call saved registers. */
81 TCG_REG_S0,
82 TCG_REG_S1,
83 TCG_REG_S2,
84 TCG_REG_S3,
85 TCG_REG_S4,
86 TCG_REG_S5,
87 TCG_REG_S6,
88 TCG_REG_S7,
89 TCG_REG_S8,
91 /* Call clobbered registers. */
92 TCG_REG_T0,
93 TCG_REG_T1,
94 TCG_REG_T2,
95 TCG_REG_T3,
96 TCG_REG_T4,
97 TCG_REG_T5,
98 TCG_REG_T6,
99 TCG_REG_T7,
100 TCG_REG_T8,
101 TCG_REG_T9,
102 TCG_REG_V1,
103 TCG_REG_V0,
105 /* Argument registers, opposite order of allocation. */
106 TCG_REG_A3,
107 TCG_REG_A2,
108 TCG_REG_A1,
109 TCG_REG_A0,
112 static const TCGReg tcg_target_call_iarg_regs[4] = {
113 TCG_REG_A0,
114 TCG_REG_A1,
115 TCG_REG_A2,
116 TCG_REG_A3
119 static const TCGReg tcg_target_call_oarg_regs[2] = {
120 TCG_REG_V0,
121 TCG_REG_V1
124 static tcg_insn_unit *tb_ret_addr;
126 static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
128 /* Let the compiler perform the right-shift as part of the arithmetic. */
129 ptrdiff_t disp = target - (pc + 1);
130 assert(disp == (int16_t)disp);
131 return disp & 0xffff;
134 static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
136 *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
139 static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
141 assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
142 return ((uintptr_t)target >> 2) & 0x3ffffff;
145 static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
147 *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
150 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
151 intptr_t value, intptr_t addend)
153 assert(type == R_MIPS_PC16);
154 assert(addend == 0);
155 reloc_pc16(code_ptr, (tcg_insn_unit *)value);
/* Constant constraints, in addition to the generic TCG_CT_CONST. */
#define TCG_CT_CONST_ZERO 0x100     /* The constant zero only. */
#define TCG_CT_CONST_U16  0x200     /* Unsigned 16-bit: 0 - 0xffff.  */
#define TCG_CT_CONST_S16  0x400     /* Signed 16-bit: -32768 - 32767 */
#define TCG_CT_CONST_P2M1 0x800     /* Power of 2 minus 1.  */
#define TCG_CT_CONST_N16  0x1000    /* "Negatable" 16-bit: -32767 - 32767 */
164 static inline bool is_p2m1(tcg_target_long val)
166 return val && ((val + 1) & val) == 0;
169 /* parse target specific constraints */
170 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
172 const char *ct_str;
174 ct_str = *pct_str;
175 switch(ct_str[0]) {
176 case 'r':
177 ct->ct |= TCG_CT_REG;
178 tcg_regset_set(ct->u.regs, 0xffffffff);
179 break;
180 case 'L': /* qemu_ld output arg constraint */
181 ct->ct |= TCG_CT_REG;
182 tcg_regset_set(ct->u.regs, 0xffffffff);
183 tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
184 break;
185 case 'l': /* qemu_ld input arg constraint */
186 ct->ct |= TCG_CT_REG;
187 tcg_regset_set(ct->u.regs, 0xffffffff);
188 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
189 #if defined(CONFIG_SOFTMMU)
190 if (TARGET_LONG_BITS == 64) {
191 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
193 #endif
194 break;
195 case 'S': /* qemu_st constraint */
196 ct->ct |= TCG_CT_REG;
197 tcg_regset_set(ct->u.regs, 0xffffffff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
199 #if defined(CONFIG_SOFTMMU)
200 if (TARGET_LONG_BITS == 32) {
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
202 } else {
203 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
204 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
206 #endif
207 break;
208 case 'I':
209 ct->ct |= TCG_CT_CONST_U16;
210 break;
211 case 'J':
212 ct->ct |= TCG_CT_CONST_S16;
213 break;
214 case 'K':
215 ct->ct |= TCG_CT_CONST_P2M1;
216 break;
217 case 'N':
218 ct->ct |= TCG_CT_CONST_N16;
219 break;
220 case 'Z':
221 /* We are cheating a bit here, using the fact that the register
222 ZERO is also the register number 0. Hence there is no need
223 to check for const_args in each instruction. */
224 ct->ct |= TCG_CT_CONST_ZERO;
225 break;
226 default:
227 return -1;
229 ct_str++;
230 *pct_str = ct_str;
231 return 0;
234 /* test if a constant matches the constraint */
235 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
236 const TCGArgConstraint *arg_ct)
238 int ct;
239 ct = arg_ct->ct;
240 if (ct & TCG_CT_CONST) {
241 return 1;
242 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
243 return 1;
244 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
245 return 1;
246 } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
247 return 1;
248 } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
249 return 1;
250 } else if ((ct & TCG_CT_CONST_P2M1)
251 && use_mips32r2_instructions && is_p2m1(val)) {
252 return 1;
254 return 0;
257 /* instruction opcodes */
258 typedef enum {
259 OPC_J = 0x02 << 26,
260 OPC_JAL = 0x03 << 26,
261 OPC_BEQ = 0x04 << 26,
262 OPC_BNE = 0x05 << 26,
263 OPC_BLEZ = 0x06 << 26,
264 OPC_BGTZ = 0x07 << 26,
265 OPC_ADDIU = 0x09 << 26,
266 OPC_SLTI = 0x0A << 26,
267 OPC_SLTIU = 0x0B << 26,
268 OPC_ANDI = 0x0C << 26,
269 OPC_ORI = 0x0D << 26,
270 OPC_XORI = 0x0E << 26,
271 OPC_LUI = 0x0F << 26,
272 OPC_LB = 0x20 << 26,
273 OPC_LH = 0x21 << 26,
274 OPC_LW = 0x23 << 26,
275 OPC_LBU = 0x24 << 26,
276 OPC_LHU = 0x25 << 26,
277 OPC_LWU = 0x27 << 26,
278 OPC_SB = 0x28 << 26,
279 OPC_SH = 0x29 << 26,
280 OPC_SW = 0x2B << 26,
282 OPC_SPECIAL = 0x00 << 26,
283 OPC_SLL = OPC_SPECIAL | 0x00,
284 OPC_SRL = OPC_SPECIAL | 0x02,
285 OPC_ROTR = OPC_SPECIAL | (0x01 << 21) | 0x02,
286 OPC_SRA = OPC_SPECIAL | 0x03,
287 OPC_SLLV = OPC_SPECIAL | 0x04,
288 OPC_SRLV = OPC_SPECIAL | 0x06,
289 OPC_ROTRV = OPC_SPECIAL | (0x01 << 6) | 0x06,
290 OPC_SRAV = OPC_SPECIAL | 0x07,
291 OPC_JR_R5 = OPC_SPECIAL | 0x08,
292 OPC_JALR = OPC_SPECIAL | 0x09,
293 OPC_MOVZ = OPC_SPECIAL | 0x0A,
294 OPC_MOVN = OPC_SPECIAL | 0x0B,
295 OPC_MFHI = OPC_SPECIAL | 0x10,
296 OPC_MFLO = OPC_SPECIAL | 0x12,
297 OPC_MULT = OPC_SPECIAL | 0x18,
298 OPC_MUL_R6 = OPC_SPECIAL | (0x02 << 6) | 0x18,
299 OPC_MUH = OPC_SPECIAL | (0x03 << 6) | 0x18,
300 OPC_MULTU = OPC_SPECIAL | 0x19,
301 OPC_MULU = OPC_SPECIAL | (0x02 << 6) | 0x19,
302 OPC_MUHU = OPC_SPECIAL | (0x03 << 6) | 0x19,
303 OPC_DIV = OPC_SPECIAL | 0x1A,
304 OPC_DIV_R6 = OPC_SPECIAL | (0x02 << 6) | 0x1A,
305 OPC_MOD = OPC_SPECIAL | (0x03 << 6) | 0x1A,
306 OPC_DIVU = OPC_SPECIAL | 0x1B,
307 OPC_DIVU_R6 = OPC_SPECIAL | (0x02 << 6) | 0x1B,
308 OPC_MODU = OPC_SPECIAL | (0x03 << 6) | 0x1B,
309 OPC_ADDU = OPC_SPECIAL | 0x21,
310 OPC_SUBU = OPC_SPECIAL | 0x23,
311 OPC_AND = OPC_SPECIAL | 0x24,
312 OPC_OR = OPC_SPECIAL | 0x25,
313 OPC_XOR = OPC_SPECIAL | 0x26,
314 OPC_NOR = OPC_SPECIAL | 0x27,
315 OPC_SLT = OPC_SPECIAL | 0x2A,
316 OPC_SLTU = OPC_SPECIAL | 0x2B,
317 OPC_SELEQZ = OPC_SPECIAL | 0x35,
318 OPC_SELNEZ = OPC_SPECIAL | 0x37,
320 OPC_REGIMM = 0x01 << 26,
321 OPC_BLTZ = OPC_REGIMM | (0x00 << 16),
322 OPC_BGEZ = OPC_REGIMM | (0x01 << 16),
324 OPC_SPECIAL2 = 0x1c << 26,
325 OPC_MUL_R5 = OPC_SPECIAL2 | 0x002,
327 OPC_SPECIAL3 = 0x1f << 26,
328 OPC_EXT = OPC_SPECIAL3 | 0x000,
329 OPC_INS = OPC_SPECIAL3 | 0x004,
330 OPC_WSBH = OPC_SPECIAL3 | 0x0a0,
331 OPC_SEB = OPC_SPECIAL3 | 0x420,
332 OPC_SEH = OPC_SPECIAL3 | 0x620,
334 /* MIPS r6 doesn't have JR, JALR should be used instead */
335 OPC_JR = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,
338 * MIPS r6 replaces MUL with an alternative encoding which is
339 * backwards-compatible at the assembly level.
341 OPC_MUL = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5,
342 } MIPSInsn;
345 * Type reg
347 static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
348 TCGReg rd, TCGReg rs, TCGReg rt)
350 int32_t inst;
352 inst = opc;
353 inst |= (rs & 0x1F) << 21;
354 inst |= (rt & 0x1F) << 16;
355 inst |= (rd & 0x1F) << 11;
356 tcg_out32(s, inst);
360 * Type immediate
362 static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
363 TCGReg rt, TCGReg rs, TCGArg imm)
365 int32_t inst;
367 inst = opc;
368 inst |= (rs & 0x1F) << 21;
369 inst |= (rt & 0x1F) << 16;
370 inst |= (imm & 0xffff);
371 tcg_out32(s, inst);
375 * Type bitfield
377 static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
378 TCGReg rs, int msb, int lsb)
380 int32_t inst;
382 inst = opc;
383 inst |= (rs & 0x1F) << 21;
384 inst |= (rt & 0x1F) << 16;
385 inst |= (msb & 0x1F) << 11;
386 inst |= (lsb & 0x1F) << 6;
387 tcg_out32(s, inst);
391 * Type branch
393 static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
394 TCGReg rt, TCGReg rs)
396 /* We pay attention here to not modify the branch target by reading
397 the existing value and using it again. This ensure that caches and
398 memory are kept coherent during retranslation. */
399 uint16_t offset = (uint16_t)*s->code_ptr;
401 tcg_out_opc_imm(s, opc, rt, rs, offset);
405 * Type sa
407 static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
408 TCGReg rd, TCGReg rt, TCGArg sa)
410 int32_t inst;
412 inst = opc;
413 inst |= (rt & 0x1F) << 16;
414 inst |= (rd & 0x1F) << 11;
415 inst |= (sa & 0x1F) << 6;
416 tcg_out32(s, inst);
421 * Type jump.
422 * Returns true if the branch was in range and the insn was emitted.
424 static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
426 uintptr_t dest = (uintptr_t)target;
427 uintptr_t from = (uintptr_t)s->code_ptr + 4;
428 int32_t inst;
430 /* The pc-region branch happens within the 256MB region of
431 the delay slot (thus the +4). */
432 if ((from ^ dest) & -(1 << 28)) {
433 return false;
435 assert((dest & 3) == 0);
437 inst = opc;
438 inst |= (dest >> 2) & 0x3ffffff;
439 tcg_out32(s, inst);
440 return true;
443 static inline void tcg_out_nop(TCGContext *s)
445 tcg_out32(s, 0);
448 static inline void tcg_out_mov(TCGContext *s, TCGType type,
449 TCGReg ret, TCGReg arg)
451 /* Simple reg-reg move, optimising out the 'do nothing' case */
452 if (ret != arg) {
453 tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
457 static inline void tcg_out_movi(TCGContext *s, TCGType type,
458 TCGReg reg, tcg_target_long arg)
460 if (arg == (int16_t)arg) {
461 tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
462 } else if (arg == (uint16_t)arg) {
463 tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
464 } else {
465 tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
466 if (arg & 0xffff) {
467 tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
472 static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
474 if (use_mips32r2_instructions) {
475 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
476 } else {
477 /* ret and arg can't be register at */
478 if (ret == TCG_TMP0 || arg == TCG_TMP0) {
479 tcg_abort();
482 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
483 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
484 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
485 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
489 static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
491 if (use_mips32r2_instructions) {
492 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
493 tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
494 } else {
495 /* ret and arg can't be register at */
496 if (ret == TCG_TMP0 || arg == TCG_TMP0) {
497 tcg_abort();
500 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
501 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
502 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
503 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
507 static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
509 if (use_mips32r2_instructions) {
510 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
511 tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
512 } else {
513 /* ret and arg must be different and can't be register at */
514 if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
515 tcg_abort();
518 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
520 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
521 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
523 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
524 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
525 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
527 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
528 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
529 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
533 static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
535 if (use_mips32r2_instructions) {
536 tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
537 } else {
538 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
539 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
543 static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
545 if (use_mips32r2_instructions) {
546 tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
547 } else {
548 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
549 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
553 static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
554 TCGReg addr, intptr_t ofs)
556 int16_t lo = ofs;
557 if (ofs != lo) {
558 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
559 if (addr != TCG_REG_ZERO) {
560 tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
562 addr = TCG_TMP0;
564 tcg_out_opc_imm(s, opc, data, addr, lo);
567 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
568 TCGReg arg1, intptr_t arg2)
570 tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
573 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
574 TCGReg arg1, intptr_t arg2)
576 tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
579 static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
581 if (val == (int16_t)val) {
582 tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
583 } else {
584 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
585 tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
589 static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
590 TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
591 bool cbh, bool is_sub)
593 TCGReg th = TCG_TMP1;
595 /* If we have a negative constant such that negating it would
596 make the high part zero, we can (usually) eliminate one insn. */
597 if (cbl && cbh && bh == -1 && bl != 0) {
598 bl = -bl;
599 bh = 0;
600 is_sub = !is_sub;
603 /* By operating on the high part first, we get to use the final
604 carry operation to move back from the temporary. */
605 if (!cbh) {
606 tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
607 } else if (bh != 0 || ah == rl) {
608 tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
609 } else {
610 th = ah;
613 /* Note that tcg optimization should eliminate the bl == 0 case. */
614 if (is_sub) {
615 if (cbl) {
616 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
617 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
618 } else {
619 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
620 tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
622 tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
623 } else {
624 if (cbl) {
625 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
626 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
627 } else if (rl == al && rl == bl) {
628 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
629 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
630 } else {
631 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
632 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
634 tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
638 /* Bit 0 set if inversion required; bit 1 set if swapping required. */
639 #define MIPS_CMP_INV 1
640 #define MIPS_CMP_SWAP 2
642 static const uint8_t mips_cmp_map[16] = {
643 [TCG_COND_LT] = 0,
644 [TCG_COND_LTU] = 0,
645 [TCG_COND_GE] = MIPS_CMP_INV,
646 [TCG_COND_GEU] = MIPS_CMP_INV,
647 [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP,
648 [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
649 [TCG_COND_GT] = MIPS_CMP_SWAP,
650 [TCG_COND_GTU] = MIPS_CMP_SWAP,
653 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
654 TCGReg arg1, TCGReg arg2)
656 MIPSInsn s_opc = OPC_SLTU;
657 int cmp_map;
659 switch (cond) {
660 case TCG_COND_EQ:
661 if (arg2 != 0) {
662 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
663 arg1 = ret;
665 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
666 break;
668 case TCG_COND_NE:
669 if (arg2 != 0) {
670 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
671 arg1 = ret;
673 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
674 break;
676 case TCG_COND_LT:
677 case TCG_COND_GE:
678 case TCG_COND_LE:
679 case TCG_COND_GT:
680 s_opc = OPC_SLT;
681 /* FALLTHRU */
683 case TCG_COND_LTU:
684 case TCG_COND_GEU:
685 case TCG_COND_LEU:
686 case TCG_COND_GTU:
687 cmp_map = mips_cmp_map[cond];
688 if (cmp_map & MIPS_CMP_SWAP) {
689 TCGReg t = arg1;
690 arg1 = arg2;
691 arg2 = t;
693 tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
694 if (cmp_map & MIPS_CMP_INV) {
695 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
697 break;
699 default:
700 tcg_abort();
701 break;
705 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
706 TCGReg arg2, TCGLabel *l)
708 static const MIPSInsn b_zero[16] = {
709 [TCG_COND_LT] = OPC_BLTZ,
710 [TCG_COND_GT] = OPC_BGTZ,
711 [TCG_COND_LE] = OPC_BLEZ,
712 [TCG_COND_GE] = OPC_BGEZ,
715 MIPSInsn s_opc = OPC_SLTU;
716 MIPSInsn b_opc;
717 int cmp_map;
719 switch (cond) {
720 case TCG_COND_EQ:
721 b_opc = OPC_BEQ;
722 break;
723 case TCG_COND_NE:
724 b_opc = OPC_BNE;
725 break;
727 case TCG_COND_LT:
728 case TCG_COND_GT:
729 case TCG_COND_LE:
730 case TCG_COND_GE:
731 if (arg2 == 0) {
732 b_opc = b_zero[cond];
733 arg2 = arg1;
734 arg1 = 0;
735 break;
737 s_opc = OPC_SLT;
738 /* FALLTHRU */
740 case TCG_COND_LTU:
741 case TCG_COND_GTU:
742 case TCG_COND_LEU:
743 case TCG_COND_GEU:
744 cmp_map = mips_cmp_map[cond];
745 if (cmp_map & MIPS_CMP_SWAP) {
746 TCGReg t = arg1;
747 arg1 = arg2;
748 arg2 = t;
750 tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
751 b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
752 arg1 = TCG_TMP0;
753 arg2 = TCG_REG_ZERO;
754 break;
756 default:
757 tcg_abort();
758 break;
761 tcg_out_opc_br(s, b_opc, arg1, arg2);
762 if (l->has_value) {
763 reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
764 } else {
765 tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
767 tcg_out_nop(s);
770 static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
771 TCGReg al, TCGReg ah,
772 TCGReg bl, TCGReg bh)
774 /* Merge highpart comparison into AH. */
775 if (bh != 0) {
776 if (ah != 0) {
777 tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
778 ah = tmp0;
779 } else {
780 ah = bh;
783 /* Merge lowpart comparison into AL. */
784 if (bl != 0) {
785 if (al != 0) {
786 tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
787 al = tmp1;
788 } else {
789 al = bl;
792 /* Merge high and low part comparisons into AL. */
793 if (ah != 0) {
794 if (al != 0) {
795 tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
796 al = tmp0;
797 } else {
798 al = ah;
801 return al;
804 static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
805 TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
807 TCGReg tmp0 = TCG_TMP0;
808 TCGReg tmp1 = ret;
810 assert(ret != TCG_TMP0);
811 if (ret == ah || ret == bh) {
812 assert(ret != TCG_TMP1);
813 tmp1 = TCG_TMP1;
816 switch (cond) {
817 case TCG_COND_EQ:
818 case TCG_COND_NE:
819 tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
820 tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
821 break;
823 default:
824 tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
825 tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
826 tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
827 tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
828 tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
829 break;
833 static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
834 TCGReg bl, TCGReg bh, TCGLabel *l)
836 TCGCond b_cond = TCG_COND_NE;
837 TCGReg tmp = TCG_TMP1;
839 /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
840 With setcond, we emit between 3 and 10 insns and only 1 branch,
841 which ought to get better branch prediction. */
842 switch (cond) {
843 case TCG_COND_EQ:
844 case TCG_COND_NE:
845 b_cond = cond;
846 tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
847 break;
849 default:
850 /* Minimize code size by preferring a compare not requiring INV. */
851 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
852 cond = tcg_invert_cond(cond);
853 b_cond = TCG_COND_EQ;
855 tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
856 break;
859 tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
862 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
863 TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
865 bool eqz = false;
867 /* If one of the values is zero, put it last to match SEL*Z instructions */
868 if (use_mips32r6_instructions && v1 == 0) {
869 v1 = v2;
870 v2 = 0;
871 cond = tcg_invert_cond(cond);
874 switch (cond) {
875 case TCG_COND_EQ:
876 eqz = true;
877 /* FALLTHRU */
878 case TCG_COND_NE:
879 if (c2 != 0) {
880 tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
881 c1 = TCG_TMP0;
883 break;
885 default:
886 /* Minimize code size by preferring a compare not requiring INV. */
887 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
888 cond = tcg_invert_cond(cond);
889 eqz = true;
891 tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
892 c1 = TCG_TMP0;
893 break;
896 if (use_mips32r6_instructions) {
897 MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ;
898 MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ;
900 if (v2 != 0) {
901 tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1);
903 tcg_out_opc_reg(s, m_opc_t, ret, v1, c1);
904 if (v2 != 0) {
905 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1);
907 } else {
908 MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN;
910 tcg_out_opc_reg(s, m_opc, ret, v1, c1);
912 /* This should be guaranteed via constraints */
913 tcg_debug_assert(v2 == ret);
917 static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
919 /* Note that the ABI requires the called function's address to be
920 loaded into T9, even if a direct branch is in range. */
921 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
923 /* But do try a direct branch, allowing the cpu better insn prefetch. */
924 if (tail) {
925 if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
926 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
928 } else {
929 if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
930 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
935 static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
937 tcg_out_call_int(s, arg, false);
938 tcg_out_nop(s);
941 #if defined(CONFIG_SOFTMMU)
942 static void * const qemu_ld_helpers[16] = {
943 [MO_UB] = helper_ret_ldub_mmu,
944 [MO_SB] = helper_ret_ldsb_mmu,
945 [MO_LEUW] = helper_le_lduw_mmu,
946 [MO_LESW] = helper_le_ldsw_mmu,
947 [MO_LEUL] = helper_le_ldul_mmu,
948 [MO_LEQ] = helper_le_ldq_mmu,
949 [MO_BEUW] = helper_be_lduw_mmu,
950 [MO_BESW] = helper_be_ldsw_mmu,
951 [MO_BEUL] = helper_be_ldul_mmu,
952 [MO_BEQ] = helper_be_ldq_mmu,
955 static void * const qemu_st_helpers[16] = {
956 [MO_UB] = helper_ret_stb_mmu,
957 [MO_LEUW] = helper_le_stw_mmu,
958 [MO_LEUL] = helper_le_stl_mmu,
959 [MO_LEQ] = helper_le_stq_mmu,
960 [MO_BEUW] = helper_be_stw_mmu,
961 [MO_BEUL] = helper_be_stl_mmu,
962 [MO_BEQ] = helper_be_stq_mmu,
965 /* Helper routines for marshalling helper function arguments into
966 * the correct registers and stack.
967 * I is where we want to put this argument, and is updated and returned
968 * for the next call. ARG is the argument itself.
970 * We provide routines for arguments which are: immediate, 32 bit
971 * value in register, 16 and 8 bit values in register (which must be zero
972 * extended before use) and 64 bit value in a lo:hi register pair.
975 static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
977 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
978 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
979 } else {
980 tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
982 return i + 1;
985 static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
987 TCGReg tmp = TCG_TMP0;
988 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
989 tmp = tcg_target_call_iarg_regs[i];
991 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
992 return tcg_out_call_iarg_reg(s, i, tmp);
995 static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
997 TCGReg tmp = TCG_TMP0;
998 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
999 tmp = tcg_target_call_iarg_regs[i];
1001 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
1002 return tcg_out_call_iarg_reg(s, i, tmp);
1005 static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
1007 TCGReg tmp = TCG_TMP0;
1008 if (arg == 0) {
1009 tmp = TCG_REG_ZERO;
1010 } else {
1011 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1012 tmp = tcg_target_call_iarg_regs[i];
1014 tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
1016 return tcg_out_call_iarg_reg(s, i, tmp);
1019 static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
1021 i = (i + 1) & ~1;
1022 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
1023 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
1024 return i;
1027 /* Perform the tlb comparison operation. The complete host address is
1028 placed in BASE. Clobbers AT, T0, A0. */
1029 static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
1030 TCGReg addrh, TCGMemOpIdx oi,
1031 tcg_insn_unit *label_ptr[2], bool is_load)
1033 TCGMemOp s_bits = get_memop(oi) & MO_SIZE;
1034 int mem_index = get_mmuidx(oi);
1035 int cmp_off
1036 = (is_load
1037 ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
1038 : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
1039 int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1041 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
1042 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1043 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
1044 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1045 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
1047 /* Compensate for very large offsets. */
1048 if (add_off >= 0x8000) {
1049 /* Most target env are smaller than 32k; none are larger than 64k.
1050 Simplify the logic here merely to offset by 0x7ff0, giving us a
1051 range just shy of 64k. Check this assumption. */
1052 QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
1053 tlb_table[NB_MMU_MODES - 1][1])
1054 > 0x7ff0 + 0x7fff);
1055 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
1056 cmp_off -= 0x7ff0;
1057 add_off -= 0x7ff0;
1060 /* Load the (low half) tlb comparator. */
1061 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0,
1062 cmp_off + (TARGET_LONG_BITS == 64 ? LO_OFF : 0));
1064 /* Mask the page bits, keeping the alignment bits to compare against.
1065 In between on 32-bit targets, load the tlb addend for the fast path. */
1066 tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
1067 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1068 if (TARGET_LONG_BITS == 32) {
1069 tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
1071 tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
1073 label_ptr[0] = s->code_ptr;
1074 tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
1076 /* Load and test the high half tlb comparator. */
1077 if (TARGET_LONG_BITS == 64) {
1078 /* delay slot */
1079 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
1081 /* Load the tlb addend for the fast path. We can't do it earlier with
1082 64-bit targets or we'll clobber a0 before reading the high half tlb
1083 comparator. */
1084 tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
1086 label_ptr[1] = s->code_ptr;
1087 tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
1090 /* delay slot */
1091 tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
1094 static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1095 TCGReg datalo, TCGReg datahi,
1096 TCGReg addrlo, TCGReg addrhi,
1097 void *raddr, tcg_insn_unit *label_ptr[2])
1099 TCGLabelQemuLdst *label = new_ldst_label(s);
1101 label->is_ld = is_ld;
1102 label->oi = oi;
1103 label->datalo_reg = datalo;
1104 label->datahi_reg = datahi;
1105 label->addrlo_reg = addrlo;
1106 label->addrhi_reg = addrhi;
1107 label->raddr = raddr;
1108 label->label_ptr[0] = label_ptr[0];
1109 if (TARGET_LONG_BITS == 64) {
1110 label->label_ptr[1] = label_ptr[1];
1114 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1116 TCGMemOpIdx oi = l->oi;
1117 TCGMemOp opc = get_memop(oi);
1118 TCGReg v0;
1119 int i;
1121 /* resolve label address */
1122 reloc_pc16(l->label_ptr[0], s->code_ptr);
1123 if (TARGET_LONG_BITS == 64) {
1124 reloc_pc16(l->label_ptr[1], s->code_ptr);
1127 i = 1;
1128 if (TARGET_LONG_BITS == 64) {
1129 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1130 } else {
1131 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1133 i = tcg_out_call_iarg_imm(s, i, oi);
1134 i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
1135 tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
1136 /* delay slot */
1137 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1139 v0 = l->datalo_reg;
1140 if ((opc & MO_SIZE) == MO_64) {
1141 /* We eliminated V0 from the possible output registers, so it
1142 cannot be clobbered here. So we must move V1 first. */
1143 if (MIPS_BE) {
1144 tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
1145 v0 = l->datahi_reg;
1146 } else {
1147 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
1151 reloc_pc16(s->code_ptr, l->raddr);
1152 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
1153 /* delay slot */
1154 tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
/*
 * Out-of-line slow path for a guest store that missed the softmmu TLB
 * fast path.  Marshals address, data (widened per MO_SIZE), memop index
 * and return address, then *tail-calls* the qemu_st helper: the helper
 * returns straight to the fast-path continuation via RA.
 */
1157 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1159 TCGMemOpIdx oi = l->oi;
1160 TCGMemOp opc = get_memop(oi);
1161 TCGMemOp s_bits = opc & MO_SIZE;
1162 int i;
1164 /* resolve label address */
1165 reloc_pc16(l->label_ptr[0], s->code_ptr);
1166 if (TARGET_LONG_BITS == 64) {
1167 reloc_pc16(l->label_ptr[1], s->code_ptr);
/* Argument slot 0 is reserved for the CPU env (delay slot, below). */
1170 i = 1;
1171 if (TARGET_LONG_BITS == 64) {
1172 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1173 } else {
1174 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
/* Pass the store data, zero-extending sub-word values as the helper
   ABI requires (reg8/reg16 variants mask accordingly). */
1176 switch (s_bits) {
1177 case MO_8:
1178 i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
1179 break;
1180 case MO_16:
1181 i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
1182 break;
1183 case MO_32:
1184 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1185 break;
1186 case MO_64:
1187 i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
1188 break;
1189 default:
1190 tcg_abort();
1192 i = tcg_out_call_iarg_imm(s, i, oi);
1194 /* Tail call to the store helper. Thus force the return address
1195 computation to take place in the return address register. */
1196 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
1197 i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
1198 tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
1199 /* delay slot */
1200 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1202 #endif
/*
 * Emit the actual load instruction(s) for a guest load whose host
 * address is already in 'base'.  Dispatch on size/signedness plus
 * MO_BSWAP; byte-swapped forms load into TCG_TMP1 and swap into the
 * destination.  64-bit loads use two 32-bit word accesses at
 * LO_OFF/HI_OFF (host-endian-dependent offsets).
 */
1204 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1205 TCGReg base, TCGMemOp opc)
1207 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1208 case MO_UB:
1209 tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
1210 break;
1211 case MO_SB:
1212 tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
1213 break;
1214 case MO_UW | MO_BSWAP:
1215 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1216 tcg_out_bswap16(s, datalo, TCG_TMP1);
1217 break;
1218 case MO_UW:
1219 tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
1220 break;
/* Sign-extended bswapped halfword: load unsigned, then swap+extend. */
1221 case MO_SW | MO_BSWAP:
1222 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1223 tcg_out_bswap16s(s, datalo, TCG_TMP1);
1224 break;
1225 case MO_SW:
1226 tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
1227 break;
1228 case MO_UL | MO_BSWAP:
1229 tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
1230 tcg_out_bswap32(s, datalo, TCG_TMP1);
1231 break;
1232 case MO_UL:
1233 tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
1234 break;
/* 64-bit bswap: the two words also swap places (HI word -> datalo). */
1235 case MO_Q | MO_BSWAP:
1236 tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
1237 tcg_out_bswap32(s, datalo, TCG_TMP1);
1238 tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
1239 tcg_out_bswap32(s, datahi, TCG_TMP1);
1240 break;
1241 case MO_Q:
1242 tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
1243 tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
1244 break;
1245 default:
1246 tcg_abort();
/*
 * Top-level emitter for INDEX_op_qemu_ld_i32/_i64.  Decodes the TCG
 * args (data reg(s), address reg(s), memop index), then either goes
 * through the softmmu TLB lookup + slow-path label, or (user mode)
 * adds guest_base to the address and loads directly.
 */
1250 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1252 TCGReg addr_regl, addr_regh __attribute__((unused));
1253 TCGReg data_regl, data_regh;
1254 TCGMemOpIdx oi;
1255 TCGMemOp opc;
1256 #if defined(CONFIG_SOFTMMU)
1257 tcg_insn_unit *label_ptr[2];
1258 #endif
1259 /* Note that we've eliminated V0 from the output registers,
1260 so we won't overwrite the base register during loading. */
1261 TCGReg base = TCG_REG_V0;
1263 data_regl = *args++;
1264 data_regh = (is_64 ? *args++ : 0);
1265 addr_regl = *args++;
1266 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1267 oi = *args++;
1268 opc = get_memop(oi);
1270 #if defined(CONFIG_SOFTMMU)
1271 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
1272 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1273 add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
1274 s->code_ptr, label_ptr);
1275 #else
/* User mode: fold guest_base into the address.  Reuse addr_regl as the
   base only when it cannot be clobbered by the load's first word. */
1276 if (guest_base == 0 && data_regl != addr_regl) {
1277 base = addr_regl;
1278 } else if (guest_base == (int16_t)guest_base) {
1279 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
1280 } else {
1281 tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
1282 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1284 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1285 #endif
/*
 * Emit the actual store instruction(s) for a guest store whose host
 * address is already in 'base'.  MO_BSWAP variants swap the data into
 * TCG_TMP1 first, then fall through to the plain store of that temp.
 */
1288 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1289 TCGReg base, TCGMemOp opc)
1291 switch (opc & (MO_SIZE | MO_BSWAP)) {
1292 case MO_8:
1293 tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
1294 break;
/* Mask to 16 bits before swapping so the swapped halfword is clean. */
1296 case MO_16 | MO_BSWAP:
1297 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
1298 tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
1299 datalo = TCG_TMP1;
1300 /* FALLTHRU */
1301 case MO_16:
1302 tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
1303 break;
1305 case MO_32 | MO_BSWAP:
1306 tcg_out_bswap32(s, TCG_TMP1, datalo);
1307 datalo = TCG_TMP1;
1308 /* FALLTHRU */
1309 case MO_32:
1310 tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
1311 break;
/* 64-bit bswap: each word is swapped AND the two words trade offsets. */
1313 case MO_64 | MO_BSWAP:
1314 tcg_out_bswap32(s, TCG_TMP1, datalo);
1315 tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
1316 tcg_out_bswap32(s, TCG_TMP1, datahi);
1317 tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
1318 break;
1319 case MO_64:
1320 tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
1321 tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
1322 break;
1324 default:
1325 tcg_abort();
/*
 * Top-level emitter for INDEX_op_qemu_st_i32/_i64: decode TCG args,
 * then softmmu TLB lookup + direct store (with slow-path label), or in
 * user mode add guest_base and store directly.
 */
1329 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1331 TCGReg addr_regl, addr_regh __attribute__((unused));
1332 TCGReg data_regl, data_regh, base;
1333 TCGMemOpIdx oi;
1334 TCGMemOp opc;
1335 #if defined(CONFIG_SOFTMMU)
1336 tcg_insn_unit *label_ptr[2];
1337 #endif
1339 data_regl = *args++;
1340 data_regh = (is_64 ? *args++ : 0);
1341 addr_regl = *args++;
1342 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1343 oi = *args++;
1344 opc = get_memop(oi);
1346 #if defined(CONFIG_SOFTMMU)
1347 /* Note that we eliminated the helper's address argument,
1348 so we can reuse that for the base. */
1349 base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
1350 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
1351 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1352 add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
1353 s->code_ptr, label_ptr);
1354 #else
/* User mode: stores never clobber the address register, so reuse it
   directly when guest_base is zero; otherwise build base in A0. */
1355 if (guest_base == 0) {
1356 base = addr_regl;
1357 } else {
1358 base = TCG_REG_A0;
1359 if (guest_base == (int16_t)guest_base) {
1360 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
1361 } else {
1362 tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
1363 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1366 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1367 #endif
/*
 * Main TCG opcode dispatcher: translate one TCG op into MIPS machine
 * code.  Common pattern: pick instruction(s) into i1/i2, then jump to a
 * shared emit label (do_ldst, do_binary, do_shift, do_hilo1, ...).
 * a0..a2 are the first three TCG args; c2 says whether a2 is constant.
 */
1370 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1371 const TCGArg *args, const int *const_args)
1373 MIPSInsn i1, i2;
1374 TCGArg a0, a1, a2;
1375 int c2;
1377 a0 = args[0];
1378 a1 = args[1];
1379 a2 = args[2];
1380 c2 = const_args[2];
1382 switch (opc) {
/* exit_tb: return the TB pointer in V0 (split into lui-part via movi
   and ORI of the low 16 bits, which rides in the jump's delay slot). */
1383 case INDEX_op_exit_tb:
1385 TCGReg b0 = TCG_REG_ZERO;
1387 if (a0 & ~0xffff) {
1388 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
1389 b0 = TCG_REG_V0;
1391 if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
1392 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
1393 (uintptr_t)tb_ret_addr);
1394 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1396 tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
1398 break;
1399 case INDEX_op_goto_tb:
1400 if (s->tb_jmp_offset) {
1401 /* direct jump method */
1402 s->tb_jmp_offset[a0] = tcg_current_code_size(s);
1403 /* Avoid clobbering the address during retranslation. */
1404 tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
1405 } else {
1406 /* indirect jump method */
1407 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
1408 (uintptr_t)(s->tb_next + a0));
1409 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1411 tcg_out_nop(s);
1412 s->tb_next_offset[a0] = tcg_current_code_size(s);
1413 break;
/* Unconditional branch == always-true compare of zero with zero. */
1414 case INDEX_op_br:
1415 tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
1416 arg_label(a0));
1417 break;
1419 case INDEX_op_ld8u_i32:
1420 i1 = OPC_LBU;
1421 goto do_ldst;
1422 case INDEX_op_ld8s_i32:
1423 i1 = OPC_LB;
1424 goto do_ldst;
1425 case INDEX_op_ld16u_i32:
1426 i1 = OPC_LHU;
1427 goto do_ldst;
1428 case INDEX_op_ld16s_i32:
1429 i1 = OPC_LH;
1430 goto do_ldst;
1431 case INDEX_op_ld_i32:
1432 i1 = OPC_LW;
1433 goto do_ldst;
1434 case INDEX_op_st8_i32:
1435 i1 = OPC_SB;
1436 goto do_ldst;
1437 case INDEX_op_st16_i32:
1438 i1 = OPC_SH;
1439 goto do_ldst;
1440 case INDEX_op_st_i32:
1441 i1 = OPC_SW;
1442 do_ldst:
1443 tcg_out_ldst(s, i1, a0, a1, a2);
1444 break;
/* Binary ops: i1 = register form, i2 = immediate form (used if c2). */
1446 case INDEX_op_add_i32:
1447 i1 = OPC_ADDU, i2 = OPC_ADDIU;
1448 goto do_binary;
1449 case INDEX_op_or_i32:
1450 i1 = OPC_OR, i2 = OPC_ORI;
1451 goto do_binary;
1452 case INDEX_op_xor_i32:
1453 i1 = OPC_XOR, i2 = OPC_XORI;
1454 do_binary:
1455 if (c2) {
1456 tcg_out_opc_imm(s, i2, a0, a1, a2);
1457 break;
1459 do_binaryv:
1460 tcg_out_opc_reg(s, i1, a0, a1, a2);
1461 break;
/* sub with constant: no SUBI on MIPS, use ADDIU of the negation. */
1463 case INDEX_op_sub_i32:
1464 if (c2) {
1465 tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
1466 break;
1468 i1 = OPC_SUBU;
1469 goto do_binary;
/* and with a non-16-bit mask: only 2^n-1 masks reach here (see the
   "rIK" constraint), emitted as a single EXT on mips32r2. */
1470 case INDEX_op_and_i32:
1471 if (c2 && a2 != (uint16_t)a2) {
1472 int msb = ctz32(~a2) - 1;
1473 assert(use_mips32r2_instructions);
1474 assert(is_p2m1(a2));
1475 tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
1476 break;
1478 i1 = OPC_AND, i2 = OPC_ANDI;
1479 goto do_binary;
1480 case INDEX_op_nor_i32:
1481 i1 = OPC_NOR;
1482 goto do_binaryv;
/* mul/div family: prefer single-instruction r2/r6 forms; otherwise
   use the HI/LO pair (i1 computes, i2 = MFLO/MFHI fetches the half). */
1484 case INDEX_op_mul_i32:
1485 if (use_mips32_instructions) {
1486 tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
1487 break;
1489 i1 = OPC_MULT, i2 = OPC_MFLO;
1490 goto do_hilo1;
1491 case INDEX_op_mulsh_i32:
1492 if (use_mips32r6_instructions) {
1493 tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
1494 break;
1496 i1 = OPC_MULT, i2 = OPC_MFHI;
1497 goto do_hilo1;
1498 case INDEX_op_muluh_i32:
1499 if (use_mips32r6_instructions) {
1500 tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
1501 break;
1503 i1 = OPC_MULTU, i2 = OPC_MFHI;
1504 goto do_hilo1;
1505 case INDEX_op_div_i32:
1506 if (use_mips32r6_instructions) {
1507 tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
1508 break;
1510 i1 = OPC_DIV, i2 = OPC_MFLO;
1511 goto do_hilo1;
1512 case INDEX_op_divu_i32:
1513 if (use_mips32r6_instructions) {
1514 tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
1515 break;
1517 i1 = OPC_DIVU, i2 = OPC_MFLO;
1518 goto do_hilo1;
1519 case INDEX_op_rem_i32:
1520 if (use_mips32r6_instructions) {
1521 tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
1522 break;
1524 i1 = OPC_DIV, i2 = OPC_MFHI;
1525 goto do_hilo1;
1526 case INDEX_op_remu_i32:
1527 if (use_mips32r6_instructions) {
1528 tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
1529 break;
1531 i1 = OPC_DIVU, i2 = OPC_MFHI;
1532 do_hilo1:
1533 tcg_out_opc_reg(s, i1, 0, a1, a2);
1534 tcg_out_opc_reg(s, i2, a0, 0, 0);
1535 break;
/* muls2/mulu2: one MULT(U), then read both halves from LO and HI. */
1537 case INDEX_op_muls2_i32:
1538 i1 = OPC_MULT;
1539 goto do_hilo2;
1540 case INDEX_op_mulu2_i32:
1541 i1 = OPC_MULTU;
1542 do_hilo2:
1543 tcg_out_opc_reg(s, i1, 0, a2, args[3]);
1544 tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
1545 tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
1546 break;
/* Unary ops encoded as reg-form with $zero as the unused operand
   (not = NOR with zero; WSBH/SEB/SEH take the source in rt). */
1548 case INDEX_op_not_i32:
1549 i1 = OPC_NOR;
1550 goto do_unary;
1551 case INDEX_op_bswap16_i32:
1552 i1 = OPC_WSBH;
1553 goto do_unary;
1554 case INDEX_op_ext8s_i32:
1555 i1 = OPC_SEB;
1556 goto do_unary;
1557 case INDEX_op_ext16s_i32:
1558 i1 = OPC_SEH;
1559 do_unary:
1560 tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
1561 break;
/* Shifts: i1 = variable-amount form, i2 = immediate (shamt) form. */
1563 case INDEX_op_sar_i32:
1564 i1 = OPC_SRAV, i2 = OPC_SRA;
1565 goto do_shift;
1566 case INDEX_op_shl_i32:
1567 i1 = OPC_SLLV, i2 = OPC_SLL;
1568 goto do_shift;
1569 case INDEX_op_shr_i32:
1570 i1 = OPC_SRLV, i2 = OPC_SRL;
1571 goto do_shift;
1572 case INDEX_op_rotr_i32:
1573 i1 = OPC_ROTRV, i2 = OPC_ROTR;
1574 do_shift:
1575 if (c2) {
1576 tcg_out_opc_sa(s, i2, a0, a1, a2);
1577 } else {
1578 tcg_out_opc_reg(s, i1, a0, a2, a1);
1580 break;
/* rotl = rotr by (32 - n); variable case negates the count first. */
1581 case INDEX_op_rotl_i32:
1582 if (c2) {
1583 tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
1584 } else {
1585 tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
1586 tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
1588 break;
/* bswap32 = swap bytes within halfwords, then rotate by 16. */
1590 case INDEX_op_bswap32_i32:
1591 tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
1592 tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
1593 break;
1595 case INDEX_op_deposit_i32:
1596 tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
1597 break;
1599 case INDEX_op_brcond_i32:
1600 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1601 break;
1602 case INDEX_op_brcond2_i32:
1603 tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
1604 break;
1606 case INDEX_op_movcond_i32:
1607 tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
1608 break;
1610 case INDEX_op_setcond_i32:
1611 tcg_out_setcond(s, args[3], a0, a1, a2);
1612 break;
1613 case INDEX_op_setcond2_i32:
1614 tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
1615 break;
1617 case INDEX_op_qemu_ld_i32:
1618 tcg_out_qemu_ld(s, args, false);
1619 break;
1620 case INDEX_op_qemu_ld_i64:
1621 tcg_out_qemu_ld(s, args, true);
1622 break;
1623 case INDEX_op_qemu_st_i32:
1624 tcg_out_qemu_st(s, args, false);
1625 break;
1626 case INDEX_op_qemu_st_i64:
1627 tcg_out_qemu_st(s, args, true);
1628 break;
1630 case INDEX_op_add2_i32:
1631 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1632 const_args[4], const_args[5], false);
1633 break;
1634 case INDEX_op_sub2_i32:
1635 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1636 const_args[4], const_args[5], true);
1637 break;
1639 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1640 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
1641 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1642 default:
1643 tcg_abort();
/*
 * Operand constraint table for the register allocator, one entry per
 * supported TCG opcode.  Letters: "r" = any register, "Z" = the value
 * zero may use $zero, "I"/"J"/"N"/"K" = immediate classes, "i" = any
 * immediate, "0" = must match output, "L"/"l"/"S" = qemu_ld/st operand
 * classes (constrained to avoid the helper-call argument registers).
 */
1647 static const TCGTargetOpDef mips_op_defs[] = {
1648 { INDEX_op_exit_tb, { } },
1649 { INDEX_op_goto_tb, { } },
1650 { INDEX_op_br, { } },
1652 { INDEX_op_ld8u_i32, { "r", "r" } },
1653 { INDEX_op_ld8s_i32, { "r", "r" } },
1654 { INDEX_op_ld16u_i32, { "r", "r" } },
1655 { INDEX_op_ld16s_i32, { "r", "r" } },
1656 { INDEX_op_ld_i32, { "r", "r" } },
1657 { INDEX_op_st8_i32, { "rZ", "r" } },
1658 { INDEX_op_st16_i32, { "rZ", "r" } },
1659 { INDEX_op_st_i32, { "rZ", "r" } },
1661 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1662 { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
/* r6 removed the HI/LO registers, so the two-output mul forms exist
   only on pre-r6 ISAs. */
1663 #if !use_mips32r6_instructions
1664 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
1665 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
1666 #endif
1667 { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
1668 { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
1669 { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
1670 { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
1671 { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
1672 { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
1673 { INDEX_op_sub_i32, { "r", "rZ", "rN" } },
1675 { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
1676 { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
1677 { INDEX_op_not_i32, { "r", "rZ" } },
1678 { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
1679 { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
1681 { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
1682 { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
1683 { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
1684 { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
1685 { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
1687 { INDEX_op_bswap16_i32, { "r", "r" } },
1688 { INDEX_op_bswap32_i32, { "r", "r" } },
1690 { INDEX_op_ext8s_i32, { "r", "rZ" } },
1691 { INDEX_op_ext16s_i32, { "r", "rZ" } },
1693 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
1695 { INDEX_op_brcond_i32, { "rZ", "rZ" } },
/* r6 SELNEZ/SELEQZ does not need the old-value operand tied to the
   output, unlike pre-r6 MOVN/MOVZ ("0" constraint). */
1696 #if use_mips32r6_instructions
1697 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
1698 #else
1699 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
1700 #endif
1701 { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
1702 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
1704 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
1705 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
1706 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
/* 64-bit guests need a second register for the high address word. */
1708 #if TARGET_LONG_BITS == 32
1709 { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
1710 { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
1711 { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
1712 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
1713 #else
1714 { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
1715 { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
1716 { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
1717 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
1718 #endif
1719 { -1 },
/*
 * Registers the prologue must save and the epilogue restore, per the
 * o32 ABI: the s-registers plus RA.  The prologue/epilogue loops below
 * iterate this array in order when laying out the stack frame.
 */
1722 static int tcg_target_callee_save_regs[] = {
1723 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
1724 TCG_REG_S1,
1725 TCG_REG_S2,
1726 TCG_REG_S3,
1727 TCG_REG_S4,
1728 TCG_REG_S5,
1729 TCG_REG_S6,
1730 TCG_REG_S7,
1731 TCG_REG_S8,
1732 TCG_REG_RA, /* should be last for ABI compliance */
1735 /* The Linux kernel doesn't provide any information about the available
1736 instruction set. Probe it using a signal handler. */
/*
 * Each capability flag may be predefined as a compile-time macro (by
 * tcg-target.h when the build targets a known ISA level); the #ifndef
 * guards define a runtime-probed variable only in the generic case.
 */
1739 #ifndef use_movnz_instructions
1740 bool use_movnz_instructions = false;
1741 #endif
1743 #ifndef use_mips32_instructions
1744 bool use_mips32_instructions = false;
1745 #endif
1747 #ifndef use_mips32r2_instructions
1748 bool use_mips32r2_instructions = false;
1749 #endif
/* Set by sigill_handler; sig_atomic_t because it is written from a
   signal handler and read after sigaction returns control. */
1751 static volatile sig_atomic_t got_sigill;
/*
 * SIGILL handler used during ISA probing: record that the probed
 * instruction trapped, then advance the PC past the 4-byte instruction
 * so execution resumes after it.
 */
1753 static void sigill_handler(int signo, siginfo_t *si, void *data)
1755 /* Skip the faulty instruction */
1756 ucontext_t *uc = (ucontext_t *)data;
1757 uc->uc_mcontext.pc += 4;
1759 got_sigill = 1;
/*
 * Probe the host CPU's ISA level at startup by executing one
 * representative instruction per feature under a temporary SIGILL
 * handler; a trap means the feature is absent.  Each probe is compiled
 * only when the feature was not already fixed at build time (#ifndef).
 */
1762 static void tcg_target_detect_isa(void)
1764 struct sigaction sa_old, sa_new;
1766 memset(&sa_new, 0, sizeof(sa_new));
1767 sa_new.sa_flags = SA_SIGINFO;
1768 sa_new.sa_sigaction = sigill_handler;
1769 sigaction(SIGILL, &sa_new, &sa_old);
1771 /* Probe for movn/movz, necessary to implement movcond. */
1772 #ifndef use_movnz_instructions
1773 got_sigill = 0;
1774 asm volatile(".set push\n"
1775 ".set mips32\n"
1776 "movn $zero, $zero, $zero\n"
1777 "movz $zero, $zero, $zero\n"
1778 ".set pop\n"
1779 : : : );
1780 use_movnz_instructions = !got_sigill;
1781 #endif
1783 /* Probe for MIPS32 instructions. As no subsetting is allowed
1784 by the specification, it is only necessary to probe for one
1785 of the instructions. */
1786 #ifndef use_mips32_instructions
1787 got_sigill = 0;
1788 asm volatile(".set push\n"
1789 ".set mips32\n"
1790 "mul $zero, $zero\n"
1791 ".set pop\n"
1792 : : : );
1793 use_mips32_instructions = !got_sigill;
1794 #endif
1796 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
1797 available. As no subsetting is allowed by the specification,
1798 it is only necessary to probe for one of the instructions. */
1799 #ifndef use_mips32r2_instructions
1800 if (use_mips32_instructions) {
1801 got_sigill = 0;
1802 asm volatile(".set push\n"
1803 ".set mips32r2\n"
1804 "seb $zero, $zero\n"
1805 ".set pop\n"
1806 : : : );
1807 use_mips32r2_instructions = !got_sigill;
1809 #endif
/* Restore the process's original SIGILL disposition. */
1811 sigaction(SIGILL, &sa_old, NULL);
1814 /* Generate global QEMU prologue and epilogue code */
/*
 * Emit the code that enters and leaves generated TBs: allocate a
 * stack frame (call-args area + callee-saved spill slots + TCG temp
 * buffer), save the callee-saved registers, jump into the TB whose
 * address arrives in the second argument register, and on return
 * (tb_ret_addr) restore registers and pop the frame.
 */
1815 static void tcg_target_qemu_prologue(TCGContext *s)
1817 int i, frame_size;
1819 /* reserve some stack space, also for TCG temps. */
1820 frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1821 + TCG_STATIC_CALL_ARGS_SIZE
1822 + CPU_TEMP_BUF_NLONGS * sizeof(long);
1823 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1824 ~(TCG_TARGET_STACK_ALIGN - 1);
1825 tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1826 + TCG_STATIC_CALL_ARGS_SIZE,
1827 CPU_TEMP_BUF_NLONGS * sizeof(long));
1829 /* TB prologue */
1830 tcg_out_addi(s, TCG_REG_SP, -frame_size);
1831 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1832 tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1833 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
1836 /* Call generated code */
/* JR to the TB entry point; the delay slot copies arg0 (env) into
   TCG_AREG0 (= S0) for the generated code to use. */
1837 tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
1838 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
/* Every exit_tb jumps back here. */
1839 tb_ret_addr = s->code_ptr;
1841 /* TB epilogue */
1842 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1843 tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1844 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
/* Return to the caller; the stack pop rides in the delay slot. */
1847 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
1848 tcg_out_addi(s, TCG_REG_SP, frame_size);
/*
 * One-time backend initialisation: probe the host ISA, declare which
 * registers exist / are call-clobbered per the o32 ABI, reserve the
 * registers TCG must never allocate, and register the constraint table.
 */
1851 static void tcg_target_init(TCGContext *s)
1853 tcg_target_detect_isa();
1854 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
/* Caller-saved set: v0-v1, a0-a3, t0-t9 (everything but s-regs etc.). */
1855 tcg_regset_set(tcg_target_call_clobber_regs,
1856 (1 << TCG_REG_V0) |
1857 (1 << TCG_REG_V1) |
1858 (1 << TCG_REG_A0) |
1859 (1 << TCG_REG_A1) |
1860 (1 << TCG_REG_A2) |
1861 (1 << TCG_REG_A3) |
1862 (1 << TCG_REG_T0) |
1863 (1 << TCG_REG_T1) |
1864 (1 << TCG_REG_T2) |
1865 (1 << TCG_REG_T3) |
1866 (1 << TCG_REG_T4) |
1867 (1 << TCG_REG_T5) |
1868 (1 << TCG_REG_T6) |
1869 (1 << TCG_REG_T7) |
1870 (1 << TCG_REG_T8) |
1871 (1 << TCG_REG_T9));
1873 tcg_regset_clear(s->reserved_regs);
1874 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
1875 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
1876 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
1877 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */
1878 tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */
1879 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
1880 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
1881 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
1883 tcg_add_target_add_op_defs(mips_op_defs);
1886 void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1888 uint32_t *ptr = (uint32_t *)jmp_addr;
1889 *ptr = deposit32(*ptr, 0, 26, addr >> 2);
1890 flush_icache_range(jmp_addr, jmp_addr + 4);