tcg/mips/tcg-target.c

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "tcg-be-ldst.h"

#ifdef HOST_WORDS_BIGENDIAN
# define MIPS_BE  1
#else
# define MIPS_BE  0
#endif

#define LO_OFF    (MIPS_BE * 4)
#define HI_OFF    (4 - LO_OFF)

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "at",
    "v0",
    "v1",
    "a0",
    "a1",
    "a2",
    "a3",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "t8",
    "t9",
    "k0",
    "k1",
    "gp",
    "sp",
    "s8",
    "ra",
};
#endif

#define TCG_TMP0  TCG_REG_AT
#define TCG_TMP1  TCG_REG_T9

/* check if we really need so many registers :P */
static const TCGReg tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_S0,
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,

    /* Call clobbered registers.  */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,
    TCG_REG_T9,
    TCG_REG_V1,
    TCG_REG_V0,

    /* Argument registers, opposite order of allocation.  */
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const TCGReg tcg_target_call_iarg_regs[4] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3
};

static const TCGReg tcg_target_call_oarg_regs[2] = {
    TCG_REG_V0,
    TCG_REG_V1
};

static tcg_insn_unit *tb_ret_addr;

static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    /* Let the compiler perform the right-shift as part of the arithmetic.  */
    ptrdiff_t disp = target - (pc + 1);
    assert(disp == (int16_t)disp);
    return disp & 0xffff;
}

static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
}

static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
    return ((uintptr_t)target >> 2) & 0x3ffffff;
}

static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    assert(type == R_MIPS_PC16);
    assert(addend == 0);
    reloc_pc16(code_ptr, (tcg_insn_unit *)value);
}
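
/* Worked example of the PC16 relocation: for a branch at 0x1000 targeting
   0x1010, disp = (0x1010 - 0x1004) / 4 = 3 instruction units.  The CPU
   sign-extends the 16-bit field, shifts it left by 2 and adds it to the
   address of the delay slot, so the encoding reaches roughly +/-128KiB
   around the branch.  */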

#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff.  */
#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767 */
#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1.  */
#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767 */

static inline bool is_p2m1(tcg_target_long val)
{
    return val && ((val + 1) & val) == 0;
}
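
/* Example: 0x00ff and 0x7fff are powers of two minus one (a contiguous run
   of low-order ones), so an AND with them can be emitted as a single EXT
   on MIPS32r2 (see the 'K' constraint and INDEX_op_and_i32 below);
   0x0ff0 is not.  */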

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld output arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
        break;
    case 'l': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 64) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 32) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        } else {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
        }
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_P2M1;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N16;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
        return 1;
    } else if ((ct & TCG_CT_CONST_P2M1)
               && use_mips32r2_instructions && is_p2m1(val)) {
        return 1;
    }
    return 0;
}
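
/* Note that TCG_CT_CONST_N16 excludes -32768, presumably so that any value
   matching the 'N' constraint can be negated and still fit in the signed
   16-bit immediate of ADDIU; that is how sub_i32 and add2/sub2 with
   constant operands are emitted below.  */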

/* instruction opcodes */
typedef enum {
    OPC_J        = 0x02 << 26,
    OPC_JAL      = 0x03 << 26,
    OPC_BEQ      = 0x04 << 26,
    OPC_BNE      = 0x05 << 26,
    OPC_BLEZ     = 0x06 << 26,
    OPC_BGTZ     = 0x07 << 26,
    OPC_ADDIU    = 0x09 << 26,
    OPC_SLTI     = 0x0A << 26,
    OPC_SLTIU    = 0x0B << 26,
    OPC_ANDI     = 0x0C << 26,
    OPC_ORI      = 0x0D << 26,
    OPC_XORI     = 0x0E << 26,
    OPC_LUI      = 0x0F << 26,
    OPC_LB       = 0x20 << 26,
    OPC_LH       = 0x21 << 26,
    OPC_LW       = 0x23 << 26,
    OPC_LBU      = 0x24 << 26,
    OPC_LHU      = 0x25 << 26,
    OPC_LWU      = 0x27 << 26,
    OPC_SB       = 0x28 << 26,
    OPC_SH       = 0x29 << 26,
    OPC_SW       = 0x2B << 26,

    OPC_SPECIAL  = 0x00 << 26,
    OPC_SLL      = OPC_SPECIAL | 0x00,
    OPC_SRL      = OPC_SPECIAL | 0x02,
    OPC_ROTR     = OPC_SPECIAL | (0x01 << 21) | 0x02,
    OPC_SRA      = OPC_SPECIAL | 0x03,
    OPC_SLLV     = OPC_SPECIAL | 0x04,
    OPC_SRLV     = OPC_SPECIAL | 0x06,
    OPC_ROTRV    = OPC_SPECIAL | (0x01 << 6) | 0x06,
    OPC_SRAV     = OPC_SPECIAL | 0x07,
    OPC_JR_R5    = OPC_SPECIAL | 0x08,
    OPC_JALR     = OPC_SPECIAL | 0x09,
    OPC_MOVZ     = OPC_SPECIAL | 0x0A,
    OPC_MOVN     = OPC_SPECIAL | 0x0B,
    OPC_MFHI     = OPC_SPECIAL | 0x10,
    OPC_MFLO     = OPC_SPECIAL | 0x12,
    OPC_MULT     = OPC_SPECIAL | 0x18,
    OPC_MUL_R6   = OPC_SPECIAL | (0x02 << 6) | 0x18,
    OPC_MUH      = OPC_SPECIAL | (0x03 << 6) | 0x18,
    OPC_MULTU    = OPC_SPECIAL | 0x19,
    OPC_MULU     = OPC_SPECIAL | (0x02 << 6) | 0x19,
    OPC_MUHU     = OPC_SPECIAL | (0x03 << 6) | 0x19,
    OPC_DIV      = OPC_SPECIAL | 0x1A,
    OPC_DIV_R6   = OPC_SPECIAL | (0x02 << 6) | 0x1A,
    OPC_MOD      = OPC_SPECIAL | (0x03 << 6) | 0x1A,
    OPC_DIVU     = OPC_SPECIAL | 0x1B,
    OPC_DIVU_R6  = OPC_SPECIAL | (0x02 << 6) | 0x1B,
    OPC_MODU     = OPC_SPECIAL | (0x03 << 6) | 0x1B,
    OPC_ADDU     = OPC_SPECIAL | 0x21,
    OPC_SUBU     = OPC_SPECIAL | 0x23,
    OPC_AND      = OPC_SPECIAL | 0x24,
    OPC_OR       = OPC_SPECIAL | 0x25,
    OPC_XOR      = OPC_SPECIAL | 0x26,
    OPC_NOR      = OPC_SPECIAL | 0x27,
    OPC_SLT      = OPC_SPECIAL | 0x2A,
    OPC_SLTU     = OPC_SPECIAL | 0x2B,
    OPC_SELEQZ   = OPC_SPECIAL | 0x35,
    OPC_SELNEZ   = OPC_SPECIAL | 0x37,

    OPC_REGIMM   = 0x01 << 26,
    OPC_BLTZ     = OPC_REGIMM | (0x00 << 16),
    OPC_BGEZ     = OPC_REGIMM | (0x01 << 16),

    OPC_SPECIAL2 = 0x1c << 26,
    OPC_MUL_R5   = OPC_SPECIAL2 | 0x002,

    OPC_SPECIAL3 = 0x1f << 26,
    OPC_EXT      = OPC_SPECIAL3 | 0x000,
    OPC_INS      = OPC_SPECIAL3 | 0x004,
    OPC_WSBH     = OPC_SPECIAL3 | 0x0a0,
    OPC_SEB      = OPC_SPECIAL3 | 0x420,
    OPC_SEH      = OPC_SPECIAL3 | 0x620,

    /* MIPS r6 doesn't have JR, JALR should be used instead */
    OPC_JR       = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,

    /*
     * MIPS r6 replaces MUL with an alternative encoding which is
     * backwards-compatible at the assembly level.
     */
    OPC_MUL      = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5,
} MIPSInsn;
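
/* MIPS instruction words are 32 bits.  R-type encodings place the major
   opcode in bits 31:26, rs in 25:21, rt in 20:16, rd in 15:11, the shift
   amount in 10:6 and the function code in 5:0; I-type encodings replace
   the low 16 bits with an immediate.  The emitters below assemble those
   fields one OR at a time.  */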

/*
 * Type reg
 */
static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
                                   TCGReg rd, TCGReg rs, TCGReg rt)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    tcg_out32(s, inst);
}

/*
 * Type immediate
 */
static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
                                   TCGReg rt, TCGReg rs, TCGArg imm)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (imm & 0xffff);
    tcg_out32(s, inst);
}

/*
 * Type bitfield
 */
static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
                                  TCGReg rs, int msb, int lsb)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (msb & 0x1F) << 11;
    inst |= (lsb & 0x1F) << 6;
    tcg_out32(s, inst);
}

/*
 * Type branch
 */
static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
                                  TCGReg rt, TCGReg rs)
{
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensures that caches and
       memory are kept coherent during retranslation. */
    uint16_t offset = (uint16_t)*s->code_ptr;

    tcg_out_opc_imm(s, opc, rt, rs, offset);
}

/*
 * Type sa
 */
static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
                                  TCGReg rd, TCGReg rt, TCGArg sa)
{
    int32_t inst;

    inst = opc;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    inst |= (sa & 0x1F) << 6;
    tcg_out32(s, inst);
}

/*
 * Type jump.
 * Returns true if the branch was in range and the insn was emitted.
 */
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
{
    uintptr_t dest = (uintptr_t)target;
    uintptr_t from = (uintptr_t)s->code_ptr + 4;
    int32_t inst;

    /* The pc-region branch happens within the 256MB region of
       the delay slot (thus the +4).  */
    if ((from ^ dest) & -(1 << 28)) {
        return false;
    }
    assert((dest & 3) == 0);

    inst = opc;
    inst |= (dest >> 2) & 0x3ffffff;
    tcg_out32(s, inst);
    return true;
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, 0);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (ret != arg) {
        tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg reg, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
    } else if (arg == (uint16_t)arg) {
        tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
    } else {
        tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
        if (arg & 0xffff) {
            tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
        }
    }
}
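
/* Example: loading 0x12345678 emits "lui reg, 0x1234" followed by
   "ori reg, reg, 0x5678"; values that already fit in a signed or
   unsigned 16-bit immediate need only the single ADDIU or ORI.  */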

static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
    } else {
        /* ret and arg can't be register at */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
    } else {
        /* ret and arg can't be register at */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
    } else {
        /* ret and arg must be different and can't be register at */
        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
        tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}
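
/* Worked example of the pre-r2 bswap32 sequence for arg = 0xAABBCCDD:
   the SLL yields 0xDD000000, the SRL/OR step merges in 0x000000AA, and
   the two ANDI/shift/OR steps move 0xCC and 0xBB into bits 23:16 and
   15:8 respectively, producing 0xDDCCBBAA.  */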

static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
    }
}

static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
    }
}

static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
                         TCGReg addr, intptr_t ofs)
{
    int16_t lo = ofs;
    if (ofs != lo) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
        if (addr != TCG_REG_ZERO) {
            tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
        }
        addr = TCG_TMP0;
    }
    tcg_out_opc_imm(s, opc, data, addr, lo);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
}

static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
{
    if (val == (int16_t)val) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
        tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
    }
}

static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
                            bool cbh, bool is_sub)
{
    TCGReg th = TCG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
        }
        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
    }
}
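
/* The carry/borrow itself comes from an unsigned comparison: after
   "addu rl, al, bl", SLTU(rl < bl) is 1 exactly when the 32-bit add
   wrapped around, and for subtraction SLTU(al < bl) is 1 exactly when a
   borrow is needed.  MIPS has no flags register, so this is the idiomatic
   way to build 64-bit arithmetic out of 32-bit pieces.  */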

/* Bit 0 set if inversion required; bit 1 set if swapping required.  */
#define MIPS_CMP_INV  1
#define MIPS_CMP_SWAP 2

static const uint8_t mips_cmp_map[16] = {
    [TCG_COND_LT]  = 0,
    [TCG_COND_LTU] = 0,
    [TCG_COND_GE]  = MIPS_CMP_INV,
    [TCG_COND_GEU] = MIPS_CMP_INV,
    [TCG_COND_LE]  = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_GT]  = MIPS_CMP_SWAP,
    [TCG_COND_GTU] = MIPS_CMP_SWAP,
};

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    MIPSInsn s_opc = OPC_SLTU;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        break;

    case TCG_COND_NE:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
        if (cmp_map & MIPS_CMP_INV) {
            tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        }
        break;

    default:
        tcg_abort();
        break;
    }
}

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    static const MIPSInsn b_zero[16] = {
        [TCG_COND_LT] = OPC_BLTZ,
        [TCG_COND_GT] = OPC_BGTZ,
        [TCG_COND_LE] = OPC_BLEZ,
        [TCG_COND_GE] = OPC_BGEZ,
    };

    MIPSInsn s_opc = OPC_SLTU;
    MIPSInsn b_opc;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        b_opc = OPC_BEQ;
        break;
    case TCG_COND_NE:
        b_opc = OPC_BNE;
        break;

    case TCG_COND_LT:
    case TCG_COND_GT:
    case TCG_COND_LE:
    case TCG_COND_GE:
        if (arg2 == 0) {
            b_opc = b_zero[cond];
            arg2 = arg1;
            arg1 = 0;
            break;
        }
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GTU:
    case TCG_COND_LEU:
    case TCG_COND_GEU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
        b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
        arg1 = TCG_TMP0;
        arg2 = TCG_REG_ZERO;
        break;

    default:
        tcg_abort();
        break;
    }

    tcg_out_opc_br(s, b_opc, arg1, arg2);
    if (l->has_value) {
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
    }
    tcg_out_nop(s);
}
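
/* The trailing nop above fills the architectural branch delay slot; the
   16-bit offset field emitted by tcg_out_opc_br is either patched
   immediately (label already bound) or recorded as an R_MIPS_PC16
   relocation and resolved once the label is reached.  */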

static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
                                 TCGReg al, TCGReg ah,
                                 TCGReg bl, TCGReg bh)
{
    /* Merge highpart comparison into AH.  */
    if (bh != 0) {
        if (ah != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
            ah = tmp0;
        } else {
            ah = bh;
        }
    }
    /* Merge lowpart comparison into AL.  */
    if (bl != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
            al = tmp1;
        } else {
            al = bl;
        }
    }
    /* Merge high and low part comparisons into AL.  */
    if (ah != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
            al = tmp0;
        } else {
            al = ah;
        }
    }
    return al;
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    TCGReg tmp0 = TCG_TMP0;
    TCGReg tmp1 = ret;

    assert(ret != TCG_TMP0);
    if (ret == ah || ret == bh) {
        assert(ret != TCG_TMP1);
        tmp1 = TCG_TMP1;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
        tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
        break;

    default:
        tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
        tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
        tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
        tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
        tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    TCGCond b_cond = TCG_COND_NE;
    TCGReg tmp = TCG_TMP1;

    /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
       With setcond, we emit between 3 and 10 insns and only 1 branch,
       which ought to get better branch prediction.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        b_cond = cond;
        tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            b_cond = TCG_COND_EQ;
        }
        tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
        break;
    }

    tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
{
    bool eqz = false;

    /* If one of the values is zero, put it last to match SEL*Z instructions */
    if (use_mips32r6_instructions && v1 == 0) {
        v1 = v2;
        v2 = 0;
        cond = tcg_invert_cond(cond);
    }

    switch (cond) {
    case TCG_COND_EQ:
        eqz = true;
        /* FALLTHRU */
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
            c1 = TCG_TMP0;
        }
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            eqz = true;
        }
        tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
        c1 = TCG_TMP0;
        break;
    }

    if (use_mips32r6_instructions) {
        MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ;
        MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ;

        if (v2 != 0) {
            tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1);
        }
        tcg_out_opc_reg(s, m_opc_t, ret, v1, c1);
        if (v2 != 0) {
            tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1);
        }
    } else {
        MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN;

        tcg_out_opc_reg(s, m_opc, ret, v1, c1);

        /* This should be guaranteed via constraints */
        tcg_debug_assert(v2 == ret);
    }
}

static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    /* Note that the ABI requires the called function's address to be
       loaded into T9, even if a direct branch is in range.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);

    /* But do try a direct branch, allowing the cpu better insn prefetch.  */
    if (tail) {
        if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
        }
    } else {
        if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
            tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
        }
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
    tcg_out_nop(s);
}

#if defined(CONFIG_SOFTMMU)
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * I is where we want to put this argument, and is updated and returned
 * for the next call. ARG is the argument itself.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */

static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
{
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
    } else {
        tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
    }
    return i + 1;
}

static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (arg == 0) {
        tmp = TCG_REG_ZERO;
    } else {
        if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
            tmp = tcg_target_call_iarg_regs[i];
        }
        tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
    }
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
    i = (i + 1) & ~1;
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
    return i;
}
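
/* Per the o32 calling convention, 64-bit arguments are passed in an aligned
   (even, odd) register pair, e.g. a0:a1 or a2:a3.  The "(i + 1) & ~1" above
   rounds the argument index up to keep that alignment, and MIPS_BE picks
   which half goes in the lower-numbered register.  */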

/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers TMP0, TMP1, A0.  */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    TCGMemOp s_bits = get_memop(oi) & MO_SIZE;
    int mem_index = get_mmuidx(oi);
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Load the (low half) tlb comparator.  */
    tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0,
                    cmp_off + (TARGET_LONG_BITS == 64 ? LO_OFF : 0));

    /* Mask the page bits, keeping the alignment bits to compare against.
       In between, on 32-bit targets, load the tlb addend for the fast path.  */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    if (TARGET_LONG_BITS == 32) {
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
    }
    tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);

    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

    /* Load and test the high half tlb comparator.  */
    if (TARGET_LONG_BITS == 64) {
        /* delay slot */
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);

        /* Load the tlb addend for the fast path.  We can't do it earlier with
           64-bit targets or we'll clobber a0 before reading the high half tlb
           comparator.  */
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);

        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
    }

    /* delay slot */
    tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
}
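
/* Fast-path recap: the guest page number, scaled by the TLB entry size,
   indexes env->tlb_table; the loaded comparator must equal the page-masked
   guest address (the low bits retained above also enforce alignment), or
   the BNE branches out to the slow path.  On a hit, BASE ends up holding
   guest address + addend, i.e. the host address.  */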

static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS == 64) {
        label->label_ptr[1] = label_ptr[1];
    }
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg v0;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, oi);
    i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
    tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);

    v0 = l->datalo_reg;
    if ((opc & MO_SIZE) == MO_64) {
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here. So we must move V1 first. */
        if (MIPS_BE) {
            tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
            v0 = l->datahi_reg;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
        }
    }

    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, oi);

    /* Tail call to the store helper.  Thus force the return address
       computation to take place in the return address register.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
    i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
    tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
}
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16(s, datalo, TCG_TMP1);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16s(s, datalo, TCG_TMP1);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        break;
    case MO_UL:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
        tcg_out_bswap32(s, datahi, TCG_TMP1);
        break;
    case MO_Q:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
#endif
    /* Note that we've eliminated V0 from the output registers,
       so we won't overwrite the base register during loading. */
    TCGReg base = TCG_REG_V0;

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (guest_base == 0 && data_regl != addr_regl) {
        base = addr_regl;
    } else if (guest_base == (int16_t)guest_base) {
        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
        tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_8:
        tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
        break;

    case MO_16 | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
        tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_16:
        tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
        break;

    case MO_32 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_32:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
        break;

    case MO_64 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, TCG_TMP1, datahi);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
        break;
    case MO_64:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh, base;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
#endif

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    /* Note that we eliminated the helper's address argument,
       so we can reuse that for the base.  */
    base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (guest_base == 0) {
        base = addr_regl;
    } else {
        base = TCG_REG_A0;
        if (guest_base == (int16_t)guest_base) {
            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
            tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
        }
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    MIPSInsn i1, i2;
    TCGArg a0, a1, a2;
    int c2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            TCGReg b0 = TCG_REG_ZERO;

            if (a0 & ~0xffff) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
                b0 = TCG_REG_V0;
            }
            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
                             (uintptr_t)tb_ret_addr);
                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
            }
            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Avoid clobbering the address during retranslation.  */
            tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
        } else {
            /* indirect jump method */
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                       (uintptr_t)(s->tb_next + a0));
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
        }
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
                       arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
        i1 = OPC_LBU;
        goto do_ldst;
    case INDEX_op_ld8s_i32:
        i1 = OPC_LB;
        goto do_ldst;
    case INDEX_op_ld16u_i32:
        i1 = OPC_LHU;
        goto do_ldst;
    case INDEX_op_ld16s_i32:
        i1 = OPC_LH;
        goto do_ldst;
    case INDEX_op_ld_i32:
        i1 = OPC_LW;
        goto do_ldst;
    case INDEX_op_st8_i32:
        i1 = OPC_SB;
        goto do_ldst;
    case INDEX_op_st16_i32:
        i1 = OPC_SH;
        goto do_ldst;
    case INDEX_op_st_i32:
        i1 = OPC_SW;
    do_ldst:
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        i1 = OPC_ADDU, i2 = OPC_ADDIU;
        goto do_binary;
    case INDEX_op_or_i32:
        i1 = OPC_OR, i2 = OPC_ORI;
        goto do_binary;
    case INDEX_op_xor_i32:
        i1 = OPC_XOR, i2 = OPC_XORI;
    do_binary:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, a2);
            break;
        }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
            break;
        }
        i1 = OPC_SUBU;
        goto do_binary;
    case INDEX_op_and_i32:
        if (c2 && a2 != (uint16_t)a2) {
            int msb = ctz32(~a2) - 1;
            assert(use_mips32r2_instructions);
            assert(is_p2m1(a2));
            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_nor_i32:
        i1 = OPC_NOR;
        goto do_binaryv;

    case INDEX_op_mul_i32:
        if (use_mips32_instructions) {
            tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_mulsh_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
            break;
        }
        i1 = OPC_MULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
            break;
        }
        i1 = OPC_DIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
            break;
        }
        i1 = OPC_DIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
            break;
        }
        i1 = OPC_DIVU, i2 = OPC_MFHI;
    do_hilo1:
        tcg_out_opc_reg(s, i1, 0, a1, a2);
        tcg_out_opc_reg(s, i2, a0, 0, 0);
        break;

    case INDEX_op_muls2_i32:
        i1 = OPC_MULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i32:
        i1 = OPC_MULTU;
    do_hilo2:
        tcg_out_opc_reg(s, i1, 0, a2, args[3]);
        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
        tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
        break;

    case INDEX_op_not_i32:
        i1 = OPC_NOR;
        goto do_unary;
    case INDEX_op_bswap16_i32:
        i1 = OPC_WSBH;
        goto do_unary;
    case INDEX_op_ext8s_i32:
        i1 = OPC_SEB;
        goto do_unary;
    case INDEX_op_ext16s_i32:
        i1 = OPC_SEH;
    do_unary:
        tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_sar_i32:
        i1 = OPC_SRAV, i2 = OPC_SRA;
        goto do_shift;
    case INDEX_op_shl_i32:
        i1 = OPC_SLLV, i2 = OPC_SLL;
        goto do_shift;
    case INDEX_op_shr_i32:
        i1 = OPC_SRLV, i2 = OPC_SRL;
        goto do_shift;
    case INDEX_op_rotr_i32:
        i1 = OPC_ROTRV, i2 = OPC_ROTR;
    do_shift:
        if (c2) {
            tcg_out_opc_sa(s, i2, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, i1, a0, a2, a1);
        }
        break;
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
        tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef mips_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
#if !use_mips32r6_instructions
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
#endif
    { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rN" } },

    { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_not_i32, { "r", "rZ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "rZ" } },
    { INDEX_op_ext16s_i32, { "r", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
#if use_mips32r6_instructions
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
#else
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
#endif
    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
#else
    { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* The Linux kernel doesn't provide any information about the available
   instruction set. Probe it using a signal handler. */

#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulty instruction */
    ucontext_t *uc = (ucontext_t *)data;
    uc->uc_mcontext.pc += 4;

    got_sigill = 1;
}

static void tcg_target_detect_isa(void)
{
    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* Probe for movn/movz, necessary to implement movcond. */
#ifndef use_movnz_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "movn $zero, $zero, $zero\n"
                 "movz $zero, $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_movnz_instructions = !got_sigill;
#endif

    /* Probe for MIPS32 instructions. As no subsetting is allowed
       by the specification, it is only necessary to probe for one
       of the instructions. */
#ifndef use_mips32_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "mul $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_mips32_instructions = !got_sigill;
#endif

    /* Probe for MIPS32r2 instructions if MIPS32 instructions are
       available. As no subsetting is allowed by the specification,
       it is only necessary to probe for one of the instructions. */
#ifndef use_mips32r2_instructions
    if (use_mips32_instructions) {
        got_sigill = 0;
        asm volatile(".set push\n"
                     ".set mips32r2\n"
                     "seb $zero, $zero\n"
                     ".set pop\n"
                     : : : );
        use_mips32r2_instructions = !got_sigill;
    }
#endif

    sigaction(SIGILL, &sa_old, NULL);
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size;

    /* reserve some stack space, also for TCG temps. */
    frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                 + TCG_STATIC_CALL_ARGS_SIZE
                 + CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                 ~(TCG_TARGET_STACK_ALIGN - 1);
    tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                  + TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* TB prologue */
    tcg_out_addi(s, TCG_REG_SP, -frame_size);
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    /* Call generated code */
    tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tb_ret_addr = s->code_ptr;

    /* TB epilogue */
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    tcg_out_addi(s, TCG_REG_SP, frame_size);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();
    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
    tcg_regset_set(tcg_target_call_clobber_regs,
                   (1 << TCG_REG_V0) |
                   (1 << TCG_REG_V1) |
                   (1 << TCG_REG_A0) |
                   (1 << TCG_REG_A1) |
                   (1 << TCG_REG_A2) |
                   (1 << TCG_REG_A3) |
                   (1 << TCG_REG_T0) |
                   (1 << TCG_REG_T1) |
                   (1 << TCG_REG_T2) |
                   (1 << TCG_REG_T3) |
                   (1 << TCG_REG_T4) |
                   (1 << TCG_REG_T5) |
                   (1 << TCG_REG_T6) |
                   (1 << TCG_REG_T7) |
                   (1 << TCG_REG_T8) |
                   (1 << TCG_REG_T9));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP1);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */

    tcg_add_target_add_op_defs(mips_op_defs);
}

void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    *ptr = deposit32(*ptr, 0, 26, addr >> 2);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}
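
/* Direct-jump patching: the goto_tb case above emitted an OPC_J, so
   retargeting a TB only requires rewriting the 26-bit instruction-index
   field in place and flushing a single instruction's worth of icache.
   As with all pc-region jumps, the new target must lie in the same 256MB
   region as the jump instruction itself.  */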