tcg/mips: move tcg_out_addsub2
tcg/mips/tcg-target.c

/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

#ifdef HOST_WORDS_BIGENDIAN
# define MIPS_BE 1
#else
# define MIPS_BE 0
#endif

#define LO_OFF (MIPS_BE * 4)
#define HI_OFF (4 - LO_OFF)

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "at",
    "v0",
    "v1",
    "a0",
    "a1",
    "a2",
    "a3",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "t8",
    "t9",
    "k0",
    "k1",
    "gp",
    "sp",
    "s8",
    "ra",
};
#endif

#define TCG_TMP0  TCG_REG_AT
#define TCG_TMP1  TCG_REG_T9

/* check if we really need so many registers :P */
static const TCGReg tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_S0,
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,

    /* Call clobbered registers.  */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,
    TCG_REG_T9,
    TCG_REG_V1,
    TCG_REG_V0,

    /* Argument registers, opposite order of allocation.  */
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const TCGReg tcg_target_call_iarg_regs[4] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3
};

static const TCGReg tcg_target_call_oarg_regs[2] = {
    TCG_REG_V0,
    TCG_REG_V1
};

static tcg_insn_unit *tb_ret_addr;

static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    /* Let the compiler perform the right-shift as part of the arithmetic.  */
    ptrdiff_t disp = target - (pc + 1);
    assert(disp == (int16_t)disp);
    return disp & 0xffff;
}

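/*
 * Worked example (added; not in the original source): with the branch
 * insn at 0x1000 and the target at 0x1010, disp = (0x1010 - 0x1004) / 4
 * = 3, so the 16-bit field encodes 3 -- the displacement is counted in
 * instructions, relative to the delay slot (pc + 1).
 */
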
static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
}

static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
    return ((uintptr_t)target >> 2) & 0x3ffffff;
}

static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    assert(type == R_MIPS_PC16);
    assert(addend == 0);
    reloc_pc16(code_ptr, (tcg_insn_unit *)value);
}

#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff.  */
#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767 */
#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1.  */
#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767 */

static inline bool is_p2m1(tcg_target_long val)
{
    return val && ((val + 1) & val) == 0;
}

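/* Example (added for clarity): is_p2m1(0x00ff) is true, since
   0x0100 & 0x00ff == 0, while is_p2m1(0x00fe) is false and 0 is
   rejected outright.  Such masks can be extracted with a single EXT
   insn on MIPS32R2, which is what the 'K' constraint relies on.  */
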
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld output arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
        break;
    case 'l': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 64) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 32) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        } else {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
        }
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_P2M1;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N16;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
        return 1;
    } else if ((ct & TCG_CT_CONST_P2M1)
               && use_mips32r2_instructions && is_p2m1(val)) {
        return 1;
    }
    return 0;
}

/* instruction opcodes */
typedef enum {
    OPC_J        = 0x02 << 26,
    OPC_JAL      = 0x03 << 26,
    OPC_BEQ      = 0x04 << 26,
    OPC_BNE      = 0x05 << 26,
    OPC_BLEZ     = 0x06 << 26,
    OPC_BGTZ     = 0x07 << 26,
    OPC_ADDIU    = 0x09 << 26,
    OPC_SLTI     = 0x0A << 26,
    OPC_SLTIU    = 0x0B << 26,
    OPC_ANDI     = 0x0C << 26,
    OPC_ORI      = 0x0D << 26,
    OPC_XORI     = 0x0E << 26,
    OPC_LUI      = 0x0F << 26,
    OPC_LB       = 0x20 << 26,
    OPC_LH       = 0x21 << 26,
    OPC_LW       = 0x23 << 26,
    OPC_LBU      = 0x24 << 26,
    OPC_LHU      = 0x25 << 26,
    OPC_LWU      = 0x27 << 26,
    OPC_SB       = 0x28 << 26,
    OPC_SH       = 0x29 << 26,
    OPC_SW       = 0x2B << 26,

    OPC_SPECIAL  = 0x00 << 26,
    OPC_SLL      = OPC_SPECIAL | 0x00,
    OPC_SRL      = OPC_SPECIAL | 0x02,
    OPC_ROTR     = OPC_SPECIAL | (0x01 << 21) | 0x02,
    OPC_SRA      = OPC_SPECIAL | 0x03,
    OPC_SLLV     = OPC_SPECIAL | 0x04,
    OPC_SRLV     = OPC_SPECIAL | 0x06,
    OPC_ROTRV    = OPC_SPECIAL | (0x01 <<  6) | 0x06,
    OPC_SRAV     = OPC_SPECIAL | 0x07,
    OPC_JR       = OPC_SPECIAL | 0x08,
    OPC_JALR     = OPC_SPECIAL | 0x09,
    OPC_MOVZ     = OPC_SPECIAL | 0x0A,
    OPC_MOVN     = OPC_SPECIAL | 0x0B,
    OPC_MFHI     = OPC_SPECIAL | 0x10,
    OPC_MFLO     = OPC_SPECIAL | 0x12,
    OPC_MULT     = OPC_SPECIAL | 0x18,
    OPC_MULTU    = OPC_SPECIAL | 0x19,
    OPC_DIV      = OPC_SPECIAL | 0x1A,
    OPC_DIVU     = OPC_SPECIAL | 0x1B,
    OPC_ADDU     = OPC_SPECIAL | 0x21,
    OPC_SUBU     = OPC_SPECIAL | 0x23,
    OPC_AND      = OPC_SPECIAL | 0x24,
    OPC_OR       = OPC_SPECIAL | 0x25,
    OPC_XOR      = OPC_SPECIAL | 0x26,
    OPC_NOR      = OPC_SPECIAL | 0x27,
    OPC_SLT      = OPC_SPECIAL | 0x2A,
    OPC_SLTU     = OPC_SPECIAL | 0x2B,

    OPC_REGIMM   = 0x01 << 26,
    OPC_BLTZ     = OPC_REGIMM | (0x00 << 16),
    OPC_BGEZ     = OPC_REGIMM | (0x01 << 16),

    OPC_SPECIAL2 = 0x1c << 26,
    OPC_MUL      = OPC_SPECIAL2 | 0x002,

    OPC_SPECIAL3 = 0x1f << 26,
    OPC_EXT      = OPC_SPECIAL3 | 0x000,
    OPC_INS      = OPC_SPECIAL3 | 0x004,
    OPC_WSBH     = OPC_SPECIAL3 | 0x0a0,
    OPC_SEB      = OPC_SPECIAL3 | 0x420,
    OPC_SEH      = OPC_SPECIAL3 | 0x620,
} MIPSInsn;

/*
 * Type reg
 */
static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
                                   TCGReg rd, TCGReg rs, TCGReg rt)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    tcg_out32(s, inst);
}

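/* Encoding example (added; not in the original source):
   tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, TCG_REG_A1)
   assembles "addu $v0, $a0, $a1" as
   0x00 << 26 | 4 << 21 | 5 << 16 | 2 << 11 | 0x21 == 0x00851021.  */
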
/*
 * Type immediate
 */
static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
                                   TCGReg rt, TCGReg rs, TCGArg imm)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (imm & 0xffff);
    tcg_out32(s, inst);
}

/*
 * Type bitfield
 */
static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
                                  TCGReg rs, int msb, int lsb)
{
    int32_t inst;

    inst = opc;
    inst |= (rs & 0x1F) << 21;
    inst |= (rt & 0x1F) << 16;
    inst |= (msb & 0x1F) << 11;
    inst |= (lsb & 0x1F) << 6;
    tcg_out32(s, inst);
}

/*
 * Type branch
 */
static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
                                  TCGReg rt, TCGReg rs)
{
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensures that caches and
       memory are kept coherent during retranslation. */
    uint16_t offset = (uint16_t)*s->code_ptr;

    tcg_out_opc_imm(s, opc, rt, rs, offset);
}

/*
 * Type sa
 */
static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
                                  TCGReg rd, TCGReg rt, TCGArg sa)
{
    int32_t inst;

    inst = opc;
    inst |= (rt & 0x1F) << 16;
    inst |= (rd & 0x1F) << 11;
    inst |= (sa & 0x1F) << 6;
    tcg_out32(s, inst);
}

/*
 * Type jump.
 * Returns true if the branch was in range and the insn was emitted.
 */
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
{
    uintptr_t dest = (uintptr_t)target;
    uintptr_t from = (uintptr_t)s->code_ptr + 4;
    int32_t inst;

    /* The pc-region branch happens within the 256MB region of
       the delay slot (thus the +4).  */
    if ((from ^ dest) & -(1 << 28)) {
        return false;
    }
    assert((dest & 3) == 0);

    inst = opc;
    inst |= (dest >> 2) & 0x3ffffff;
    tcg_out32(s, inst);
    return true;
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, 0);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (ret != arg) {
        tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg reg, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
    } else if (arg == (uint16_t)arg) {
        tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
    } else {
        tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
        if (arg & 0xffff) {
            tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
        }
    }
}

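/* Expansion example (added for clarity): arg = 0x12345678 matches
   neither 16-bit form, so this emits "lui reg, 0x1234" followed by
   "ori reg, reg, 0x5678"; constants fitting in 16 bits cost a single
   ADDIU or ORI.  */
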
static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
    } else {
        /* ret and arg can't be the register $at (TCG_TMP0) */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
    } else {
        /* ret and arg can't be the register $at (TCG_TMP0) */
        if (ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
        tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
    } else {
        /* ret and arg must be different and can't be the register $at */
        if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
            tcg_abort();
        }

        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
        tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);

        tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
        tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
    }
}

static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
    }
}

static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (use_mips32r2_instructions) {
        tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
    } else {
        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
    }
}

static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
                         TCGReg addr, intptr_t ofs)
{
    int16_t lo = ofs;
    if (ofs != lo) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
        if (addr != TCG_REG_ZERO) {
            tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
        }
        addr = TCG_TMP0;
    }
    tcg_out_opc_imm(s, opc, data, addr, lo);
}

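/* Example (added for clarity): ofs = 0x12345 yields lo = (int16_t)0x2345,
   so TCG_TMP0 is loaded with the remainder 0x10000, the base register is
   added to it, and the access is emitted with the in-range 16-bit
   displacement 0x2345.  */
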
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
}

static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
{
    if (val == (int16_t)val) {
        tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
        tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
    }
}

static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
                            TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
                            bool cbh, bool is_sub)
{
    TCGReg th = TCG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
            tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
        }
        tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
    }
}

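/* A note on the carry computation above (added for clarity): an unsigned
   32-bit addition wraps iff the result is smaller than either operand,
   so "sltu TCG_TMP0, rl, bl" after "addu rl, al, bl" leaves exactly the
   carry-out in TCG_TMP0, which is then folded into the high half.  The
   subtraction path mirrors this, computing the borrow with SLTU/SLTIU
   before AL is overwritten.  */
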
/* Bit 0 set if inversion required; bit 1 set if swapping required.  */
#define MIPS_CMP_INV  1
#define MIPS_CMP_SWAP 2

static const uint8_t mips_cmp_map[16] = {
    [TCG_COND_LT]  = 0,
    [TCG_COND_LTU] = 0,
    [TCG_COND_GE]  = MIPS_CMP_INV,
    [TCG_COND_GEU] = MIPS_CMP_INV,
    [TCG_COND_LE]  = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
    [TCG_COND_GT]  = MIPS_CMP_SWAP,
    [TCG_COND_GTU] = MIPS_CMP_SWAP,
};

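/* Usage example (added for clarity): TCG_COND_GT carries MIPS_CMP_SWAP,
   so "a > b" is emitted as "slt ret, b, a"; TCG_COND_GE carries
   MIPS_CMP_INV, so "a >= b" becomes "slt ret, a, b" followed by
   "xori ret, ret, 1".  */
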
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    MIPSInsn s_opc = OPC_SLTU;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        break;

    case TCG_COND_NE:
        if (arg2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            arg1 = ret;
        }
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
        if (cmp_map & MIPS_CMP_INV) {
            tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        }
        break;

    default:
        tcg_abort();
        break;
    }
}

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    static const MIPSInsn b_zero[16] = {
        [TCG_COND_LT] = OPC_BLTZ,
        [TCG_COND_GT] = OPC_BGTZ,
        [TCG_COND_LE] = OPC_BLEZ,
        [TCG_COND_GE] = OPC_BGEZ,
    };

    MIPSInsn s_opc = OPC_SLTU;
    MIPSInsn b_opc;
    int cmp_map;

    switch (cond) {
    case TCG_COND_EQ:
        b_opc = OPC_BEQ;
        break;
    case TCG_COND_NE:
        b_opc = OPC_BNE;
        break;

    case TCG_COND_LT:
    case TCG_COND_GT:
    case TCG_COND_LE:
    case TCG_COND_GE:
        if (arg2 == 0) {
            b_opc = b_zero[cond];
            arg2 = arg1;
            arg1 = 0;
            break;
        }
        s_opc = OPC_SLT;
        /* FALLTHRU */

    case TCG_COND_LTU:
    case TCG_COND_GTU:
    case TCG_COND_LEU:
    case TCG_COND_GEU:
        cmp_map = mips_cmp_map[cond];
        if (cmp_map & MIPS_CMP_SWAP) {
            TCGReg t = arg1;
            arg1 = arg2;
            arg2 = t;
        }
        tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
        b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
        arg1 = TCG_TMP0;
        arg2 = TCG_REG_ZERO;
        break;

    default:
        tcg_abort();
        break;
    }

    tcg_out_opc_br(s, b_opc, arg1, arg2);
    if (l->has_value) {
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
    }
    tcg_out_nop(s);
}

static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
                                 TCGReg al, TCGReg ah,
                                 TCGReg bl, TCGReg bh)
{
    /* Merge highpart comparison into AH.  */
    if (bh != 0) {
        if (ah != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
            ah = tmp0;
        } else {
            ah = bh;
        }
    }
    /* Merge lowpart comparison into AL.  */
    if (bl != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
            al = tmp1;
        } else {
            al = bl;
        }
    }
    /* Merge high and low part comparisons into AL.  */
    if (ah != 0) {
        if (al != 0) {
            tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
            al = tmp0;
        } else {
            al = ah;
        }
    }
    return al;
}

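/* In effect the reduction computes ((al ^ bl) | (ah ^ bh)) (note added
   for clarity), which is zero iff the two 64-bit operands are equal;
   halves already known to be zero are passed through unmodified to save
   instructions.  */
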
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    TCGReg tmp0 = TCG_TMP0;
    TCGReg tmp1 = ret;

    assert(ret != TCG_TMP0);
    if (ret == ah || ret == bh) {
        assert(ret != TCG_TMP1);
        tmp1 = TCG_TMP1;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
        tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
        break;

    default:
        tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
        tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
        tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
        tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
        tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    TCGCond b_cond = TCG_COND_NE;
    TCGReg tmp = TCG_TMP1;

    /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
       With setcond, we emit between 3 and 10 insns and only 1 branch,
       which ought to get better branch prediction.  */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        b_cond = cond;
        tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            b_cond = TCG_COND_EQ;
        }
        tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
        break;
    }

    tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, TCGReg c2, TCGReg v)
{
    MIPSInsn m_opc = OPC_MOVN;

    switch (cond) {
    case TCG_COND_EQ:
        m_opc = OPC_MOVZ;
        /* FALLTHRU */
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
            c1 = TCG_TMP0;
        }
        break;

    default:
        /* Minimize code size by preferring a compare not requiring INV.  */
        if (mips_cmp_map[cond] & MIPS_CMP_INV) {
            cond = tcg_invert_cond(cond);
            m_opc = OPC_MOVZ;
        }
        tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
        c1 = TCG_TMP0;
        break;
    }

    tcg_out_opc_reg(s, m_opc, ret, v, c1);
}

static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
{
    /* Note that the ABI requires the called function's address to be
       loaded into T9, even if a direct branch is in range.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);

    /* But do try a direct branch, allowing the cpu better insn prefetch.  */
    if (tail) {
        if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
        }
    } else {
        if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
            tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
        }
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
    tcg_out_nop(s);
}

#if defined(CONFIG_SOFTMMU)
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * I is where we want to put this argument, and is updated and returned
 * for the next call. ARG is the argument itself.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */

static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
{
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
    } else {
        tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
    }
    return i + 1;
}

static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
        tmp = tcg_target_call_iarg_regs[i];
    }
    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
{
    TCGReg tmp = TCG_TMP0;
    if (arg == 0) {
        tmp = TCG_REG_ZERO;
    } else {
        if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
            tmp = tcg_target_call_iarg_regs[i];
        }
        tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
    }
    return tcg_out_call_iarg_reg(s, i, tmp);
}

static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
    i = (i + 1) & ~1;
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
    i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
    return i;
}

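/* Example (added for clarity): the o32 ABI requires a 64-bit argument
   to start at an even register index, so a 64-bit value arriving as the
   second argument skips A1 and occupies A2:A3; on a big-endian host the
   high part goes into the lower-numbered register.  */
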
/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers AT, T0, A0.  */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, int mem_index, TCGMemOp s_bits,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Load the (low half) tlb comparator.  */
    tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0,
                    cmp_off + (TARGET_LONG_BITS == 64 ? LO_OFF : 0));

    /* Mask the page bits, keeping the alignment bits to compare against.
       In between, on 32-bit targets, load the tlb addend for the fast path.  */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    if (TARGET_LONG_BITS == 32) {
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
    }
    tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);

    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

    /* Load and test the high half tlb comparator.  */
    if (TARGET_LONG_BITS == 64) {
        /* delay slot */
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);

        /* Load the tlb addend for the fast path.  We can't do it earlier with
           64-bit targets or we'll clobber a0 before reading the high half tlb
           comparator.  */
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);

        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
    }

    /* delay slot */
    tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
}

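/* Fast-path sketch (added; a summary, not authoritative): for a 32-bit
   guest this emits roughly SRL/ANDI/ADDU to index the TLB, an LW of the
   comparator and one of the addend, an AND to mask the page bits, one
   BNE to the slow path with a useful insn in its delay slot, and a
   final ADDU leaving the host address in BASE.  */
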
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS == 64) {
        label->label_ptr[1] = label_ptr[1];
    }
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg v0;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, oi);
    i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
    tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);

    v0 = l->datalo_reg;
    if ((opc & MO_SIZE) == MO_64) {
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here.  So we must move V1 first.  */
        if (MIPS_BE) {
            tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
            v0 = l->datahi_reg;
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
        }
    }

    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 1;
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, oi);

    /* Tail call to the store helper.  Thus force the return address
       computation to take place in the return address register.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
    i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
    tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
    /* delay slot */
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
}
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16(s, datalo, TCG_TMP1);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
        tcg_out_bswap16s(s, datalo, TCG_TMP1);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        break;
    case MO_UL:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, datalo, TCG_TMP1);
        tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
        tcg_out_bswap32(s, datahi, TCG_TMP1);
        break;
    case MO_Q:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
    int mem_index;
    TCGMemOp s_bits;
#endif
    /* Note that we've eliminated V0 from the output registers,
       so we won't overwrite the base register during loading.  */
    TCGReg base = TCG_REG_V0;

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                     s_bits, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (guest_base == 0 && data_regl != addr_regl) {
        base = addr_regl;
    } else if (guest_base == (int16_t)guest_base) {
        tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
        tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_8:
        tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
        break;

    case MO_16 | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
        tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_16:
        tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
        break;

    case MO_32 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        datalo = TCG_TMP1;
        /* FALLTHRU */
    case MO_32:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
        break;

    case MO_64 | MO_BSWAP:
        tcg_out_bswap32(s, TCG_TMP1, datalo);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
        tcg_out_bswap32(s, TCG_TMP1, datahi);
        tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
        break;
    case MO_64:
        tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh, base;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[2];
    int mem_index;
    TCGMemOp s_bits;
#endif

    data_regl = *args++;
    data_regh = (is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);
    s_bits = opc & 3;

    /* Note that we eliminated the helper's address argument,
       so we can reuse that for the base.  */
    base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
    tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                     s_bits, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (guest_base == 0) {
        base = addr_regl;
    } else {
        base = TCG_REG_A0;
        if (guest_base == (int16_t)guest_base) {
            tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
            tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
        }
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    MIPSInsn i1, i2;
    TCGArg a0, a1, a2;
    int c2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            TCGReg b0 = TCG_REG_ZERO;

            if (a0 & ~0xffff) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
                b0 = TCG_REG_V0;
            }
            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
                             (uintptr_t)tb_ret_addr);
                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
            }
            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Avoid clobbering the address during retranslation.  */
            tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
        } else {
            /* indirect jump method */
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                       (uintptr_t)(s->tb_next + a0));
            tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
        }
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
                       arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
        i1 = OPC_LBU;
        goto do_ldst;
    case INDEX_op_ld8s_i32:
        i1 = OPC_LB;
        goto do_ldst;
    case INDEX_op_ld16u_i32:
        i1 = OPC_LHU;
        goto do_ldst;
    case INDEX_op_ld16s_i32:
        i1 = OPC_LH;
        goto do_ldst;
    case INDEX_op_ld_i32:
        i1 = OPC_LW;
        goto do_ldst;
    case INDEX_op_st8_i32:
        i1 = OPC_SB;
        goto do_ldst;
    case INDEX_op_st16_i32:
        i1 = OPC_SH;
        goto do_ldst;
    case INDEX_op_st_i32:
        i1 = OPC_SW;
    do_ldst:
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        i1 = OPC_ADDU, i2 = OPC_ADDIU;
        goto do_binary;
    case INDEX_op_or_i32:
        i1 = OPC_OR, i2 = OPC_ORI;
        goto do_binary;
    case INDEX_op_xor_i32:
        i1 = OPC_XOR, i2 = OPC_XORI;
    do_binary:
        if (c2) {
            tcg_out_opc_imm(s, i2, a0, a1, a2);
            break;
        }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
            break;
        }
        i1 = OPC_SUBU;
        goto do_binary;
    case INDEX_op_and_i32:
        if (c2 && a2 != (uint16_t)a2) {
            int msb = ctz32(~a2) - 1;
            assert(use_mips32r2_instructions);
            assert(is_p2m1(a2));
            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
            break;
        }
        i1 = OPC_AND, i2 = OPC_ANDI;
        goto do_binary;
    case INDEX_op_nor_i32:
        i1 = OPC_NOR;
        goto do_binaryv;

    case INDEX_op_mul_i32:
        if (use_mips32_instructions) {
            tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
            break;
        }
        i1 = OPC_MULT, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_mulsh_i32:
        i1 = OPC_MULT, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_muluh_i32:
        i1 = OPC_MULTU, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_div_i32:
        i1 = OPC_DIV, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_divu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFLO;
        goto do_hilo1;
    case INDEX_op_rem_i32:
        i1 = OPC_DIV, i2 = OPC_MFHI;
        goto do_hilo1;
    case INDEX_op_remu_i32:
        i1 = OPC_DIVU, i2 = OPC_MFHI;
    do_hilo1:
        tcg_out_opc_reg(s, i1, 0, a1, a2);
        tcg_out_opc_reg(s, i2, a0, 0, 0);
        break;

    case INDEX_op_muls2_i32:
        i1 = OPC_MULT;
        goto do_hilo2;
    case INDEX_op_mulu2_i32:
        i1 = OPC_MULTU;
    do_hilo2:
        tcg_out_opc_reg(s, i1, 0, a2, args[3]);
        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
        tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
        break;

    case INDEX_op_not_i32:
        i1 = OPC_NOR;
        goto do_unary;
    case INDEX_op_bswap16_i32:
        i1 = OPC_WSBH;
        goto do_unary;
    case INDEX_op_ext8s_i32:
        i1 = OPC_SEB;
        goto do_unary;
    case INDEX_op_ext16s_i32:
        i1 = OPC_SEH;
    do_unary:
        tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_sar_i32:
        i1 = OPC_SRAV, i2 = OPC_SRA;
        goto do_shift;
    case INDEX_op_shl_i32:
        i1 = OPC_SLLV, i2 = OPC_SLL;
        goto do_shift;
    case INDEX_op_shr_i32:
        i1 = OPC_SRLV, i2 = OPC_SRL;
        goto do_shift;
    case INDEX_op_rotr_i32:
        i1 = OPC_ROTRV, i2 = OPC_ROTR;
    do_shift:
        if (c2) {
            tcg_out_opc_sa(s, i2, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, i1, a0, a2, a1);
        }
        break;
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
        tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], a0, a1, a2, args[3]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef mips_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
    { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rN" } },

    { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_not_i32, { "r", "rZ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "rZ" } },
    { INDEX_op_ext16s_i32, { "r", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
#else
    { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
    { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
    { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* The Linux kernel doesn't provide any information about the available
   instruction set. Probe it using a signal handler. */

#include <signal.h>

#ifndef use_movnz_instructions
bool use_movnz_instructions = false;
#endif

#ifndef use_mips32_instructions
bool use_mips32_instructions = false;
#endif

#ifndef use_mips32r2_instructions
bool use_mips32r2_instructions = false;
#endif

static volatile sig_atomic_t got_sigill;

static void sigill_handler(int signo, siginfo_t *si, void *data)
{
    /* Skip the faulty instruction */
    ucontext_t *uc = (ucontext_t *)data;
    uc->uc_mcontext.pc += 4;

    got_sigill = 1;
}

static void tcg_target_detect_isa(void)
{
    struct sigaction sa_old, sa_new;

    memset(&sa_new, 0, sizeof(sa_new));
    sa_new.sa_flags = SA_SIGINFO;
    sa_new.sa_sigaction = sigill_handler;
    sigaction(SIGILL, &sa_new, &sa_old);

    /* Probe for movn/movz, necessary to implement movcond.  */
#ifndef use_movnz_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "movn $zero, $zero, $zero\n"
                 "movz $zero, $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_movnz_instructions = !got_sigill;
#endif

    /* Probe for MIPS32 instructions. As no subsetting is allowed
       by the specification, it is only necessary to probe for one
       of the instructions. */
#ifndef use_mips32_instructions
    got_sigill = 0;
    asm volatile(".set push\n"
                 ".set mips32\n"
                 "mul $zero, $zero\n"
                 ".set pop\n"
                 : : : );
    use_mips32_instructions = !got_sigill;
#endif

    /* Probe for MIPS32r2 instructions if MIPS32 instructions are
       available. As no subsetting is allowed by the specification,
       it is only necessary to probe for one of the instructions. */
#ifndef use_mips32r2_instructions
    if (use_mips32_instructions) {
        got_sigill = 0;
        asm volatile(".set push\n"
                     ".set mips32r2\n"
                     "seb $zero, $zero\n"
                     ".set pop\n"
                     : : : );
        use_mips32r2_instructions = !got_sigill;
    }
#endif

    sigaction(SIGILL, &sa_old, NULL);
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size;

    /* reserve some stack space, also for TCG temps. */
    frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                 + TCG_STATIC_CALL_ARGS_SIZE
                 + CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                 ~(TCG_TARGET_STACK_ALIGN - 1);
    tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
                  + TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* TB prologue */
    tcg_out_addi(s, TCG_REG_SP, -frame_size);
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    /* Call generated code */
    tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tb_ret_addr = s->code_ptr;

    /* TB epilogue */
    for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
        tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
    }

    tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
    tcg_out_addi(s, TCG_REG_SP, frame_size);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_detect_isa();
    tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
    tcg_regset_set(tcg_target_call_clobber_regs,
                   (1 << TCG_REG_V0) |
                   (1 << TCG_REG_V1) |
                   (1 << TCG_REG_A0) |
                   (1 << TCG_REG_A1) |
                   (1 << TCG_REG_A2) |
                   (1 << TCG_REG_A3) |
                   (1 << TCG_REG_T0) |
                   (1 << TCG_REG_T1) |
                   (1 << TCG_REG_T2) |
                   (1 << TCG_REG_T3) |
                   (1 << TCG_REG_T4) |
                   (1 << TCG_REG_T5) |
                   (1 << TCG_REG_T6) |
                   (1 << TCG_REG_T7) |
                   (1 << TCG_REG_T8) |
                   (1 << TCG_REG_T9));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1);   /* kernel use only */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP1);     /* internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */

    tcg_add_target_add_op_defs(mips_op_defs);
}

void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    *ptr = deposit32(*ptr, 0, 26, addr >> 2);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}