[qemu/ar7.git] / tcg / mips / tcg-target.inc.c
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
30 # define MIPS_BE 1
31 #else
32 # define MIPS_BE 0
33 #endif
35 #if TCG_TARGET_REG_BITS == 32
36 # define LO_OFF (MIPS_BE * 4)
37 # define HI_OFF (4 - LO_OFF)
38 #else
39 /* To assert at compile-time that these values are never used
40 for TCG_TARGET_REG_BITS == 64. */
41 /* extern */ int link_error(void);
42 # define LO_OFF link_error()
43 # define HI_OFF link_error()
44 #endif
46 #ifdef CONFIG_DEBUG_TCG
47 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
48 "zero",
49 "at",
50 "v0",
51 "v1",
52 "a0",
53 "a1",
54 "a2",
55 "a3",
56 "t0",
57 "t1",
58 "t2",
59 "t3",
60 "t4",
61 "t5",
62 "t6",
63 "t7",
64 "s0",
65 "s1",
66 "s2",
67 "s3",
68 "s4",
69 "s5",
70 "s6",
71 "s7",
72 "t8",
73 "t9",
74 "k0",
75 "k1",
76 "gp",
77 "sp",
78 "s8",
79 "ra",
81 #endif
83 #define TCG_TMP0 TCG_REG_AT
84 #define TCG_TMP1 TCG_REG_T9
85 #define TCG_TMP2 TCG_REG_T8
86 #define TCG_TMP3 TCG_REG_T7
88 /* check if we really need so many registers :P */
89 static const int tcg_target_reg_alloc_order[] = {
90 /* Call saved registers. */
91 TCG_REG_S0,
92 TCG_REG_S1,
93 TCG_REG_S2,
94 TCG_REG_S3,
95 TCG_REG_S4,
96 TCG_REG_S5,
97 TCG_REG_S6,
98 TCG_REG_S7,
99 TCG_REG_S8,
101 /* Call clobbered registers. */
102 TCG_REG_T4,
103 TCG_REG_T5,
104 TCG_REG_T6,
105 TCG_REG_T7,
106 TCG_REG_T8,
107 TCG_REG_T9,
108 TCG_REG_V1,
109 TCG_REG_V0,
111 /* Argument registers, opposite order of allocation. */
112 TCG_REG_T3,
113 TCG_REG_T2,
114 TCG_REG_T1,
115 TCG_REG_T0,
116 TCG_REG_A3,
117 TCG_REG_A2,
118 TCG_REG_A1,
119 TCG_REG_A0,
122 static const TCGReg tcg_target_call_iarg_regs[] = {
123 TCG_REG_A0,
124 TCG_REG_A1,
125 TCG_REG_A2,
126 TCG_REG_A3,
127 #if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
128 TCG_REG_T0,
129 TCG_REG_T1,
130 TCG_REG_T2,
131 TCG_REG_T3,
132 #endif
135 static const TCGReg tcg_target_call_oarg_regs[2] = {
136 TCG_REG_V0,
137 TCG_REG_V1
140 static tcg_insn_unit *tb_ret_addr;
141 static tcg_insn_unit *bswap32_addr;
142 static tcg_insn_unit *bswap32u_addr;
143 static tcg_insn_unit *bswap64_addr;
145 static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
147 /* Let the compiler perform the right-shift as part of the arithmetic. */
148 ptrdiff_t disp = target - (pc + 1);
149 tcg_debug_assert(disp == (int16_t)disp);
150 return disp & 0xffff;
153 static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
155 *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
158 static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
160 tcg_debug_assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
161 return ((uintptr_t)target >> 2) & 0x3ffffff;
164 static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
166 *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
169 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
170 intptr_t value, intptr_t addend)
172 tcg_debug_assert(type == R_MIPS_PC16);
173 tcg_debug_assert(addend == 0);
174 reloc_pc16(code_ptr, (tcg_insn_unit *)value);
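/*
 * Worked example: the 16-bit branch displacement counts instruction
 * slots relative to the delay slot.  For a branch at PC whose target is
 * five instructions ahead (target == pc + 5), reloc_pc16_val() returns
 * disp = 5 - 1 = 4 and the CPU resolves the branch to (pc + 4) + (4 << 2),
 * i.e. the intended target.  reloc_26_val() instead keeps 26 bits of the
 * word-aligned absolute target, which is why both ends of a J/JAL must
 * sit in the same 256MB region.
 */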
177 #define TCG_CT_CONST_ZERO 0x100
178 #define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */
179 #define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */
180 #define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */
181 #define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
182 #define TCG_CT_CONST_WSZ 0x2000 /* word size */
184 static inline bool is_p2m1(tcg_target_long val)
186 return val && ((val + 1) & val) == 0;
189 /* parse target specific constraints */
190 static const char *target_parse_constraint(TCGArgConstraint *ct,
191 const char *ct_str, TCGType type)
193 switch(*ct_str++) {
194 case 'r':
195 ct->ct |= TCG_CT_REG;
196 tcg_regset_set(ct->u.regs, 0xffffffff);
197 break;
198 case 'L': /* qemu_ld input arg constraint */
199 ct->ct |= TCG_CT_REG;
200 tcg_regset_set(ct->u.regs, 0xffffffff);
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
202 #if defined(CONFIG_SOFTMMU)
203 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
204 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
206 #endif
207 break;
208 case 'S': /* qemu_st constraint */
209 ct->ct |= TCG_CT_REG;
210 tcg_regset_set(ct->u.regs, 0xffffffff);
211 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
212 #if defined(CONFIG_SOFTMMU)
213 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
214 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
215 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
216 } else {
217 tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
219 #endif
220 break;
221 case 'I':
222 ct->ct |= TCG_CT_CONST_U16;
223 break;
224 case 'J':
225 ct->ct |= TCG_CT_CONST_S16;
226 break;
227 case 'K':
228 ct->ct |= TCG_CT_CONST_P2M1;
229 break;
230 case 'N':
231 ct->ct |= TCG_CT_CONST_N16;
232 break;
233 case 'W':
234 ct->ct |= TCG_CT_CONST_WSZ;
235 break;
236 case 'Z':
237 /* We are cheating a bit here, using the fact that the register
238 ZERO is also the register number 0. Hence there is no need
239 to check for const_args in each instruction. */
240 ct->ct |= TCG_CT_CONST_ZERO;
241 break;
242 default:
243 return NULL;
245 return ct_str;
248 /* test if a constant matches the constraint */
249 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
250 const TCGArgConstraint *arg_ct)
252 int ct;
253 ct = arg_ct->ct;
254 if (ct & TCG_CT_CONST) {
255 return 1;
256 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
257 return 1;
258 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
259 return 1;
260 } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
261 return 1;
262 } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
263 return 1;
264 } else if ((ct & TCG_CT_CONST_P2M1)
265 && use_mips32r2_instructions && is_p2m1(val)) {
266 return 1;
267 } else if ((ct & TCG_CT_CONST_WSZ)
268 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
269 return 1;
271 return 0;
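/*
 * Examples of the non-obvious constraints: 'K' (TCG_CT_CONST_P2M1)
 * accepts masks of the form 2^n - 1 such as 0x000fffff, which do not fit
 * ANDI but can be done with a single EXT on mips32r2; 'N' accepts
 * -32767..32767 so that the negated constant still fits the signed
 * immediate of ADDIU/DADDIU; 'W' matches exactly the word size (32 or 64)
 * used by the clz expansion below.
 */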
274 /* instruction opcodes */
275 typedef enum {
276 OPC_J = 002 << 26,
277 OPC_JAL = 003 << 26,
278 OPC_BEQ = 004 << 26,
279 OPC_BNE = 005 << 26,
280 OPC_BLEZ = 006 << 26,
281 OPC_BGTZ = 007 << 26,
282 OPC_ADDIU = 011 << 26,
283 OPC_SLTI = 012 << 26,
284 OPC_SLTIU = 013 << 26,
285 OPC_ANDI = 014 << 26,
286 OPC_ORI = 015 << 26,
287 OPC_XORI = 016 << 26,
288 OPC_LUI = 017 << 26,
289 OPC_DADDIU = 031 << 26,
290 OPC_LB = 040 << 26,
291 OPC_LH = 041 << 26,
292 OPC_LW = 043 << 26,
293 OPC_LBU = 044 << 26,
294 OPC_LHU = 045 << 26,
295 OPC_LWU = 047 << 26,
296 OPC_SB = 050 << 26,
297 OPC_SH = 051 << 26,
298 OPC_SW = 053 << 26,
299 OPC_LD = 067 << 26,
300 OPC_SD = 077 << 26,
302 OPC_SPECIAL = 000 << 26,
303 OPC_SLL = OPC_SPECIAL | 000,
304 OPC_SRL = OPC_SPECIAL | 002,
305 OPC_ROTR = OPC_SPECIAL | 002 | (1 << 21),
306 OPC_SRA = OPC_SPECIAL | 003,
307 OPC_SLLV = OPC_SPECIAL | 004,
308 OPC_SRLV = OPC_SPECIAL | 006,
309 OPC_ROTRV = OPC_SPECIAL | 006 | 0100,
310 OPC_SRAV = OPC_SPECIAL | 007,
311 OPC_JR_R5 = OPC_SPECIAL | 010,
312 OPC_JALR = OPC_SPECIAL | 011,
313 OPC_MOVZ = OPC_SPECIAL | 012,
314 OPC_MOVN = OPC_SPECIAL | 013,
315 OPC_SYNC = OPC_SPECIAL | 017,
316 OPC_MFHI = OPC_SPECIAL | 020,
317 OPC_MFLO = OPC_SPECIAL | 022,
318 OPC_DSLLV = OPC_SPECIAL | 024,
319 OPC_DSRLV = OPC_SPECIAL | 026,
320 OPC_DROTRV = OPC_SPECIAL | 026 | 0100,
321 OPC_DSRAV = OPC_SPECIAL | 027,
322 OPC_MULT = OPC_SPECIAL | 030,
323 OPC_MUL_R6 = OPC_SPECIAL | 030 | 0200,
324 OPC_MUH = OPC_SPECIAL | 030 | 0300,
325 OPC_MULTU = OPC_SPECIAL | 031,
326 OPC_MULU = OPC_SPECIAL | 031 | 0200,
327 OPC_MUHU = OPC_SPECIAL | 031 | 0300,
328 OPC_DIV = OPC_SPECIAL | 032,
329 OPC_DIV_R6 = OPC_SPECIAL | 032 | 0200,
330 OPC_MOD = OPC_SPECIAL | 032 | 0300,
331 OPC_DIVU = OPC_SPECIAL | 033,
332 OPC_DIVU_R6 = OPC_SPECIAL | 033 | 0200,
333 OPC_MODU = OPC_SPECIAL | 033 | 0300,
334 OPC_DMULT = OPC_SPECIAL | 034,
335 OPC_DMUL = OPC_SPECIAL | 034 | 0200,
336 OPC_DMUH = OPC_SPECIAL | 034 | 0300,
337 OPC_DMULTU = OPC_SPECIAL | 035,
338 OPC_DMULU = OPC_SPECIAL | 035 | 0200,
339 OPC_DMUHU = OPC_SPECIAL | 035 | 0300,
340 OPC_DDIV = OPC_SPECIAL | 036,
341 OPC_DDIV_R6 = OPC_SPECIAL | 036 | 0200,
342 OPC_DMOD = OPC_SPECIAL | 036 | 0300,
343 OPC_DDIVU = OPC_SPECIAL | 037,
344 OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200,
345 OPC_DMODU = OPC_SPECIAL | 037 | 0300,
346 OPC_ADDU = OPC_SPECIAL | 041,
347 OPC_SUBU = OPC_SPECIAL | 043,
348 OPC_AND = OPC_SPECIAL | 044,
349 OPC_OR = OPC_SPECIAL | 045,
350 OPC_XOR = OPC_SPECIAL | 046,
351 OPC_NOR = OPC_SPECIAL | 047,
352 OPC_SLT = OPC_SPECIAL | 052,
353 OPC_SLTU = OPC_SPECIAL | 053,
354 OPC_DADDU = OPC_SPECIAL | 055,
355 OPC_DSUBU = OPC_SPECIAL | 057,
356 OPC_SELEQZ = OPC_SPECIAL | 065,
357 OPC_SELNEZ = OPC_SPECIAL | 067,
358 OPC_DSLL = OPC_SPECIAL | 070,
359 OPC_DSRL = OPC_SPECIAL | 072,
360 OPC_DROTR = OPC_SPECIAL | 072 | (1 << 21),
361 OPC_DSRA = OPC_SPECIAL | 073,
362 OPC_DSLL32 = OPC_SPECIAL | 074,
363 OPC_DSRL32 = OPC_SPECIAL | 076,
364 OPC_DROTR32 = OPC_SPECIAL | 076 | (1 << 21),
365 OPC_DSRA32 = OPC_SPECIAL | 077,
366 OPC_CLZ_R6 = OPC_SPECIAL | 0120,
367 OPC_DCLZ_R6 = OPC_SPECIAL | 0122,
369 OPC_REGIMM = 001 << 26,
370 OPC_BLTZ = OPC_REGIMM | (000 << 16),
371 OPC_BGEZ = OPC_REGIMM | (001 << 16),
373 OPC_SPECIAL2 = 034 << 26,
374 OPC_MUL_R5 = OPC_SPECIAL2 | 002,
375 OPC_CLZ = OPC_SPECIAL2 | 040,
376 OPC_DCLZ = OPC_SPECIAL2 | 044,
378 OPC_SPECIAL3 = 037 << 26,
379 OPC_EXT = OPC_SPECIAL3 | 000,
380 OPC_DEXTM = OPC_SPECIAL3 | 001,
381 OPC_DEXTU = OPC_SPECIAL3 | 002,
382 OPC_DEXT = OPC_SPECIAL3 | 003,
383 OPC_INS = OPC_SPECIAL3 | 004,
384 OPC_DINSM = OPC_SPECIAL3 | 005,
385 OPC_DINSU = OPC_SPECIAL3 | 006,
386 OPC_DINS = OPC_SPECIAL3 | 007,
387 OPC_WSBH = OPC_SPECIAL3 | 00240,
388 OPC_DSBH = OPC_SPECIAL3 | 00244,
389 OPC_DSHD = OPC_SPECIAL3 | 00544,
390 OPC_SEB = OPC_SPECIAL3 | 02040,
391 OPC_SEH = OPC_SPECIAL3 | 03040,
393     /* MIPS r6 doesn't have JR; JALR should be used instead. */
394 OPC_JR = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,
397 * MIPS r6 replaces MUL with an alternative encoding which is
398 * backwards-compatible at the assembly level.
400 OPC_MUL = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5,
402 /* MIPS r6 introduced names for weaker variants of SYNC. These are
403        backward compatible with previous architecture revisions. */
404 OPC_SYNC_WMB = OPC_SYNC | 0x04 << 5,
405 OPC_SYNC_MB = OPC_SYNC | 0x10 << 5,
406 OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 5,
407 OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 5,
408 OPC_SYNC_RMB = OPC_SYNC | 0x13 << 5,
410 /* Aliases for convenience. */
411 ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU,
412 ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU,
413 ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
414 ? OPC_SRL : OPC_DSRL,
415 } MIPSInsn;
418 * Type reg
420 static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
421 TCGReg rd, TCGReg rs, TCGReg rt)
423 int32_t inst;
425 inst = opc;
426 inst |= (rs & 0x1F) << 21;
427 inst |= (rt & 0x1F) << 16;
428 inst |= (rd & 0x1F) << 11;
429 tcg_out32(s, inst);
433 * Type immediate
435 static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
436 TCGReg rt, TCGReg rs, TCGArg imm)
438 int32_t inst;
440 inst = opc;
441 inst |= (rs & 0x1F) << 21;
442 inst |= (rt & 0x1F) << 16;
443 inst |= (imm & 0xffff);
444 tcg_out32(s, inst);
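/*
 * Encoding example: tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0,
 * TCG_REG_A1) assembles 0x21 | (4 << 21) | (5 << 16) | (2 << 11), i.e.
 * the word 0x00851021, "addu $v0, $a0, $a1".  The immediate form above
 * places the low 16 bits of IMM in the same slot that reloc_pc16()
 * later patches for branches.
 */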
448 * Type bitfield
450 static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
451 TCGReg rs, int msb, int lsb)
453 int32_t inst;
455 inst = opc;
456 inst |= (rs & 0x1F) << 21;
457 inst |= (rt & 0x1F) << 16;
458 inst |= (msb & 0x1F) << 11;
459 inst |= (lsb & 0x1F) << 6;
460 tcg_out32(s, inst);
463 static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm,
464 MIPSInsn oph, TCGReg rt, TCGReg rs,
465 int msb, int lsb)
467 if (lsb >= 32) {
468 opc = oph;
469 msb -= 32;
470 lsb -= 32;
471 } else if (msb >= 32) {
472 opc = opm;
473 msb -= 32;
475 tcg_out_opc_bf(s, opc, rt, rs, msb, lsb);
479 * Type branch
481 static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
482 TCGReg rt, TCGReg rs)
484 /* We pay attention here to not modify the branch target by reading
485        the existing value and using it again. This ensures that caches and
486 memory are kept coherent during retranslation. */
487 uint16_t offset = (uint16_t)*s->code_ptr;
489 tcg_out_opc_imm(s, opc, rt, rs, offset);
493 * Type sa
495 static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
496 TCGReg rd, TCGReg rt, TCGArg sa)
498 int32_t inst;
500 inst = opc;
501 inst |= (rt & 0x1F) << 16;
502 inst |= (rd & 0x1F) << 11;
503 inst |= (sa & 0x1F) << 6;
504 tcg_out32(s, inst);
508 static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
509 TCGReg rd, TCGReg rt, TCGArg sa)
511 int32_t inst;
513 inst = (sa & 32 ? opc2 : opc1);
514 inst |= (rt & 0x1F) << 16;
515 inst |= (rd & 0x1F) << 11;
516 inst |= (sa & 0x1F) << 6;
517 tcg_out32(s, inst);
521 * Type jump.
522 * Returns true if the branch was in range and the insn was emitted.
524 static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
526 uintptr_t dest = (uintptr_t)target;
527 uintptr_t from = (uintptr_t)s->code_ptr + 4;
528 int32_t inst;
530 /* The pc-region branch happens within the 256MB region of
531 the delay slot (thus the +4). */
532 if ((from ^ dest) & -(1 << 28)) {
533 return false;
535 tcg_debug_assert((dest & 3) == 0);
537 inst = opc;
538 inst |= (dest >> 2) & 0x3ffffff;
539 tcg_out32(s, inst);
540 return true;
543 static inline void tcg_out_nop(TCGContext *s)
545 tcg_out32(s, 0);
548 static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
550 tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa);
553 static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
555 tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa);
558 static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
560 tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
563 static inline void tcg_out_mov(TCGContext *s, TCGType type,
564 TCGReg ret, TCGReg arg)
566 /* Simple reg-reg move, optimising out the 'do nothing' case */
567 if (ret != arg) {
568 tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
572 static void tcg_out_movi(TCGContext *s, TCGType type,
573 TCGReg ret, tcg_target_long arg)
575 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
576 arg = (int32_t)arg;
578 if (arg == (int16_t)arg) {
579 tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
580 return;
582 if (arg == (uint16_t)arg) {
583 tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
584 return;
586 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
587 tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
588 } else {
589 tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
590 if (arg & 0xffff0000ull) {
591 tcg_out_dsll(s, ret, ret, 16);
592 tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
593 tcg_out_dsll(s, ret, ret, 16);
594 } else {
595 tcg_out_dsll(s, ret, ret, 32);
598 if (arg & 0xffff) {
599 tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
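/*
 * Example: tcg_out_movi(s, TCG_TYPE_I32, ret, 0x12345678) emits
 * "lui ret, 0x1234" followed by "ori ret, ret, 0x5678".  A constant
 * needing all 64 bits is built top-down: load the high 32 bits, then
 * interleave DSLL-by-16 with ORI of each remaining 16-bit chunk, for a
 * worst case of six instructions.
 */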
603 static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
605 if (use_mips32r2_instructions) {
606 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
607 } else {
608         /* ret and arg can't be the 'at' register (TCG_TMP0) */
609 if (ret == TCG_TMP0 || arg == TCG_TMP0) {
610 tcg_abort();
613 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
614 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
615 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
616 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
620 static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
622 if (use_mips32r2_instructions) {
623 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
624 tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
625 } else {
626         /* ret and arg can't be the 'at' register (TCG_TMP0) */
627 if (ret == TCG_TMP0 || arg == TCG_TMP0) {
628 tcg_abort();
631 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
632 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
633 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
634 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
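/*
 * Example of the pre-R2 fallback, assuming the bits above 15 are already
 * clear as the callers guarantee: for arg = 0x0000aabb, bswap16 computes
 * TMP0 = arg >> 8 = 0xaa and ret = (arg << 8) & 0xff00 = 0xbb00, and the
 * final OR yields 0xbbaa.  bswap16s instead shifts left by 24 and
 * arithmetic-right by 16, so the result is sign-extended from bit 15.
 */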
638 static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub)
640 bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub);
641 tcg_debug_assert(ok);
644 static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
646 if (use_mips32r2_instructions) {
647 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
648 tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
649 } else {
650 tcg_out_bswap_subr(s, bswap32_addr);
651 /* delay slot -- never omit the insn, like tcg_out_mov might. */
652 tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
653 tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
657 static void tcg_out_bswap32u(TCGContext *s, TCGReg ret, TCGReg arg)
659 if (use_mips32r2_instructions) {
660 tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
661 tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
662 tcg_out_dsrl(s, ret, ret, 32);
663 } else {
664 tcg_out_bswap_subr(s, bswap32u_addr);
665 /* delay slot -- never omit the insn, like tcg_out_mov might. */
666 tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
667 tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
671 static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
673 if (use_mips32r2_instructions) {
674 tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
675 tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
676 } else {
677 tcg_out_bswap_subr(s, bswap64_addr);
678 /* delay slot -- never omit the insn, like tcg_out_mov might. */
679 tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
680 tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
684 static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
686 if (use_mips32r2_instructions) {
687 tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
688 } else {
689 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
690 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
694 static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
696 if (use_mips32r2_instructions) {
697 tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
698 } else {
699 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
700 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
704 static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
706 if (use_mips32r2_instructions) {
707 tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
708 } else {
709 tcg_out_dsll(s, ret, arg, 32);
710 tcg_out_dsrl(s, ret, ret, 32);
714 static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
715 TCGReg addr, intptr_t ofs)
717 int16_t lo = ofs;
718 if (ofs != lo) {
719 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
720 if (addr != TCG_REG_ZERO) {
721 tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr);
723 addr = TCG_TMP0;
725 tcg_out_opc_imm(s, opc, data, addr, lo);
728 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
729 TCGReg arg1, intptr_t arg2)
731 MIPSInsn opc = OPC_LD;
732 if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
733 opc = OPC_LW;
735 tcg_out_ldst(s, opc, arg, arg1, arg2);
738 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
739 TCGReg arg1, intptr_t arg2)
741 MIPSInsn opc = OPC_SD;
742 if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
743 opc = OPC_SW;
745 tcg_out_ldst(s, opc, arg, arg1, arg2);
748 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
749 TCGReg base, intptr_t ofs)
751 if (val == 0) {
752 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
753 return true;
755 return false;
758 static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
759 TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
760 bool cbh, bool is_sub)
762 TCGReg th = TCG_TMP1;
764 /* If we have a negative constant such that negating it would
765 make the high part zero, we can (usually) eliminate one insn. */
766 if (cbl && cbh && bh == -1 && bl != 0) {
767 bl = -bl;
768 bh = 0;
769 is_sub = !is_sub;
772 /* By operating on the high part first, we get to use the final
773 carry operation to move back from the temporary. */
774 if (!cbh) {
775 tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
776 } else if (bh != 0 || ah == rl) {
777 tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
778 } else {
779 th = ah;
782 /* Note that tcg optimization should eliminate the bl == 0 case. */
783 if (is_sub) {
784 if (cbl) {
785 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
786 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
787 } else {
788 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
789 tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
791 tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
792 } else {
793 if (cbl) {
794 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
795 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
796 } else if (rl == al && rl == bl) {
797 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
798 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
799 } else {
800 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
801 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
803 tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
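/*
 * The carry/borrow is recovered with an unsigned compare instead of a
 * flags register.  Example for the addition path: with al = 0xffffffff
 * and bl = 1, ADDU produces rl = 0 and SLTU sets TMP0 = (rl < bl) = 1,
 * the carry that is then added into the high half.  For subtraction the
 * borrow is (al < bl), computed before AL is overwritten.
 */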
807 /* Bit 0 set if inversion required; bit 1 set if swapping required. */
808 #define MIPS_CMP_INV 1
809 #define MIPS_CMP_SWAP 2
811 static const uint8_t mips_cmp_map[16] = {
812 [TCG_COND_LT] = 0,
813 [TCG_COND_LTU] = 0,
814 [TCG_COND_GE] = MIPS_CMP_INV,
815 [TCG_COND_GEU] = MIPS_CMP_INV,
816 [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP,
817 [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
818 [TCG_COND_GT] = MIPS_CMP_SWAP,
819 [TCG_COND_GTU] = MIPS_CMP_SWAP,
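/*
 * Example: only SLT/SLTU exist, so TCG_COND_GT is handled by swapping
 * the operands (arg2 < arg1), TCG_COND_GE by computing LT and inverting
 * the result with XORI 1, and LE/LEU need both the swap and the
 * inversion.
 */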
822 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
823 TCGReg arg1, TCGReg arg2)
825 MIPSInsn s_opc = OPC_SLTU;
826 int cmp_map;
828 switch (cond) {
829 case TCG_COND_EQ:
830 if (arg2 != 0) {
831 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
832 arg1 = ret;
834 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
835 break;
837 case TCG_COND_NE:
838 if (arg2 != 0) {
839 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
840 arg1 = ret;
842 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
843 break;
845 case TCG_COND_LT:
846 case TCG_COND_GE:
847 case TCG_COND_LE:
848 case TCG_COND_GT:
849 s_opc = OPC_SLT;
850 /* FALLTHRU */
852 case TCG_COND_LTU:
853 case TCG_COND_GEU:
854 case TCG_COND_LEU:
855 case TCG_COND_GTU:
856 cmp_map = mips_cmp_map[cond];
857 if (cmp_map & MIPS_CMP_SWAP) {
858 TCGReg t = arg1;
859 arg1 = arg2;
860 arg2 = t;
862 tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
863 if (cmp_map & MIPS_CMP_INV) {
864 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
866 break;
868 default:
869 tcg_abort();
870 break;
874 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
875 TCGReg arg2, TCGLabel *l)
877 static const MIPSInsn b_zero[16] = {
878 [TCG_COND_LT] = OPC_BLTZ,
879 [TCG_COND_GT] = OPC_BGTZ,
880 [TCG_COND_LE] = OPC_BLEZ,
881 [TCG_COND_GE] = OPC_BGEZ,
884 MIPSInsn s_opc = OPC_SLTU;
885 MIPSInsn b_opc;
886 int cmp_map;
888 switch (cond) {
889 case TCG_COND_EQ:
890 b_opc = OPC_BEQ;
891 break;
892 case TCG_COND_NE:
893 b_opc = OPC_BNE;
894 break;
896 case TCG_COND_LT:
897 case TCG_COND_GT:
898 case TCG_COND_LE:
899 case TCG_COND_GE:
900 if (arg2 == 0) {
901 b_opc = b_zero[cond];
902 arg2 = arg1;
903 arg1 = 0;
904 break;
906 s_opc = OPC_SLT;
907 /* FALLTHRU */
909 case TCG_COND_LTU:
910 case TCG_COND_GTU:
911 case TCG_COND_LEU:
912 case TCG_COND_GEU:
913 cmp_map = mips_cmp_map[cond];
914 if (cmp_map & MIPS_CMP_SWAP) {
915 TCGReg t = arg1;
916 arg1 = arg2;
917 arg2 = t;
919 tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
920 b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
921 arg1 = TCG_TMP0;
922 arg2 = TCG_REG_ZERO;
923 break;
925 default:
926 tcg_abort();
927 break;
930 tcg_out_opc_br(s, b_opc, arg1, arg2);
931 if (l->has_value) {
932 reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
933 } else {
934 tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
936 tcg_out_nop(s);
939 static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
940 TCGReg al, TCGReg ah,
941 TCGReg bl, TCGReg bh)
943 /* Merge highpart comparison into AH. */
944 if (bh != 0) {
945 if (ah != 0) {
946 tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
947 ah = tmp0;
948 } else {
949 ah = bh;
952 /* Merge lowpart comparison into AL. */
953 if (bl != 0) {
954 if (al != 0) {
955 tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
956 al = tmp1;
957 } else {
958 al = bl;
961 /* Merge high and low part comparisons into AL. */
962 if (ah != 0) {
963 if (al != 0) {
964 tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
965 al = tmp0;
966 } else {
967 al = ah;
970 return al;
973 static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
974 TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
976 TCGReg tmp0 = TCG_TMP0;
977 TCGReg tmp1 = ret;
979 tcg_debug_assert(ret != TCG_TMP0);
980 if (ret == ah || ret == bh) {
981 tcg_debug_assert(ret != TCG_TMP1);
982 tmp1 = TCG_TMP1;
985 switch (cond) {
986 case TCG_COND_EQ:
987 case TCG_COND_NE:
988 tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
989 tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
990 break;
992 default:
993 tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
994 tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
995 tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
996 tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
997 tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
998 break;
1002 static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
1003 TCGReg bl, TCGReg bh, TCGLabel *l)
1005 TCGCond b_cond = TCG_COND_NE;
1006 TCGReg tmp = TCG_TMP1;
1008 /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
1009 With setcond, we emit between 3 and 10 insns and only 1 branch,
1010 which ought to get better branch prediction. */
1011 switch (cond) {
1012 case TCG_COND_EQ:
1013 case TCG_COND_NE:
1014 b_cond = cond;
1015 tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
1016 break;
1018 default:
1019 /* Minimize code size by preferring a compare not requiring INV. */
1020 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
1021 cond = tcg_invert_cond(cond);
1022 b_cond = TCG_COND_EQ;
1024 tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
1025 break;
1028 tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
1031 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
1032 TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
1034 bool eqz = false;
1036 /* If one of the values is zero, put it last to match SEL*Z instructions */
1037 if (use_mips32r6_instructions && v1 == 0) {
1038 v1 = v2;
1039 v2 = 0;
1040 cond = tcg_invert_cond(cond);
1043 switch (cond) {
1044 case TCG_COND_EQ:
1045 eqz = true;
1046 /* FALLTHRU */
1047 case TCG_COND_NE:
1048 if (c2 != 0) {
1049 tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
1050 c1 = TCG_TMP0;
1052 break;
1054 default:
1055 /* Minimize code size by preferring a compare not requiring INV. */
1056 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
1057 cond = tcg_invert_cond(cond);
1058 eqz = true;
1060 tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
1061 c1 = TCG_TMP0;
1062 break;
1065 if (use_mips32r6_instructions) {
1066 MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ;
1067 MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ;
1069 if (v2 != 0) {
1070 tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1);
1072 tcg_out_opc_reg(s, m_opc_t, ret, v1, c1);
1073 if (v2 != 0) {
1074 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1);
1076 } else {
1077 MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN;
1079 tcg_out_opc_reg(s, m_opc, ret, v1, c1);
1081 /* This should be guaranteed via constraints */
1082 tcg_debug_assert(v2 == ret);
1086 static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
1088 /* Note that the ABI requires the called function's address to be
1089 loaded into T9, even if a direct branch is in range. */
1090 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
1092 /* But do try a direct branch, allowing the cpu better insn prefetch. */
1093 if (tail) {
1094 if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
1095 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
1097 } else {
1098 if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
1099 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
1104 static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
1106 tcg_out_call_int(s, arg, false);
1107 tcg_out_nop(s);
1110 #if defined(CONFIG_SOFTMMU)
1111 static void * const qemu_ld_helpers[16] = {
1112 [MO_UB] = helper_ret_ldub_mmu,
1113 [MO_SB] = helper_ret_ldsb_mmu,
1114 [MO_LEUW] = helper_le_lduw_mmu,
1115 [MO_LESW] = helper_le_ldsw_mmu,
1116 [MO_LEUL] = helper_le_ldul_mmu,
1117 [MO_LEQ] = helper_le_ldq_mmu,
1118 [MO_BEUW] = helper_be_lduw_mmu,
1119 [MO_BESW] = helper_be_ldsw_mmu,
1120 [MO_BEUL] = helper_be_ldul_mmu,
1121 [MO_BEQ] = helper_be_ldq_mmu,
1122 #if TCG_TARGET_REG_BITS == 64
1123 [MO_LESL] = helper_le_ldsl_mmu,
1124 [MO_BESL] = helper_be_ldsl_mmu,
1125 #endif
1128 static void * const qemu_st_helpers[16] = {
1129 [MO_UB] = helper_ret_stb_mmu,
1130 [MO_LEUW] = helper_le_stw_mmu,
1131 [MO_LEUL] = helper_le_stl_mmu,
1132 [MO_LEQ] = helper_le_stq_mmu,
1133 [MO_BEUW] = helper_be_stw_mmu,
1134 [MO_BEUL] = helper_be_stl_mmu,
1135 [MO_BEQ] = helper_be_stq_mmu,
1138 /* Helper routines for marshalling helper function arguments into
1139 * the correct registers and stack.
1140 * I is where we want to put this argument, and is updated and returned
1141 * for the next call. ARG is the argument itself.
1143 * We provide routines for arguments which are: immediate, 32 bit
1144 * value in register, 16 and 8 bit values in register (which must be zero
1145 * extended before use) and 64 bit value in a lo:hi register pair.
1148 static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
1150 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1151 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
1152 } else {
1153 /* For N32 and N64, the initial offset is different. But there
1154        we also have 8 argument registers so we don't run out here. */
1155 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
1156 tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
1158 return i + 1;
1161 static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
1163 TCGReg tmp = TCG_TMP0;
1164 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1165 tmp = tcg_target_call_iarg_regs[i];
1167 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
1168 return tcg_out_call_iarg_reg(s, i, tmp);
1171 static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
1173 TCGReg tmp = TCG_TMP0;
1174 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1175 tmp = tcg_target_call_iarg_regs[i];
1177 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
1178 return tcg_out_call_iarg_reg(s, i, tmp);
1181 static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
1183 TCGReg tmp = TCG_TMP0;
1184 if (arg == 0) {
1185 tmp = TCG_REG_ZERO;
1186 } else {
1187 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1188 tmp = tcg_target_call_iarg_regs[i];
1190 tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
1192 return tcg_out_call_iarg_reg(s, i, tmp);
1195 static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
1197 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
1198 i = (i + 1) & ~1;
1199 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
1200 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
1201 return i;
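/*
 * Example: rounding I up to an even index keeps a 64-bit argument in an
 * aligned register pair, as the o32 ABI requires.  A call that has
 * already consumed A0 therefore places the two halves in A2/A3 (high
 * word first on a big-endian host), or in the matching stack slots once
 * the argument registers are exhausted.
 */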
1204 /* Perform the tlb comparison operation. The complete host address is
1205 placed in BASE. Clobbers TMP0, TMP1, TMP2, A0. */
1206 static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
1207 TCGReg addrh, TCGMemOpIdx oi,
1208 tcg_insn_unit *label_ptr[2], bool is_load)
1210 TCGMemOp opc = get_memop(oi);
1211 unsigned s_bits = opc & MO_SIZE;
1212 unsigned a_bits = get_alignment_bits(opc);
1213 target_ulong mask;
1214 int mem_index = get_mmuidx(oi);
1215 int cmp_off
1216 = (is_load
1217 ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
1218 : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
1219 int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1221 tcg_out_opc_sa(s, ALIAS_TSRL, TCG_REG_A0, addrl,
1222 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1223 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
1224 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1225 tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
1227 /* Compensate for very large offsets. */
1228 if (add_off >= 0x8000) {
1229        /* Most target envs are smaller than 32k; none are larger than 64k.
1230 Simplify the logic here merely to offset by 0x7ff0, giving us a
1231 range just shy of 64k. Check this assumption. */
1232 QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
1233 tlb_table[NB_MMU_MODES - 1][1])
1234 > 0x7ff0 + 0x7fff);
1235 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
1236 cmp_off -= 0x7ff0;
1237 add_off -= 0x7ff0;
1240 /* We don't currently support unaligned accesses.
1241 We could do so with mips32r6. */
1242 if (a_bits < s_bits) {
1243 a_bits = s_bits;
1246 mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
1248 /* Load the (low half) tlb comparator. Mask the page bits, keeping the
1249 alignment bits to compare against. */
1250 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1251 tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
1252 tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
1253 } else {
1254 tcg_out_ldst(s,
1255 (TARGET_LONG_BITS == 64 ? OPC_LD
1256 : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
1257 TCG_TMP0, TCG_REG_A0, cmp_off);
1258 tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
1259 /* No second compare is required here;
1260 load the tlb addend for the fast path. */
1261 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
1263 tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
1265 /* Zero extend a 32-bit guest address for a 64-bit host. */
1266 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1267 tcg_out_ext32u(s, base, addrl);
1268 addrl = base;
1271 label_ptr[0] = s->code_ptr;
1272 tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
1274 /* Load and test the high half tlb comparator. */
1275 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1276 /* delay slot */
1277 tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
1279 /* Load the tlb addend for the fast path. */
1280 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
1282 label_ptr[1] = s->code_ptr;
1283 tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
1286 /* delay slot */
1287 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
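/*
 * Worked example of the fast path above, assuming TARGET_PAGE_BITS == 12
 * and 32-byte TLB entries (CPU_TLB_ENTRY_BITS == 5) in a 256-entry TLB:
 * the guest address is shifted right by 7 and masked with 0x1fe0, which
 * is (page_index % 256) * sizeof(CPUTLBEntry); adding TCG_AREG0 (env)
 * gives the address of the TLB entry.  The guest address, masked to its
 * page number but keeping the low bits that must be zero for an aligned
 * access, is then compared with the stored addr_read/addr_write, and on
 * a hit the entry's addend converts the guest address into the host
 * address left in BASE.
 */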
1290 static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
1291 TCGType ext,
1292 TCGReg datalo, TCGReg datahi,
1293 TCGReg addrlo, TCGReg addrhi,
1294 void *raddr, tcg_insn_unit *label_ptr[2])
1296 TCGLabelQemuLdst *label = new_ldst_label(s);
1298 label->is_ld = is_ld;
1299 label->oi = oi;
1300 label->type = ext;
1301 label->datalo_reg = datalo;
1302 label->datahi_reg = datahi;
1303 label->addrlo_reg = addrlo;
1304 label->addrhi_reg = addrhi;
1305 label->raddr = raddr;
1306 label->label_ptr[0] = label_ptr[0];
1307 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1308 label->label_ptr[1] = label_ptr[1];
1312 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1314 TCGMemOpIdx oi = l->oi;
1315 TCGMemOp opc = get_memop(oi);
1316 TCGReg v0;
1317 int i;
1319 /* resolve label address */
1320 reloc_pc16(l->label_ptr[0], s->code_ptr);
1321 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1322 reloc_pc16(l->label_ptr[1], s->code_ptr);
1325 i = 1;
1326 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1327 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1328 } else {
1329 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1331 i = tcg_out_call_iarg_imm(s, i, oi);
1332 i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
1333 tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
1334 /* delay slot */
1335 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1337 v0 = l->datalo_reg;
1338 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
1339 /* We eliminated V0 from the possible output registers, so it
1340 cannot be clobbered here. So we must move V1 first. */
1341 if (MIPS_BE) {
1342 tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
1343 v0 = l->datahi_reg;
1344 } else {
1345 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
1349 reloc_pc16(s->code_ptr, l->raddr);
1350 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
1351 /* delay slot */
1352 if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
1353 /* we always sign-extend 32-bit loads */
1354 tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0);
1355 } else {
1356 tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
1360 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1362 TCGMemOpIdx oi = l->oi;
1363 TCGMemOp opc = get_memop(oi);
1364 TCGMemOp s_bits = opc & MO_SIZE;
1365 int i;
1367 /* resolve label address */
1368 reloc_pc16(l->label_ptr[0], s->code_ptr);
1369 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1370 reloc_pc16(l->label_ptr[1], s->code_ptr);
1373 i = 1;
1374 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1375 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1376 } else {
1377 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1379 switch (s_bits) {
1380 case MO_8:
1381 i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
1382 break;
1383 case MO_16:
1384 i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
1385 break;
1386 case MO_32:
1387 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1388 break;
1389 case MO_64:
1390 if (TCG_TARGET_REG_BITS == 32) {
1391 i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
1392 } else {
1393 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1395 break;
1396 default:
1397 tcg_abort();
1399 i = tcg_out_call_iarg_imm(s, i, oi);
1401 /* Tail call to the store helper. Thus force the return address
1402 computation to take place in the return address register. */
1403 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
1404 i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
1405 tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
1406 /* delay slot */
1407 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1409 #endif
1411 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1412 TCGReg base, TCGMemOp opc, bool is_64)
1414 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1415 case MO_UB:
1416 tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
1417 break;
1418 case MO_SB:
1419 tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
1420 break;
1421 case MO_UW | MO_BSWAP:
1422 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1423 tcg_out_bswap16(s, lo, TCG_TMP1);
1424 break;
1425 case MO_UW:
1426 tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
1427 break;
1428 case MO_SW | MO_BSWAP:
1429 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1430 tcg_out_bswap16s(s, lo, TCG_TMP1);
1431 break;
1432 case MO_SW:
1433 tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
1434 break;
1435 case MO_UL | MO_BSWAP:
1436 if (TCG_TARGET_REG_BITS == 64 && is_64) {
1437 if (use_mips32r2_instructions) {
1438 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
1439 tcg_out_bswap32u(s, lo, lo);
1440 } else {
1441 tcg_out_bswap_subr(s, bswap32u_addr);
1442 /* delay slot */
1443 tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0);
1444 tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
1446 break;
1448 /* FALLTHRU */
1449 case MO_SL | MO_BSWAP:
1450 if (use_mips32r2_instructions) {
1451 tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1452 tcg_out_bswap32(s, lo, lo);
1453 } else {
1454 tcg_out_bswap_subr(s, bswap32_addr);
1455 /* delay slot */
1456 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1457 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3);
1459 break;
1460 case MO_UL:
1461 if (TCG_TARGET_REG_BITS == 64 && is_64) {
1462 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
1463 break;
1465 /* FALLTHRU */
1466 case MO_SL:
1467 tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1468 break;
1469 case MO_Q | MO_BSWAP:
1470 if (TCG_TARGET_REG_BITS == 64) {
1471 if (use_mips32r2_instructions) {
1472 tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
1473 tcg_out_bswap64(s, lo, lo);
1474 } else {
1475 tcg_out_bswap_subr(s, bswap64_addr);
1476 /* delay slot */
1477 tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0);
1478 tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
1480 } else if (use_mips32r2_instructions) {
1481 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1482 tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4);
1483 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
1484 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
1485 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
1486 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
1487 } else {
1488 tcg_out_bswap_subr(s, bswap32_addr);
1489 /* delay slot */
1490 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1491 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4);
1492 tcg_out_bswap_subr(s, bswap32_addr);
1493 /* delay slot */
1494 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
1495 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
1497 break;
1498 case MO_Q:
1499 /* Prefer to load from offset 0 first, but allow for overlap. */
1500 if (TCG_TARGET_REG_BITS == 64) {
1501 tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
1502 } else if (MIPS_BE ? hi != base : lo == base) {
1503 tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
1504 tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
1505 } else {
1506 tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
1507 tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
1509 break;
1510 default:
1511 tcg_abort();
1515 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1517 TCGReg addr_regl, addr_regh __attribute__((unused));
1518 TCGReg data_regl, data_regh;
1519 TCGMemOpIdx oi;
1520 TCGMemOp opc;
1521 #if defined(CONFIG_SOFTMMU)
1522 tcg_insn_unit *label_ptr[2];
1523 #endif
1524 TCGReg base = TCG_REG_A0;
1526 data_regl = *args++;
1527 data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
1528 addr_regl = *args++;
1529 addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
1530 oi = *args++;
1531 opc = get_memop(oi);
1533 #if defined(CONFIG_SOFTMMU)
1534 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
1535 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
1536 add_qemu_ldst_label(s, 1, oi,
1537 (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1538 data_regl, data_regh, addr_regl, addr_regh,
1539 s->code_ptr, label_ptr);
1540 #else
1541 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1542 tcg_out_ext32u(s, base, addr_regl);
1543 addr_regl = base;
1545 if (guest_base == 0 && data_regl != addr_regl) {
1546 base = addr_regl;
1547 } else if (guest_base == (int16_t)guest_base) {
1548 tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
1549 } else {
1550 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, guest_base);
1551 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP0, addr_regl);
1553 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
1554 #endif
1557 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1558 TCGReg base, TCGMemOp opc)
1560 /* Don't clutter the code below with checks to avoid bswapping ZERO. */
1561 if ((lo | hi) == 0) {
1562 opc &= ~MO_BSWAP;
1565 switch (opc & (MO_SIZE | MO_BSWAP)) {
1566 case MO_8:
1567 tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
1568 break;
1570 case MO_16 | MO_BSWAP:
1571 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, lo, 0xffff);
1572 tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
1573 lo = TCG_TMP1;
1574 /* FALLTHRU */
1575 case MO_16:
1576 tcg_out_opc_imm(s, OPC_SH, lo, base, 0);
1577 break;
1579 case MO_32 | MO_BSWAP:
1580 tcg_out_bswap32(s, TCG_TMP3, lo);
1581 lo = TCG_TMP3;
1582 /* FALLTHRU */
1583 case MO_32:
1584 tcg_out_opc_imm(s, OPC_SW, lo, base, 0);
1585 break;
1587 case MO_64 | MO_BSWAP:
1588 if (TCG_TARGET_REG_BITS == 64) {
1589 tcg_out_bswap64(s, TCG_TMP3, lo);
1590 tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0);
1591 } else if (use_mips32r2_instructions) {
1592 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi);
1593 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo);
1594 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
1595 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
1596 tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0);
1597 tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4);
1598 } else {
1599 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi);
1600 tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0);
1601 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo);
1602 tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4);
1604 break;
1605 case MO_64:
1606 if (TCG_TARGET_REG_BITS == 64) {
1607 tcg_out_opc_imm(s, OPC_SD, lo, base, 0);
1608 } else {
1609 tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0);
1610 tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4);
1612 break;
1614 default:
1615 tcg_abort();
1619 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1621 TCGReg addr_regl, addr_regh __attribute__((unused));
1622 TCGReg data_regl, data_regh;
1623 TCGMemOpIdx oi;
1624 TCGMemOp opc;
1625 #if defined(CONFIG_SOFTMMU)
1626 tcg_insn_unit *label_ptr[2];
1627 #endif
1628 TCGReg base = TCG_REG_A0;
1630 data_regl = *args++;
1631 data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
1632 addr_regl = *args++;
1633 addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
1634 oi = *args++;
1635 opc = get_memop(oi);
1637 #if defined(CONFIG_SOFTMMU)
1638 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
1639 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1640 add_qemu_ldst_label(s, 0, oi,
1641 (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1642 data_regl, data_regh, addr_regl, addr_regh,
1643 s->code_ptr, label_ptr);
1644 #else
1645 base = TCG_REG_A0;
1646 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1647 tcg_out_ext32u(s, base, addr_regl);
1648 addr_regl = base;
1650 if (guest_base == 0) {
1651 base = addr_regl;
1652 } else if (guest_base == (int16_t)guest_base) {
1653 tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
1654 } else {
1655 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, guest_base);
1656 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP0, addr_regl);
1658 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1659 #endif
1662 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1664 static const MIPSInsn sync[] = {
1665        /* Note that SYNC_MB is slightly weaker than SYNC 0,
1666 as the former is an ordering barrier and the latter
1667 is a completion barrier. */
1668 [0 ... TCG_MO_ALL] = OPC_SYNC_MB,
1669 [TCG_MO_LD_LD] = OPC_SYNC_RMB,
1670 [TCG_MO_ST_ST] = OPC_SYNC_WMB,
1671 [TCG_MO_LD_ST] = OPC_SYNC_RELEASE,
1672 [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE,
1673 [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE,
1675 tcg_out32(s, sync[a0 & TCG_MO_ALL]);
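/*
 * Example: a pure load-load barrier (TCG_MO_LD_LD) becomes SYNC 0x13
 * (rmb) and a pure store-store barrier SYNC 0x04 (wmb), while any
 * combination without a dedicated lighter form, including TCG_MO_ALL,
 * falls back to SYNC 0x10 (mb).  Cores that do not implement the lighter
 * stype values treat them as the full SYNC 0, so this is safe on older
 * revisions as well.
 */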
1678 static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
1679 int width, TCGReg a0, TCGReg a1, TCGArg a2)
1681 if (use_mips32r6_instructions) {
1682 if (a2 == width) {
1683 tcg_out_opc_reg(s, opcv6, a0, a1, 0);
1684 } else {
1685 tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
1686 tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
1688 } else {
1689 if (a2 == width) {
1690 tcg_out_opc_reg(s, opcv2, a0, a1, a1);
1691 } else if (a0 == a2) {
1692 tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
1693 tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
1694 } else if (a0 != a1) {
1695 tcg_out_opc_reg(s, opcv2, a0, a1, a1);
1696 tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
1697 } else {
1698 tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
1699 tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
1700 tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
1705 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1706 const TCGArg *args, const int *const_args)
1708 MIPSInsn i1, i2;
1709 TCGArg a0, a1, a2;
1710 int c2;
1712 a0 = args[0];
1713 a1 = args[1];
1714 a2 = args[2];
1715 c2 = const_args[2];
1717 switch (opc) {
1718 case INDEX_op_exit_tb:
1720 TCGReg b0 = TCG_REG_ZERO;
1722 a0 = (intptr_t)a0;
1723 if (a0 & ~0xffff) {
1724 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
1725 b0 = TCG_REG_V0;
1727 if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
1728 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
1729 (uintptr_t)tb_ret_addr);
1730 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1732 tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
1734 break;
1735 case INDEX_op_goto_tb:
1736 if (s->tb_jmp_insn_offset) {
1737 /* direct jump method */
1738 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1739 /* Avoid clobbering the address during retranslation. */
1740 tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
1741 } else {
1742 /* indirect jump method */
1743 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
1744 (uintptr_t)(s->tb_jmp_target_addr + a0));
1745 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1747 tcg_out_nop(s);
1748 s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
1749 break;
1750 case INDEX_op_goto_ptr:
1751 /* jmp to the given host address (could be epilogue) */
1752 tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
1753 tcg_out_nop(s);
1754 break;
1755 case INDEX_op_br:
1756 tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
1757 arg_label(a0));
1758 break;
1760 case INDEX_op_ld8u_i32:
1761 case INDEX_op_ld8u_i64:
1762 i1 = OPC_LBU;
1763 goto do_ldst;
1764 case INDEX_op_ld8s_i32:
1765 case INDEX_op_ld8s_i64:
1766 i1 = OPC_LB;
1767 goto do_ldst;
1768 case INDEX_op_ld16u_i32:
1769 case INDEX_op_ld16u_i64:
1770 i1 = OPC_LHU;
1771 goto do_ldst;
1772 case INDEX_op_ld16s_i32:
1773 case INDEX_op_ld16s_i64:
1774 i1 = OPC_LH;
1775 goto do_ldst;
1776 case INDEX_op_ld_i32:
1777 case INDEX_op_ld32s_i64:
1778 i1 = OPC_LW;
1779 goto do_ldst;
1780 case INDEX_op_ld32u_i64:
1781 i1 = OPC_LWU;
1782 goto do_ldst;
1783 case INDEX_op_ld_i64:
1784 i1 = OPC_LD;
1785 goto do_ldst;
1786 case INDEX_op_st8_i32:
1787 case INDEX_op_st8_i64:
1788 i1 = OPC_SB;
1789 goto do_ldst;
1790 case INDEX_op_st16_i32:
1791 case INDEX_op_st16_i64:
1792 i1 = OPC_SH;
1793 goto do_ldst;
1794 case INDEX_op_st_i32:
1795 case INDEX_op_st32_i64:
1796 i1 = OPC_SW;
1797 goto do_ldst;
1798 case INDEX_op_st_i64:
1799 i1 = OPC_SD;
1800 do_ldst:
1801 tcg_out_ldst(s, i1, a0, a1, a2);
1802 break;
1804 case INDEX_op_add_i32:
1805 i1 = OPC_ADDU, i2 = OPC_ADDIU;
1806 goto do_binary;
1807 case INDEX_op_add_i64:
1808 i1 = OPC_DADDU, i2 = OPC_DADDIU;
1809 goto do_binary;
1810 case INDEX_op_or_i32:
1811 case INDEX_op_or_i64:
1812 i1 = OPC_OR, i2 = OPC_ORI;
1813 goto do_binary;
1814 case INDEX_op_xor_i32:
1815 case INDEX_op_xor_i64:
1816 i1 = OPC_XOR, i2 = OPC_XORI;
1817 do_binary:
1818 if (c2) {
1819 tcg_out_opc_imm(s, i2, a0, a1, a2);
1820 break;
1822 do_binaryv:
1823 tcg_out_opc_reg(s, i1, a0, a1, a2);
1824 break;
1826 case INDEX_op_sub_i32:
1827 i1 = OPC_SUBU, i2 = OPC_ADDIU;
1828 goto do_subtract;
1829 case INDEX_op_sub_i64:
1830 i1 = OPC_DSUBU, i2 = OPC_DADDIU;
1831 do_subtract:
1832 if (c2) {
1833 tcg_out_opc_imm(s, i2, a0, a1, -a2);
1834 break;
1836 goto do_binaryv;
1837 case INDEX_op_and_i32:
1838 if (c2 && a2 != (uint16_t)a2) {
1839 int msb = ctz32(~a2) - 1;
1840 tcg_debug_assert(use_mips32r2_instructions);
1841 tcg_debug_assert(is_p2m1(a2));
1842 tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
1843 break;
1845 i1 = OPC_AND, i2 = OPC_ANDI;
1846 goto do_binary;
1847 case INDEX_op_and_i64:
1848 if (c2 && a2 != (uint16_t)a2) {
1849 int msb = ctz64(~a2) - 1;
1850 tcg_debug_assert(use_mips32r2_instructions);
1851 tcg_debug_assert(is_p2m1(a2));
1852 tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
1853 break;
1855 i1 = OPC_AND, i2 = OPC_ANDI;
1856 goto do_binary;
1857 case INDEX_op_nor_i32:
1858 case INDEX_op_nor_i64:
1859 i1 = OPC_NOR;
1860 goto do_binaryv;
1862 case INDEX_op_mul_i32:
1863 if (use_mips32_instructions) {
1864 tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
1865 break;
1867 i1 = OPC_MULT, i2 = OPC_MFLO;
1868 goto do_hilo1;
1869 case INDEX_op_mulsh_i32:
1870 if (use_mips32r6_instructions) {
1871 tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
1872 break;
1874 i1 = OPC_MULT, i2 = OPC_MFHI;
1875 goto do_hilo1;
1876 case INDEX_op_muluh_i32:
1877 if (use_mips32r6_instructions) {
1878 tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
1879 break;
1881 i1 = OPC_MULTU, i2 = OPC_MFHI;
1882 goto do_hilo1;
1883 case INDEX_op_div_i32:
1884 if (use_mips32r6_instructions) {
1885 tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
1886 break;
1888 i1 = OPC_DIV, i2 = OPC_MFLO;
1889 goto do_hilo1;
1890 case INDEX_op_divu_i32:
1891 if (use_mips32r6_instructions) {
1892 tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
1893 break;
1895 i1 = OPC_DIVU, i2 = OPC_MFLO;
1896 goto do_hilo1;
1897 case INDEX_op_rem_i32:
1898 if (use_mips32r6_instructions) {
1899 tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
1900 break;
1902 i1 = OPC_DIV, i2 = OPC_MFHI;
1903 goto do_hilo1;
1904 case INDEX_op_remu_i32:
1905 if (use_mips32r6_instructions) {
1906 tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
1907 break;
1909 i1 = OPC_DIVU, i2 = OPC_MFHI;
1910 goto do_hilo1;
1911 case INDEX_op_mul_i64:
1912 if (use_mips32r6_instructions) {
1913 tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
1914 break;
1916 i1 = OPC_DMULT, i2 = OPC_MFLO;
1917 goto do_hilo1;
1918 case INDEX_op_mulsh_i64:
1919 if (use_mips32r6_instructions) {
1920 tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
1921 break;
1923 i1 = OPC_DMULT, i2 = OPC_MFHI;
1924 goto do_hilo1;
1925 case INDEX_op_muluh_i64:
1926 if (use_mips32r6_instructions) {
1927 tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
1928 break;
1930 i1 = OPC_DMULTU, i2 = OPC_MFHI;
1931 goto do_hilo1;
1932 case INDEX_op_div_i64:
1933 if (use_mips32r6_instructions) {
1934 tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
1935 break;
1937 i1 = OPC_DDIV, i2 = OPC_MFLO;
1938 goto do_hilo1;
1939 case INDEX_op_divu_i64:
1940 if (use_mips32r6_instructions) {
1941 tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
1942 break;
1944 i1 = OPC_DDIVU, i2 = OPC_MFLO;
1945 goto do_hilo1;
1946 case INDEX_op_rem_i64:
1947 if (use_mips32r6_instructions) {
1948 tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
1949 break;
1951 i1 = OPC_DDIV, i2 = OPC_MFHI;
1952 goto do_hilo1;
1953 case INDEX_op_remu_i64:
1954 if (use_mips32r6_instructions) {
1955 tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
1956 break;
1958 i1 = OPC_DDIVU, i2 = OPC_MFHI;
1959 do_hilo1:
1960 tcg_out_opc_reg(s, i1, 0, a1, a2);
1961 tcg_out_opc_reg(s, i2, a0, 0, 0);
1962 break;
1964 case INDEX_op_muls2_i32:
1965 i1 = OPC_MULT;
1966 goto do_hilo2;
1967 case INDEX_op_mulu2_i32:
1968 i1 = OPC_MULTU;
1969 goto do_hilo2;
1970 case INDEX_op_muls2_i64:
1971 i1 = OPC_DMULT;
1972 goto do_hilo2;
1973 case INDEX_op_mulu2_i64:
1974 i1 = OPC_DMULTU;
1975 do_hilo2:
1976 tcg_out_opc_reg(s, i1, 0, a2, args[3]);
1977 tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
1978 tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
1979 break;
1981 case INDEX_op_not_i32:
1982 case INDEX_op_not_i64:
1983 i1 = OPC_NOR;
1984 goto do_unary;
1985 case INDEX_op_bswap16_i32:
1986 case INDEX_op_bswap16_i64:
1987 i1 = OPC_WSBH;
1988 goto do_unary;
1989 case INDEX_op_ext8s_i32:
1990 case INDEX_op_ext8s_i64:
1991 i1 = OPC_SEB;
1992 goto do_unary;
1993 case INDEX_op_ext16s_i32:
1994 case INDEX_op_ext16s_i64:
1995 i1 = OPC_SEH;
1996 do_unary:
1997 tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
1998 break;
2000 case INDEX_op_bswap32_i32:
2001 tcg_out_bswap32(s, a0, a1);
2002 break;
2003 case INDEX_op_bswap32_i64:
2004 tcg_out_bswap32u(s, a0, a1);
2005 break;
2006 case INDEX_op_bswap64_i64:
2007 tcg_out_bswap64(s, a0, a1);
2008 break;
2009 case INDEX_op_extrh_i64_i32:
2010 tcg_out_dsra(s, a0, a1, 32);
2011 break;
2012 case INDEX_op_ext32s_i64:
2013 case INDEX_op_ext_i32_i64:
2014 case INDEX_op_extrl_i64_i32:
2015 tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0);
2016 break;
2017 case INDEX_op_ext32u_i64:
2018 case INDEX_op_extu_i32_i64:
2019 tcg_out_ext32u(s, a0, a1);
2020 break;
2022 case INDEX_op_sar_i32:
2023 i1 = OPC_SRAV, i2 = OPC_SRA;
2024 goto do_shift;
2025 case INDEX_op_shl_i32:
2026 i1 = OPC_SLLV, i2 = OPC_SLL;
2027 goto do_shift;
2028 case INDEX_op_shr_i32:
2029 i1 = OPC_SRLV, i2 = OPC_SRL;
2030 goto do_shift;
2031 case INDEX_op_rotr_i32:
2032 i1 = OPC_ROTRV, i2 = OPC_ROTR;
2033 do_shift:
2034 if (c2) {
2035 tcg_out_opc_sa(s, i2, a0, a1, a2);
2036 break;
2037 }
2038 do_shiftv:
2039 tcg_out_opc_reg(s, i1, a0, a2, a1);
2040 break;
2041 case INDEX_op_rotl_i32:
2042 if (c2) {
2043 tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
2044 } else {
2045 tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
2046 tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
2047 }
2048 break;
2049 case INDEX_op_sar_i64:
2050 if (c2) {
2051 tcg_out_dsra(s, a0, a1, a2);
2052 break;
2053 }
2054 i1 = OPC_DSRAV;
2055 goto do_shiftv;
2056 case INDEX_op_shl_i64:
2057 if (c2) {
2058 tcg_out_dsll(s, a0, a1, a2);
2059 break;
2060 }
2061 i1 = OPC_DSLLV;
2062 goto do_shiftv;
2063 case INDEX_op_shr_i64:
2064 if (c2) {
2065 tcg_out_dsrl(s, a0, a1, a2);
2066 break;
2067 }
2068 i1 = OPC_DSRLV;
2069 goto do_shiftv;
2070 case INDEX_op_rotr_i64:
2071 if (c2) {
2072 tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
2073 break;
2074 }
2075 i1 = OPC_DROTRV;
2076 goto do_shiftv;
2077 case INDEX_op_rotl_i64:
2078 if (c2) {
2079 tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
2080 } else {
2081 tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
2082 tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
2083 }
2084 break;
2086 case INDEX_op_clz_i32:
2087 tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
2088 break;
2089 case INDEX_op_clz_i64:
2090 tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
2091 break;
2093 case INDEX_op_deposit_i32:
2094 tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
2095 break;
2096 case INDEX_op_deposit_i64:
2097 tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
2098 args[3] + args[4] - 1, args[3]);
2099 break;
2100 case INDEX_op_extract_i32:
2101 tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
2102 break;
2103 case INDEX_op_extract_i64:
2104 tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
2105 args[3] - 1, a2);
2106 break;
2108 case INDEX_op_brcond_i32:
2109 case INDEX_op_brcond_i64:
2110 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
2111 break;
2112 case INDEX_op_brcond2_i32:
2113 tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
2114 break;
2116 case INDEX_op_movcond_i32:
2117 case INDEX_op_movcond_i64:
2118 tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
2119 break;
2121 case INDEX_op_setcond_i32:
2122 case INDEX_op_setcond_i64:
2123 tcg_out_setcond(s, args[3], a0, a1, a2);
2124 break;
2125 case INDEX_op_setcond2_i32:
2126 tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
2127 break;
2129 case INDEX_op_qemu_ld_i32:
2130 tcg_out_qemu_ld(s, args, false);
2131 break;
2132 case INDEX_op_qemu_ld_i64:
2133 tcg_out_qemu_ld(s, args, true);
2134 break;
2135 case INDEX_op_qemu_st_i32:
2136 tcg_out_qemu_st(s, args, false);
2137 break;
2138 case INDEX_op_qemu_st_i64:
2139 tcg_out_qemu_st(s, args, true);
2140 break;
2142 case INDEX_op_add2_i32:
2143 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2144 const_args[4], const_args[5], false);
2145 break;
2146 case INDEX_op_sub2_i32:
2147 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2148 const_args[4], const_args[5], true);
2149 break;
2151 case INDEX_op_mb:
2152 tcg_out_mb(s, a0);
2153 break;
2154 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2155 case INDEX_op_mov_i64:
2156 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2157 case INDEX_op_movi_i64:
2158 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2159 default:
2160 tcg_abort();
2161 }
2162 }
2164 static const TCGTargetOpDef mips_op_defs[] = {
2165 { INDEX_op_exit_tb, { } },
2166 { INDEX_op_goto_tb, { } },
2167 { INDEX_op_br, { } },
2168 { INDEX_op_goto_ptr, { "r" } },
2170 { INDEX_op_ld8u_i32, { "r", "r" } },
2171 { INDEX_op_ld8s_i32, { "r", "r" } },
2172 { INDEX_op_ld16u_i32, { "r", "r" } },
2173 { INDEX_op_ld16s_i32, { "r", "r" } },
2174 { INDEX_op_ld_i32, { "r", "r" } },
2175 { INDEX_op_st8_i32, { "rZ", "r" } },
2176 { INDEX_op_st16_i32, { "rZ", "r" } },
2177 { INDEX_op_st_i32, { "rZ", "r" } },
2179 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
2180 { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
2181 #if !use_mips32r6_instructions
2182 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
2183 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
2184 #endif
2185 { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
2186 { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
2187 { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
2188 { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
2189 { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
2190 { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
2191 { INDEX_op_sub_i32, { "r", "rZ", "rN" } },
2193 { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
2194 { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
2195 { INDEX_op_not_i32, { "r", "rZ" } },
2196 { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
2197 { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
2199 { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
2200 { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
2201 { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
2202 { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
2203 { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
2204 { INDEX_op_clz_i32, { "r", "r", "rWZ" } },
2206 { INDEX_op_bswap16_i32, { "r", "r" } },
2207 { INDEX_op_bswap32_i32, { "r", "r" } },
2209 { INDEX_op_ext8s_i32, { "r", "rZ" } },
2210 { INDEX_op_ext16s_i32, { "r", "rZ" } },
2212 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
2213 { INDEX_op_extract_i32, { "r", "r" } },
2215 { INDEX_op_brcond_i32, { "rZ", "rZ" } },
2216 #if use_mips32r6_instructions
2217 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
2218 #else
2219 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
2220 #endif
2221 { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
2223 #if TCG_TARGET_REG_BITS == 32
2224 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
2225 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
2226 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
2227 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
2228 #endif
2230 #if TCG_TARGET_REG_BITS == 64
2231 { INDEX_op_ld8u_i64, { "r", "r" } },
2232 { INDEX_op_ld8s_i64, { "r", "r" } },
2233 { INDEX_op_ld16u_i64, { "r", "r" } },
2234 { INDEX_op_ld16s_i64, { "r", "r" } },
2235 { INDEX_op_ld32s_i64, { "r", "r" } },
2236 { INDEX_op_ld32u_i64, { "r", "r" } },
2237 { INDEX_op_ld_i64, { "r", "r" } },
2238 { INDEX_op_st8_i64, { "rZ", "r" } },
2239 { INDEX_op_st16_i64, { "rZ", "r" } },
2240 { INDEX_op_st32_i64, { "rZ", "r" } },
2241 { INDEX_op_st_i64, { "rZ", "r" } },
2243 { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
2244 { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
2245 #if !use_mips32r6_instructions
2246 { INDEX_op_muls2_i64, { "r", "r", "rZ", "rZ" } },
2247 { INDEX_op_mulu2_i64, { "r", "r", "rZ", "rZ" } },
2248 #endif
2249 { INDEX_op_mulsh_i64, { "r", "rZ", "rZ" } },
2250 { INDEX_op_muluh_i64, { "r", "rZ", "rZ" } },
2251 { INDEX_op_div_i64, { "r", "rZ", "rZ" } },
2252 { INDEX_op_divu_i64, { "r", "rZ", "rZ" } },
2253 { INDEX_op_rem_i64, { "r", "rZ", "rZ" } },
2254 { INDEX_op_remu_i64, { "r", "rZ", "rZ" } },
2255 { INDEX_op_sub_i64, { "r", "rZ", "rN" } },
2257 { INDEX_op_and_i64, { "r", "rZ", "rIK" } },
2258 { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
2259 { INDEX_op_not_i64, { "r", "rZ" } },
2260 { INDEX_op_or_i64, { "r", "rZ", "rI" } },
2261 { INDEX_op_xor_i64, { "r", "rZ", "rI" } },
2263 { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
2264 { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
2265 { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
2266 { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
2267 { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
2268 { INDEX_op_clz_i64, { "r", "r", "rWZ" } },
2270 { INDEX_op_bswap16_i64, { "r", "r" } },
2271 { INDEX_op_bswap32_i64, { "r", "r" } },
2272 { INDEX_op_bswap64_i64, { "r", "r" } },
2274 { INDEX_op_ext8s_i64, { "r", "rZ" } },
2275 { INDEX_op_ext16s_i64, { "r", "rZ" } },
2276 { INDEX_op_ext32s_i64, { "r", "rZ" } },
2277 { INDEX_op_ext32u_i64, { "r", "rZ" } },
2278 { INDEX_op_ext_i32_i64, { "r", "rZ" } },
2279 { INDEX_op_extu_i32_i64, { "r", "rZ" } },
2280 { INDEX_op_extrl_i64_i32, { "r", "rZ" } },
2281 { INDEX_op_extrh_i64_i32, { "r", "rZ" } },
2283 { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
2284 { INDEX_op_extract_i64, { "r", "r" } },
2286 { INDEX_op_brcond_i64, { "rZ", "rZ" } },
2287 #if use_mips32r6_instructions
2288 { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "rZ" } },
2289 #else
2290 { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "0" } },
2291 #endif
2292 { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
2294 { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
2295 { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
2296 { INDEX_op_qemu_ld_i64, { "r", "LZ" } },
2297 { INDEX_op_qemu_st_i64, { "SZ", "SZ" } },
2298 #elif TARGET_LONG_BITS == 32
2299 { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
2300 { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
2301 { INDEX_op_qemu_ld_i64, { "r", "r", "LZ" } },
2302 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
2303 #else
2304 { INDEX_op_qemu_ld_i32, { "r", "LZ", "LZ" } },
2305 { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
2306 { INDEX_op_qemu_ld_i64, { "r", "r", "LZ", "LZ" } },
2307 { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
2308 #endif
2310 { INDEX_op_mb, { } },
2311 { -1 },
2312 };
2314 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2315 {
2316 int i, n = ARRAY_SIZE(mips_op_defs);
2318 for (i = 0; i < n; ++i) {
2319 if (mips_op_defs[i].op == op) {
2320 return &mips_op_defs[i];
2321 }
2322 }
2323 return NULL;
2324 }
2326 static int tcg_target_callee_save_regs[] = {
2327 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
2328 TCG_REG_S1,
2329 TCG_REG_S2,
2330 TCG_REG_S3,
2331 TCG_REG_S4,
2332 TCG_REG_S5,
2333 TCG_REG_S6,
2334 TCG_REG_S7,
2335 TCG_REG_S8,
2336 TCG_REG_RA, /* should be last for ABI compliance */
2337 };
2339 /* The Linux kernel doesn't provide any information about the available
2340 instruction set. Probe it using a signal handler. */
2343 #ifndef use_movnz_instructions
2344 bool use_movnz_instructions = false;
2345 #endif
2347 #ifndef use_mips32_instructions
2348 bool use_mips32_instructions = false;
2349 #endif
2351 #ifndef use_mips32r2_instructions
2352 bool use_mips32r2_instructions = false;
2353 #endif
2355 static volatile sig_atomic_t got_sigill;
2357 static void sigill_handler(int signo, siginfo_t *si, void *data)
2358 {
2359 /* Skip the faulty instruction */
2360 ucontext_t *uc = (ucontext_t *)data;
2361 uc->uc_mcontext.pc += 4;
2363 got_sigill = 1;
2364 }
2366 static void tcg_target_detect_isa(void)
2367 {
2368 struct sigaction sa_old, sa_new;
2370 memset(&sa_new, 0, sizeof(sa_new));
2371 sa_new.sa_flags = SA_SIGINFO;
2372 sa_new.sa_sigaction = sigill_handler;
2373 sigaction(SIGILL, &sa_new, &sa_old);
2375 /* Probe for movn/movz, necessary to implement movcond. */
2376 #ifndef use_movnz_instructions
2377 got_sigill = 0;
2378 asm volatile(".set push\n"
2379 ".set mips32\n"
2380 "movn $zero, $zero, $zero\n"
2381 "movz $zero, $zero, $zero\n"
2382 ".set pop\n"
2383 : : : );
2384 use_movnz_instructions = !got_sigill;
2385 #endif
2387 /* Probe for MIPS32 instructions. As no subsetting is allowed
2388 by the specification, it is only necessary to probe for one
2389 of the instructions. */
2390 #ifndef use_mips32_instructions
2391 got_sigill = 0;
2392 asm volatile(".set push\n"
2393 ".set mips32\n"
2394 "mul $zero, $zero\n"
2395 ".set pop\n"
2396 : : : );
2397 use_mips32_instructions = !got_sigill;
2398 #endif
2400 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
2401 available. As no subsetting is allowed by the specification,
2402 it is only necessary to probe for one of the instructions. */
2403 #ifndef use_mips32r2_instructions
2404 if (use_mips32_instructions) {
2405 got_sigill = 0;
2406 asm volatile(".set push\n"
2407 ".set mips32r2\n"
2408 "seb $zero, $zero\n"
2409 ".set pop\n"
2410 : : : );
2411 use_mips32r2_instructions = !got_sigill;
2412 }
2413 #endif
2415 sigaction(SIGILL, &sa_old, NULL);
2416 }
2418 static tcg_insn_unit *align_code_ptr(TCGContext *s)
2419 {
2420 uintptr_t p = (uintptr_t)s->code_ptr;
2421 if (p & 15) {
2422 p = (p + 15) & -16;
2423 s->code_ptr = (void *)p;
2424 }
2425 return s->code_ptr;
2426 }
2428 /* Stack frame parameters. */
2429 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
2430 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2431 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2433 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2434 + TCG_TARGET_STACK_ALIGN - 1) \
2435 & -TCG_TARGET_STACK_ALIGN)
2436 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2438 /* We're expecting to be able to use an immediate for frame allocation. */
2439 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff);
2441 /* Generate global QEMU prologue and epilogue code */
2442 static void tcg_target_qemu_prologue(TCGContext *s)
2443 {
2444 int i;
2446 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2448 /* TB prologue */
2449 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2450 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2451 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2452 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2453 }
2455 /* Call generated code */
2456 tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
2457 /* delay slot */
2458 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2460 /*
2461 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2462 * and fall through to the rest of the epilogue.
2463 */
2464 s->code_gen_epilogue = s->code_ptr;
2465 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);
2467 /* TB epilogue */
2468 tb_ret_addr = s->code_ptr;
2469 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2470 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2471 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2472 }
2474 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2475 /* delay slot */
2476 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2478 if (use_mips32r2_instructions) {
2479 return;
2480 }
2482 /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3;
2483 clobbers TCG_TMP1, TCG_TMP2. */
2485 /*
2486 * bswap32 -- 32-bit swap (signed result for mips64). a0 = abcd.
2487 */
2488 bswap32_addr = align_code_ptr(s);
2489 /* t3 = (ssss)d000 */
2490 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24);
2491 /* t1 = 000a */
2492 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24);
2493 /* t2 = 00c0 */
2494 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2495 /* t3 = d00a */
2496 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2497 /* t1 = 0abc */
2498 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
2499 /* t2 = 0c00 */
2500 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
2501 /* t1 = 00b0 */
2502 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2503 /* t3 = dc0a */
2504 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2505 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2506 /* t3 = dcba -- delay slot */
2507 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2509 if (TCG_TARGET_REG_BITS == 32) {
2510 return;
2511 }
2513 /*
2514 * bswap32u -- unsigned 32-bit swap. a0 = ....abcd.
2515 */
2516 bswap32u_addr = align_code_ptr(s);
2517 /* t1 = (0000)000d */
2518 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff);
2519 /* t3 = 000a */
2520 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24);
2521 /* t1 = (0000)d000 */
2522 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
2523 /* t2 = 00c0 */
2524 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2525 /* t3 = d00a */
2526 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2527 /* t1 = 0abc */
2528 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
2529 /* t2 = 0c00 */
2530 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
2531 /* t1 = 00b0 */
2532 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2533 /* t3 = dc0a */
2534 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2535 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2536 /* t3 = dcba -- delay slot */
2537 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2539 /*
2540 * bswap64 -- 64-bit swap. a0 = abcdefgh
2541 */
2542 bswap64_addr = align_code_ptr(s);
2543 /* t3 = h0000000 */
2544 tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56);
2545 /* t1 = 0000000a */
2546 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56);
2548 /* t2 = 000000g0 */
2549 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2550 /* t3 = h000000a */
2551 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2552 /* t1 = 00000abc */
2553 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40);
2554 /* t2 = 0g000000 */
2555 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
2556 /* t1 = 000000b0 */
2557 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2559 /* t3 = hg00000a */
2560 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2561 /* t2 = 0000abcd */
2562 tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32);
2563 /* t3 = hg0000ba */
2564 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2566 /* t1 = 000000c0 */
2567 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00);
2568 /* t2 = 0000000d */
2569 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff);
2570 /* t1 = 00000c00 */
2571 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8);
2572 /* t2 = 0000d000 */
2573 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24);
2575 /* t3 = hg000cba */
2576 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2577 /* t1 = 00abcdef */
2578 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16);
2579 /* t3 = hg00dcba */
2580 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2582 /* t2 = 0000000f */
2583 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff);
2584 /* t1 = 000000e0 */
2585 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2586 /* t2 = 00f00000 */
2587 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
2588 /* t1 = 000e0000 */
2589 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
2591 /* t3 = hgf0dcba */
2592 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2593 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2594 /* t3 = hgfedcba -- delay slot */
2595 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2596 }
2598 static void tcg_target_init(TCGContext *s)
2599 {
2600 tcg_target_detect_isa();
2601 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
2602 if (TCG_TARGET_REG_BITS == 64) {
2603 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64], 0xffffffff);
2604 }
2605 tcg_regset_set(tcg_target_call_clobber_regs,
2606 (1 << TCG_REG_V0) |
2607 (1 << TCG_REG_V1) |
2608 (1 << TCG_REG_A0) |
2609 (1 << TCG_REG_A1) |
2610 (1 << TCG_REG_A2) |
2611 (1 << TCG_REG_A3) |
2612 (1 << TCG_REG_T0) |
2613 (1 << TCG_REG_T1) |
2614 (1 << TCG_REG_T2) |
2615 (1 << TCG_REG_T3) |
2616 (1 << TCG_REG_T4) |
2617 (1 << TCG_REG_T5) |
2618 (1 << TCG_REG_T6) |
2619 (1 << TCG_REG_T7) |
2620 (1 << TCG_REG_T8) |
2621 (1 << TCG_REG_T9));
2623 tcg_regset_clear(s->reserved_regs);
2624 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
2625 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
2626 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
2627 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */
2628 tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */
2629 tcg_regset_set_reg(s->reserved_regs, TCG_TMP2); /* internal use */
2630 tcg_regset_set_reg(s->reserved_regs, TCG_TMP3); /* internal use */
2631 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
2632 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
2633 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
2634 }
2636 void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
2637 {
2638 atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
2639 flush_icache_range(jmp_addr, jmp_addr + 4);
2640 }
2642 typedef struct {
2643 DebugFrameHeader h;
2644 uint8_t fde_def_cfa[4];
2645 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2646 } DebugFrame;
2648 #define ELF_HOST_MACHINE EM_MIPS
2649 /* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
2650 which is good because they're really quite complicated for MIPS. */
2652 static const DebugFrame debug_frame = {
2653 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2654 .h.cie.id = -1,
2655 .h.cie.version = 1,
2656 .h.cie.code_align = 1,
2657 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2658 .h.cie.return_column = TCG_REG_RA,
2660 /* Total FDE size does not include the "len" member. */
2661 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2663 .fde_def_cfa = {
2664 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
2665 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2666 (FRAME_SIZE >> 7)
2667 },
2668 .fde_reg_ofs = {
2669 0x80 + 16, 9, /* DW_CFA_offset, s0, -72 */
2670 0x80 + 17, 8, /* DW_CFA_offset, s1, -64 */
2671 0x80 + 18, 7, /* DW_CFA_offset, s2, -56 */
2672 0x80 + 19, 6, /* DW_CFA_offset, s3, -48 */
2673 0x80 + 20, 5, /* DW_CFA_offset, s4, -40 */
2674 0x80 + 21, 4, /* DW_CFA_offset, s5, -32 */
2675 0x80 + 22, 3, /* DW_CFA_offset, s6, -24 */
2676 0x80 + 30, 2, /* DW_CFA_offset, s8, -16 */
2677 0x80 + 31, 1, /* DW_CFA_offset, ra, -8 */
2678 }
2679 };
2681 void tcg_register_jit(void *buf, size_t buf_size)
2682 {
2683 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2684 }