tcg-mips: Move softmmu slow path out of line
[qemu/ar7.git] / tcg / mips / tcg-target.c
blobe7dbb3b79fd709b3f2a0ff6573ff9e446aa57722
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
27 #include "tcg-be-ldst.h"
29 #ifdef HOST_WORDS_BIGENDIAN
30 # define MIPS_BE 1
31 #else
32 # define MIPS_BE 0
33 #endif
35 #define LO_OFF (MIPS_BE * 4)
36 #define HI_OFF (4 - LO_OFF)
38 #ifndef NDEBUG
39 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
40 "zero",
41 "at",
42 "v0",
43 "v1",
44 "a0",
45 "a1",
46 "a2",
47 "a3",
48 "t0",
49 "t1",
50 "t2",
51 "t3",
52 "t4",
53 "t5",
54 "t6",
55 "t7",
56 "s0",
57 "s1",
58 "s2",
59 "s3",
60 "s4",
61 "s5",
62 "s6",
63 "s7",
64 "t8",
65 "t9",
66 "k0",
67 "k1",
68 "gp",
69 "sp",
70 "fp",
71 "ra",
73 #endif
75 /* check if we really need so many registers :P */
76 static const TCGReg tcg_target_reg_alloc_order[] = {
77 TCG_REG_S0,
78 TCG_REG_S1,
79 TCG_REG_S2,
80 TCG_REG_S3,
81 TCG_REG_S4,
82 TCG_REG_S5,
83 TCG_REG_S6,
84 TCG_REG_S7,
85 TCG_REG_T1,
86 TCG_REG_T2,
87 TCG_REG_T3,
88 TCG_REG_T4,
89 TCG_REG_T5,
90 TCG_REG_T6,
91 TCG_REG_T7,
92 TCG_REG_T8,
93 TCG_REG_T9,
94 TCG_REG_A0,
95 TCG_REG_A1,
96 TCG_REG_A2,
97 TCG_REG_A3,
98 TCG_REG_V0,
99 TCG_REG_V1
102 static const TCGReg tcg_target_call_iarg_regs[4] = {
103 TCG_REG_A0,
104 TCG_REG_A1,
105 TCG_REG_A2,
106 TCG_REG_A3
109 static const TCGReg tcg_target_call_oarg_regs[2] = {
110 TCG_REG_V0,
111 TCG_REG_V1
/* Address patched into the epilogue; TBs branch here to return to the
   main loop (filled in when the prologue is generated).  */
static tcg_insn_unit *tb_ret_addr;
116 static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
118 /* Let the compiler perform the right-shift as part of the arithmetic. */
119 ptrdiff_t disp = target - (pc + 1);
120 assert(disp == (int16_t)disp);
121 return disp & 0xffff;
124 static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
126 *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
129 static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
131 assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
132 return ((uintptr_t)target >> 2) & 0x3ffffff;
135 static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
137 *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
140 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
141 intptr_t value, intptr_t addend)
143 assert(type == R_MIPS_PC16);
144 assert(addend == 0);
145 reloc_pc16(code_ptr, (tcg_insn_unit *)value);
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        break;
    case 'L': /* qemu_ld output arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        /* V0 holds the helper's return value in the slow path, so the
           output must not be allocated there.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
        break;
    case 'l': /* qemu_ld input arg constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        /* A0 is clobbered as a scratch register by tcg_out_tlb_load.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        if (TARGET_LONG_BITS == 64) {
            /* A2 carries part of the 64-bit address to the helper call.  */
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
        }
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set(ct->u.regs, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
        /* Keep the registers used to marshal address/data to the store
           helper free; which ones depends on the guest address width.  */
        if (TARGET_LONG_BITS == 32) {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
        } else {
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
            tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
        }
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'Z':
        /* We are cheating a bit here, using the fact that the register
           ZERO is also the register number 0. Hence there is no need
           to check for const_args in each instruction. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
207 /* test if a constant matches the constraint */
208 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
209 const TCGArgConstraint *arg_ct)
211 int ct;
212 ct = arg_ct->ct;
213 if (ct & TCG_CT_CONST)
214 return 1;
215 else if ((ct & TCG_CT_CONST_ZERO) && val == 0)
216 return 1;
217 else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val)
218 return 1;
219 else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val)
220 return 1;
221 else
222 return 0;
/* instruction opcodes */
/* Encodings follow the MIPS32 instruction formats: the major opcode in
   bits 31..26, with SPECIAL/SPECIAL2/SPECIAL3/REGIMM function or rt
   fields folded into the enumerator value.  */
enum {
    OPC_J        = 0x02 << 26,
    OPC_JAL      = 0x03 << 26,
    OPC_BEQ      = 0x04 << 26,
    OPC_BNE      = 0x05 << 26,
    OPC_BLEZ     = 0x06 << 26,
    OPC_BGTZ     = 0x07 << 26,
    OPC_ADDIU    = 0x09 << 26,
    OPC_SLTI     = 0x0A << 26,
    OPC_SLTIU    = 0x0B << 26,
    OPC_ANDI     = 0x0C << 26,
    OPC_ORI      = 0x0D << 26,
    OPC_XORI     = 0x0E << 26,
    OPC_LUI      = 0x0F << 26,
    OPC_LB       = 0x20 << 26,
    OPC_LH       = 0x21 << 26,
    OPC_LW       = 0x23 << 26,
    OPC_LBU      = 0x24 << 26,
    OPC_LHU      = 0x25 << 26,
    OPC_LWU      = 0x27 << 26,
    OPC_SB       = 0x28 << 26,
    OPC_SH       = 0x29 << 26,
    OPC_SW       = 0x2B << 26,

    OPC_SPECIAL  = 0x00 << 26,
    OPC_SLL      = OPC_SPECIAL | 0x00,
    OPC_SRL      = OPC_SPECIAL | 0x02,
    OPC_ROTR     = OPC_SPECIAL | (0x01 << 21) | 0x02,
    OPC_SRA      = OPC_SPECIAL | 0x03,
    OPC_SLLV     = OPC_SPECIAL | 0x04,
    OPC_SRLV     = OPC_SPECIAL | 0x06,
    OPC_ROTRV    = OPC_SPECIAL | (0x01 << 6) | 0x06,
    OPC_SRAV     = OPC_SPECIAL | 0x07,
    OPC_JR       = OPC_SPECIAL | 0x08,
    OPC_JALR     = OPC_SPECIAL | 0x09,
    OPC_MOVZ     = OPC_SPECIAL | 0x0A,
    OPC_MOVN     = OPC_SPECIAL | 0x0B,
    OPC_MFHI     = OPC_SPECIAL | 0x10,
    OPC_MFLO     = OPC_SPECIAL | 0x12,
    OPC_MULT     = OPC_SPECIAL | 0x18,
    OPC_MULTU    = OPC_SPECIAL | 0x19,
    OPC_DIV      = OPC_SPECIAL | 0x1A,
    OPC_DIVU     = OPC_SPECIAL | 0x1B,
    OPC_ADDU     = OPC_SPECIAL | 0x21,
    OPC_SUBU     = OPC_SPECIAL | 0x23,
    OPC_AND      = OPC_SPECIAL | 0x24,
    OPC_OR       = OPC_SPECIAL | 0x25,
    OPC_XOR      = OPC_SPECIAL | 0x26,
    OPC_NOR      = OPC_SPECIAL | 0x27,
    OPC_SLT      = OPC_SPECIAL | 0x2A,
    OPC_SLTU     = OPC_SPECIAL | 0x2B,

    OPC_REGIMM   = 0x01 << 26,
    OPC_BLTZ     = OPC_REGIMM | (0x00 << 16),
    OPC_BGEZ     = OPC_REGIMM | (0x01 << 16),

    OPC_SPECIAL2 = 0x1c << 26,
    OPC_MUL      = OPC_SPECIAL2 | 0x002,

    OPC_SPECIAL3 = 0x1f << 26,
    OPC_INS      = OPC_SPECIAL3 | 0x004,
    OPC_WSBH     = OPC_SPECIAL3 | 0x0a0,
    OPC_SEB      = OPC_SPECIAL3 | 0x420,
    OPC_SEH      = OPC_SPECIAL3 | 0x620,
};
293 * Type reg
295 static inline void tcg_out_opc_reg(TCGContext *s, int opc,
296 TCGReg rd, TCGReg rs, TCGReg rt)
298 int32_t inst;
300 inst = opc;
301 inst |= (rs & 0x1F) << 21;
302 inst |= (rt & 0x1F) << 16;
303 inst |= (rd & 0x1F) << 11;
304 tcg_out32(s, inst);
308 * Type immediate
310 static inline void tcg_out_opc_imm(TCGContext *s, int opc,
311 TCGReg rt, TCGReg rs, TCGArg imm)
313 int32_t inst;
315 inst = opc;
316 inst |= (rs & 0x1F) << 21;
317 inst |= (rt & 0x1F) << 16;
318 inst |= (imm & 0xffff);
319 tcg_out32(s, inst);
323 * Type branch
325 static inline void tcg_out_opc_br(TCGContext *s, int opc,
326 TCGReg rt, TCGReg rs)
328 /* We pay attention here to not modify the branch target by reading
329 the existing value and using it again. This ensure that caches and
330 memory are kept coherent during retranslation. */
331 uint16_t offset = (uint16_t)*s->code_ptr;
333 tcg_out_opc_imm(s, opc, rt, rs, offset);
337 * Type sa
339 static inline void tcg_out_opc_sa(TCGContext *s, int opc,
340 TCGReg rd, TCGReg rt, TCGArg sa)
342 int32_t inst;
344 inst = opc;
345 inst |= (rt & 0x1F) << 16;
346 inst |= (rd & 0x1F) << 11;
347 inst |= (sa & 0x1F) << 6;
348 tcg_out32(s, inst);
353 * Type jump.
354 * Returns true if the branch was in range and the insn was emitted.
356 static bool tcg_out_opc_jmp(TCGContext *s, int opc, void *target)
358 uintptr_t dest = (uintptr_t)target;
359 uintptr_t from = (uintptr_t)s->code_ptr + 4;
360 int32_t inst;
362 /* The pc-region branch happens within the 256MB region of
363 the delay slot (thus the +4). */
364 if ((from ^ dest) & -(1 << 28)) {
365 return false;
367 assert((dest & 3) == 0);
369 inst = opc;
370 inst |= (dest >> 2) & 0x3ffffff;
371 tcg_out32(s, inst);
372 return true;
375 static inline void tcg_out_nop(TCGContext *s)
377 tcg_out32(s, 0);
380 static inline void tcg_out_mov(TCGContext *s, TCGType type,
381 TCGReg ret, TCGReg arg)
383 /* Simple reg-reg move, optimising out the 'do nothing' case */
384 if (ret != arg) {
385 tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
389 static inline void tcg_out_movi(TCGContext *s, TCGType type,
390 TCGReg reg, tcg_target_long arg)
392 if (arg == (int16_t)arg) {
393 tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
394 } else if (arg == (uint16_t)arg) {
395 tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
396 } else {
397 tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
398 if (arg & 0xffff) {
399 tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
404 static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
406 if (use_mips32r2_instructions) {
407 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
408 } else {
409 /* ret and arg can't be register at */
410 if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
411 tcg_abort();
414 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
415 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);
416 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);
417 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
421 static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
423 if (use_mips32r2_instructions) {
424 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
425 tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);
426 } else {
427 /* ret and arg can't be register at */
428 if (ret == TCG_REG_AT || arg == TCG_REG_AT) {
429 tcg_abort();
432 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
433 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
434 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
435 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
439 static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
441 if (use_mips32r2_instructions) {
442 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
443 tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
444 } else {
445 /* ret and arg must be different and can't be register at */
446 if (ret == arg || ret == TCG_REG_AT || arg == TCG_REG_AT) {
447 tcg_abort();
450 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
452 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 24);
453 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
455 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, arg, 0xff00);
456 tcg_out_opc_sa(s, OPC_SLL, TCG_REG_AT, TCG_REG_AT, 8);
457 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
459 tcg_out_opc_sa(s, OPC_SRL, TCG_REG_AT, arg, 8);
460 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_AT, TCG_REG_AT, 0xff00);
461 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
465 static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
467 if (use_mips32r2_instructions) {
468 tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg);
469 } else {
470 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
471 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24);
475 static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
477 if (use_mips32r2_instructions) {
478 tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg);
479 } else {
480 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16);
481 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);
485 static void tcg_out_ldst(TCGContext *s, int opc, TCGReg data,
486 TCGReg addr, intptr_t ofs)
488 int16_t lo = ofs;
489 if (ofs != lo) {
490 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT, ofs - lo);
491 if (addr != TCG_REG_ZERO) {
492 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_AT, TCG_REG_AT, addr);
494 addr = TCG_REG_AT;
496 tcg_out_opc_imm(s, opc, data, addr, lo);
499 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
500 TCGReg arg1, intptr_t arg2)
502 tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
505 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
506 TCGReg arg1, intptr_t arg2)
508 tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
511 static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
513 if (val == (int16_t)val) {
514 tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
515 } else {
516 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT, val);
517 tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_REG_AT);
/* Emit a 32-bit conditional branch to LABEL_INDEX comparing ARG1 and
   ARG2.  Conditions without a direct branch encoding are synthesized
   with SLT/SLTU into AT followed by BNE/BEQ against $zero.  The final
   branch insn is relocated (or a reloc is queued) and its delay slot is
   filled with a nop.  */
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1,
                           TCGArg arg2, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_br(s, OPC_BEQ, arg1, arg2);
        break;
    case TCG_COND_NE:
        tcg_out_opc_br(s, OPC_BNE, arg1, arg2);
        break;
    case TCG_COND_LT:
        if (arg2 == 0) {
            /* Compare against zero has a dedicated branch.  */
            tcg_out_opc_br(s, OPC_BLTZ, 0, arg1);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg1, arg2);
            tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
        }
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg1, arg2);
        tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
        break;
    case TCG_COND_GE:
        if (arg2 == 0) {
            tcg_out_opc_br(s, OPC_BGEZ, 0, arg1);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg1, arg2);
            tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
        }
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg1, arg2);
        tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
        break;
    case TCG_COND_LE:
        if (arg2 == 0) {
            tcg_out_opc_br(s, OPC_BLEZ, 0, arg1);
        } else {
            /* a1 <= a2  <=>  !(a2 < a1); note the swapped operands.  */
            tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg2, arg1);
            tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
        }
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg2, arg1);
        tcg_out_opc_br(s, OPC_BEQ, TCG_REG_AT, TCG_REG_ZERO);
        break;
    case TCG_COND_GT:
        if (arg2 == 0) {
            tcg_out_opc_br(s, OPC_BGTZ, 0, arg1);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, arg2, arg1);
            tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
        }
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, arg2, arg1);
        tcg_out_opc_br(s, OPC_BNE, TCG_REG_AT, TCG_REG_ZERO);
        break;
    default:
        tcg_abort();
        break;
    }
    /* The branch is the last insn emitted: patch it now if the label is
       already resolved, otherwise record a relocation for it.  */
    if (l->has_value) {
        reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, label_index, 0);
    }
    /* delay slot */
    tcg_out_nop(s);
}
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* 64-bit (register-pair) conditional branch: ARG2:ARG1 vs ARG4:ARG3.
   Strategy: decide on the high words first; when they are equal, fall
   through to an unsigned comparison of the low words.  */
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGArg arg1,
                            TCGArg arg2, TCGArg arg3, TCGArg arg4,
                            int label_index)
{
    tcg_insn_unit *label_ptr;

    switch(cond) {
    case TCG_COND_NE:
        /* Unequal if either half differs; two independent branches.  */
        tcg_out_brcond(s, TCG_COND_NE, arg2, arg4, label_index);
        tcg_out_brcond(s, TCG_COND_NE, arg1, arg3, label_index);
        return;
    case TCG_COND_EQ:
        break;
    case TCG_COND_LT:
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, arg2, arg4, label_index);
        break;
    case TCG_COND_GT:
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, arg2, arg4, label_index);
        break;
    case TCG_COND_LTU:
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, arg2, arg4, label_index);
        break;
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, arg2, arg4, label_index);
        break;
    default:
        tcg_abort();
    }

    /* Skip the low-word comparison when the high words differ; patched
       below once the landing point is known.  */
    label_ptr = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, arg2, arg4);
    tcg_out_nop(s);

    /* High words equal: the low words decide, always unsigned.  */
    switch(cond) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_EQ, arg1, arg3, label_index);
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, arg1, arg3, label_index);
        break;
    case TCG_COND_LE:
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LEU, arg1, arg3, label_index);
        break;
    case TCG_COND_GT:
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, arg1, arg3, label_index);
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GEU, arg1, arg3, label_index);
        break;
    default:
        tcg_abort();
    }

    reloc_pc16(label_ptr, s->code_ptr);
}
/* Emit RET = (C1 cond C2 ? V : RET) using MOVZ/MOVN conditional moves,
   computing the condition into AT with SLT/SLTU where needed.  */
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGArg c1, TCGArg c2, TCGArg v)
{
    switch (cond) {
    case TCG_COND_EQ:
        if (c1 == 0) {
            /* Move when the single non-zero operand is zero.  */
            tcg_out_opc_reg(s, OPC_MOVZ, ret, v, c2);
        } else if (c2 == 0) {
            tcg_out_opc_reg(s, OPC_MOVZ, ret, v, c1);
        } else {
            /* c1 == c2  <=>  (c1 ^ c2) == 0.  */
            tcg_out_opc_reg(s, OPC_XOR, TCG_REG_AT, c1, c2);
            tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
        }
        break;
    case TCG_COND_NE:
        if (c1 == 0) {
            tcg_out_opc_reg(s, OPC_MOVN, ret, v, c2);
        } else if (c2 == 0) {
            tcg_out_opc_reg(s, OPC_MOVN, ret, v, c1);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, TCG_REG_AT, c1, c2);
            tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
        }
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c1, c2);
        tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c1, c2);
        tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_GE:
        /* GE is the negation of LT: move when SLT result is zero.  */
        tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c1, c2);
        tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c1, c2);
        tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_LE:
        /* LE swaps operands: !(c2 < c1).  */
        tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c2, c1);
        tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c2, c1);
        tcg_out_opc_reg(s, OPC_MOVZ, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, TCG_REG_AT, c2, c1);
        tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_AT, c2, c1);
        tcg_out_opc_reg(s, OPC_MOVN, ret, v, TCG_REG_AT);
        break;
    default:
        tcg_abort();
        break;
    }
}
/* Emit RET = (ARG1 cond ARG2) as 0/1 using SLT/SLTU, with XORI 1 to
   negate for the GE/LE family and SLTIU x,1 for equality-with-zero.  */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGArg arg1, TCGArg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        if (arg1 == 0) {
            /* (x == 0)  <=>  (x unsigned-less-than 1).  */
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg2, 1);
        } else if (arg2 == 0) {
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        }
        break;
    case TCG_COND_NE:
        if (arg1 == 0) {
            /* (x != 0)  <=>  (0 unsigned-less-than x).  */
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg2);
        } else if (arg2 == 0) {
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        }
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        /* GE = !LT.  */
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        /* LE = !(arg2 < arg1); note swapped operands.  */
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        tcg_abort();
        break;
    }
}
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* 64-bit (register-pair) setcond: RET = (ARG2:ARG1 cond ARG4:ARG3).
   EQ/NE combine the two halves directly; the ordered conditions combine
   "high words decide" with "high words equal AND low words decide",
   using AT and T0 as scratch.  */
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGArg arg1, TCGArg arg2, TCGArg arg3, TCGArg arg4)
{
    switch (cond) {
    case TCG_COND_EQ:
        /* Equal iff both halves are equal.  */
        tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_AT, arg2, arg4);
        tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_T0, arg1, arg3);
        tcg_out_opc_reg(s, OPC_AND, ret, TCG_REG_AT, TCG_REG_T0);
        return;
    case TCG_COND_NE:
        /* Unequal iff either half is unequal.  */
        tcg_out_setcond(s, TCG_COND_NE, TCG_REG_AT, arg2, arg4);
        tcg_out_setcond(s, TCG_COND_NE, TCG_REG_T0, arg1, arg3);
        tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_AT, TCG_REG_T0);
        return;
    case TCG_COND_LT:
    case TCG_COND_LE:
        tcg_out_setcond(s, TCG_COND_LT, TCG_REG_AT, arg2, arg4);
        break;
    case TCG_COND_GT:
    case TCG_COND_GE:
        tcg_out_setcond(s, TCG_COND_GT, TCG_REG_AT, arg2, arg4);
        break;
    case TCG_COND_LTU:
    case TCG_COND_LEU:
        tcg_out_setcond(s, TCG_COND_LTU, TCG_REG_AT, arg2, arg4);
        break;
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        tcg_out_setcond(s, TCG_COND_GTU, TCG_REG_AT, arg2, arg4);
        break;
    default:
        tcg_abort();
        break;
    }

    /* T0 = (high words equal).  */
    tcg_out_setcond(s, TCG_COND_EQ, TCG_REG_T0, arg2, arg4);

    /* RET = (low words satisfy the condition, unsigned).  */
    switch(cond) {
    case TCG_COND_LT:
    case TCG_COND_LTU:
        tcg_out_setcond(s, TCG_COND_LTU, ret, arg1, arg3);
        break;
    case TCG_COND_LE:
    case TCG_COND_LEU:
        tcg_out_setcond(s, TCG_COND_LEU, ret, arg1, arg3);
        break;
    case TCG_COND_GT:
    case TCG_COND_GTU:
        tcg_out_setcond(s, TCG_COND_GTU, ret, arg1, arg3);
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        tcg_out_setcond(s, TCG_COND_GEU, ret, arg1, arg3);
        break;
    default:
        tcg_abort();
    }

    /* RET = (high-words-decide) OR (high-equal AND low-words-decide).  */
    tcg_out_opc_reg(s, OPC_AND, ret, ret, TCG_REG_T0);
    tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_REG_AT);
}
843 static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
845 /* Note that the ABI requires the called function's address to be
846 loaded into T9, even if a direct branch is in range. */
847 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
849 /* But do try a direct branch, allowing the cpu better insn prefetch. */
850 if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
851 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
854 tcg_out_nop(s);
#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Indexed by memory-operation size (MO_SIZE bits: 0=8, 1=16, 2=32, 3=64).  */
static void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
876 /* Helper routines for marshalling helper function arguments into
877 * the correct registers and stack.
878 * I is where we want to put this argument, and is updated and returned
879 * for the next call. ARG is the argument itself.
881 * We provide routines for arguments which are: immediate, 32 bit
882 * value in register, 16 and 8 bit values in register (which must be zero
883 * extended before use) and 64 bit value in a lo:hi register pair.
886 static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
888 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
889 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
890 } else {
891 tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
893 return i + 1;
896 static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
898 TCGReg tmp = TCG_REG_AT;
899 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
900 tmp = tcg_target_call_iarg_regs[i];
902 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
903 return tcg_out_call_iarg_reg(s, i, tmp);
906 static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
908 TCGReg tmp = TCG_REG_AT;
909 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
910 tmp = tcg_target_call_iarg_regs[i];
912 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
913 return tcg_out_call_iarg_reg(s, i, tmp);
916 static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
918 TCGReg tmp = TCG_REG_AT;
919 if (arg == 0) {
920 tmp = TCG_REG_ZERO;
921 } else {
922 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
923 tmp = tcg_target_call_iarg_regs[i];
925 tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
927 return tcg_out_call_iarg_reg(s, i, tmp);
930 static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
932 i = (i + 1) & ~1;
933 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
934 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
935 return i;
/* Perform the tlb comparison operation.  The complete host address is
   placed in BASE.  Clobbers AT, T0, A0.  */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
                             TCGReg addrh, int mem_index, TCGMemOp s_bits,
                             tcg_insn_unit *label_ptr[2], bool is_load)
{
    int cmp_off
        = (is_load
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    /* A0 = env + byte offset of the TLB entry for this page.  */
    tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
                    (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Load the tlb comparator.  */
    tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, cmp_off + LO_OFF);
    if (TARGET_LONG_BITS == 64) {
        /* BASE temporarily holds the high half of the comparator.  */
        tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF);
    }

    /* Mask the page bits, keeping the alignment bits to compare against.
       In between, load the tlb addend for the fast path.  */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));
    tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addrl);

    /* Branch to the slow path on comparator mismatch; the offset field
       is filled in later via label_ptr[0].  */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_br(s, OPC_BNE, TCG_REG_T0, TCG_REG_AT);

    if (TARGET_LONG_BITS == 64) {
        /* delay slot */
        tcg_out_nop(s);

        /* Also compare the high half of the address.  */
        label_ptr[1] = s->code_ptr;
        tcg_out_opc_br(s, OPC_BNE, addrh, base);
    }

    /* delay slot */
    tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
}
/* Record a softmmu slow-path entry so that the out-of-line code can be
   generated later; LABEL_PTR holds the branch insn(s) to be patched to
   jump there, RADDR is the fast-path address to return to.  */
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, void *raddr,
                                tcg_insn_unit *label_ptr[2])
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS == 64) {
        /* Second branch exists only for the high-half address compare.  */
        label->label_ptr[1] = label_ptr[1];
    }
}
/* Emit the out-of-line slow path for a qemu_ld: call the load helper,
   move/extend its result into the destination register(s), and branch
   back to the fast path.  */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    /* Marshal env, address and mmu_idx for the helper call.  */
    i = 0;
    i = tcg_out_call_iarg_reg(s, i, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    i = tcg_out_call_iarg_imm(s, i, l->mem_index);
    tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]);

    /* Extend/move the helper result (in V0, or V0:V1 for 64-bit) into
       the destination register(s).  */
    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_ANDI, l->datalo_reg, TCG_REG_V0, 0xff);
        break;
    case MO_SB:
        tcg_out_ext8s(s, l->datalo_reg, TCG_REG_V0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_ANDI, l->datalo_reg, TCG_REG_V0, 0xffff);
        break;
    case MO_SW:
        tcg_out_ext16s(s, l->datalo_reg, TCG_REG_V0);
        break;
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_V0);
        break;
    case MO_Q:
        /* We eliminated V0 from the possible output registers, so it
           cannot be clobbered here.  So we must move V1 first.  */
        tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? l->datalo_reg : l->datahi_reg,
                    TCG_REG_V1);
        tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? l->datahi_reg : l->datalo_reg,
                    TCG_REG_V0);
        break;
    default:
        tcg_abort();
    }

    /* Unconditional branch back to the fast path (BEQ zero, zero).  */
    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_nop(s);
}
/* Emit the out-of-line slow path for a qemu_st: call the store helper
   with env, address, (zero-extended) data and mmu_idx, then branch back
   to the fast path.  */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    int i;

    /* resolve label address */
    reloc_pc16(l->label_ptr[0], s->code_ptr);
    if (TARGET_LONG_BITS == 64) {
        reloc_pc16(l->label_ptr[1], s->code_ptr);
    }

    i = 0;
    i = tcg_out_call_iarg_reg(s, i, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
    } else {
        i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
    }
    /* Narrow values must be zero-extended before the call.  */
    switch (s_bits) {
    case MO_8:
        i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
        break;
    case MO_16:
        i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
        break;
    case MO_32:
        i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
        break;
    case MO_64:
        i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
        break;
    default:
        tcg_abort();
    }
    i = tcg_out_call_iarg_imm(s, i, l->mem_index);
    tcg_out_call(s, qemu_st_helpers[s_bits]);

    /* Unconditional branch back to the fast path (BEQ zero, zero).  */
    reloc_pc16(s->code_ptr, l->raddr);
    tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
    /* delay slot */
    tcg_out_nop(s);
}
1115 #endif
/* Emit the actual memory access for a qemu_ld: load from host address
   BASE into DATALO (and DATAHI for 64-bit), byte-swapping via T0 when
   the memory operation's endianness differs from the host's.  */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, TCGMemOp opc)
{
    switch (opc) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
        break;
    case MO_UW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, base, 0);
        tcg_out_bswap16(s, datalo, TCG_REG_T0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
        break;
    case MO_SW | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, base, 0);
        tcg_out_bswap16s(s, datalo, TCG_REG_T0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
        break;
    case MO_UL | MO_BSWAP:
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, 0);
        tcg_out_bswap32(s, datalo, TCG_REG_T0);
        break;
    case MO_UL:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
        break;
    case MO_Q | MO_BSWAP:
        /* Swapped endianness: each word lands in the opposite half.  */
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, HI_OFF);
        tcg_out_bswap32(s, datalo, TCG_REG_T0);
        tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, base, LO_OFF);
        tcg_out_bswap32(s, datahi, TCG_REG_T0);
        break;
    case MO_Q:
        tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
        tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
        break;
    default:
        tcg_abort();
    }
}
1163 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
1165 TCGReg addr_regl, addr_regh __attribute__((unused));
1166 TCGReg data_regl, data_regh;
1167 #if defined(CONFIG_SOFTMMU)
1168 tcg_insn_unit *label_ptr[2];
1169 int mem_index;
1170 TCGMemOp s_bits;
1171 #endif
1172 /* Note that we've eliminated V0 from the output registers,
1173 so we won't overwrite the base register during loading. */
1174 TCGReg base = TCG_REG_V0;
1176 data_regl = *args++;
1177 data_regh = ((opc & MO_SIZE) == MO_64 ? *args++ : 0);
1178 addr_regl = *args++;
1179 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1181 #if defined(CONFIG_SOFTMMU)
1182 mem_index = *args;
1183 s_bits = opc & MO_SIZE;
1185 tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
1186 s_bits, label_ptr, 1);
1187 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1188 add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh,
1189 mem_index, s->code_ptr, label_ptr);
1190 #else
1191 if (GUEST_BASE == 0 && data_regl != addr_regl) {
1192 base = addr_regl;
1193 } else if (GUEST_BASE == (int16_t)GUEST_BASE) {
1194 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
1195 } else {
1196 tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
1197 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1199 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
1200 #endif
1203 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1204 TCGReg base, TCGMemOp opc)
1206 switch (opc) {
1207 case MO_8:
1208 tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
1209 break;
1211 case MO_16 | MO_BSWAP:
1212 tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_T0, datalo, 0xffff);
1213 tcg_out_bswap16(s, TCG_REG_T0, TCG_REG_T0);
1214 datalo = TCG_REG_T0;
1215 /* FALLTHRU */
1216 case MO_16:
1217 tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
1218 break;
1220 case MO_32 | MO_BSWAP:
1221 tcg_out_bswap32(s, TCG_REG_T0, datalo);
1222 datalo = TCG_REG_T0;
1223 /* FALLTHRU */
1224 case MO_32:
1225 tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
1226 break;
1228 case MO_64 | MO_BSWAP:
1229 tcg_out_bswap32(s, TCG_REG_T0, datalo);
1230 tcg_out_opc_imm(s, OPC_SW, TCG_REG_T0, base, HI_OFF);
1231 tcg_out_bswap32(s, TCG_REG_T0, datahi);
1232 tcg_out_opc_imm(s, OPC_SW, TCG_REG_T0, base, LO_OFF);
1233 break;
1234 case MO_64:
1235 tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
1236 tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
1237 break;
1239 default:
1240 tcg_abort();
1244 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
1246 TCGReg addr_regl, addr_regh __attribute__((unused));
1247 TCGReg data_regl, data_regh, base;
1248 #if defined(CONFIG_SOFTMMU)
1249 tcg_insn_unit *label_ptr[2];
1250 int mem_index;
1251 TCGMemOp s_bits;
1252 #endif
1254 data_regl = *args++;
1255 data_regh = ((opc & MO_SIZE) == MO_64 ? *args++ : 0);
1256 addr_regl = *args++;
1257 addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1259 #if defined(CONFIG_SOFTMMU)
1260 mem_index = *args;
1261 s_bits = opc & 3;
1263 /* Note that we eliminated the helper's address argument,
1264 so we can reuse that for the base. */
1265 base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
1266 tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
1267 s_bits, label_ptr, 1);
1268 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1269 add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh,
1270 mem_index, s->code_ptr, label_ptr);
1271 #else
1272 if (GUEST_BASE == 0) {
1273 base = addr_regl;
1274 } else {
1275 base = TCG_REG_A0;
1276 if (GUEST_BASE == (int16_t)GUEST_BASE) {
1277 tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE);
1278 } else {
1279 tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE);
1280 tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
1283 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1284 #endif
1287 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1288 const TCGArg *args, const int *const_args)
1290 switch(opc) {
1291 case INDEX_op_exit_tb:
1293 uintptr_t a0 = args[0];
1294 TCGReg b0 = TCG_REG_ZERO;
1296 if (a0 & ~0xffff) {
1297 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
1298 b0 = TCG_REG_V0;
1300 if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
1301 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_AT,
1302 (uintptr_t)tb_ret_addr);
1303 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_AT, 0);
1305 tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
1307 break;
1308 case INDEX_op_goto_tb:
1309 if (s->tb_jmp_offset) {
1310 /* direct jump method */
1311 tcg_abort();
1312 } else {
1313 /* indirect jump method */
1314 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_AT, TCG_REG_ZERO,
1315 (uintptr_t)(s->tb_next + args[0]));
1316 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_AT, 0);
1318 tcg_out_nop(s);
1319 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1320 break;
1321 case INDEX_op_br:
1322 tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, args[0]);
1323 break;
1325 case INDEX_op_ld8u_i32:
1326 tcg_out_ldst(s, OPC_LBU, args[0], args[1], args[2]);
1327 break;
1328 case INDEX_op_ld8s_i32:
1329 tcg_out_ldst(s, OPC_LB, args[0], args[1], args[2]);
1330 break;
1331 case INDEX_op_ld16u_i32:
1332 tcg_out_ldst(s, OPC_LHU, args[0], args[1], args[2]);
1333 break;
1334 case INDEX_op_ld16s_i32:
1335 tcg_out_ldst(s, OPC_LH, args[0], args[1], args[2]);
1336 break;
1337 case INDEX_op_ld_i32:
1338 tcg_out_ldst(s, OPC_LW, args[0], args[1], args[2]);
1339 break;
1340 case INDEX_op_st8_i32:
1341 tcg_out_ldst(s, OPC_SB, args[0], args[1], args[2]);
1342 break;
1343 case INDEX_op_st16_i32:
1344 tcg_out_ldst(s, OPC_SH, args[0], args[1], args[2]);
1345 break;
1346 case INDEX_op_st_i32:
1347 tcg_out_ldst(s, OPC_SW, args[0], args[1], args[2]);
1348 break;
1350 case INDEX_op_add_i32:
1351 if (const_args[2]) {
1352 tcg_out_opc_imm(s, OPC_ADDIU, args[0], args[1], args[2]);
1353 } else {
1354 tcg_out_opc_reg(s, OPC_ADDU, args[0], args[1], args[2]);
1356 break;
1357 case INDEX_op_add2_i32:
1358 if (const_args[4]) {
1359 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_AT, args[2], args[4]);
1360 } else {
1361 tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_AT, args[2], args[4]);
1363 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_T0, TCG_REG_AT, args[2]);
1364 if (const_args[5]) {
1365 tcg_out_opc_imm(s, OPC_ADDIU, args[1], args[3], args[5]);
1366 } else {
1367 tcg_out_opc_reg(s, OPC_ADDU, args[1], args[3], args[5]);
1369 tcg_out_opc_reg(s, OPC_ADDU, args[1], args[1], TCG_REG_T0);
1370 tcg_out_mov(s, TCG_TYPE_I32, args[0], TCG_REG_AT);
1371 break;
1372 case INDEX_op_sub_i32:
1373 if (const_args[2]) {
1374 tcg_out_opc_imm(s, OPC_ADDIU, args[0], args[1], -args[2]);
1375 } else {
1376 tcg_out_opc_reg(s, OPC_SUBU, args[0], args[1], args[2]);
1378 break;
1379 case INDEX_op_sub2_i32:
1380 if (const_args[4]) {
1381 tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_AT, args[2], -args[4]);
1382 } else {
1383 tcg_out_opc_reg(s, OPC_SUBU, TCG_REG_AT, args[2], args[4]);
1385 tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_T0, args[2], TCG_REG_AT);
1386 if (const_args[5]) {
1387 tcg_out_opc_imm(s, OPC_ADDIU, args[1], args[3], -args[5]);
1388 } else {
1389 tcg_out_opc_reg(s, OPC_SUBU, args[1], args[3], args[5]);
1391 tcg_out_opc_reg(s, OPC_SUBU, args[1], args[1], TCG_REG_T0);
1392 tcg_out_mov(s, TCG_TYPE_I32, args[0], TCG_REG_AT);
1393 break;
1394 case INDEX_op_mul_i32:
1395 if (use_mips32_instructions) {
1396 tcg_out_opc_reg(s, OPC_MUL, args[0], args[1], args[2]);
1397 } else {
1398 tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
1399 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1401 break;
1402 case INDEX_op_muls2_i32:
1403 tcg_out_opc_reg(s, OPC_MULT, 0, args[2], args[3]);
1404 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1405 tcg_out_opc_reg(s, OPC_MFHI, args[1], 0, 0);
1406 break;
1407 case INDEX_op_mulu2_i32:
1408 tcg_out_opc_reg(s, OPC_MULTU, 0, args[2], args[3]);
1409 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1410 tcg_out_opc_reg(s, OPC_MFHI, args[1], 0, 0);
1411 break;
1412 case INDEX_op_mulsh_i32:
1413 tcg_out_opc_reg(s, OPC_MULT, 0, args[1], args[2]);
1414 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1415 break;
1416 case INDEX_op_muluh_i32:
1417 tcg_out_opc_reg(s, OPC_MULTU, 0, args[1], args[2]);
1418 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1419 break;
1420 case INDEX_op_div_i32:
1421 tcg_out_opc_reg(s, OPC_DIV, 0, args[1], args[2]);
1422 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1423 break;
1424 case INDEX_op_divu_i32:
1425 tcg_out_opc_reg(s, OPC_DIVU, 0, args[1], args[2]);
1426 tcg_out_opc_reg(s, OPC_MFLO, args[0], 0, 0);
1427 break;
1428 case INDEX_op_rem_i32:
1429 tcg_out_opc_reg(s, OPC_DIV, 0, args[1], args[2]);
1430 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1431 break;
1432 case INDEX_op_remu_i32:
1433 tcg_out_opc_reg(s, OPC_DIVU, 0, args[1], args[2]);
1434 tcg_out_opc_reg(s, OPC_MFHI, args[0], 0, 0);
1435 break;
1437 case INDEX_op_and_i32:
1438 if (const_args[2]) {
1439 tcg_out_opc_imm(s, OPC_ANDI, args[0], args[1], args[2]);
1440 } else {
1441 tcg_out_opc_reg(s, OPC_AND, args[0], args[1], args[2]);
1443 break;
1444 case INDEX_op_or_i32:
1445 if (const_args[2]) {
1446 tcg_out_opc_imm(s, OPC_ORI, args[0], args[1], args[2]);
1447 } else {
1448 tcg_out_opc_reg(s, OPC_OR, args[0], args[1], args[2]);
1450 break;
1451 case INDEX_op_nor_i32:
1452 tcg_out_opc_reg(s, OPC_NOR, args[0], args[1], args[2]);
1453 break;
1454 case INDEX_op_not_i32:
1455 tcg_out_opc_reg(s, OPC_NOR, args[0], TCG_REG_ZERO, args[1]);
1456 break;
1457 case INDEX_op_xor_i32:
1458 if (const_args[2]) {
1459 tcg_out_opc_imm(s, OPC_XORI, args[0], args[1], args[2]);
1460 } else {
1461 tcg_out_opc_reg(s, OPC_XOR, args[0], args[1], args[2]);
1463 break;
1465 case INDEX_op_sar_i32:
1466 if (const_args[2]) {
1467 tcg_out_opc_sa(s, OPC_SRA, args[0], args[1], args[2]);
1468 } else {
1469 tcg_out_opc_reg(s, OPC_SRAV, args[0], args[2], args[1]);
1471 break;
1472 case INDEX_op_shl_i32:
1473 if (const_args[2]) {
1474 tcg_out_opc_sa(s, OPC_SLL, args[0], args[1], args[2]);
1475 } else {
1476 tcg_out_opc_reg(s, OPC_SLLV, args[0], args[2], args[1]);
1478 break;
1479 case INDEX_op_shr_i32:
1480 if (const_args[2]) {
1481 tcg_out_opc_sa(s, OPC_SRL, args[0], args[1], args[2]);
1482 } else {
1483 tcg_out_opc_reg(s, OPC_SRLV, args[0], args[2], args[1]);
1485 break;
1486 case INDEX_op_rotl_i32:
1487 if (const_args[2]) {
1488 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[1], 0x20 - args[2]);
1489 } else {
1490 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_AT, 32);
1491 tcg_out_opc_reg(s, OPC_SUBU, TCG_REG_AT, TCG_REG_AT, args[2]);
1492 tcg_out_opc_reg(s, OPC_ROTRV, args[0], TCG_REG_AT, args[1]);
1494 break;
1495 case INDEX_op_rotr_i32:
1496 if (const_args[2]) {
1497 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[1], args[2]);
1498 } else {
1499 tcg_out_opc_reg(s, OPC_ROTRV, args[0], args[2], args[1]);
1501 break;
1503 case INDEX_op_bswap16_i32:
1504 tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
1505 break;
1506 case INDEX_op_bswap32_i32:
1507 tcg_out_opc_reg(s, OPC_WSBH, args[0], 0, args[1]);
1508 tcg_out_opc_sa(s, OPC_ROTR, args[0], args[0], 16);
1509 break;
1511 case INDEX_op_ext8s_i32:
1512 tcg_out_opc_reg(s, OPC_SEB, args[0], 0, args[1]);
1513 break;
1514 case INDEX_op_ext16s_i32:
1515 tcg_out_opc_reg(s, OPC_SEH, args[0], 0, args[1]);
1516 break;
1518 case INDEX_op_deposit_i32:
1519 tcg_out_opc_imm(s, OPC_INS, args[0], args[2],
1520 ((args[3] + args[4] - 1) << 11) | (args[3] << 6));
1521 break;
1523 case INDEX_op_brcond_i32:
1524 tcg_out_brcond(s, args[2], args[0], args[1], args[3]);
1525 break;
1526 case INDEX_op_brcond2_i32:
1527 tcg_out_brcond2(s, args[4], args[0], args[1], args[2], args[3], args[5]);
1528 break;
1530 case INDEX_op_movcond_i32:
1531 tcg_out_movcond(s, args[5], args[0], args[1], args[2], args[3]);
1532 break;
1534 case INDEX_op_setcond_i32:
1535 tcg_out_setcond(s, args[3], args[0], args[1], args[2]);
1536 break;
1537 case INDEX_op_setcond2_i32:
1538 tcg_out_setcond2(s, args[5], args[0], args[1], args[2], args[3], args[4]);
1539 break;
1541 case INDEX_op_qemu_ld8u:
1542 tcg_out_qemu_ld(s, args, MO_UB);
1543 break;
1544 case INDEX_op_qemu_ld8s:
1545 tcg_out_qemu_ld(s, args, MO_SB);
1546 break;
1547 case INDEX_op_qemu_ld16u:
1548 tcg_out_qemu_ld(s, args, MO_TEUW);
1549 break;
1550 case INDEX_op_qemu_ld16s:
1551 tcg_out_qemu_ld(s, args, MO_TESW);
1552 break;
1553 case INDEX_op_qemu_ld32:
1554 tcg_out_qemu_ld(s, args, MO_TEUL);
1555 break;
1556 case INDEX_op_qemu_ld64:
1557 tcg_out_qemu_ld(s, args, MO_TEQ);
1558 break;
1559 case INDEX_op_qemu_st8:
1560 tcg_out_qemu_st(s, args, MO_UB);
1561 break;
1562 case INDEX_op_qemu_st16:
1563 tcg_out_qemu_st(s, args, MO_TEUW);
1564 break;
1565 case INDEX_op_qemu_st32:
1566 tcg_out_qemu_st(s, args, MO_TEUL);
1567 break;
1568 case INDEX_op_qemu_st64:
1569 tcg_out_qemu_st(s, args, MO_TEQ);
1570 break;
1572 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1573 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
1574 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1575 default:
1576 tcg_abort();
1580 static const TCGTargetOpDef mips_op_defs[] = {
1581 { INDEX_op_exit_tb, { } },
1582 { INDEX_op_goto_tb, { } },
1583 { INDEX_op_br, { } },
1585 { INDEX_op_ld8u_i32, { "r", "r" } },
1586 { INDEX_op_ld8s_i32, { "r", "r" } },
1587 { INDEX_op_ld16u_i32, { "r", "r" } },
1588 { INDEX_op_ld16s_i32, { "r", "r" } },
1589 { INDEX_op_ld_i32, { "r", "r" } },
1590 { INDEX_op_st8_i32, { "rZ", "r" } },
1591 { INDEX_op_st16_i32, { "rZ", "r" } },
1592 { INDEX_op_st_i32, { "rZ", "r" } },
1594 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1595 { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
1596 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
1597 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
1598 { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
1599 { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
1600 { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
1601 { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
1602 { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
1603 { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
1604 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1606 { INDEX_op_and_i32, { "r", "rZ", "rI" } },
1607 { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
1608 { INDEX_op_not_i32, { "r", "rZ" } },
1609 { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
1610 { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
1612 { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
1613 { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
1614 { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
1615 { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
1616 { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
1618 { INDEX_op_bswap16_i32, { "r", "r" } },
1619 { INDEX_op_bswap32_i32, { "r", "r" } },
1621 { INDEX_op_ext8s_i32, { "r", "rZ" } },
1622 { INDEX_op_ext16s_i32, { "r", "rZ" } },
1624 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
1626 { INDEX_op_brcond_i32, { "rZ", "rZ" } },
1627 { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
1628 { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
1629 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
1631 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1632 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1633 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
1635 #if TARGET_LONG_BITS == 32
1636 { INDEX_op_qemu_ld8u, { "L", "lZ" } },
1637 { INDEX_op_qemu_ld8s, { "L", "lZ" } },
1638 { INDEX_op_qemu_ld16u, { "L", "lZ" } },
1639 { INDEX_op_qemu_ld16s, { "L", "lZ" } },
1640 { INDEX_op_qemu_ld32, { "L", "lZ" } },
1641 { INDEX_op_qemu_ld64, { "L", "L", "lZ" } },
1643 { INDEX_op_qemu_st8, { "SZ", "SZ" } },
1644 { INDEX_op_qemu_st16, { "SZ", "SZ" } },
1645 { INDEX_op_qemu_st32, { "SZ", "SZ" } },
1646 { INDEX_op_qemu_st64, { "SZ", "SZ", "SZ" } },
1647 #else
1648 { INDEX_op_qemu_ld8u, { "L", "lZ", "lZ" } },
1649 { INDEX_op_qemu_ld8s, { "L", "lZ", "lZ" } },
1650 { INDEX_op_qemu_ld16u, { "L", "lZ", "lZ" } },
1651 { INDEX_op_qemu_ld16s, { "L", "lZ", "lZ" } },
1652 { INDEX_op_qemu_ld32, { "L", "lZ", "lZ" } },
1653 { INDEX_op_qemu_ld64, { "L", "L", "lZ", "lZ" } },
1655 { INDEX_op_qemu_st8, { "SZ", "SZ", "SZ" } },
1656 { INDEX_op_qemu_st16, { "SZ", "SZ", "SZ" } },
1657 { INDEX_op_qemu_st32, { "SZ", "SZ", "SZ" } },
1658 { INDEX_op_qemu_st64, { "SZ", "SZ", "SZ", "SZ" } },
1659 #endif
1660 { -1 },
1663 static int tcg_target_callee_save_regs[] = {
1664 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
1665 TCG_REG_S1,
1666 TCG_REG_S2,
1667 TCG_REG_S3,
1668 TCG_REG_S4,
1669 TCG_REG_S5,
1670 TCG_REG_S6,
1671 TCG_REG_S7,
1672 TCG_REG_FP,
1673 TCG_REG_RA, /* should be last for ABI compliance */
1676 /* The Linux kernel doesn't provide any information about the available
1677 instruction set. Probe it using a signal handler. */
1679 #include <signal.h>
1681 #ifndef use_movnz_instructions
1682 bool use_movnz_instructions = false;
1683 #endif
1685 #ifndef use_mips32_instructions
1686 bool use_mips32_instructions = false;
1687 #endif
1689 #ifndef use_mips32r2_instructions
1690 bool use_mips32r2_instructions = false;
1691 #endif
1693 static volatile sig_atomic_t got_sigill;
1695 static void sigill_handler(int signo, siginfo_t *si, void *data)
1697 /* Skip the faulty instruction */
1698 ucontext_t *uc = (ucontext_t *)data;
1699 uc->uc_mcontext.pc += 4;
1701 got_sigill = 1;
1704 static void tcg_target_detect_isa(void)
1706 struct sigaction sa_old, sa_new;
1708 memset(&sa_new, 0, sizeof(sa_new));
1709 sa_new.sa_flags = SA_SIGINFO;
1710 sa_new.sa_sigaction = sigill_handler;
1711 sigaction(SIGILL, &sa_new, &sa_old);
1713 /* Probe for movn/movz, necessary to implement movcond. */
1714 #ifndef use_movnz_instructions
1715 got_sigill = 0;
1716 asm volatile(".set push\n"
1717 ".set mips32\n"
1718 "movn $zero, $zero, $zero\n"
1719 "movz $zero, $zero, $zero\n"
1720 ".set pop\n"
1721 : : : );
1722 use_movnz_instructions = !got_sigill;
1723 #endif
1725 /* Probe for MIPS32 instructions. As no subsetting is allowed
1726 by the specification, it is only necessary to probe for one
1727 of the instructions. */
1728 #ifndef use_mips32_instructions
1729 got_sigill = 0;
1730 asm volatile(".set push\n"
1731 ".set mips32\n"
1732 "mul $zero, $zero\n"
1733 ".set pop\n"
1734 : : : );
1735 use_mips32_instructions = !got_sigill;
1736 #endif
1738 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
1739 available. As no subsetting is allowed by the specification,
1740 it is only necessary to probe for one of the instructions. */
1741 #ifndef use_mips32r2_instructions
1742 if (use_mips32_instructions) {
1743 got_sigill = 0;
1744 asm volatile(".set push\n"
1745 ".set mips32r2\n"
1746 "seb $zero, $zero\n"
1747 ".set pop\n"
1748 : : : );
1749 use_mips32r2_instructions = !got_sigill;
1751 #endif
1753 sigaction(SIGILL, &sa_old, NULL);
1756 /* Generate global QEMU prologue and epilogue code */
1757 static void tcg_target_qemu_prologue(TCGContext *s)
1759 int i, frame_size;
1761 /* reserve some stack space, also for TCG temps. */
1762 frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1763 + TCG_STATIC_CALL_ARGS_SIZE
1764 + CPU_TEMP_BUF_NLONGS * sizeof(long);
1765 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1766 ~(TCG_TARGET_STACK_ALIGN - 1);
1767 tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
1768 + TCG_STATIC_CALL_ARGS_SIZE,
1769 CPU_TEMP_BUF_NLONGS * sizeof(long));
1771 /* TB prologue */
1772 tcg_out_addi(s, TCG_REG_SP, -frame_size);
1773 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1774 tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1775 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
1778 /* Call generated code */
1779 tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
1780 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1781 tb_ret_addr = s->code_ptr;
1783 /* TB epilogue */
1784 for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
1785 tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
1786 TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
1789 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
1790 tcg_out_addi(s, TCG_REG_SP, frame_size);
1793 static void tcg_target_init(TCGContext *s)
1795 tcg_target_detect_isa();
1796 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
1797 tcg_regset_set(tcg_target_call_clobber_regs,
1798 (1 << TCG_REG_V0) |
1799 (1 << TCG_REG_V1) |
1800 (1 << TCG_REG_A0) |
1801 (1 << TCG_REG_A1) |
1802 (1 << TCG_REG_A2) |
1803 (1 << TCG_REG_A3) |
1804 (1 << TCG_REG_T1) |
1805 (1 << TCG_REG_T2) |
1806 (1 << TCG_REG_T3) |
1807 (1 << TCG_REG_T4) |
1808 (1 << TCG_REG_T5) |
1809 (1 << TCG_REG_T6) |
1810 (1 << TCG_REG_T7) |
1811 (1 << TCG_REG_T8) |
1812 (1 << TCG_REG_T9));
1814 tcg_regset_clear(s->reserved_regs);
1815 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
1816 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
1817 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
1818 tcg_regset_set_reg(s->reserved_regs, TCG_REG_AT); /* internal use */
1819 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T0); /* internal use */
1820 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
1821 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
1822 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
1824 tcg_add_target_add_op_defs(mips_op_defs);