tcg-aarch64: Update to helper_ret_*_mmu routines
[qemu/ar7.git] / tcg / i386 / tcg-target.c
blob c1f07415abc4ca4eebb22492629847dbd855fdeb
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 #if TCG_TARGET_REG_BITS == 64
28 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
29 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
30 #else
31 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
32 #endif
34 #endif
36 static const int tcg_target_reg_alloc_order[] = {
37 #if TCG_TARGET_REG_BITS == 64
38 TCG_REG_RBP,
39 TCG_REG_RBX,
40 TCG_REG_R12,
41 TCG_REG_R13,
42 TCG_REG_R14,
43 TCG_REG_R15,
44 TCG_REG_R10,
45 TCG_REG_R11,
46 TCG_REG_R9,
47 TCG_REG_R8,
48 TCG_REG_RCX,
49 TCG_REG_RDX,
50 TCG_REG_RSI,
51 TCG_REG_RDI,
52 TCG_REG_RAX,
53 #else
54 TCG_REG_EBX,
55 TCG_REG_ESI,
56 TCG_REG_EDI,
57 TCG_REG_EBP,
58 TCG_REG_ECX,
59 TCG_REG_EDX,
60 TCG_REG_EAX,
61 #endif
64 static const int tcg_target_call_iarg_regs[] = {
65 #if TCG_TARGET_REG_BITS == 64
66 #if defined(_WIN64)
67 TCG_REG_RCX,
68 TCG_REG_RDX,
69 #else
70 TCG_REG_RDI,
71 TCG_REG_RSI,
72 TCG_REG_RDX,
73 TCG_REG_RCX,
74 #endif
75 TCG_REG_R8,
76 TCG_REG_R9,
77 #else
78 /* 32 bit mode uses stack based calling convention (GCC default). */
79 #endif
82 static const int tcg_target_call_oarg_regs[] = {
83 TCG_REG_EAX,
84 #if TCG_TARGET_REG_BITS == 32
85 TCG_REG_EDX
86 #endif
89 /* Registers used with L constraint, which are the first argument
90 registers on x86_64, and two random call clobbered registers on
91 i386. */
92 #if TCG_TARGET_REG_BITS == 64
93 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
94 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
95 #else
96 # define TCG_REG_L0 TCG_REG_EAX
97 # define TCG_REG_L1 TCG_REG_EDX
98 #endif
100 /* For 32-bit, we are going to attempt to determine at runtime whether cmov
101 is available. However, the host compiler must supply <cpuid.h>, as we're
102 not going to go so far as our own inline assembly. */
103 #if TCG_TARGET_REG_BITS == 64
104 # define have_cmov 1
105 #elif defined(CONFIG_CPUID_H)
106 #include <cpuid.h>
107 static bool have_cmov;
108 #else
109 # define have_cmov 0
110 #endif
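/* A minimal sketch of the kind of runtime check this enables, assuming GCC's
   __get_cpuid() and bit_CMOV from <cpuid.h> (illustrative only; the actual
   detection is done elsewhere in this file):

       unsigned a, b, c, d;
       if (__get_cpuid(1, &a, &b, &c, &d)) {
           have_cmov = (d & bit_CMOV) != 0;
       }
*/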
112 static uint8_t *tb_ret_addr;
114 static void patch_reloc(uint8_t *code_ptr, int type,
115 intptr_t value, intptr_t addend)
117 value += addend;
118 switch(type) {
119 case R_386_PC32:
120 value -= (uintptr_t)code_ptr;
121 if (value != (int32_t)value) {
122 tcg_abort();
124 *(uint32_t *)code_ptr = value;
125 break;
126 case R_386_PC8:
127 value -= (uintptr_t)code_ptr;
128 if (value != (int8_t)value) {
129 tcg_abort();
131 *(uint8_t *)code_ptr = value;
132 break;
133 default:
134 tcg_abort();
138 /* parse target specific constraints */
139 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
141 const char *ct_str;
143 ct_str = *pct_str;
144 switch(ct_str[0]) {
145 case 'a':
146 ct->ct |= TCG_CT_REG;
147 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
148 break;
149 case 'b':
150 ct->ct |= TCG_CT_REG;
151 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
152 break;
153 case 'c':
154 ct->ct |= TCG_CT_REG;
155 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
156 break;
157 case 'd':
158 ct->ct |= TCG_CT_REG;
159 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
160 break;
161 case 'S':
162 ct->ct |= TCG_CT_REG;
163 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
164 break;
165 case 'D':
166 ct->ct |= TCG_CT_REG;
167 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
168 break;
169 case 'q':
170 ct->ct |= TCG_CT_REG;
171 if (TCG_TARGET_REG_BITS == 64) {
172 tcg_regset_set32(ct->u.regs, 0, 0xffff);
173 } else {
174 tcg_regset_set32(ct->u.regs, 0, 0xf);
176 break;
177 case 'Q':
178 ct->ct |= TCG_CT_REG;
179 tcg_regset_set32(ct->u.regs, 0, 0xf);
180 break;
181 case 'r':
182 ct->ct |= TCG_CT_REG;
183 if (TCG_TARGET_REG_BITS == 64) {
184 tcg_regset_set32(ct->u.regs, 0, 0xffff);
185 } else {
186 tcg_regset_set32(ct->u.regs, 0, 0xff);
188 break;
190 /* qemu_ld/st address constraint */
191 case 'L':
192 ct->ct |= TCG_CT_REG;
193 if (TCG_TARGET_REG_BITS == 64) {
194 tcg_regset_set32(ct->u.regs, 0, 0xffff);
195 } else {
196 tcg_regset_set32(ct->u.regs, 0, 0xff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
200 break;
202 case 'e':
203 ct->ct |= TCG_CT_CONST_S32;
204 break;
205 case 'Z':
206 ct->ct |= TCG_CT_CONST_U32;
207 break;
209 default:
210 return -1;
212 ct_str++;
213 *pct_str = ct_str;
214 return 0;
217 /* test if a constant matches the constraint */
218 static inline int tcg_target_const_match(tcg_target_long val,
219 const TCGArgConstraint *arg_ct)
221 int ct = arg_ct->ct;
222 if (ct & TCG_CT_CONST) {
223 return 1;
225 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
226 return 1;
228 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
229 return 1;
231 return 0;
234 #if TCG_TARGET_REG_BITS == 64
235 # define LOWREGMASK(x) ((x) & 7)
236 #else
237 # define LOWREGMASK(x) (x)
238 #endif
240 #define P_EXT 0x100 /* 0x0f opcode prefix */
241 #define P_DATA16 0x200 /* 0x66 opcode prefix */
242 #if TCG_TARGET_REG_BITS == 64
243 # define P_ADDR32 0x400 /* 0x67 opcode prefix */
244 # define P_REXW 0x800 /* Set REX.W = 1 */
245 # define P_REXB_R 0x1000 /* REG field as byte register */
246 # define P_REXB_RM 0x2000 /* R/M field as byte register */
247 # define P_GS 0x4000 /* gs segment override */
248 #else
249 # define P_ADDR32 0
250 # define P_REXW 0
251 # define P_REXB_R 0
252 # define P_REXB_RM 0
253 # define P_GS 0
254 #endif
256 #define OPC_ARITH_EvIz (0x81)
257 #define OPC_ARITH_EvIb (0x83)
258 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
259 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
260 #define OPC_BSWAP (0xc8 | P_EXT)
261 #define OPC_CALL_Jz (0xe8)
262 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
263 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
264 #define OPC_DEC_r32 (0x48)
265 #define OPC_IMUL_GvEv (0xaf | P_EXT)
266 #define OPC_IMUL_GvEvIb (0x6b)
267 #define OPC_IMUL_GvEvIz (0x69)
268 #define OPC_INC_r32 (0x40)
269 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
270 #define OPC_JCC_short (0x70) /* ... plus condition code */
271 #define OPC_JMP_long (0xe9)
272 #define OPC_JMP_short (0xeb)
273 #define OPC_LEA (0x8d)
274 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
275 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
276 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
277 #define OPC_MOVB_EvIz (0xc6)
278 #define OPC_MOVL_EvIz (0xc7)
279 #define OPC_MOVL_Iv (0xb8)
280 #define OPC_MOVSBL (0xbe | P_EXT)
281 #define OPC_MOVSWL (0xbf | P_EXT)
282 #define OPC_MOVSLQ (0x63 | P_REXW)
283 #define OPC_MOVZBL (0xb6 | P_EXT)
284 #define OPC_MOVZWL (0xb7 | P_EXT)
285 #define OPC_POP_r32 (0x58)
286 #define OPC_PUSH_r32 (0x50)
287 #define OPC_PUSH_Iv (0x68)
288 #define OPC_PUSH_Ib (0x6a)
289 #define OPC_RET (0xc3)
290 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
291 #define OPC_SHIFT_1 (0xd1)
292 #define OPC_SHIFT_Ib (0xc1)
293 #define OPC_SHIFT_cl (0xd3)
294 #define OPC_TESTL (0x85)
295 #define OPC_XCHG_ax_r32 (0x90)
297 #define OPC_GRP3_Ev (0xf7)
298 #define OPC_GRP5 (0xff)
300 /* Group 1 opcode extensions for 0x80-0x83.
301 These are also used as modifiers for OPC_ARITH. */
302 #define ARITH_ADD 0
303 #define ARITH_OR 1
304 #define ARITH_ADC 2
305 #define ARITH_SBB 3
306 #define ARITH_AND 4
307 #define ARITH_SUB 5
308 #define ARITH_XOR 6
309 #define ARITH_CMP 7
311 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
312 #define SHIFT_ROL 0
313 #define SHIFT_ROR 1
314 #define SHIFT_SHL 4
315 #define SHIFT_SHR 5
316 #define SHIFT_SAR 7
318 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
319 #define EXT3_NOT 2
320 #define EXT3_NEG 3
321 #define EXT3_MUL 4
322 #define EXT3_IMUL 5
323 #define EXT3_DIV 6
324 #define EXT3_IDIV 7
326 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
327 #define EXT5_INC_Ev 0
328 #define EXT5_DEC_Ev 1
329 #define EXT5_CALLN_Ev 2
330 #define EXT5_JMPN_Ev 4
332 /* Condition codes to be added to OPC_JCC_{long,short}. */
333 #define JCC_JMP (-1)
334 #define JCC_JO 0x0
335 #define JCC_JNO 0x1
336 #define JCC_JB 0x2
337 #define JCC_JAE 0x3
338 #define JCC_JE 0x4
339 #define JCC_JNE 0x5
340 #define JCC_JBE 0x6
341 #define JCC_JA 0x7
342 #define JCC_JS 0x8
343 #define JCC_JNS 0x9
344 #define JCC_JP 0xa
345 #define JCC_JNP 0xb
346 #define JCC_JL 0xc
347 #define JCC_JGE 0xd
348 #define JCC_JLE 0xe
349 #define JCC_JG 0xf
351 static const uint8_t tcg_cond_to_jcc[] = {
352 [TCG_COND_EQ] = JCC_JE,
353 [TCG_COND_NE] = JCC_JNE,
354 [TCG_COND_LT] = JCC_JL,
355 [TCG_COND_GE] = JCC_JGE,
356 [TCG_COND_LE] = JCC_JLE,
357 [TCG_COND_GT] = JCC_JG,
358 [TCG_COND_LTU] = JCC_JB,
359 [TCG_COND_GEU] = JCC_JAE,
360 [TCG_COND_LEU] = JCC_JBE,
361 [TCG_COND_GTU] = JCC_JA,
364 #if TCG_TARGET_REG_BITS == 64
365 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
367 int rex;
369 if (opc & P_GS) {
370 tcg_out8(s, 0x65);
372 if (opc & P_DATA16) {
373 /* We should never be asking for both 16 and 64-bit operation. */
374 assert((opc & P_REXW) == 0);
375 tcg_out8(s, 0x66);
377 if (opc & P_ADDR32) {
378 tcg_out8(s, 0x67);
381 rex = 0;
382 rex |= (opc & P_REXW) >> 8; /* REX.W */
383 rex |= (r & 8) >> 1; /* REX.R */
384 rex |= (x & 8) >> 2; /* REX.X */
385 rex |= (rm & 8) >> 3; /* REX.B */
387 /* P_REXB_{R,RM} indicates that the given register is the low byte.
388 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
389 as otherwise the encoding indicates %[abcd]h. Note that the values
390 that are ORed in merely indicate that the REX byte must be present;
391 those bits get discarded in output. */
392 rex |= opc & (r >= 4 ? P_REXB_R : 0);
393 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
395 if (rex) {
396 tcg_out8(s, (uint8_t)(rex | 0x40));
399 if (opc & P_EXT) {
400 tcg_out8(s, 0x0f);
402 tcg_out8(s, opc);
404 #else
405 static void tcg_out_opc(TCGContext *s, int opc)
407 if (opc & P_DATA16) {
408 tcg_out8(s, 0x66);
410 if (opc & P_EXT) {
411 tcg_out8(s, 0x0f);
413 tcg_out8(s, opc);
415 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
416 the 32-bit compilation paths. This method works with all versions of gcc,
417 whereas relying on optimization may not be able to exclude them. */
418 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
419 #endif
421 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
423 tcg_out_opc(s, opc, r, rm, 0);
424 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
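/* Worked example (illustrative, not emitted by this file): tcg_out_modrm(s,
   OPC_MOVL_GvEv + P_REXW, TCG_REG_RAX, TCG_REG_R8) produces REX.W+REX.B
   (0x49), the 0x8b opcode, and ModRM 0xc0 -- i.e. "49 8b c0",
   movq %r8, %rax. */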
427 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
428 We handle either RM or INDEX missing by passing a negative value. In 64-bit
429 mode for absolute addresses, ~RM is the size of the immediate operand
430 that will follow the instruction. */
432 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
433 int index, int shift, intptr_t offset)
435 int mod, len;
437 if (index < 0 && rm < 0) {
438 if (TCG_TARGET_REG_BITS == 64) {
439 /* Try for a rip-relative addressing mode. This has replaced
440 the 32-bit-mode absolute addressing encoding. */
441 intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
442 intptr_t disp = offset - pc;
443 if (disp == (int32_t)disp) {
444 tcg_out_opc(s, opc, r, 0, 0);
445 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
446 tcg_out32(s, disp);
447 return;
450 /* Try for an absolute address encoding. This requires the
451 use of the MODRM+SIB encoding and is therefore larger than
452 rip-relative addressing. */
453 if (offset == (int32_t)offset) {
454 tcg_out_opc(s, opc, r, 0, 0);
455 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
456 tcg_out8(s, (4 << 3) | 5);
457 tcg_out32(s, offset);
458 return;
461 /* ??? The memory isn't directly addressable. */
462 tcg_abort();
463 } else {
464 /* Absolute address. */
465 tcg_out_opc(s, opc, r, 0, 0);
466 tcg_out8(s, (r << 3) | 5);
467 tcg_out32(s, offset);
468 return;
472 /* Find the length of the immediate addend. Note that the encoding
473 that would be used for (%ebp) indicates absolute addressing. */
474 if (rm < 0) {
475 mod = 0, len = 4, rm = 5;
476 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
477 mod = 0, len = 0;
478 } else if (offset == (int8_t)offset) {
479 mod = 0x40, len = 1;
480 } else {
481 mod = 0x80, len = 4;
484 /* Use a single byte MODRM format if possible. Note that the encoding
485 that would be used for %esp is the escape to the two byte form. */
486 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
487 /* Single byte MODRM format. */
488 tcg_out_opc(s, opc, r, rm, 0);
489 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
490 } else {
491 /* Two byte MODRM+SIB format. */
493 /* Note that the encoding that would place %esp into the index
494 field indicates no index register. In 64-bit mode, the REX.X
495 bit counts, so %r12 can be used as the index. */
496 if (index < 0) {
497 index = 4;
498 } else {
499 assert(index != TCG_REG_ESP);
502 tcg_out_opc(s, opc, r, rm, index);
503 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
504 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
507 if (len == 1) {
508 tcg_out8(s, offset);
509 } else if (len == 4) {
510 tcg_out32(s, offset);
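/* Worked example (illustrative): on a 32-bit host, tcg_out_modrm_sib_offset(s,
   OPC_MOVL_GvEv, TCG_REG_EAX, TCG_REG_EBP, TCG_REG_ESI, 2, 0x10) emits
   "8b 44 b5 10", i.e. movl 0x10(%ebp,%esi,4), %eax: ModRM 0x44 selects
   mod=01 plus a SIB byte, and SIB 0xb5 encodes scale=4, index=%esi,
   base=%ebp, followed by the 8-bit displacement. */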
514 /* A simplification of the above with no index or shift. */
515 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
516 int rm, intptr_t offset)
518 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
521 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
522 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
524 /* Propagate an opcode prefix, such as P_REXW. */
525 int ext = subop & ~0x7;
526 subop &= 0x7;
528 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
531 static inline void tcg_out_mov(TCGContext *s, TCGType type,
532 TCGReg ret, TCGReg arg)
534 if (arg != ret) {
535 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
536 tcg_out_modrm(s, opc, ret, arg);
540 static void tcg_out_movi(TCGContext *s, TCGType type,
541 TCGReg ret, tcg_target_long arg)
543 tcg_target_long diff;
545 if (arg == 0) {
546 tgen_arithr(s, ARITH_XOR, ret, ret);
547 return;
549 if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
550 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
551 tcg_out32(s, arg);
552 return;
554 if (arg == (int32_t)arg) {
555 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
556 tcg_out32(s, arg);
557 return;
560 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
561 diff = arg - ((uintptr_t)s->code_ptr + 7);
562 if (diff == (int32_t)diff) {
563 tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
564 tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
565 tcg_out32(s, diff);
566 return;
569 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
570 tcg_out64(s, arg);
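/* To summarize the size ladder above for a 64-bit value: 0 becomes a 2-3 byte
   XOR; a value that fits in 32 bits unsigned uses the 5-6 byte movl $imm32
   (which zero-extends); a value that fits in 32 bits signed uses the 7 byte
   sign-extending movq $imm32; a value within +/-2GB of the code uses the
   7 byte rip-relative LEA; only then do we fall back to the 10 byte movabs. */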
573 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
575 if (val == (int8_t)val) {
576 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
577 tcg_out8(s, val);
578 } else if (val == (int32_t)val) {
579 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
580 tcg_out32(s, val);
581 } else {
582 tcg_abort();
586 static inline void tcg_out_push(TCGContext *s, int reg)
588 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
591 static inline void tcg_out_pop(TCGContext *s, int reg)
593 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
596 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
597 TCGReg arg1, intptr_t arg2)
599 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
600 tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
603 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
604 TCGReg arg1, intptr_t arg2)
606 int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
607 tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
610 static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
611 tcg_target_long ofs, tcg_target_long val)
613 int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
614 tcg_out_modrm_offset(s, opc, 0, base, ofs);
615 tcg_out32(s, val);
618 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
620 /* Propagate an opcode prefix, such as P_DATA16. */
621 int ext = subopc & ~0x7;
622 subopc &= 0x7;
624 if (count == 1) {
625 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
626 } else {
627 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
628 tcg_out8(s, count);
632 static inline void tcg_out_bswap32(TCGContext *s, int reg)
634 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
637 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
639 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
642 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
644 /* movzbl */
645 assert(src < 4 || TCG_TARGET_REG_BITS == 64);
646 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
649 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
651 /* movsbl */
652 assert(src < 4 || TCG_TARGET_REG_BITS == 64);
653 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
656 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
658 /* movzwl */
659 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
662 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
664 /* movsw[lq] */
665 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
668 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
670 /* 32-bit mov zero extends. */
671 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
674 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
676 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
679 static inline void tcg_out_bswap64(TCGContext *s, int reg)
681 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
684 static void tgen_arithi(TCGContext *s, int c, int r0,
685 tcg_target_long val, int cf)
687 int rexw = 0;
689 if (TCG_TARGET_REG_BITS == 64) {
690 rexw = c & -8;
691 c &= 7;
694 /* ??? While INC is 2 bytes shorter than ADDL $1, it also induces
695 partial-flags-update stalls on Pentium4 and is not recommended
696 by current Intel optimization manuals. */
697 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
698 int is_inc = (c == ARITH_ADD) ^ (val < 0);
699 if (TCG_TARGET_REG_BITS == 64) {
700 /* The single-byte increment encodings are re-tasked as the
701 REX prefixes. Use the MODRM encoding. */
702 tcg_out_modrm(s, OPC_GRP5 + rexw,
703 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
704 } else {
705 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
707 return;
710 if (c == ARITH_AND) {
711 if (TCG_TARGET_REG_BITS == 64) {
712 if (val == 0xffffffffu) {
713 tcg_out_ext32u(s, r0, r0);
714 return;
716 if (val == (uint32_t)val) {
717 /* AND with no high bits set can use a 32-bit operation. */
718 rexw = 0;
721 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
722 tcg_out_ext8u(s, r0, r0);
723 return;
725 if (val == 0xffffu) {
726 tcg_out_ext16u(s, r0, r0);
727 return;
731 if (val == (int8_t)val) {
732 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
733 tcg_out8(s, val);
734 return;
736 if (rexw == 0 || val == (int32_t)val) {
737 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
738 tcg_out32(s, val);
739 return;
742 tcg_abort();
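/* Worked example (illustrative): tgen_arithi(s, ARITH_ADD + P_REXW,
   TCG_REG_RAX, 0x1000, 0) takes the 32-bit immediate path and emits
   "48 81 c0 00 10 00 00", i.e. addq $0x1000, %rax; a value that fits in
   8 bits would use the shorter 0x83 (OPC_ARITH_EvIb) form instead. */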
745 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
747 if (val != 0) {
748 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
752 /* Use SMALL != 0 to force a short forward branch. */
753 static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
755 int32_t val, val1;
756 TCGLabel *l = &s->labels[label_index];
758 if (l->has_value) {
759 val = l->u.value - (intptr_t)s->code_ptr;
760 val1 = val - 2;
761 if ((int8_t)val1 == val1) {
762 if (opc == -1) {
763 tcg_out8(s, OPC_JMP_short);
764 } else {
765 tcg_out8(s, OPC_JCC_short + opc);
767 tcg_out8(s, val1);
768 } else {
769 if (small) {
770 tcg_abort();
772 if (opc == -1) {
773 tcg_out8(s, OPC_JMP_long);
774 tcg_out32(s, val - 5);
775 } else {
776 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
777 tcg_out32(s, val - 6);
780 } else if (small) {
781 if (opc == -1) {
782 tcg_out8(s, OPC_JMP_short);
783 } else {
784 tcg_out8(s, OPC_JCC_short + opc);
786 tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
787 s->code_ptr += 1;
788 } else {
789 if (opc == -1) {
790 tcg_out8(s, OPC_JMP_long);
791 } else {
792 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
794 tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
795 s->code_ptr += 4;
799 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
800 int const_arg2, int rexw)
802 if (const_arg2) {
803 if (arg2 == 0) {
804 /* test r, r */
805 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
806 } else {
807 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
809 } else {
810 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
814 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
815 TCGArg arg1, TCGArg arg2, int const_arg2,
816 int label_index, int small)
818 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
819 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
822 #if TCG_TARGET_REG_BITS == 64
823 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
824 TCGArg arg1, TCGArg arg2, int const_arg2,
825 int label_index, int small)
827 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
828 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
830 #else
831 /* XXX: we implement it at the target level to avoid having to
832 handle cross-basic-block temporaries */
833 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
834 const int *const_args, int small)
836 int label_next;
837 label_next = gen_new_label();
838 switch(args[4]) {
839 case TCG_COND_EQ:
840 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
841 label_next, 1);
842 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
843 args[5], small);
844 break;
845 case TCG_COND_NE:
846 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
847 args[5], small);
848 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
849 args[5], small);
850 break;
851 case TCG_COND_LT:
852 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
853 args[5], small);
854 tcg_out_jxx(s, JCC_JNE, label_next, 1);
855 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
856 args[5], small);
857 break;
858 case TCG_COND_LE:
859 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
860 args[5], small);
861 tcg_out_jxx(s, JCC_JNE, label_next, 1);
862 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
863 args[5], small);
864 break;
865 case TCG_COND_GT:
866 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
867 args[5], small);
868 tcg_out_jxx(s, JCC_JNE, label_next, 1);
869 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
870 args[5], small);
871 break;
872 case TCG_COND_GE:
873 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
874 args[5], small);
875 tcg_out_jxx(s, JCC_JNE, label_next, 1);
876 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
877 args[5], small);
878 break;
879 case TCG_COND_LTU:
880 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
881 args[5], small);
882 tcg_out_jxx(s, JCC_JNE, label_next, 1);
883 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
884 args[5], small);
885 break;
886 case TCG_COND_LEU:
887 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
888 args[5], small);
889 tcg_out_jxx(s, JCC_JNE, label_next, 1);
890 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
891 args[5], small);
892 break;
893 case TCG_COND_GTU:
894 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
895 args[5], small);
896 tcg_out_jxx(s, JCC_JNE, label_next, 1);
897 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
898 args[5], small);
899 break;
900 case TCG_COND_GEU:
901 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
902 args[5], small);
903 tcg_out_jxx(s, JCC_JNE, label_next, 1);
904 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
905 args[5], small);
906 break;
907 default:
908 tcg_abort();
910 tcg_out_label(s, label_next, s->code_ptr);
912 #endif
914 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
915 TCGArg arg1, TCGArg arg2, int const_arg2)
917 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
918 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
919 tcg_out_ext8u(s, dest, dest);
922 #if TCG_TARGET_REG_BITS == 64
923 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
924 TCGArg arg1, TCGArg arg2, int const_arg2)
926 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
927 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
928 tcg_out_ext8u(s, dest, dest);
930 #else
931 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
932 const int *const_args)
934 TCGArg new_args[6];
935 int label_true, label_over;
937 memcpy(new_args, args+1, 5*sizeof(TCGArg));
939 if (args[0] == args[1] || args[0] == args[2]
940 || (!const_args[3] && args[0] == args[3])
941 || (!const_args[4] && args[0] == args[4])) {
942 /* When the destination overlaps with one of the argument
943 registers, don't do anything tricky. */
944 label_true = gen_new_label();
945 label_over = gen_new_label();
947 new_args[5] = label_true;
948 tcg_out_brcond2(s, new_args, const_args+1, 1);
950 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
951 tcg_out_jxx(s, JCC_JMP, label_over, 1);
952 tcg_out_label(s, label_true, s->code_ptr);
954 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
955 tcg_out_label(s, label_over, s->code_ptr);
956 } else {
957 /* When the destination does not overlap one of the arguments,
958 clear the destination first, jump if cond false, and emit an
959 increment in the true case. This results in smaller code. */
961 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
963 label_over = gen_new_label();
964 new_args[4] = tcg_invert_cond(new_args[4]);
965 new_args[5] = label_over;
966 tcg_out_brcond2(s, new_args, const_args+1, 1);
968 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
969 tcg_out_label(s, label_over, s->code_ptr);
972 #endif
974 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
975 TCGArg c1, TCGArg c2, int const_c2,
976 TCGArg v1)
978 tcg_out_cmp(s, c1, c2, const_c2, 0);
979 if (have_cmov) {
980 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
981 } else {
982 int over = gen_new_label();
983 tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
984 tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
985 tcg_out_label(s, over, s->code_ptr);
989 #if TCG_TARGET_REG_BITS == 64
990 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
991 TCGArg c1, TCGArg c2, int const_c2,
992 TCGArg v1)
994 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
995 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
997 #endif
999 static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
1001 intptr_t disp = dest - (intptr_t)s->code_ptr - 5;
1003 if (disp == (int32_t)disp) {
1004 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1005 tcg_out32(s, disp);
1006 } else {
1007 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
1008 tcg_out_modrm(s, OPC_GRP5,
1009 call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
1013 static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
1015 tcg_out_branch(s, 1, dest);
1018 static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
1020 tcg_out_branch(s, 0, dest);
1023 #if defined(CONFIG_SOFTMMU)
1024 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1025 * int mmu_idx, uintptr_t ra)
1027 static const void * const qemu_ld_helpers[4] = {
1028 helper_ret_ldub_mmu,
1029 helper_ret_lduw_mmu,
1030 helper_ret_ldul_mmu,
1031 helper_ret_ldq_mmu,
1034 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1035 * uintxx_t val, int mmu_idx, uintptr_t ra)
1037 static const void * const qemu_st_helpers[4] = {
1038 helper_ret_stb_mmu,
1039 helper_ret_stw_mmu,
1040 helper_ret_stl_mmu,
1041 helper_ret_stq_mmu,
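/* In other words, the slow path for e.g. a 32-bit guest load ends up doing,
   roughly, "val = helper_ret_ldul_mmu(env, addr, mem_index, retaddr)", with
   the helper picked by the log2 size of the access (the low bits of opc). */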
1044 static void add_qemu_ldst_label(TCGContext *s,
1045 int is_ld,
1046 int opc,
1047 int data_reg,
1048 int data_reg2,
1049 int addrlo_reg,
1050 int addrhi_reg,
1051 int mem_index,
1052 uint8_t *raddr,
1053 uint8_t **label_ptr);
1055 /* Perform the TLB load and compare.
1057 Inputs:
1058 ADDRLO_IDX contains the index into ARGS of the low part of the
1059 address; the high part of the address is at ADDRLO_IDX+1.
1061 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1063 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1064 This should be offsetof addr_read or addr_write.
1066 Outputs:
1067 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1068 positions of the displacements of forward jumps to the TLB miss case.
1070 Second argument register is loaded with the low part of the address.
1071 In the TLB hit case, it has been adjusted as indicated by the TLB
1072 and so is a host address. In the TLB miss case, it continues to
1073 hold a guest address.
1075 First argument register is clobbered. */
1077 static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
1078 int mem_index, int s_bits,
1079 const TCGArg *args,
1080 uint8_t **label_ptr, int which)
1082 const int addrlo = args[addrlo_idx];
1083 const int r0 = TCG_REG_L0;
1084 const int r1 = TCG_REG_L1;
1085 TCGType ttype = TCG_TYPE_I32;
1086 TCGType htype = TCG_TYPE_I32;
1087 int trexw = 0, hrexw = 0;
1089 if (TCG_TARGET_REG_BITS == 64) {
1090 if (TARGET_LONG_BITS == 64) {
1091 ttype = TCG_TYPE_I64;
1092 trexw = P_REXW;
1094 if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1095 htype = TCG_TYPE_I64;
1096 hrexw = P_REXW;
1100 tcg_out_mov(s, htype, r0, addrlo);
1101 tcg_out_mov(s, ttype, r1, addrlo);
1103 tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
1104 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1106 tgen_arithi(s, ARITH_AND + trexw, r1,
1107 TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
1108 tgen_arithi(s, ARITH_AND + hrexw, r0,
1109 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
1111 tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
1112 offsetof(CPUArchState, tlb_table[mem_index][0])
1113 + which);
1115 /* cmp 0(r0), r1 */
1116 tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
1118 /* Prepare for both the fast path add of the tlb addend, and the slow
1119 path function argument setup. There are two cases worth noting:
1120 For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
1121 before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
1122 copies the entire guest address for the slow path, while truncation
1123 for the 32-bit host happens with the fastpath ADDL below. */
1124 tcg_out_mov(s, ttype, r1, addrlo);
1126 /* jne slow_path */
1127 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1128 label_ptr[0] = s->code_ptr;
1129 s->code_ptr += 4;
1131 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1132 /* cmp 4(r0), addrhi */
1133 tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
1135 /* jne slow_path */
1136 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1137 label_ptr[1] = s->code_ptr;
1138 s->code_ptr += 4;
1141 /* TLB Hit. */
1143 /* add addend(r0), r1 */
1144 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1145 offsetof(CPUTLBEntry, addend) - which);
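/* Roughly, the fast path emitted above computes (sketch only, using the names
 * described in the comment before tcg_out_tlb_load):
 *
 *   ofs   = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
 *           & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
 *   entry = (CPUTLBEntry *)((char *)&env->tlb_table[mem_index][0] + ofs);
 *   if ((addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1))) != entry->addr_read)
 *       goto slow_path;                    <-- or addr_write, per WHICH
 *   host = addr + entry->addend;           <-- r1 now holds a host address
 */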
1147 #elif defined(__x86_64__) && defined(__linux__)
1148 # include <asm/prctl.h>
1149 # include <sys/prctl.h>
1151 int arch_prctl(int code, unsigned long addr);
1153 static int guest_base_flags;
1154 static inline void setup_guest_base_seg(void)
1156 if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
1157 guest_base_flags = P_GS;
1160 #else
1161 # define guest_base_flags 0
1162 static inline void setup_guest_base_seg(void) { }
1163 #endif /* SOFTMMU */
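/* When the GS base has been set this way, qemu_ld/st can address guest memory
   as "%gs:(guest_addr)": the 0x65 segment override (P_GS) makes the hardware
   add GUEST_BASE for free, so no separate add instruction is needed. */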
1165 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
1166 int base, intptr_t ofs, int seg, int sizeop)
1168 #ifdef TARGET_WORDS_BIGENDIAN
1169 const int bswap = 1;
1170 #else
1171 const int bswap = 0;
1172 #endif
1173 switch (sizeop) {
1174 case 0:
1175 tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
1176 break;
1177 case 0 | 4:
1178 tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
1179 break;
1180 case 1:
1181 tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
1182 if (bswap) {
1183 tcg_out_rolw_8(s, datalo);
1185 break;
1186 case 1 | 4:
1187 if (bswap) {
1188 tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
1189 tcg_out_rolw_8(s, datalo);
1190 tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
1191 } else {
1192 tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
1193 datalo, base, ofs);
1195 break;
1196 case 2:
1197 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
1198 if (bswap) {
1199 tcg_out_bswap32(s, datalo);
1201 break;
1202 #if TCG_TARGET_REG_BITS == 64
1203 case 2 | 4:
1204 if (bswap) {
1205 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
1206 tcg_out_bswap32(s, datalo);
1207 tcg_out_ext32s(s, datalo, datalo);
1208 } else {
1209 tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
1211 break;
1212 #endif
1213 case 3:
1214 if (TCG_TARGET_REG_BITS == 64) {
1215 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
1216 datalo, base, ofs);
1217 if (bswap) {
1218 tcg_out_bswap64(s, datalo);
1220 } else {
1221 if (bswap) {
1222 int t = datalo;
1223 datalo = datahi;
1224 datahi = t;
1226 if (base != datalo) {
1227 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1228 datalo, base, ofs);
1229 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1230 datahi, base, ofs + 4);
1231 } else {
1232 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1233 datahi, base, ofs + 4);
1234 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1235 datalo, base, ofs);
1237 if (bswap) {
1238 tcg_out_bswap32(s, datalo);
1239 tcg_out_bswap32(s, datahi);
1242 break;
1243 default:
1244 tcg_abort();
1248 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1249 EAX. It will be useful once fixed-register globals are less
1250 common. */
1251 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
1252 int opc)
1254 int data_reg, data_reg2 = 0;
1255 int addrlo_idx;
1256 #if defined(CONFIG_SOFTMMU)
1257 int mem_index, s_bits;
1258 uint8_t *label_ptr[2];
1259 #endif
1261 data_reg = args[0];
1262 addrlo_idx = 1;
1263 if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
1264 data_reg2 = args[1];
1265 addrlo_idx = 2;
1268 #if defined(CONFIG_SOFTMMU)
1269 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
1270 s_bits = opc & 3;
1272 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
1273 label_ptr, offsetof(CPUTLBEntry, addr_read));
1275 /* TLB Hit. */
1276 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
1278 /* Record the current context of a load into ldst label */
1279 add_qemu_ldst_label(s,
1281 opc,
1282 data_reg,
1283 data_reg2,
1284 args[addrlo_idx],
1285 args[addrlo_idx + 1],
1286 mem_index,
1287 s->code_ptr,
1288 label_ptr);
1289 #else
1291 int32_t offset = GUEST_BASE;
1292 int base = args[addrlo_idx];
1293 int seg = 0;
1295 /* ??? We assume all operations have left us with register contents
1296 that are zero extended. So far this appears to be true. If we
1297 want to enforce this, we can either do an explicit zero-extension
1298 here, or (if GUEST_BASE == 0, or a segment register is in use)
1299 use the ADDR32 prefix. For now, do nothing. */
1300 if (GUEST_BASE && guest_base_flags) {
1301 seg = guest_base_flags;
1302 offset = 0;
1303 } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
1304 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
1305 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
1306 base = TCG_REG_L1;
1307 offset = 0;
1310 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
1312 #endif
1315 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
1316 int base, intptr_t ofs, int seg,
1317 int sizeop)
1319 #ifdef TARGET_WORDS_BIGENDIAN
1320 const int bswap = 1;
1321 #else
1322 const int bswap = 0;
1323 #endif
1324 /* ??? Ideally we wouldn't need a scratch register. For user-only,
1325 we could perform the bswap twice to restore the original value
1326 instead of moving to the scratch. But as it is, the L constraint
1327 means that TCG_REG_L0 is definitely free here. */
1328 const int scratch = TCG_REG_L0;
1330 switch (sizeop) {
1331 case 0:
1332 tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
1333 datalo, base, ofs);
1334 break;
1335 case 1:
1336 if (bswap) {
1337 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1338 tcg_out_rolw_8(s, scratch);
1339 datalo = scratch;
1341 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
1342 datalo, base, ofs);
1343 break;
1344 case 2:
1345 if (bswap) {
1346 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1347 tcg_out_bswap32(s, scratch);
1348 datalo = scratch;
1350 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
1351 break;
1352 case 3:
1353 if (TCG_TARGET_REG_BITS == 64) {
1354 if (bswap) {
1355 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
1356 tcg_out_bswap64(s, scratch);
1357 datalo = scratch;
1359 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
1360 datalo, base, ofs);
1361 } else if (bswap) {
1362 tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
1363 tcg_out_bswap32(s, scratch);
1364 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
1365 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1366 tcg_out_bswap32(s, scratch);
1367 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
1368 } else {
1369 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
1370 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
1372 break;
1373 default:
1374 tcg_abort();
1378 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
1379 int opc)
1381 int data_reg, data_reg2 = 0;
1382 int addrlo_idx;
1383 #if defined(CONFIG_SOFTMMU)
1384 int mem_index, s_bits;
1385 uint8_t *label_ptr[2];
1386 #endif
1388 data_reg = args[0];
1389 addrlo_idx = 1;
1390 if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
1391 data_reg2 = args[1];
1392 addrlo_idx = 2;
1395 #if defined(CONFIG_SOFTMMU)
1396 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
1397 s_bits = opc;
1399 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
1400 label_ptr, offsetof(CPUTLBEntry, addr_write));
1402 /* TLB Hit. */
1403 tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
1405 /* Record the current context of a store into ldst label */
1406 add_qemu_ldst_label(s,
1408 opc,
1409 data_reg,
1410 data_reg2,
1411 args[addrlo_idx],
1412 args[addrlo_idx + 1],
1413 mem_index,
1414 s->code_ptr,
1415 label_ptr);
1416 #else
1418 int32_t offset = GUEST_BASE;
1419 int base = args[addrlo_idx];
1420 int seg = 0;
1422 /* ??? We assume all operations have left us with register contents
1423 that are zero extended. So far this appears to be true. If we
1424 want to enforce this, we can either do an explicit zero-extension
1425 here, or (if GUEST_BASE == 0, or a segment register is in use)
1426 use the ADDR32 prefix. For now, do nothing. */
1427 if (GUEST_BASE && guest_base_flags) {
1428 seg = guest_base_flags;
1429 offset = 0;
1430 } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
1431 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
1432 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
1433 base = TCG_REG_L1;
1434 offset = 0;
1437 tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
1439 #endif
1442 #if defined(CONFIG_SOFTMMU)
1444 * Record the context of a call to the out-of-line helper code for the slow path
1445 * of a load or store, so that we can later generate the correct helper code.
1447 static void add_qemu_ldst_label(TCGContext *s,
1448 int is_ld,
1449 int opc,
1450 int data_reg,
1451 int data_reg2,
1452 int addrlo_reg,
1453 int addrhi_reg,
1454 int mem_index,
1455 uint8_t *raddr,
1456 uint8_t **label_ptr)
1458 int idx;
1459 TCGLabelQemuLdst *label;
1461 if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
1462 tcg_abort();
1465 idx = s->nb_qemu_ldst_labels++;
1466 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
1467 label->is_ld = is_ld;
1468 label->opc = opc;
1469 label->datalo_reg = data_reg;
1470 label->datahi_reg = data_reg2;
1471 label->addrlo_reg = addrlo_reg;
1472 label->addrhi_reg = addrhi_reg;
1473 label->mem_index = mem_index;
1474 label->raddr = raddr;
1475 label->label_ptr[0] = label_ptr[0];
1476 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1477 label->label_ptr[1] = label_ptr[1];
1482 * Generate code for the slow path for a load at the end of block
1484 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1486 int opc = l->opc;
1487 int s_bits = opc & 3;
1488 TCGReg data_reg;
1489 uint8_t **label_ptr = &l->label_ptr[0];
1491 /* resolve label address */
1492 *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
1493 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1494 *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
1497 if (TCG_TARGET_REG_BITS == 32) {
1498 int ofs = 0;
1500 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1501 ofs += 4;
1503 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1504 ofs += 4;
1506 if (TARGET_LONG_BITS == 64) {
1507 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1508 ofs += 4;
1511 tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
1512 ofs += 4;
1514 tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
1515 } else {
1516 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1517 /* The second argument is already loaded with addrlo. */
1518 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
1519 l->mem_index);
1520 tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1521 (uintptr_t)l->raddr);
1524 tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]);
1526 data_reg = l->datalo_reg;
1527 switch(opc) {
1528 case 0 | 4:
1529 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
1530 break;
1531 case 1 | 4:
1532 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
1533 break;
1534 #if TCG_TARGET_REG_BITS == 64
1535 case 2 | 4:
1536 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1537 break;
1538 #endif
1539 case 0:
1540 case 1:
1541 /* Note that the helpers have zero-extended to tcg_target_long. */
1542 case 2:
1543 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1544 break;
1545 case 3:
1546 if (TCG_TARGET_REG_BITS == 64) {
1547 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1548 } else if (data_reg == TCG_REG_EDX) {
1549 /* xchg %edx, %eax */
1550 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1551 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1552 } else {
1553 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1554 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1556 break;
1557 default:
1558 tcg_abort();
1561 /* Jump back to the code following the original qemu_ld. */
1562 tcg_out_jmp(s, (uintptr_t)l->raddr);
1566 * Generate code for the slow path for a store at the end of block
1568 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1570 int opc = l->opc;
1571 int s_bits = opc & 3;
1572 uint8_t **label_ptr = &l->label_ptr[0];
1573 TCGReg retaddr;
1575 /* resolve label address */
1576 *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
1577 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1578 *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
1581 if (TCG_TARGET_REG_BITS == 32) {
1582 int ofs = 0;
1584 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1585 ofs += 4;
1587 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1588 ofs += 4;
1590 if (TARGET_LONG_BITS == 64) {
1591 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1592 ofs += 4;
1595 tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
1596 ofs += 4;
1598 if (opc == 3) {
1599 tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
1600 ofs += 4;
1603 tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
1604 ofs += 4;
1606 retaddr = TCG_REG_EAX;
1607 tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
1608 tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
1609 } else {
1610 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1611 /* The second argument is already loaded with addrlo. */
1612 tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1613 tcg_target_call_iarg_regs[2], l->datalo_reg);
1614 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
1615 l->mem_index);
1617 if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
1618 retaddr = tcg_target_call_iarg_regs[4];
1619 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1620 } else {
1621 retaddr = TCG_REG_RAX;
1622 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1623 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
1627 /* "Tail call" to the helper, with the return address back inline. */
1628 tcg_out_push(s, retaddr);
1629 tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
1633 * Generate TB finalization at the end of block
1635 void tcg_out_tb_finalize(TCGContext *s)
1637 int i;
1638 TCGLabelQemuLdst *label;
1640 /* qemu_ld/st slow paths */
1641 for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
1642 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
1643 if (label->is_ld) {
1644 tcg_out_qemu_ld_slow_path(s, label);
1645 } else {
1646 tcg_out_qemu_st_slow_path(s, label);
1650 #endif /* CONFIG_SOFTMMU */
1652 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1653 const TCGArg *args, const int *const_args)
1655 int c, rexw = 0;
1657 #if TCG_TARGET_REG_BITS == 64
1658 # define OP_32_64(x) \
1659 case glue(glue(INDEX_op_, x), _i64): \
1660 rexw = P_REXW; /* FALLTHRU */ \
1661 case glue(glue(INDEX_op_, x), _i32)
1662 #else
1663 # define OP_32_64(x) \
1664 case glue(glue(INDEX_op_, x), _i32)
1665 #endif
1667 switch(opc) {
1668 case INDEX_op_exit_tb:
1669 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
1670 tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
1671 break;
1672 case INDEX_op_goto_tb:
1673 if (s->tb_jmp_offset) {
1674 /* direct jump method */
1675 tcg_out8(s, OPC_JMP_long); /* jmp im */
1676 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1677 tcg_out32(s, 0);
1678 } else {
1679 /* indirect jump method */
1680 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
1681 (intptr_t)(s->tb_next + args[0]));
1683 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1684 break;
1685 case INDEX_op_call:
1686 if (const_args[0]) {
1687 tcg_out_calli(s, args[0]);
1688 } else {
1689 /* call *reg */
1690 tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
1692 break;
1693 case INDEX_op_br:
1694 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1695 break;
1696 case INDEX_op_movi_i32:
1697 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1698 break;
1699 OP_32_64(ld8u):
1700 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1701 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1702 break;
1703 OP_32_64(ld8s):
1704 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
1705 break;
1706 OP_32_64(ld16u):
1707 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1708 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1709 break;
1710 OP_32_64(ld16s):
1711 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
1712 break;
1713 #if TCG_TARGET_REG_BITS == 64
1714 case INDEX_op_ld32u_i64:
1715 #endif
1716 case INDEX_op_ld_i32:
1717 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1718 break;
1720 OP_32_64(st8):
1721 if (const_args[0]) {
1722 tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
1723 0, args[1], args[2]);
1724 tcg_out8(s, args[0]);
1725 } else {
1726 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
1727 args[0], args[1], args[2]);
1729 break;
1730 OP_32_64(st16):
1731 if (const_args[0]) {
1732 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
1733 0, args[1], args[2]);
1734 tcg_out16(s, args[0]);
1735 } else {
1736 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
1737 args[0], args[1], args[2]);
1739 break;
1740 #if TCG_TARGET_REG_BITS == 64
1741 case INDEX_op_st32_i64:
1742 #endif
1743 case INDEX_op_st_i32:
1744 if (const_args[0]) {
1745 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
1746 tcg_out32(s, args[0]);
1747 } else {
1748 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1750 break;
1752 OP_32_64(add):
1753 /* For 3-operand addition, use LEA. */
1754 if (args[0] != args[1]) {
1755 TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
1757 if (const_args[2]) {
1758 c3 = a2, a2 = -1;
1759 } else if (a0 == a2) {
1760 /* Watch out for dest = src + dest, since we've removed
1761 the matching constraint on the add. */
1762 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
1763 break;
1766 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
1767 break;
1769 c = ARITH_ADD;
1770 goto gen_arith;
1771 OP_32_64(sub):
1772 c = ARITH_SUB;
1773 goto gen_arith;
1774 OP_32_64(and):
1775 c = ARITH_AND;
1776 goto gen_arith;
1777 OP_32_64(or):
1778 c = ARITH_OR;
1779 goto gen_arith;
1780 OP_32_64(xor):
1781 c = ARITH_XOR;
1782 goto gen_arith;
1783 gen_arith:
1784 if (const_args[2]) {
1785 tgen_arithi(s, c + rexw, args[0], args[2], 0);
1786 } else {
1787 tgen_arithr(s, c + rexw, args[0], args[2]);
1789 break;
1791 OP_32_64(mul):
1792 if (const_args[2]) {
1793 int32_t val;
1794 val = args[2];
1795 if (val == (int8_t)val) {
1796 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
1797 tcg_out8(s, val);
1798 } else {
1799 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
1800 tcg_out32(s, val);
1802 } else {
1803 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
1805 break;
1807 OP_32_64(div2):
1808 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
1809 break;
1810 OP_32_64(divu2):
1811 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
1812 break;
1814 OP_32_64(shl):
1815 c = SHIFT_SHL;
1816 goto gen_shift;
1817 OP_32_64(shr):
1818 c = SHIFT_SHR;
1819 goto gen_shift;
1820 OP_32_64(sar):
1821 c = SHIFT_SAR;
1822 goto gen_shift;
1823 OP_32_64(rotl):
1824 c = SHIFT_ROL;
1825 goto gen_shift;
1826 OP_32_64(rotr):
1827 c = SHIFT_ROR;
1828 goto gen_shift;
1829 gen_shift:
1830 if (const_args[2]) {
1831 tcg_out_shifti(s, c + rexw, args[0], args[2]);
1832 } else {
1833 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
1835 break;
1837 case INDEX_op_brcond_i32:
1838 tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
1839 args[3], 0);
1840 break;
1841 case INDEX_op_setcond_i32:
1842 tcg_out_setcond32(s, args[3], args[0], args[1],
1843 args[2], const_args[2]);
1844 break;
1845 case INDEX_op_movcond_i32:
1846 tcg_out_movcond32(s, args[5], args[0], args[1],
1847 args[2], const_args[2], args[3]);
1848 break;
1850 OP_32_64(bswap16):
1851 tcg_out_rolw_8(s, args[0]);
1852 break;
1853 OP_32_64(bswap32):
1854 tcg_out_bswap32(s, args[0]);
1855 break;
1857 OP_32_64(neg):
1858 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
1859 break;
1860 OP_32_64(not):
1861 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
1862 break;
1864 OP_32_64(ext8s):
1865 tcg_out_ext8s(s, args[0], args[1], rexw);
1866 break;
1867 OP_32_64(ext16s):
1868 tcg_out_ext16s(s, args[0], args[1], rexw);
1869 break;
1870 OP_32_64(ext8u):
1871 tcg_out_ext8u(s, args[0], args[1]);
1872 break;
1873 OP_32_64(ext16u):
1874 tcg_out_ext16u(s, args[0], args[1]);
1875 break;
1877 case INDEX_op_qemu_ld8u:
1878 tcg_out_qemu_ld(s, args, 0);
1879 break;
1880 case INDEX_op_qemu_ld8s:
1881 tcg_out_qemu_ld(s, args, 0 | 4);
1882 break;
1883 case INDEX_op_qemu_ld16u:
1884 tcg_out_qemu_ld(s, args, 1);
1885 break;
1886 case INDEX_op_qemu_ld16s:
1887 tcg_out_qemu_ld(s, args, 1 | 4);
1888 break;
1889 #if TCG_TARGET_REG_BITS == 64
1890 case INDEX_op_qemu_ld32u:
1891 #endif
1892 case INDEX_op_qemu_ld32:
1893 tcg_out_qemu_ld(s, args, 2);
1894 break;
1895 case INDEX_op_qemu_ld64:
1896 tcg_out_qemu_ld(s, args, 3);
1897 break;
1899 case INDEX_op_qemu_st8:
1900 tcg_out_qemu_st(s, args, 0);
1901 break;
1902 case INDEX_op_qemu_st16:
1903 tcg_out_qemu_st(s, args, 1);
1904 break;
1905 case INDEX_op_qemu_st32:
1906 tcg_out_qemu_st(s, args, 2);
1907 break;
1908 case INDEX_op_qemu_st64:
1909 tcg_out_qemu_st(s, args, 3);
1910 break;
1912 OP_32_64(mulu2):
1913 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
1914 break;
1915 OP_32_64(muls2):
1916 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
1917 break;
1918 OP_32_64(add2):
1919 if (const_args[4]) {
1920 tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
1921 } else {
1922 tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
1924 if (const_args[5]) {
1925 tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
1926 } else {
1927 tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
1929 break;
1930 OP_32_64(sub2):
1931 if (const_args[4]) {
1932 tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
1933 } else {
1934 tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
1936 if (const_args[5]) {
1937 tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
1938 } else {
1939 tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
1941 break;
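/* For example, on a 32-bit host add2_i32 lowers a 64-bit addition into
   "addl low2, low1" followed by "adcl high2, high1", letting ADC pick up the
   carry from the low half; sub2 does the same with SUB/SBB. */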
1943 #if TCG_TARGET_REG_BITS == 32
1944 case INDEX_op_brcond2_i32:
1945 tcg_out_brcond2(s, args, const_args, 0);
1946 break;
1947 case INDEX_op_setcond2_i32:
1948 tcg_out_setcond2(s, args, const_args);
1949 break;
1950 #else /* TCG_TARGET_REG_BITS == 64 */
1951 case INDEX_op_movi_i64:
1952 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1953 break;
1954 case INDEX_op_ld32s_i64:
1955 tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
1956 break;
1957 case INDEX_op_ld_i64:
1958 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1959 break;
1960 case INDEX_op_st_i64:
1961 if (const_args[0]) {
1962 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
1963 0, args[1], args[2]);
1964 tcg_out32(s, args[0]);
1965 } else {
1966 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1968 break;
1969 case INDEX_op_qemu_ld32s:
1970 tcg_out_qemu_ld(s, args, 2 | 4);
1971 break;
1973 case INDEX_op_brcond_i64:
1974 tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
1975 args[3], 0);
1976 break;
1977 case INDEX_op_setcond_i64:
1978 tcg_out_setcond64(s, args[3], args[0], args[1],
1979 args[2], const_args[2]);
1980 break;
1981 case INDEX_op_movcond_i64:
1982 tcg_out_movcond64(s, args[5], args[0], args[1],
1983 args[2], const_args[2], args[3]);
1984 break;
1986 case INDEX_op_bswap64_i64:
1987 tcg_out_bswap64(s, args[0]);
1988 break;
1989 case INDEX_op_ext32u_i64:
1990 tcg_out_ext32u(s, args[0], args[1]);
1991 break;
1992 case INDEX_op_ext32s_i64:
1993 tcg_out_ext32s(s, args[0], args[1]);
1994 break;
1995 #endif
1997 OP_32_64(deposit):
1998 if (args[3] == 0 && args[4] == 8) {
1999 /* load bits 0..7 */
2000 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
2001 args[2], args[0]);
2002 } else if (args[3] == 8 && args[4] == 8) {
2003 /* load bits 8..15 */
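/* Illustrative note: with no REX prefix, byte-register r/m encodings 4..7
   name %ah/%ch/%dh/%bh, so "args[0] + 4" below stores into the high byte
   of the destination (the 'Q' constraint keeps args[0] in EAX/ECX/EDX/EBX). */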
2004 tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
2005 } else if (args[3] == 0 && args[4] == 16) {
2006 /* load bits 0..15 */
2007 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
2008 } else {
2009 tcg_abort();
2011 break;
2013 default:
2014 tcg_abort();
2017 #undef OP_32_64
2020 static const TCGTargetOpDef x86_op_defs[] = {
2021 { INDEX_op_exit_tb, { } },
2022 { INDEX_op_goto_tb, { } },
2023 { INDEX_op_call, { "ri" } },
2024 { INDEX_op_br, { } },
2025 { INDEX_op_mov_i32, { "r", "r" } },
2026 { INDEX_op_movi_i32, { "r" } },
2027 { INDEX_op_ld8u_i32, { "r", "r" } },
2028 { INDEX_op_ld8s_i32, { "r", "r" } },
2029 { INDEX_op_ld16u_i32, { "r", "r" } },
2030 { INDEX_op_ld16s_i32, { "r", "r" } },
2031 { INDEX_op_ld_i32, { "r", "r" } },
2032 { INDEX_op_st8_i32, { "qi", "r" } },
2033 { INDEX_op_st16_i32, { "ri", "r" } },
2034 { INDEX_op_st_i32, { "ri", "r" } },
2036 { INDEX_op_add_i32, { "r", "r", "ri" } },
2037 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2038 { INDEX_op_mul_i32, { "r", "0", "ri" } },
2039 { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
2040 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
2041 { INDEX_op_and_i32, { "r", "0", "ri" } },
2042 { INDEX_op_or_i32, { "r", "0", "ri" } },
2043 { INDEX_op_xor_i32, { "r", "0", "ri" } },
2045 { INDEX_op_shl_i32, { "r", "0", "ci" } },
2046 { INDEX_op_shr_i32, { "r", "0", "ci" } },
2047 { INDEX_op_sar_i32, { "r", "0", "ci" } },
2048 { INDEX_op_rotl_i32, { "r", "0", "ci" } },
2049 { INDEX_op_rotr_i32, { "r", "0", "ci" } },
2051 { INDEX_op_brcond_i32, { "r", "ri" } },
2053 { INDEX_op_bswap16_i32, { "r", "0" } },
2054 { INDEX_op_bswap32_i32, { "r", "0" } },
2056 { INDEX_op_neg_i32, { "r", "0" } },
2058 { INDEX_op_not_i32, { "r", "0" } },
2060 { INDEX_op_ext8s_i32, { "r", "q" } },
2061 { INDEX_op_ext16s_i32, { "r", "r" } },
2062 { INDEX_op_ext8u_i32, { "r", "q" } },
2063 { INDEX_op_ext16u_i32, { "r", "r" } },
2065 { INDEX_op_setcond_i32, { "q", "r", "ri" } },
2067 { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
2068 #if TCG_TARGET_HAS_movcond_i32
2069 { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
2070 #endif
2072 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
2073 { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
2074 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2075 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2077 #if TCG_TARGET_REG_BITS == 32
2078 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
2079 { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
2080 #else
2081 { INDEX_op_mov_i64, { "r", "r" } },
2082 { INDEX_op_movi_i64, { "r" } },
2083 { INDEX_op_ld8u_i64, { "r", "r" } },
2084 { INDEX_op_ld8s_i64, { "r", "r" } },
2085 { INDEX_op_ld16u_i64, { "r", "r" } },
2086 { INDEX_op_ld16s_i64, { "r", "r" } },
2087 { INDEX_op_ld32u_i64, { "r", "r" } },
2088 { INDEX_op_ld32s_i64, { "r", "r" } },
2089 { INDEX_op_ld_i64, { "r", "r" } },
2090 { INDEX_op_st8_i64, { "ri", "r" } },
2091 { INDEX_op_st16_i64, { "ri", "r" } },
2092 { INDEX_op_st32_i64, { "ri", "r" } },
2093 { INDEX_op_st_i64, { "re", "r" } },
2095 { INDEX_op_add_i64, { "r", "r", "re" } },
2096 { INDEX_op_mul_i64, { "r", "0", "re" } },
2097 { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
2098 { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
2099 { INDEX_op_sub_i64, { "r", "0", "re" } },
2100 { INDEX_op_and_i64, { "r", "0", "reZ" } },
2101 { INDEX_op_or_i64, { "r", "0", "re" } },
2102 { INDEX_op_xor_i64, { "r", "0", "re" } },
2104 { INDEX_op_shl_i64, { "r", "0", "ci" } },
2105 { INDEX_op_shr_i64, { "r", "0", "ci" } },
2106 { INDEX_op_sar_i64, { "r", "0", "ci" } },
2107 { INDEX_op_rotl_i64, { "r", "0", "ci" } },
2108 { INDEX_op_rotr_i64, { "r", "0", "ci" } },
2110 { INDEX_op_brcond_i64, { "r", "re" } },
2111 { INDEX_op_setcond_i64, { "r", "r", "re" } },
2113 { INDEX_op_bswap16_i64, { "r", "0" } },
2114 { INDEX_op_bswap32_i64, { "r", "0" } },
2115 { INDEX_op_bswap64_i64, { "r", "0" } },
2116 { INDEX_op_neg_i64, { "r", "0" } },
2117 { INDEX_op_not_i64, { "r", "0" } },
2119 { INDEX_op_ext8s_i64, { "r", "r" } },
2120 { INDEX_op_ext16s_i64, { "r", "r" } },
2121 { INDEX_op_ext32s_i64, { "r", "r" } },
2122 { INDEX_op_ext8u_i64, { "r", "r" } },
2123 { INDEX_op_ext16u_i64, { "r", "r" } },
2124 { INDEX_op_ext32u_i64, { "r", "r" } },
2126 { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
2127 { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
2129 { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
2130 { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
2131 { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
2132 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
2133 #endif
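/* qemu_ld/st operand constraints: the guest address needs one "L" register,
   or two (third branch below) when the guest virtual address is wider than
   the host registers; 64-bit data on a 32-bit host likewise occupies two
   registers. */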
2135 #if TCG_TARGET_REG_BITS == 64
2136 { INDEX_op_qemu_ld8u, { "r", "L" } },
2137 { INDEX_op_qemu_ld8s, { "r", "L" } },
2138 { INDEX_op_qemu_ld16u, { "r", "L" } },
2139 { INDEX_op_qemu_ld16s, { "r", "L" } },
2140 { INDEX_op_qemu_ld32, { "r", "L" } },
2141 { INDEX_op_qemu_ld32u, { "r", "L" } },
2142 { INDEX_op_qemu_ld32s, { "r", "L" } },
2143 { INDEX_op_qemu_ld64, { "r", "L" } },
2145 { INDEX_op_qemu_st8, { "L", "L" } },
2146 { INDEX_op_qemu_st16, { "L", "L" } },
2147 { INDEX_op_qemu_st32, { "L", "L" } },
2148 { INDEX_op_qemu_st64, { "L", "L" } },
2149 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
2150 { INDEX_op_qemu_ld8u, { "r", "L" } },
2151 { INDEX_op_qemu_ld8s, { "r", "L" } },
2152 { INDEX_op_qemu_ld16u, { "r", "L" } },
2153 { INDEX_op_qemu_ld16s, { "r", "L" } },
2154 { INDEX_op_qemu_ld32, { "r", "L" } },
2155 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
2157 { INDEX_op_qemu_st8, { "cb", "L" } },
2158 { INDEX_op_qemu_st16, { "L", "L" } },
2159 { INDEX_op_qemu_st32, { "L", "L" } },
2160 { INDEX_op_qemu_st64, { "L", "L", "L" } },
2161 #else
2162 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
2163 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
2164 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
2165 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
2166 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
2167 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
2169 { INDEX_op_qemu_st8, { "cb", "L", "L" } },
2170 { INDEX_op_qemu_st16, { "L", "L", "L" } },
2171 { INDEX_op_qemu_st32, { "L", "L", "L" } },
2172 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
2173 #endif
2174 { -1 },
2175 };
2177 static int tcg_target_callee_save_regs[] = {
2178 #if TCG_TARGET_REG_BITS == 64
2179 TCG_REG_RBP,
2180 TCG_REG_RBX,
2181 #if defined(_WIN64)
2182 TCG_REG_RDI,
2183 TCG_REG_RSI,
2184 #endif
2185 TCG_REG_R12,
2186 TCG_REG_R13,
2187 TCG_REG_R14, /* Currently used for the global env. */
2188 TCG_REG_R15,
2189 #else
2190 TCG_REG_EBP, /* Currently used for the global env. */
2191 TCG_REG_EBX,
2192 TCG_REG_ESI,
2193 TCG_REG_EDI,
2194 #endif
2195 };
2197 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
2198 and tcg_register_jit. */
2200 #define PUSH_SIZE \
2201 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
2202 * (TCG_TARGET_REG_BITS / 8))
2204 #define FRAME_SIZE \
2205 ((PUSH_SIZE \
2206 + TCG_STATIC_CALL_ARGS_SIZE \
2207 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2208 + TCG_TARGET_STACK_ALIGN - 1) \
2209 & ~(TCG_TARGET_STACK_ALIGN - 1))
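/* Worked example, assuming the usual tcg.h values TCG_STATIC_CALL_ARGS_SIZE
   == 128, CPU_TEMP_BUF_NLONGS == 128 and TCG_TARGET_STACK_ALIGN == 16: on
   x86_64 (non-Windows) there are 6 callee-saved registers, so PUSH_SIZE =
   (1 + 6) * 8 = 56 bytes, the extra slot being the return address pushed by
   the call into the prologue; FRAME_SIZE then is
   (56 + 128 + 128 * 8 + 15) & ~15 = 1216 bytes. */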
2211 /* Generate global QEMU prologue and epilogue code */
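/* Roughly, on x86_64 (System V) the generated code amounts to:
     push %rbp; push %rbx; push %r12; push %r13; push %r14; push %r15
     mov %rdi, %r14              ; env argument -> TCG_AREG0
     sub $stack_addend, %rsp
     jmp *%rsi                   ; enter the translation block
   with the epilogue at tb_ret_addr undoing it:
     add $stack_addend, %rsp; pop %r15 ... pop %rbp; ret
   The exact bytes come from the tcg_out_* calls below. */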
2212 static void tcg_target_qemu_prologue(TCGContext *s)
2213 {
2214 int i, stack_addend;
2216 /* TB prologue */
2218 /* Reserve some stack space, also for TCG temps. */
2219 stack_addend = FRAME_SIZE - PUSH_SIZE;
2220 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2221 CPU_TEMP_BUF_NLONGS * sizeof(long));
2223 /* Save all callee saved registers. */
2224 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2225 tcg_out_push(s, tcg_target_callee_save_regs[i]);
2226 }
2228 #if TCG_TARGET_REG_BITS == 32
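/* On i386 both arguments are passed on the stack: env sits just above the
   return address and the pushed callee-saved registers, hence the
   (ARRAY_SIZE + 1) * 4 offset read before %esp is lowered; the tb pointer is
   one slot higher still, and stack_addend must be added back because the jmp
   is emitted after %esp has been adjusted. */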
2229 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
2230 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
2231 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2232 /* jmp *tb. */
2233 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
2234 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
2235 + stack_addend);
2236 #else
2237 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2238 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2239 /* jmp *tb. */
2240 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
2241 #endif
2243 /* TB epilogue */
2244 tb_ret_addr = s->code_ptr;
2246 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
2248 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
2249 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
2250 }
2251 tcg_out_opc(s, OPC_RET, 0, 0, 0);
2253 #if !defined(CONFIG_SOFTMMU)
2254 /* Try to set up a segment register to point to GUEST_BASE. */
2255 if (GUEST_BASE) {
2256 setup_guest_base_seg();
2257 }
2258 #endif
2259 }
2261 static void tcg_target_init(TCGContext *s)
2262 {
2263 /* For 32-bit, we can be 99% certain that we're running on hardware that
2264 supports cmov, but we still need to check. In case cmov is not available,
2265 we'll use a small forward branch. */
2266 #ifndef have_cmov
2267 {
2268 unsigned a, b, c, d;
2269 have_cmov = (__get_cpuid(1, &a, &b, &c, &d) && (d & bit_CMOV));
2270 }
2271 #endif
2273 if (TCG_TARGET_REG_BITS == 64) {
2274 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2275 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2276 } else {
2277 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
2278 }
2280 tcg_regset_clear(tcg_target_call_clobber_regs);
2281 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
2282 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
2283 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
2284 if (TCG_TARGET_REG_BITS == 64) {
2285 #if !defined(_WIN64)
2286 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
2287 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
2288 #endif
2289 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
2290 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
2291 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
2292 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
2293 }
2295 tcg_regset_clear(s->reserved_regs);
2296 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2298 tcg_add_target_add_op_defs(x86_op_defs);
2299 }
2301 typedef struct {
2302 DebugFrameCIE cie;
2303 DebugFrameFDEHeader fde;
2304 uint8_t fde_def_cfa[4];
2305 uint8_t fde_reg_ofs[14];
2306 } DebugFrame;
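/* fde_reg_ofs is sized for the x86_64 initializer (7 two-byte entries); in
   the i386 case the unused tail stays zero, which DWARF readers treat as
   DW_CFA_nop padding. */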
2308 /* We're expecting a 2-byte uleb128 encoded value. */
2309 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
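/* For reference, the raw bytes below follow the standard DWARF encodings:
   DW_CFA_def_cfa is opcode 0x0c (12) followed by a uleb128 register number
   and a uleb128 offset (FRAME_SIZE, split across two uleb128 bytes);
   DW_CFA_offset is (0x80 | regno) followed by a uleb128 factored offset that
   is multiplied by data_align (-8 or -4 here) to place the saved register
   relative to the CFA. */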
2311 #if !defined(__ELF__)
2312 /* Host machine without ELF. */
2313 #elif TCG_TARGET_REG_BITS == 64
2314 #define ELF_HOST_MACHINE EM_X86_64
2315 static DebugFrame debug_frame = {
2316 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2317 .cie.id = -1,
2318 .cie.version = 1,
2319 .cie.code_align = 1,
2320 .cie.data_align = 0x78, /* sleb128 -8 */
2321 .cie.return_column = 16,
2323 /* Total FDE size does not include the "len" member. */
2324 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
2326 .fde_def_cfa = {
2327 12, 7, /* DW_CFA_def_cfa %rsp, ... */
2328 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2329 (FRAME_SIZE >> 7)
2330 },
2331 .fde_reg_ofs = {
2332 0x90, 1, /* DW_CFA_offset, %rip, -8 */
2333 /* The following ordering must match tcg_target_callee_save_regs. */
2334 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
2335 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
2336 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
2337 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
2338 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
2339 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
2340 }
2341 };
2342 #else
2343 #define ELF_HOST_MACHINE EM_386
2344 static DebugFrame debug_frame = {
2345 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2346 .cie.id = -1,
2347 .cie.version = 1,
2348 .cie.code_align = 1,
2349 .cie.data_align = 0x7c, /* sleb128 -4 */
2350 .cie.return_column = 8,
2352 /* Total FDE size does not include the "len" member. */
2353 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
2355 .fde_def_cfa = {
2356 12, 4, /* DW_CFA_def_cfa %esp, ... */
2357 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2358 (FRAME_SIZE >> 7)
2359 },
2360 .fde_reg_ofs = {
2361 0x88, 1, /* DW_CFA_offset, %eip, -4 */
2362 /* The following ordering must match tcg_target_callee_save_regs. */
2363 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
2364 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
2365 0x86, 4, /* DW_CFA_offset, %esi, -16 */
2366 0x87, 5, /* DW_CFA_offset, %edi, -20 */
2367 }
2368 };
2369 #endif
2371 #if defined(ELF_HOST_MACHINE)
2372 void tcg_register_jit(void *buf, size_t buf_size)
2373 {
2374 debug_frame.fde.func_start = (uintptr_t)buf;
2375 debug_frame.fde.func_len = buf_size;
2377 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2378 }
2379 #endif