tcg-i386: Rename tcg_out_calli to tcg_out_call
[qemu.git] / tcg / i386 / tcg-target.c
blob 48a95f8da2814725ef763500ef2fb6a052f30d5c
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
};
#endif

static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
};

static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};

/* Constants we accept.  */
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400

/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif

/* The host compiler should supply <cpuid.h> to enable runtime features
   detection, as we're not going to go so far as our own inline assembly.
   If not available, default values will be assumed.  */
#if defined(CONFIG_CPUID_H)
#include <cpuid.h>
#endif
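
/* For example (a sketch, not code from this file): with <cpuid.h>
   available, a flag such as have_movbe can be probed at startup roughly
   as follows, which is the kind of check tcg_target_init performs:

       unsigned a, b, c, d;
       if (__get_cpuid(1, &a, &b, &c, &d)) {
           have_movbe = (c & bit_MOVBE) != 0;
       }
*/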

/* For 32-bit, we are going to attempt to determine at runtime whether cmov
   is available.  */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV)
static bool have_cmov;
#else
# define have_cmov 0
#endif

/* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are
   going to attempt to determine at runtime whether movbe is available.  */
#if defined(CONFIG_CPUID_H) && defined(bit_MOVBE)
static bool have_movbe;
#else
# define have_movbe 0
#endif

/* We need this symbol in tcg-target.h, and we can't properly conditionalize
   it there.  Therefore we always define the variable.  */
bool have_bmi1;

#if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
static bool have_bmi2;
#else
# define have_bmi2 0
#endif

static tcg_insn_unit *tb_ret_addr;

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;
    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            tcg_abort();
        }
        tcg_patch32(code_ptr, value);
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        tcg_patch8(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
    case_c:
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        }
        break;
    case 'Q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
    case_r:
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        break;
    case 'C':
        /* With SHRX et al, we need not use ECX as shift count register. */
        if (have_bmi2) {
            goto case_r;
        } else {
            goto case_c;
        }

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_I32;
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
        return 1;
    }
    return 0;
}

#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)	((x) & 7)
#else
# define LOWREGMASK(x)	(x)
#endif
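
/* E.g. LOWREGMASK(TCG_REG_R10) == 2; the high bit of such a register
   number travels in the REX.R/X/B bits instead (see tcg_out_opc).  */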

#define P_EXT		0x100		/* 0x0f opcode prefix */
#define P_EXT38		0x200		/* 0x0f 0x38 opcode prefix */
#define P_DATA16	0x400		/* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32	0x800		/* 0x67 opcode prefix */
# define P_REXW		0x1000		/* Set REX.W = 1 */
# define P_REXB_R	0x2000		/* REG field as byte register */
# define P_REXB_RM	0x4000		/* R/M field as byte register */
# define P_GS		0x8000		/* gs segment override */
#else
# define P_ADDR32	0
# define P_REXW		0
# define P_REXB_R	0
# define P_REXB_RM	0
# define P_GS		0
#endif
#define P_SIMDF3	0x10000		/* 0xf3 opcode prefix */
#define P_SIMDF2	0x20000		/* 0xf2 opcode prefix */
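
/* These flags compose with the one-byte base opcodes below.  E.g.
   OPC_MOVZWL is 0xb7 | P_EXT, so tcg_out_opc emits the two bytes
   0x0f 0xb7; P_DATA16 would additionally prepend 0x66, and P_REXW
   forces a REX prefix with the W bit set.  */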

#define OPC_ARITH_EvIz	(0x81)
#define OPC_ARITH_EvIb	(0x83)
#define OPC_ARITH_GvEv	(0x03)		/* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN        (0xf2 | P_EXT38)
#define OPC_ADD_GvEv	(OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP	(0xc8 | P_EXT)
#define OPC_CALL_Jz	(0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv	(OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32	(0x48)
#define OPC_IMUL_GvEv	(0xaf | P_EXT)
#define OPC_IMUL_GvEvIb	(0x6b)
#define OPC_IMUL_GvEvIz	(0x69)
#define OPC_INC_r32	(0x40)
#define OPC_JCC_long	(0x80 | P_EXT)	/* ... plus condition code */
#define OPC_JCC_short	(0x70)		/* ... plus condition code */
#define OPC_JMP_long	(0xe9)
#define OPC_JMP_short	(0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv	(0x88)		/* stores, more or less */
#define OPC_MOVL_EvGv	(0x89)		/* stores, more or less */
#define OPC_MOVL_GvEv	(0x8b)		/* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz	(0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
#define OPC_MOVSBL	(0xbe | P_EXT)
#define OPC_MOVSWL	(0xbf | P_EXT)
#define OPC_MOVSLQ	(0x63 | P_REXW)
#define OPC_MOVZBL	(0xb6 | P_EXT)
#define OPC_MOVZWL	(0xb7 | P_EXT)
#define OPC_POP_r32	(0x58)
#define OPC_PUSH_r32	(0x50)
#define OPC_PUSH_Iv	(0x68)
#define OPC_PUSH_Ib	(0x6a)
#define OPC_RET		(0xc3)
#define OPC_SETCC	(0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1	(0xd1)
#define OPC_SHIFT_Ib	(0xc1)
#define OPC_SHIFT_cl	(0xd3)
#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_TESTL	(0x85)
#define OPC_XCHG_ax_r32	(0x90)

#define OPC_GRP3_Ev	(0xf7)
#define OPC_GRP5	(0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7
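
/* E.g. OPC_ARITH_EvIb with ARITH_ADD in the ModRM reg field encodes
   "add $imm8, r/m" (0x83 /0); with ARITH_CMP it becomes 0x83 /7,
   "cmp $imm8, r/m".  See tgen_arithi.  */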

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev	0
#define EXT5_DEC_Ev	1
#define EXT5_CALLN_Ev	2
#define EXT5_JMPN_Ev	4
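
/* E.g. 0xff /2 is an indirect call and 0xff /4 an indirect jump; these
   are what tcg_out_branch falls back to when the destination is out of
   range of a 32-bit relative displacement.  */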

/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }

    rex = 0;
    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & (P_EXT | P_EXT38)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        }
    }

    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & (P_EXT | P_EXT38)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        }
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
#endif

static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
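
/* Register-to-register forms use mod = 3.  E.g.
   tcg_out_modrm(s, OPC_MOVL_GvEv, TCG_REG_EAX, TCG_REG_ECX)
   emits 8b c1, i.e. "movl %ecx, %eax".  */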

static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
    int tmp;

    if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) {
        /* Three byte VEX prefix.  */
        tcg_out8(s, 0xc4);

        /* VEX.m-mmmm */
        if (opc & P_EXT38) {
            tmp = 2;
        } else if (opc & P_EXT) {
            tmp = 1;
        } else {
            tcg_abort();
        }
        tmp |= 0x40;                       /* VEX.X */
        tmp |= (r & 8 ? 0 : 0x80);         /* VEX.R */
        tmp |= (rm & 8 ? 0 : 0x20);        /* VEX.B */
        tcg_out8(s, tmp);

        tmp = (opc & P_REXW ? 0x80 : 0);   /* VEX.W */
    } else {
        /* Two byte VEX prefix.  */
        tcg_out8(s, 0xc5);

        tmp = (r & 8 ? 0 : 0x80);          /* VEX.R */
    }
    /* VEX.pp */
    if (opc & P_DATA16) {
        tmp |= 1;                          /* 0x66 */
    } else if (opc & P_SIMDF3) {
        tmp |= 2;                          /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        tmp |= 3;                          /* 0xf2 */
    }
    tmp |= (~v & 15) << 3;                 /* VEX.vvvv */
    tcg_out8(s, tmp);
    tcg_out8(s, opc);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}

/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            tcg_abort();
        } else {
            /* Absolute address.  */
            tcg_out_opc(s, opc, r, 0, 0);
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}

/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
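
/* E.g. tcg_out_modrm_offset(s, OPC_MOVL_GvEv, TCG_REG_EAX, TCG_REG_EBX, 8)
   emits 8b 43 08, i.e. "movl 8(%ebx), %eax" (mod = 1, one disp8 byte).  */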

/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (arg != ret) {
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long diff;

    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    }
    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        return;
    }
    if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
        return;
    }

    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
    diff = arg - ((uintptr_t)s->code_ptr + 7);
    if (diff == (int32_t)diff) {
        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
        tcg_out32(s, diff);
        return;
    }

    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
    tcg_out64(s, arg);
}
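
/* E.g. a zero constant becomes the 2-byte "xorl %reg, %reg", a value
   that fits in 32 bits uses the 5-byte b8+r/imm32 form, and only a full
   64-bit constant falls through to the 10-byte movabsq.  */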

static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}

static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
                               tcg_target_long ofs, tcg_target_long val)
{
    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, 0, base, ofs);
    tcg_out32(s, val);
}

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}

static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}

static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}

/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = tcg_pcrel_diff(s, l->u.value_ptr);
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
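
/* E.g. a conditional branch to a label 16 bytes past the start of the
   jcc emits the 2-byte short form 7x 0e; the rel8 counts from the end
   of the 2-byte instruction, hence the "val - 2" above.  */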

static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif

static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    if (have_cmov) {
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
    } else {
        int over = gen_new_label();
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
}
#endif

static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}
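
/* E.g. tcg_out_call to a helper within +/- 2GB emits the 5-byte
   "call rel32" (e8 xx xx xx xx); otherwise it materializes the address
   in %r10 and emits "call *%r10" (41 ff d2).  */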

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the low and high part of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */

static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                    int mem_index, TCGMemOp s_bits,
                                    tcg_insn_unit **label_ptr, int which)
{
    const TCGReg r0 = TCG_REG_L0;
    const TCGReg r1 = TCG_REG_L1;
    TCGType ttype = TCG_TYPE_I32;
    TCGType htype = TCG_TYPE_I32;
    int trexw = 0, hrexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 64) {
            ttype = TCG_TYPE_I64;
            trexw = P_REXW;
        }
        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
            htype = TCG_TYPE_I64;
            hrexw = P_REXW;
        }
    }

    tcg_out_mov(s, htype, r0, addrlo);
    tcg_out_mov(s, ttype, r1, addrlo);

    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + trexw, r1,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + hrexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);

    /* Prepare for both the fast path add of the tlb addend, and the slow
       path function argument setup.  There are two cases worth note:
       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
       copies the entire guest address for the slow path, while truncation
       for the 32-bit host happens with the fastpath ADDL below.  */
    tcg_out_mov(s, ttype, r1, addrlo);

    /* jne slow_path */
    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);
}
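
/* As a concrete illustration (a sketch; the actual registers depend on
   the calling convention), for a 64-bit guest on a 64-bit host the
   sequence emitted above is roughly:
       mov  r0, addrlo
       mov  r1, addrlo
       shr  r0, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
       and  r1, TARGET_PAGE_MASK | ((1 << s_bits) - 1)
       and  r0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS
       lea  r0, [env + r0 + offsetof(tlb_table[mem_index][0]) + which]
       cmp  r1, [r0]
       mov  r1, addrlo
       jne  slow_path
       add  r1, [r0 + offsetof(CPUTLBEntry, addend) - which]
   leaving r1 as the host address on the fast path.  */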

/*
 * Record the context of a call to the out of line helper code for the slow path
 * for a load or store, so that we can later generate the correct helper code
 */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, tcg_insn_unit *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}

/*
 * Generate code for the slow path for a load at the end of block
 */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGReg data_reg;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
        ofs += 4;

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
                     l->mem_index);
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);
    }

    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);

    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case MO_SW:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case MO_UB:
    case MO_UW:
        /* Note that the helpers have zero-extended to tcg_target_long.  */
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code corresponding to next IR of qemu_ld */
    tcg_out_jmp(s, l->raddr);
}

/*
 * Generate code for the slow path for a store at the end of block
 */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    TCGReg retaddr;

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
        ofs += 4;

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    tcg_target_call_iarg_regs[2], l->datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
                     l->mem_index);

        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
            retaddr = tcg_target_call_iarg_regs[4];
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        } else {
            retaddr = TCG_REG_RAX;
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
        }
    }

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_push(s, retaddr);
    tcg_out_jmp(s, qemu_st_helpers[opc]);
}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
        guest_base_flags = P_GS;
    }
}
#else
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
                                   TCGMemOp memop)
{
    const TCGMemOp real_bswap = memop & MO_BSWAP;
    TCGMemOp bswap = real_bswap;
    int movop = OPC_MOVL_GvEv;

    if (have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_GyMy;
    }

    switch (memop & MO_SSIZE) {
    case MO_UB:
        tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
        break;
    case MO_SB:
        tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
        break;
    case MO_UW:
        tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
        if (real_bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case MO_SW:
        if (real_bswap) {
            if (have_movbe) {
                tcg_out_modrm_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
                                     datalo, base, ofs);
            } else {
                tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
                tcg_out_rolw_8(s, datalo);
            }
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
                                 datalo, base, ofs);
        }
        break;
    case MO_UL:
        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        if (real_bswap) {
            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap32(s, datalo);
            }
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
        }
        break;
#endif
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
            } else {
                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    TCGMemOp s_bits;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    opc = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        TCGReg base = addrlo;
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
    }
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
                                   TCGMemOp memop)
{
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
    const TCGReg scratch = TCG_REG_L0;
    const TCGMemOp real_bswap = memop & MO_BSWAP;
    TCGMemOp bswap = real_bswap;
    int movop = OPC_MOVL_EvGv;

    if (have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_MyGy;
    }

    switch (memop & MO_SIZE) {
    case MO_8:
        /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
           Use the scratch register if necessary.  */
        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                             datalo, base, ofs);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
        break;
    case MO_32:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
        } else if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    TCGMemOp s_bits;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    opc = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        TCGReg base = addrlo;
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
    }
#endif
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, vexop, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif
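
    /* E.g. on a 64-bit host, OP_32_64(add) expands to the two case
       labels INDEX_op_add_i64 (which first sets rexw = P_REXW and falls
       through) and INDEX_op_add_i32, so a single arm below handles both
       widths, with rexw selecting the 64-bit encoding.  */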
1731 switch(opc) {
1732 case INDEX_op_exit_tb:
1733 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
1734 tcg_out_jmp(s, tb_ret_addr);
1735 break;
1736 case INDEX_op_goto_tb:
1737 if (s->tb_jmp_offset) {
1738 /* direct jump method */
1739 tcg_out8(s, OPC_JMP_long); /* jmp im */
1740 s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
1741 tcg_out32(s, 0);
1742 } else {
1743 /* indirect jump method */
1744 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
1745 (intptr_t)(s->tb_next + args[0]));
1747 s->tb_next_offset[args[0]] = tcg_current_code_size(s);
1748 break;
1749 case INDEX_op_call:
1750 if (const_args[0]) {
1751 tcg_out_call(s, (tcg_insn_unit *)(uintptr_t)args[0]);
1752 } else {
1753 /* call *reg */
1754 tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
1756 break;
1757 case INDEX_op_br:
1758 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1759 break;
1760 case INDEX_op_movi_i32:
1761 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1762 break;
1763 OP_32_64(ld8u):
1764 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1765 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1766 break;
1767 OP_32_64(ld8s):
1768 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
1769 break;
1770 OP_32_64(ld16u):
1771 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1772 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1773 break;
1774 OP_32_64(ld16s):
1775 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
1776 break;
1777 #if TCG_TARGET_REG_BITS == 64
1778 case INDEX_op_ld32u_i64:
1779 #endif
1780 case INDEX_op_ld_i32:
1781 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1782 break;
1784 OP_32_64(st8):
1785 if (const_args[0]) {
1786 tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
1787 0, args[1], args[2]);
1788 tcg_out8(s, args[0]);
1789 } else {
1790 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
1791 args[0], args[1], args[2]);
1793 break;
1794 OP_32_64(st16):
1795 if (const_args[0]) {
1796 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
1797 0, args[1], args[2]);
1798 tcg_out16(s, args[0]);
1799 } else {
1800 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
1801 args[0], args[1], args[2]);
1803 break;
1804 #if TCG_TARGET_REG_BITS == 64
1805 case INDEX_op_st32_i64:
1806 #endif
1807 case INDEX_op_st_i32:
1808 if (const_args[0]) {
1809 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
1810 tcg_out32(s, args[0]);
1811 } else {
1812 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1814 break;
1816 OP_32_64(add):
1817 /* For 3-operand addition, use LEA. */
1818 if (args[0] != args[1]) {
1819 TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
1821 if (const_args[2]) {
1822 c3 = a2, a2 = -1;
1823 } else if (a0 == a2) {
1824 /* Watch out for dest = src + dest, since we've removed
1825 the matching constraint on the add. */
1826 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
1827 break;
1830 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
1831 break;
1833 c = ARITH_ADD;
1834 goto gen_arith;
1835 OP_32_64(sub):
1836 c = ARITH_SUB;
1837 goto gen_arith;
1838 OP_32_64(and):
1839 c = ARITH_AND;
1840 goto gen_arith;
1841 OP_32_64(or):
1842 c = ARITH_OR;
1843 goto gen_arith;
1844 OP_32_64(xor):
1845 c = ARITH_XOR;
1846 goto gen_arith;
1847 gen_arith:
1848 if (const_args[2]) {
1849 tgen_arithi(s, c + rexw, args[0], args[2], 0);
1850 } else {
1851 tgen_arithr(s, c + rexw, args[0], args[2]);
1853 break;
1855 OP_32_64(andc):
1856 if (const_args[2]) {
1857 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
1858 args[0], args[1]);
1859 tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
1860 } else {
1861 tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
1863 break;
1865 OP_32_64(mul):
1866 if (const_args[2]) {
1867 int32_t val;
1868 val = args[2];
1869 if (val == (int8_t)val) {
1870 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
1871 tcg_out8(s, val);
1872 } else {
1873 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
1874 tcg_out32(s, val);
1876 } else {
1877 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
1879 break;
1881 OP_32_64(div2):
1882 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
1883 break;
1884 OP_32_64(divu2):
1885 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
1886 break;
1888 OP_32_64(shl):
1889 c = SHIFT_SHL;
1890 vexop = OPC_SHLX;
1891 goto gen_shift_maybe_vex;
1892 OP_32_64(shr):
1893 c = SHIFT_SHR;
1894 vexop = OPC_SHRX;
1895 goto gen_shift_maybe_vex;
1896 OP_32_64(sar):
1897 c = SHIFT_SAR;
1898 vexop = OPC_SARX;
1899 goto gen_shift_maybe_vex;
1900 OP_32_64(rotl):
1901 c = SHIFT_ROL;
1902 goto gen_shift;
1903 OP_32_64(rotr):
1904 c = SHIFT_ROR;
1905 goto gen_shift;
1906 gen_shift_maybe_vex:
1907 if (have_bmi2 && !const_args[2]) {
1908 tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]);
1909 break;
1911 /* FALLTHRU */
1912 gen_shift:
1913 if (const_args[2]) {
1914 tcg_out_shifti(s, c + rexw, args[0], args[2]);
1915 } else {
1916 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
1918 break;
1920 case INDEX_op_brcond_i32:
1921 tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
1922 args[3], 0);
1923 break;
1924 case INDEX_op_setcond_i32:
1925 tcg_out_setcond32(s, args[3], args[0], args[1],
1926 args[2], const_args[2]);
1927 break;
1928 case INDEX_op_movcond_i32:
1929 tcg_out_movcond32(s, args[5], args[0], args[1],
1930 args[2], const_args[2], args[3]);
1931 break;
1933 OP_32_64(bswap16):
1934 tcg_out_rolw_8(s, args[0]);
1935 break;
1936 OP_32_64(bswap32):
1937 tcg_out_bswap32(s, args[0]);
1938 break;
1940 OP_32_64(neg):
1941 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
1942 break;
1943 OP_32_64(not):
1944 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
1945 break;
1947 OP_32_64(ext8s):
1948 tcg_out_ext8s(s, args[0], args[1], rexw);
1949 break;
1950 OP_32_64(ext16s):
1951 tcg_out_ext16s(s, args[0], args[1], rexw);
1952 break;
1953 OP_32_64(ext8u):
1954 tcg_out_ext8u(s, args[0], args[1]);
1955 break;
1956 OP_32_64(ext16u):
1957 tcg_out_ext16u(s, args[0], args[1]);
1958 break;
1960 case INDEX_op_qemu_ld_i32:
1961 tcg_out_qemu_ld(s, args, 0);
1962 break;
1963 case INDEX_op_qemu_ld_i64:
1964 tcg_out_qemu_ld(s, args, 1);
1965 break;
1966 case INDEX_op_qemu_st_i32:
1967 tcg_out_qemu_st(s, args, 0);
1968 break;
1969 case INDEX_op_qemu_st_i64:
1970 tcg_out_qemu_st(s, args, 1);
1971 break;
1973 OP_32_64(mulu2):
1974 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
1975 break;
1976 OP_32_64(muls2):
1977 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
1978 break;
    OP_32_64(add2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
        }
        break;
    OP_32_64(sub2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
        }
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
                                 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
#endif
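
    /* deposit is supported only where a partial-register store does the
       job: bits 0..7 via a byte move, bits 8..15 via a byte move to the
       %ah-family register (encoding args[0] + 4 maps register codes 0-3
       onto AH/CH/DH/BH, hence the "Q" constraint below), and bits 0..15
       via a 0x66-prefixed word move. */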
    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
                          args[2], args[0]);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
        } else {
            tcg_abort();
        }
        break;

    default:
        tcg_abort();
    }
}

#undef OP_32_64
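
/* Operand constraints, decoded by this backend's target_parse_constraint.
   Roughly: "r" is any register, "q" a byte-addressable register, "Q" a
   register whose second byte is addressable (%ah and friends), "a"/"d"
   fixed %eax/%edx, a digit ties an output to the same register as that
   input, "i" any immediate, "e" a sign-extended and "Z" a zero-extended
   32-bit immediate, "c" %ecx, "C" a shift count (any register when BMI2
   is usable, else %ecx), "I" a constant whose complement fits in 32 bits
   (for andc), and "L" a register safe to use around the qemu_ld/st slow
   path. */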
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "qi", "r" } },
    { INDEX_op_st16_i32, { "ri", "r" } },
    { INDEX_op_st_i32, { "ri", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "Ci" } },
    { INDEX_op_shr_i32, { "r", "0", "Ci" } },
    { INDEX_op_sar_i32, { "r", "0", "Ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },

    { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },

    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "ri", "r" } },
    { INDEX_op_st16_i64, { "ri", "r" } },
    { INDEX_op_st32_i64, { "ri", "r" } },
    { INDEX_op_st_i64, { "re", "r" } },

    { INDEX_op_add_i64, { "r", "r", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },
    { INDEX_op_andc_i64, { "r", "r", "rI" } },

    { INDEX_op_shl_i64, { "r", "0", "Ci" } },
    { INDEX_op_shr_i64, { "r", "0", "Ci" } },
    { INDEX_op_sar_i64, { "r", "0", "Ci" } },
    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
    { INDEX_op_rotr_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },
    { INDEX_op_setcond_i64, { "r", "r", "re" } },

    { INDEX_op_bswap16_i64, { "r", "0" } },
    { INDEX_op_bswap32_i64, { "r", "0" } },
    { INDEX_op_bswap64_i64, { "r", "0" } },
    { INDEX_op_neg_i64, { "r", "0" } },
    { INDEX_op_not_i64, { "r", "0" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
    { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },

    { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
    { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
#if defined(_WIN64)
    TCG_REG_RDI,
    TCG_REG_RSI,
#endif
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env. */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
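
/* Worked example (not normative): on 64-bit non-Windows hosts there are
   six callee-saved registers, so PUSH_SIZE = (1 + 6) * 8 = 56 bytes
   including the return address.  Assuming the usual values of
   TCG_STATIC_CALL_ARGS_SIZE == 128 and CPU_TEMP_BUF_NLONGS == 128,
   FRAME_SIZE = (56 + 128 + 128 * 8 + 15) & ~15 = 1216 bytes. */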

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, stack_addend;

    /* TB prologue */

    /* Reserve some stack space, also for TCG temps. */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers. */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb. */
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                         + stack_addend);
#else
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb. */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif
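
    /* The prologue's two arguments are the CPU env (loaded into TCG_AREG0
       above) and the TB address it tail-jumps to.  In the 32-bit case
       both live on the caller's stack; since ESP has already been lowered
       by stack_addend, that amount is added back into the displacement
       used for the indirect jump. */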

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);

#if !defined(CONFIG_SOFTMMU)
    /* Try to set up a segment register to point to GUEST_BASE. */
    if (GUEST_BASE) {
        setup_guest_base_seg();
    }
#endif
}

static void tcg_target_init(TCGContext *s)
{
#ifdef CONFIG_CPUID_H
    unsigned a, b, c, d;
    int max = __get_cpuid_max(0, 0);

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
#ifndef have_cmov
        /* For 32-bit, 99% certainty that we're running on hardware that
           supports cmov, but we still need to check.  In case cmov is not
           available, we'll use a small forward branch. */
        have_cmov = (d & bit_CMOV) != 0;
#endif
#ifndef have_movbe
        /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
           need to probe for it. */
        have_movbe = (c & bit_MOVBE) != 0;
#endif
    }

    if (max >= 7) {
        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
        __cpuid_count(7, 0, a, b, c, d);
#ifdef bit_BMI
        have_bmi1 = (b & bit_BMI) != 0;
#endif
#ifndef have_bmi2
        have_bmi2 = (b & bit_BMI2) != 0;
#endif
    }
#endif

    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
    } else {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
    }

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
#if !defined(_WIN64)
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
#endif
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    }

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(x86_op_defs);
}

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[14];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
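
/* Illustration (not normative): fde_def_cfa below stores FRAME_SIZE as a
   two-byte uleb128, low 7 bits first with the continuation bit set.  For
   a hypothetical FRAME_SIZE of 1216 (0x4c0) that is 0xc0, 0x09, since
   0x40 + 9 * 128 == 1216; the build assertion above guarantees two bytes
   always suffice. */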

#if !defined(__ELF__)
    /* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x78,             /* sleb128 -8 */
    .cie.return_column = 16,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs. */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
    }
};
#else
#define ELF_HOST_MACHINE EM_386
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x7c,             /* sleb128 -4 */
    .cie.return_column = 8,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs. */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
    }
};
#endif
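
/* Hand the frame description to the common code, which registers the
   translation buffer with GDB's JIT interface so generated code can be
   unwound and debugged. */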
#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif