/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
};
#endif

static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
};

static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};

/* Constants we accept.  */
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400

/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif

/* The host compiler should supply <cpuid.h> to enable runtime features
   detection, as we're not going to go so far as our own inline assembly.
   If not available, default values will be assumed.  */
#if defined(CONFIG_CPUID_H)
#include <cpuid.h>
#endif

/* For 32-bit, we are going to attempt to determine at runtime whether cmov
   is available.  */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV)
static bool have_cmov;
#else
# define have_cmov 0
#endif

/* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are
   going to attempt to determine at runtime whether movbe is available.  */
#if defined(CONFIG_CPUID_H) && defined(bit_MOVBE)
static bool have_movbe;
#else
# define have_movbe 0
#endif

/* We need this symbol in tcg-target.h, and we can't properly conditionalize
   it there.  Therefore we always define the variable.  */
bool have_bmi1;

#if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
static bool have_bmi2;
#else
# define have_bmi2 0
#endif

static tcg_insn_unit *tb_ret_addr;
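
/* Patch a pc-relative relocation previously recorded with tcg_out_reloc.
   The addend supplied by the caller (e.g. -4 for a rel32 field, -1 for a
   rel8 field; see tcg_out_jxx below) compensates for the distance between
   the start of the field and the end of the instruction.  */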

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;
    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            tcg_abort();
        }
        tcg_patch32(code_ptr, value);
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        tcg_patch8(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
    case_c:
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        }
        break;
    case 'Q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
    case_r:
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        break;
    case 'C':
        /* With SHRX et al, we need not use ECX as shift count register.  */
        if (have_bmi2) {
            goto case_r;
        } else {
            goto case_c;
        }

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_I32;
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
        return 1;
    }
    return 0;
}
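
/* For example, the 'I' constraint (TCG_CT_CONST_I32) accepts any value whose
   bitwise complement fits in 32 bits; the andc expansion in tcg_out_op below
   relies on this, applying a 32-bit AND immediate of ~val.  */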

#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)
#endif
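
/* LOWREGMASK yields the 3-bit register number encoded in a ModRM or SIB
   byte; on 64-bit hosts the fourth register bit travels in the REX (or VEX)
   prefix instead.  */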

#define P_EXT           0x100           /* 0x0f opcode prefix */
#define P_EXT38         0x200           /* 0x0f 0x38 opcode prefix */
#define P_DATA16        0x400           /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32       0x800           /* 0x67 opcode prefix */
# define P_REXW         0x1000          /* Set REX.W = 1 */
# define P_REXB_R       0x2000          /* REG field as byte register */
# define P_REXB_RM      0x4000          /* R/M field as byte register */
# define P_GS           0x8000          /* gs segment override */
#else
# define P_ADDR32       0
# define P_REXW         0
# define P_REXB_R       0
# define P_REXB_RM      0
# define P_GS           0
#endif
#define P_SIMDF3        0x10000         /* 0xf3 opcode prefix */
#define P_SIMDF2        0x20000         /* 0xf2 opcode prefix */

#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN        (0xf2 | P_EXT38)
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT  2
#define EXT3_NEG  3
#define EXT3_MUL  4
#define EXT3_IMUL 5
#define EXT3_DIV  6
#define EXT3_IDIV 7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev   0
#define EXT5_DEC_Ev   1
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev  4

/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        tcg_debug_assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }

    rex = 0;
    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & (P_EXT | P_EXT38)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        }
    }

    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & (P_EXT | P_EXT38)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        }
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif

static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
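
/* For example, tcg_out_modrm(s, OPC_ADD_GvEv + P_REXW, TCG_REG_RAX,
   TCG_REG_RCX) emits the bytes 48 03 c1, i.e. "addq %rcx, %rax": the REX.W
   prefix, the 0x03 opcode, and a ModRM byte with mod=3, reg=0 (rax),
   rm=1 (rcx).  */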

static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
    int tmp;

    if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) {
        /* Three byte VEX prefix.  */
        tcg_out8(s, 0xc4);

        /* VEX.m-mmmm */
        if (opc & P_EXT38) {
            tmp = 2;
        } else if (opc & P_EXT) {
            tmp = 1;
        } else {
            tcg_abort();
        }
        tmp |= 0x40;                    /* VEX.X */
        tmp |= (r & 8 ? 0 : 0x80);      /* VEX.R */
        tmp |= (rm & 8 ? 0 : 0x20);     /* VEX.B */
        tcg_out8(s, tmp);

        tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
    } else {
        /* Two byte VEX prefix.  */
        tcg_out8(s, 0xc5);

        tmp = (r & 8 ? 0 : 0x80);       /* VEX.R */
    }
    /* VEX.pp */
    if (opc & P_DATA16) {
        tmp |= 1;                       /* 0x66 */
    } else if (opc & P_SIMDF3) {
        tmp |= 2;                       /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        tmp |= 3;                       /* 0xf2 */
    }
    tmp |= (~v & 15) << 3;              /* VEX.vvvv */
    tcg_out8(s, tmp);
    tcg_out8(s, opc);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}

/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            tcg_abort();
        } else {
            /* Absolute address.  */
            tcg_out_opc(s, opc, r, 0, 0);
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            tcg_debug_assert(index != TCG_REG_ESP);
        }

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}

/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}

/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (arg != ret) {
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long diff;

    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    }
    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        return;
    }
    if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
        return;
    }

    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
    diff = arg - ((uintptr_t)s->code_ptr + 7);
    if (diff == (int32_t)diff) {
        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
        tcg_out32(s, diff);
        return;
    }

    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
    tcg_out64(s, arg);
}
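
/* Note the encodings tried above, in roughly increasing size: a short xor
   for zero, a 5-byte movl for values with no high bits set, a 7-byte
   sign-extended movq, the 7-byte rip-relative lea, and the full 10-byte
   movabs only as a last resort.  */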

static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}

static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
                               tcg_target_long ofs, tcg_target_long val)
{
    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, 0, base, ofs);
    tcg_out32(s, val);
}

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}

static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}

static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}

/* Use SMALL != 0 to force a short forward branch.  */
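/* The displacement adjustments below (-2, -5, -6) account for the length of
   the jump instruction itself: 2 bytes for the short forms, 5 bytes for
   "jmp rel32", and 6 bytes for the two-byte-opcode "jcc rel32".  */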
static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
{
    int32_t val, val1;

    if (l->has_value) {
        val = tcg_pcrel_diff(s, l->u.value_ptr);
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
        s->code_ptr += 4;
    }
}

static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
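/* For the ordered conditions below, the high words are compared first:
   branch to the target when the high-word comparison already decides,
   skip to LABEL_NEXT when the high words differ the other way, and fall
   through to an unsigned comparison of the low words when the high words
   are equal.  */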
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    TCGLabel *label_next = gen_new_label();
    TCGLabel *label_this = arg_label(args[5]);

    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         label_this, small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_this, small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         label_this, small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         label_this, small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         label_this, small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    TCGLabel *label_true, *label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_arg(label_true);
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_arg(label_over);
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif

static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    if (have_cmov) {
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
    } else {
        TCGLabel *over = gen_new_label();
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
}
#endif

static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}
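
/* The out-of-range fallback above can only trigger on a 64-bit host, where
   R10 is call-clobbered and never carries an argument in either the SysV
   or Win64 calling convention, so it is free to serve as scratch here.  */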

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}

static void tcg_out_nopn(TCGContext *s, int n)
{
    int i;
    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
     * duplicate prefix, and all of the interesting recent cores can
     * decode and discard the duplicates in a single cycle.
     */
    tcg_debug_assert(n >= 1);
    for (i = 1; i < n; ++i) {
        tcg_out8(s, 0x66);
    }
    tcg_out8(s, 0x90);
}

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the low and high part of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */

static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                    int mem_index, TCGMemOp opc,
                                    tcg_insn_unit **label_ptr, int which)
{
    const TCGReg r0 = TCG_REG_L0;
    const TCGReg r1 = TCG_REG_L1;
    TCGType ttype = TCG_TYPE_I32;
    TCGType tlbtype = TCG_TYPE_I32;
    int trexw = 0, hrexw = 0, tlbrexw = 0;
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    bool aligned = (opc & MO_AMASK) == MO_ALIGN || s_mask == 0;

    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 64) {
            ttype = TCG_TYPE_I64;
            trexw = P_REXW;
        }
        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
            hrexw = P_REXW;
            if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
                tlbtype = TCG_TYPE_I64;
                tlbrexw = P_REXW;
            }
        }
    }

    tcg_out_mov(s, tlbtype, r0, addrlo);
    if (aligned) {
        tcg_out_mov(s, ttype, r1, addrlo);
    } else {
        /* For unaligned access check that we don't cross pages using
           the page address of the last byte.  */
        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask);
    }

    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + trexw, r1,
                TARGET_PAGE_MASK | (aligned ? s_mask : 0), 0);
    tgen_arithi(s, ARITH_AND + tlbrexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);

    /* Prepare for both the fast path add of the tlb addend, and the slow
       path function argument setup.  There are two cases worth note:
       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
       copies the entire guest address for the slow path, while truncation
       for the 32-bit host happens with the fastpath ADDL below.  */
    tcg_out_mov(s, ttype, r1, addrlo);

    /* jne slow_path */
    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);
}

/*
 * Record the context of a call to the out of line helper code for the slow path
 * for a load or store, so that we can later generate the correct helper code
 */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                tcg_insn_unit *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}

/*
 * Generate code for the slow path for a load at the end of block
 */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg data_reg;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
        ofs += 4;

        tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);
    }

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case MO_SW:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case MO_UB:
    case MO_UW:
        /* Note that the helpers have zero-extended to tcg_target_long.  */
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code corresponding to next IR of qemu_ld */
    tcg_out_jmp(s, l->raddr);
}

/*
 * Generate code for the slow path for a store at the end of block
 */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    TCGReg retaddr;

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
        ofs += 4;

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    tcg_target_call_iarg_regs[2], l->datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);

        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
            retaddr = tcg_target_call_iarg_regs[4];
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
        } else {
            retaddr = TCG_REG_RAX;
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
                       TCG_TARGET_CALL_STACK_OFFSET);
        }
    }

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_push(s, retaddr);
    tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
}

#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
        guest_base_flags = P_GS;
    }
}
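
/* With the %gs base set to guest_base as above, a guest access can be
   emitted as a single instruction with a gs segment override, instead of
   materializing guest_base in a register and adding it explicitly.  */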
#else
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, int index, intptr_t ofs,
                                   int seg, TCGMemOp memop)
{
    const TCGMemOp real_bswap = memop & MO_BSWAP;
    TCGMemOp bswap = real_bswap;
    int movop = OPC_MOVL_GvEv;

    if (have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_GyMy;
    }

    switch (memop & MO_SSIZE) {
    case MO_UB:
        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
                                 base, index, 0, ofs);
        break;
    case MO_SB:
        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + P_REXW + seg, datalo,
                                 base, index, 0, ofs);
        break;
    case MO_UW:
        tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
                                 base, index, 0, ofs);
        if (real_bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case MO_SW:
        if (real_bswap) {
            if (have_movbe) {
                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
                                         datalo, base, index, 0, ofs);
            } else {
                tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
                                         base, index, 0, ofs);
                tcg_out_rolw_8(s, datalo);
            }
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + P_REXW + seg,
                                     datalo, base, index, 0, ofs);
        }
        break;
    case MO_UL:
        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        if (real_bswap) {
            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                     base, index, 0, ofs);
            if (bswap) {
                tcg_out_bswap32(s, datalo);
            }
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
                                     base, index, 0, ofs);
        }
        break;
#endif
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
                                     base, index, 0, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                         base, index, 0, ofs);
                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
                                         base, index, 0, ofs + 4);
            } else {
                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
                                         base, index, 0, ofs + 4);
                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
                                         base, index, 0, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else
    {
        int32_t offset = guest_base;
        TCGReg base = addrlo;
        int index = -1;
        int seg = 0;

        /* For a 32-bit guest, the high 32 bits may contain garbage.
           We can do this with the ADDR32 prefix if we're not using
           a guest base, or when using segmentation.  Otherwise we
           need to zero-extend manually.  */
        if (guest_base == 0 || guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
            if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
                seg |= P_ADDR32;
            }
        } else if (TCG_TARGET_REG_BITS == 64) {
            if (TARGET_LONG_BITS == 32) {
                tcg_out_ext32u(s, TCG_REG_L0, base);
                base = TCG_REG_L0;
            }
            if (offset != guest_base) {
                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                index = TCG_REG_L1;
                offset = 0;
            }
        }

        tcg_out_qemu_ld_direct(s, datalo, datahi,
                               base, index, offset, seg, opc);
    }
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
                                   TCGMemOp memop)
{
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
    const TCGReg scratch = TCG_REG_L0;
    const TCGMemOp real_bswap = memop & MO_BSWAP;
    TCGMemOp bswap = real_bswap;
    int movop = OPC_MOVL_EvGv;

    if (have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_MyGy;
    }

    switch (memop & MO_SIZE) {
    case MO_8:
        /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
           Use the scratch register if necessary.  */
        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                             datalo, base, ofs);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
        break;
    case MO_32:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
        } else if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    mem_index = get_mmuidx(oi);

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else
    {
        int32_t offset = guest_base;
        TCGReg base = addrlo;
        int seg = 0;

        /* See comment in tcg_out_qemu_ld re zero-extension of addrlo.  */
        if (guest_base == 0 || guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
            if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
                seg |= P_ADDR32;
            }
        } else if (TCG_TARGET_REG_BITS == 64) {
            /* ??? Note that we can't use the same SIB addressing scheme
               as for loads, since we require L0 free for bswap.  */
            if (offset != guest_base) {
                if (TARGET_LONG_BITS == 32) {
                    tcg_out_ext32u(s, TCG_REG_L0, base);
                    base = TCG_REG_L0;
                }
                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
                tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
                base = TCG_REG_L1;
                offset = 0;
            } else if (TARGET_LONG_BITS == 32) {
                tcg_out_ext32u(s, TCG_REG_L1, base);
                base = TCG_REG_L1;
            }
        }

        tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
    }
#endif
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, vexop, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif
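
/* OP_32_64 expands to both the _i32 and _i64 case labels for an opcode; on
   64-bit hosts the _i64 label additionally sets REXW, so the shared
   emission code below produces the 64-bit form of the same instruction.  */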
1787 switch(opc) {
1788 case INDEX_op_exit_tb:
1789 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
1790 tcg_out_jmp(s, tb_ret_addr);
1791 break;
1792 case INDEX_op_goto_tb:
1793 if (s->tb_jmp_insn_offset) {
1794 /* direct jump method */
1795 int gap;
1796 /* jump displacement must be aligned for atomic patching;
1797 * see if we need to add extra nops before jump
1799 gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
1800 if (gap != 1) {
1801 tcg_out_nopn(s, gap - 1);
1803 tcg_out8(s, OPC_JMP_long); /* jmp im */
1804 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
1805 tcg_out32(s, 0);
1806 } else {
1807 /* indirect jump method */
1808 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
1809 (intptr_t)(s->tb_jmp_target_addr + args[0]));
1811 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
1812 break;
1813 case INDEX_op_br:
1814 tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
1815 break;
1816 OP_32_64(ld8u):
1817 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1818 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1819 break;
1820 OP_32_64(ld8s):
1821 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
1822 break;
1823 OP_32_64(ld16u):
1824 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1825 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1826 break;
1827 OP_32_64(ld16s):
1828 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
1829 break;
1830 #if TCG_TARGET_REG_BITS == 64
1831 case INDEX_op_ld32u_i64:
1832 #endif
1833 case INDEX_op_ld_i32:
1834 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1835 break;
1837 OP_32_64(st8):
1838 if (const_args[0]) {
1839 tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
1840 0, args[1], args[2]);
1841 tcg_out8(s, args[0]);
1842 } else {
1843 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
1844 args[0], args[1], args[2]);
1846 break;
1847 OP_32_64(st16):
1848 if (const_args[0]) {
1849 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
1850 0, args[1], args[2]);
1851 tcg_out16(s, args[0]);
1852 } else {
1853 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
1854 args[0], args[1], args[2]);
1856 break;
1857 #if TCG_TARGET_REG_BITS == 64
1858 case INDEX_op_st32_i64:
1859 #endif
1860 case INDEX_op_st_i32:
1861 if (const_args[0]) {
1862 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
1863 tcg_out32(s, args[0]);
1864 } else {
1865 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1867 break;
1869 OP_32_64(add):
1870 /* For 3-operand addition, use LEA. */
1871 if (args[0] != args[1]) {
1872 TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
1874 if (const_args[2]) {
1875 c3 = a2, a2 = -1;
1876 } else if (a0 == a2) {
1877 /* Watch out for dest = src + dest, since we've removed
1878 the matching constraint on the add. */
1879 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
1880 break;
1883 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
1884 break;
1886 c = ARITH_ADD;
1887 goto gen_arith;
1888 OP_32_64(sub):
1889 c = ARITH_SUB;
1890 goto gen_arith;
1891 OP_32_64(and):
1892 c = ARITH_AND;
1893 goto gen_arith;
1894 OP_32_64(or):
1895 c = ARITH_OR;
1896 goto gen_arith;
1897 OP_32_64(xor):
1898 c = ARITH_XOR;
1899 goto gen_arith;
1900 gen_arith:
1901 if (const_args[2]) {
1902 tgen_arithi(s, c + rexw, args[0], args[2], 0);
1903 } else {
1904 tgen_arithr(s, c + rexw, args[0], args[2]);
1906 break;
1908 OP_32_64(andc):
1909 if (const_args[2]) {
1910 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
1911 args[0], args[1]);
1912 tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
1913 } else {
1914 tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
1916 break;
1918 OP_32_64(mul):
1919 if (const_args[2]) {
1920 int32_t val;
1921 val = args[2];
1922 if (val == (int8_t)val) {
1923 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
1924 tcg_out8(s, val);
1925 } else {
1926 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
1927 tcg_out32(s, val);
1929 } else {
1930 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
1932 break;
1934 OP_32_64(div2):
1935 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
1936 break;
1937 OP_32_64(divu2):
1938 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
1939 break;
1941 OP_32_64(shl):
1942 c = SHIFT_SHL;
1943 vexop = OPC_SHLX;
1944 goto gen_shift_maybe_vex;
1945 OP_32_64(shr):
1946 c = SHIFT_SHR;
1947 vexop = OPC_SHRX;
1948 goto gen_shift_maybe_vex;
1949 OP_32_64(sar):
1950 c = SHIFT_SAR;
1951 vexop = OPC_SARX;
1952 goto gen_shift_maybe_vex;
1953 OP_32_64(rotl):
1954 c = SHIFT_ROL;
1955 goto gen_shift;
1956 OP_32_64(rotr):
1957 c = SHIFT_ROR;
1958 goto gen_shift;
1959 gen_shift_maybe_vex:
1960 if (have_bmi2 && !const_args[2]) {
1961 tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]);
1962 break;
1964 /* FALLTHRU */
1965 gen_shift:
1966 if (const_args[2]) {
1967 tcg_out_shifti(s, c + rexw, args[0], args[2]);
1968 } else {
1969 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
1971 break;
1973 case INDEX_op_brcond_i32:
1974 tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
1975 arg_label(args[3]), 0);
1976 break;
1977 case INDEX_op_setcond_i32:
1978 tcg_out_setcond32(s, args[3], args[0], args[1],
1979 args[2], const_args[2]);
1980 break;
1981 case INDEX_op_movcond_i32:
1982 tcg_out_movcond32(s, args[5], args[0], args[1],
1983 args[2], const_args[2], args[3]);
1984 break;
1986 OP_32_64(bswap16):
1987 tcg_out_rolw_8(s, args[0]);
1988 break;
1989 OP_32_64(bswap32):
1990 tcg_out_bswap32(s, args[0]);
1991 break;
1993 OP_32_64(neg):
1994 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
1995 break;
1996 OP_32_64(not):
1997 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
1998 break;
2000 OP_32_64(ext8s):
2001 tcg_out_ext8s(s, args[0], args[1], rexw);
2002 break;
2003 OP_32_64(ext16s):
2004 tcg_out_ext16s(s, args[0], args[1], rexw);
2005 break;
2006 OP_32_64(ext8u):
2007 tcg_out_ext8u(s, args[0], args[1]);
2008 break;
2009 OP_32_64(ext16u):
2010 tcg_out_ext16u(s, args[0], args[1]);
2011 break;
2013 case INDEX_op_qemu_ld_i32:
2014 tcg_out_qemu_ld(s, args, 0);
2015 break;
2016 case INDEX_op_qemu_ld_i64:
2017 tcg_out_qemu_ld(s, args, 1);
2018 break;
2019 case INDEX_op_qemu_st_i32:
2020 tcg_out_qemu_st(s, args, 0);
2021 break;
2022 case INDEX_op_qemu_st_i64:
2023 tcg_out_qemu_st(s, args, 1);
2024 break;
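/* The double-word helpers below rely on fixed x86 register roles:
   one-operand MUL/IMUL leave the full product in EDX:EAX (hence the
   "a" and "d" constraints in the table further down), while add2 and
   sub2 chain the carry with ADD/ADC and SUB/SBB respectively. */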
2026 OP_32_64(mulu2):
2027 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2028 break;
2029 OP_32_64(muls2):
2030 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2031 break;
2032 OP_32_64(add2):
2033 if (const_args[4]) {
2034 tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
2035 } else {
2036 tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
2037 }
2038 if (const_args[5]) {
2039 tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
2040 } else {
2041 tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
2042 }
2043 break;
2044 OP_32_64(sub2):
2045 if (const_args[4]) {
2046 tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
2047 } else {
2048 tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
2049 }
2050 if (const_args[5]) {
2051 tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
2052 } else {
2053 tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
2054 }
2055 break;
2057 #if TCG_TARGET_REG_BITS == 32
2058 case INDEX_op_brcond2_i32:
2059 tcg_out_brcond2(s, args, const_args, 0);
2060 break;
2061 case INDEX_op_setcond2_i32:
2062 tcg_out_setcond2(s, args, const_args);
2063 break;
2064 #else /* TCG_TARGET_REG_BITS == 64 */
2065 case INDEX_op_ld32s_i64:
2066 tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
2067 break;
2068 case INDEX_op_ld_i64:
2069 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2070 break;
2071 case INDEX_op_st_i64:
2072 if (const_args[0]) {
2073 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
2074 0, args[1], args[2]);
2075 tcg_out32(s, args[0]);
2076 } else {
2077 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
2078 }
2079 break;
2081 case INDEX_op_brcond_i64:
2082 tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
2083 arg_label(args[3]), 0);
2084 break;
2085 case INDEX_op_setcond_i64:
2086 tcg_out_setcond64(s, args[3], args[0], args[1],
2087 args[2], const_args[2]);
2088 break;
2089 case INDEX_op_movcond_i64:
2090 tcg_out_movcond64(s, args[5], args[0], args[1],
2091 args[2], const_args[2], args[3]);
2092 break;
2094 case INDEX_op_bswap64_i64:
2095 tcg_out_bswap64(s, args[0]);
2096 break;
2097 case INDEX_op_extu_i32_i64:
2098 case INDEX_op_ext32u_i64:
2099 tcg_out_ext32u(s, args[0], args[1]);
2100 break;
2101 case INDEX_op_ext_i32_i64:
2102 case INDEX_op_ext32s_i64:
2103 tcg_out_ext32s(s, args[0], args[1]);
2104 break;
2105 #endif
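/* Deposit is only accepted where x86 has a matching partial-register
   store: bits 0..7 (a byte move), bits 8..15 (a byte move to the
   high-byte register, encoded as register number + 4, i.e. %ah, %ch,
   %dh or %bh), and bits 0..15 (a 16-bit move).  Other fields are
   filtered out before code generation, so the final arm is
   unreachable. */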
2107 OP_32_64(deposit):
2108 if (args[3] == 0 && args[4] == 8) {
2109 /* load bits 0..7 */
2110 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
2111 args[2], args[0]);
2112 } else if (args[3] == 8 && args[4] == 8) {
2113 /* load bits 8..15 */
2114 tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
2115 } else if (args[3] == 0 && args[4] == 16) {
2116 /* load bits 0..15 */
2117 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
2118 } else {
2119 tcg_abort();
2120 }
2121 break;
2123 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2124 case INDEX_op_mov_i64:
2125 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2126 case INDEX_op_movi_i64:
2127 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2128 default:
2129 tcg_abort();
2130 }
2132 #undef OP_32_64
2133 }
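/* Constraint letters used in the table below, as parsed by
   target_parse_constraint earlier in this file: "r" any register,
   "q" a byte-addressable register, "Q" a register whose second byte
   (%ah and friends) is addressable, "a"/"d" exactly EAX/EDX, "c" ECX,
   "C" any register when BMI2 shifts are available, else ECX, "L" a
   register safe to use around the qemu_ld/st helper calls, "0"/"1" must
   match the like-numbered output, "i" any immediate, "e" a sign-extended
   32-bit immediate, "Z" a zero-extended 32-bit immediate, and "I" an
   immediate whose complement fits in 32 bits (used by andc_i64). */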
2135 static const TCGTargetOpDef x86_op_defs[] = {
2136 { INDEX_op_exit_tb, { } },
2137 { INDEX_op_goto_tb, { } },
2138 { INDEX_op_br, { } },
2139 { INDEX_op_ld8u_i32, { "r", "r" } },
2140 { INDEX_op_ld8s_i32, { "r", "r" } },
2141 { INDEX_op_ld16u_i32, { "r", "r" } },
2142 { INDEX_op_ld16s_i32, { "r", "r" } },
2143 { INDEX_op_ld_i32, { "r", "r" } },
2144 { INDEX_op_st8_i32, { "qi", "r" } },
2145 { INDEX_op_st16_i32, { "ri", "r" } },
2146 { INDEX_op_st_i32, { "ri", "r" } },
2148 { INDEX_op_add_i32, { "r", "r", "ri" } },
2149 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2150 { INDEX_op_mul_i32, { "r", "0", "ri" } },
2151 { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
2152 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
2153 { INDEX_op_and_i32, { "r", "0", "ri" } },
2154 { INDEX_op_or_i32, { "r", "0", "ri" } },
2155 { INDEX_op_xor_i32, { "r", "0", "ri" } },
2156 { INDEX_op_andc_i32, { "r", "r", "ri" } },
2158 { INDEX_op_shl_i32, { "r", "0", "Ci" } },
2159 { INDEX_op_shr_i32, { "r", "0", "Ci" } },
2160 { INDEX_op_sar_i32, { "r", "0", "Ci" } },
2161 { INDEX_op_rotl_i32, { "r", "0", "ci" } },
2162 { INDEX_op_rotr_i32, { "r", "0", "ci" } },
2164 { INDEX_op_brcond_i32, { "r", "ri" } },
2166 { INDEX_op_bswap16_i32, { "r", "0" } },
2167 { INDEX_op_bswap32_i32, { "r", "0" } },
2169 { INDEX_op_neg_i32, { "r", "0" } },
2171 { INDEX_op_not_i32, { "r", "0" } },
2173 { INDEX_op_ext8s_i32, { "r", "q" } },
2174 { INDEX_op_ext16s_i32, { "r", "r" } },
2175 { INDEX_op_ext8u_i32, { "r", "q" } },
2176 { INDEX_op_ext16u_i32, { "r", "r" } },
2178 { INDEX_op_setcond_i32, { "q", "r", "ri" } },
2180 { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
2181 { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
2183 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
2184 { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
2185 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2186 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2188 #if TCG_TARGET_REG_BITS == 32
2189 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
2190 { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
2191 #else
2192 { INDEX_op_ld8u_i64, { "r", "r" } },
2193 { INDEX_op_ld8s_i64, { "r", "r" } },
2194 { INDEX_op_ld16u_i64, { "r", "r" } },
2195 { INDEX_op_ld16s_i64, { "r", "r" } },
2196 { INDEX_op_ld32u_i64, { "r", "r" } },
2197 { INDEX_op_ld32s_i64, { "r", "r" } },
2198 { INDEX_op_ld_i64, { "r", "r" } },
2199 { INDEX_op_st8_i64, { "ri", "r" } },
2200 { INDEX_op_st16_i64, { "ri", "r" } },
2201 { INDEX_op_st32_i64, { "ri", "r" } },
2202 { INDEX_op_st_i64, { "re", "r" } },
2204 { INDEX_op_add_i64, { "r", "r", "re" } },
2205 { INDEX_op_mul_i64, { "r", "0", "re" } },
2206 { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
2207 { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
2208 { INDEX_op_sub_i64, { "r", "0", "re" } },
2209 { INDEX_op_and_i64, { "r", "0", "reZ" } },
2210 { INDEX_op_or_i64, { "r", "0", "re" } },
2211 { INDEX_op_xor_i64, { "r", "0", "re" } },
2212 { INDEX_op_andc_i64, { "r", "r", "rI" } },
2214 { INDEX_op_shl_i64, { "r", "0", "Ci" } },
2215 { INDEX_op_shr_i64, { "r", "0", "Ci" } },
2216 { INDEX_op_sar_i64, { "r", "0", "Ci" } },
2217 { INDEX_op_rotl_i64, { "r", "0", "ci" } },
2218 { INDEX_op_rotr_i64, { "r", "0", "ci" } },
2220 { INDEX_op_brcond_i64, { "r", "re" } },
2221 { INDEX_op_setcond_i64, { "r", "r", "re" } },
2223 { INDEX_op_bswap16_i64, { "r", "0" } },
2224 { INDEX_op_bswap32_i64, { "r", "0" } },
2225 { INDEX_op_bswap64_i64, { "r", "0" } },
2226 { INDEX_op_neg_i64, { "r", "0" } },
2227 { INDEX_op_not_i64, { "r", "0" } },
2229 { INDEX_op_ext8s_i64, { "r", "r" } },
2230 { INDEX_op_ext16s_i64, { "r", "r" } },
2231 { INDEX_op_ext32s_i64, { "r", "r" } },
2232 { INDEX_op_ext8u_i64, { "r", "r" } },
2233 { INDEX_op_ext16u_i64, { "r", "r" } },
2234 { INDEX_op_ext32u_i64, { "r", "r" } },
2236 { INDEX_op_ext_i32_i64, { "r", "r" } },
2237 { INDEX_op_extu_i32_i64, { "r", "r" } },
2239 { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
2240 { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
2242 { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
2243 { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
2244 { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
2245 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
2246 #endif
2248 #if TCG_TARGET_REG_BITS == 64
2249 { INDEX_op_qemu_ld_i32, { "r", "L" } },
2250 { INDEX_op_qemu_st_i32, { "L", "L" } },
2251 { INDEX_op_qemu_ld_i64, { "r", "L" } },
2252 { INDEX_op_qemu_st_i64, { "L", "L" } },
2253 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
2254 { INDEX_op_qemu_ld_i32, { "r", "L" } },
2255 { INDEX_op_qemu_st_i32, { "L", "L" } },
2256 { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
2257 { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
2258 #else
2259 { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
2260 { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
2261 { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
2262 { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
2263 #endif
2264 { -1 },
2265 };
2267 static int tcg_target_callee_save_regs[] = {
2268 #if TCG_TARGET_REG_BITS == 64
2269 TCG_REG_RBP,
2270 TCG_REG_RBX,
2271 #if defined(_WIN64)
2272 TCG_REG_RDI,
2273 TCG_REG_RSI,
2274 #endif
2275 TCG_REG_R12,
2276 TCG_REG_R13,
2277 TCG_REG_R14, /* Currently used for the global env. */
2278 TCG_REG_R15,
2279 #else
2280 TCG_REG_EBP, /* Currently used for the global env. */
2281 TCG_REG_EBX,
2282 TCG_REG_ESI,
2283 TCG_REG_EDI,
2284 #endif
2285 };
2287 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
2288 and tcg_register_jit. */
2290 #define PUSH_SIZE \
2291 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
2292 * (TCG_TARGET_REG_BITS / 8))
2294 #define FRAME_SIZE \
2295 ((PUSH_SIZE \
2296 + TCG_STATIC_CALL_ARGS_SIZE \
2297 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2298 + TCG_TARGET_STACK_ALIGN - 1) \
2299 & ~(TCG_TARGET_STACK_ALIGN - 1))
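/* Worked example of the frame arithmetic, assuming the usual TCG values
   of TCG_STATIC_CALL_ARGS_SIZE (128) and CPU_TEMP_BUF_NLONGS (128): on
   a non-Windows 64-bit host there are 6 callee-saved registers, so
   PUSH_SIZE = (1 + 6) * 8 = 56 bytes (the extra 1 covers the return
   address pushed by the caller), and
   FRAME_SIZE = align16(56 + 128 + 128 * 8) = 1216 bytes.  The prologue
   below then subtracts only FRAME_SIZE - PUSH_SIZE from the stack
   pointer, since the pushes have already moved it. */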
2301 /* Generate global QEMU prologue and epilogue code */
2302 static void tcg_target_qemu_prologue(TCGContext *s)
2303 {
2304 int i, stack_addend;
2306 /* TB prologue */
2308 /* Reserve some stack space, also for TCG temps. */
2309 stack_addend = FRAME_SIZE - PUSH_SIZE;
2310 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2311 CPU_TEMP_BUF_NLONGS * sizeof(long));
2313 /* Save all callee saved registers. */
2314 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2315 tcg_out_push(s, tcg_target_callee_save_regs[i]);
2316 }
2318 #if TCG_TARGET_REG_BITS == 32
2319 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
2320 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
2321 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2322 /* jmp *tb. */
2323 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
2324 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
2325 + stack_addend);
2326 #else
2327 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2328 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2329 /* jmp *tb. */
2330 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
2331 #endif
2333 /* TB epilogue */
2334 tb_ret_addr = s->code_ptr;
2336 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
2338 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
2339 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
2340 }
2341 tcg_out_opc(s, OPC_RET, 0, 0, 0);
2343 #if !defined(CONFIG_SOFTMMU)
2344 /* Try to set up a segment register to point to guest_base. */
2345 if (guest_base) {
2346 setup_guest_base_seg();
2347 }
2348 #endif
2349 }
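/* Runtime feature probing: __get_cpuid_max, __cpuid and __cpuid_count
   are the compiler-provided <cpuid.h> wrappers around the CPUID
   instruction.  __get_cpuid_max(0, 0) returns the highest supported
   basic leaf; the leaf-1 and leaf-7 feature words are then tested for
   CMOV, MOVBE, BMI1 and BMI2. */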
2351 static void tcg_target_init(TCGContext *s)
2352 {
2353 #ifdef CONFIG_CPUID_H
2354 unsigned a, b, c, d;
2355 int max = __get_cpuid_max(0, 0);
2357 if (max >= 1) {
2358 __cpuid(1, a, b, c, d);
2359 #ifndef have_cmov
2360 /* For 32-bit, 99% certainty that we're running on hardware that
2361 supports cmov, but we still need to check. In case cmov is not
2362 available, we'll use a small forward branch. */
2363 have_cmov = (d & bit_CMOV) != 0;
2364 #endif
2365 #ifndef have_movbe
2366 /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
2367 need to probe for it. */
2368 have_movbe = (c & bit_MOVBE) != 0;
2369 #endif
2370 }
2372 if (max >= 7) {
2373 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
2374 __cpuid_count(7, 0, a, b, c, d);
2375 #ifdef bit_BMI
2376 have_bmi1 = (b & bit_BMI) != 0;
2377 #endif
2378 #ifndef have_bmi2
2379 have_bmi2 = (b & bit_BMI2) != 0;
2380 #endif
2381 }
2382 #endif
2384 if (TCG_TARGET_REG_BITS == 64) {
2385 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2386 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2387 } else {
2388 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
2389 }
2391 tcg_regset_clear(tcg_target_call_clobber_regs);
2392 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
2393 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
2394 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
2395 if (TCG_TARGET_REG_BITS == 64) {
2396 #if !defined(_WIN64)
2397 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
2398 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
2399 #endif
2400 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
2401 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
2402 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
2403 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
2404 }
2406 tcg_regset_clear(s->reserved_regs);
2407 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2409 tcg_add_target_add_op_defs(x86_op_defs);
2410 }
2412 typedef struct {
2413 DebugFrameHeader h;
2414 uint8_t fde_def_cfa[4];
2415 uint8_t fde_reg_ofs[14];
2416 } DebugFrame;
2418 /* We're expecting a 2-byte uleb128 encoded value. */
2419 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
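/* DWARF CFI encodes offsets as uleb128: 7 payload bits per byte, with
   the high bit set on every byte except the last.  The assertion above
   guarantees FRAME_SIZE fits the fixed two-byte encoding used in
   fde_def_cfa below, namely (FRAME_SIZE & 0x7f) | 0x80 followed by
   FRAME_SIZE >> 7.  In fde_reg_ofs, each 0x80 | regno byte is a
   DW_CFA_offset instruction whose operand is an offset in units of
   data_align. */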
2421 #if !defined(__ELF__)
2422 /* Host machine without ELF. */
2423 #elif TCG_TARGET_REG_BITS == 64
2424 #define ELF_HOST_MACHINE EM_X86_64
2425 static const DebugFrame debug_frame = {
2426 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2427 .h.cie.id = -1,
2428 .h.cie.version = 1,
2429 .h.cie.code_align = 1,
2430 .h.cie.data_align = 0x78, /* sleb128 -8 */
2431 .h.cie.return_column = 16,
2433 /* Total FDE size does not include the "len" member. */
2434 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2436 .fde_def_cfa = {
2437 12, 7, /* DW_CFA_def_cfa %rsp, ... */
2438 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2439 (FRAME_SIZE >> 7)
2440 },
2441 .fde_reg_ofs = {
2442 0x90, 1, /* DW_CFA_offset, %rip, -8 */
2443 /* The following ordering must match tcg_target_callee_save_regs. */
2444 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
2445 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
2446 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
2447 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
2448 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
2449 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
2450 }
2451 };
2452 #else
2453 #define ELF_HOST_MACHINE EM_386
2454 static const DebugFrame debug_frame = {
2455 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2456 .h.cie.id = -1,
2457 .h.cie.version = 1,
2458 .h.cie.code_align = 1,
2459 .h.cie.data_align = 0x7c, /* sleb128 -4 */
2460 .h.cie.return_column = 8,
2462 /* Total FDE size does not include the "len" member. */
2463 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2465 .fde_def_cfa = {
2466 12, 4, /* DW_CFA_def_cfa %esp, ... */
2467 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2468 (FRAME_SIZE >> 7)
2469 },
2470 .fde_reg_ofs = {
2471 0x88, 1, /* DW_CFA_offset, %eip, -4 */
2472 /* The following ordering must match tcg_target_callee_save_regs. */
2473 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
2474 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
2475 0x86, 4, /* DW_CFA_offset, %esi, -16 */
2476 0x87, 5, /* DW_CFA_offset, %edi, -20 */
2477 }
2478 };
2479 #endif
2481 #if defined(ELF_HOST_MACHINE)
2482 void tcg_register_jit(void *buf, size_t buf_size)
2483 {
2484 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2485 }
2486 #endif