tcg/i386/tcg-target.inc.c (from qemu/ar7.git)
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "tcg-pool.inc.c"
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 #if TCG_TARGET_REG_BITS == 64
30 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
31 #else
32 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
33 #endif
34 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
35 "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
36 #if TCG_TARGET_REG_BITS == 64
37 "%xmm8", "%xmm9", "%xmm10", "%xmm11",
38 "%xmm12", "%xmm13", "%xmm14", "%xmm15",
39 #endif
41 #endif
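/* A note on the allocation order below (a summary, not upstream commentary):
   the call-saved registers (%rbp, %rbx, %r12-%r15 on x86-64; %ebx, %esi,
   %edi, %ebp on i386) are listed first so that values held in TCG
   temporaries are more likely to survive calls to helper functions without
   spilling; the call-clobbered and argument registers come last.  */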
43 static const int tcg_target_reg_alloc_order[] = {
44 #if TCG_TARGET_REG_BITS == 64
45 TCG_REG_RBP,
46 TCG_REG_RBX,
47 TCG_REG_R12,
48 TCG_REG_R13,
49 TCG_REG_R14,
50 TCG_REG_R15,
51 TCG_REG_R10,
52 TCG_REG_R11,
53 TCG_REG_R9,
54 TCG_REG_R8,
55 TCG_REG_RCX,
56 TCG_REG_RDX,
57 TCG_REG_RSI,
58 TCG_REG_RDI,
59 TCG_REG_RAX,
60 #else
61 TCG_REG_EBX,
62 TCG_REG_ESI,
63 TCG_REG_EDI,
64 TCG_REG_EBP,
65 TCG_REG_ECX,
66 TCG_REG_EDX,
67 TCG_REG_EAX,
68 #endif
69 TCG_REG_XMM0,
70 TCG_REG_XMM1,
71 TCG_REG_XMM2,
72 TCG_REG_XMM3,
73 TCG_REG_XMM4,
74 TCG_REG_XMM5,
75 #ifndef _WIN64
 76     /* The Win64 ABI treats xmm6-xmm15 as callee-saved, and we do not save
 77        any of them in the prologue.  Therefore only allow xmm0-xmm5 to be allocated.  */
78 TCG_REG_XMM6,
79 TCG_REG_XMM7,
80 #if TCG_TARGET_REG_BITS == 64
81 TCG_REG_XMM8,
82 TCG_REG_XMM9,
83 TCG_REG_XMM10,
84 TCG_REG_XMM11,
85 TCG_REG_XMM12,
86 TCG_REG_XMM13,
87 TCG_REG_XMM14,
88 TCG_REG_XMM15,
89 #endif
90 #endif
93 static const int tcg_target_call_iarg_regs[] = {
94 #if TCG_TARGET_REG_BITS == 64
95 #if defined(_WIN64)
96 TCG_REG_RCX,
97 TCG_REG_RDX,
98 #else
99 TCG_REG_RDI,
100 TCG_REG_RSI,
101 TCG_REG_RDX,
102 TCG_REG_RCX,
103 #endif
104 TCG_REG_R8,
105 TCG_REG_R9,
106 #else
 107     /* 32-bit mode uses a stack-based calling convention (GCC default). */
108 #endif
111 static const int tcg_target_call_oarg_regs[] = {
112 TCG_REG_EAX,
113 #if TCG_TARGET_REG_BITS == 32
114 TCG_REG_EDX
115 #endif
118 /* Constants we accept. */
119 #define TCG_CT_CONST_S32 0x100
120 #define TCG_CT_CONST_U32 0x200
121 #define TCG_CT_CONST_I32 0x400
122 #define TCG_CT_CONST_WSZ 0x800
124 /* Registers used with L constraint, which are the first argument
125 registers on x86_64, and two random call clobbered registers on
126 i386. */
127 #if TCG_TARGET_REG_BITS == 64
128 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
129 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
130 #else
131 # define TCG_REG_L0 TCG_REG_EAX
132 # define TCG_REG_L1 TCG_REG_EDX
133 #endif
 135 /* The host compiler should supply <cpuid.h> to enable runtime feature
136 detection, as we're not going to go so far as our own inline assembly.
137 If not available, default values will be assumed. */
138 #if defined(CONFIG_CPUID_H)
139 #include "qemu/cpuid.h"
140 #endif
142 /* For 64-bit, we always know that CMOV is available. */
143 #if TCG_TARGET_REG_BITS == 64
144 # define have_cmov 1
145 #elif defined(CONFIG_CPUID_H)
146 static bool have_cmov;
147 #else
148 # define have_cmov 0
149 #endif
151 /* We need these symbols in tcg-target.h, and we can't properly conditionalize
152 it there. Therefore we always define the variable. */
153 bool have_bmi1;
154 bool have_popcnt;
155 bool have_avx1;
156 bool have_avx2;
158 #ifdef CONFIG_CPUID_H
159 static bool have_movbe;
160 static bool have_bmi2;
161 static bool have_lzcnt;
162 #else
163 # define have_movbe 0
164 # define have_bmi2 0
165 # define have_lzcnt 0
166 #endif
168 static tcg_insn_unit *tb_ret_addr;
170 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
171 intptr_t value, intptr_t addend)
173 value += addend;
174 switch(type) {
175 case R_386_PC32:
176 value -= (uintptr_t)code_ptr;
177 if (value != (int32_t)value) {
178 return false;
180 /* FALLTHRU */
181 case R_386_32:
182 tcg_patch32(code_ptr, value);
183 break;
184 case R_386_PC8:
185 value -= (uintptr_t)code_ptr;
186 if (value != (int8_t)value) {
187 return false;
189 tcg_patch8(code_ptr, value);
190 break;
191 default:
192 tcg_abort();
194 return true;
197 #if TCG_TARGET_REG_BITS == 64
198 #define ALL_GENERAL_REGS 0x0000ffffu
199 #define ALL_VECTOR_REGS 0xffff0000u
200 #else
201 #define ALL_GENERAL_REGS 0x000000ffu
202 #define ALL_VECTOR_REGS 0x00ff0000u
203 #endif
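/* In this backend's register numbering, the general purpose registers occupy
   indexes 0-15 (only 0-7 usable on i386) and the xmm registers indexes 16
   and up, which is what the two masks above select.  */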
205 /* parse target specific constraints */
206 static const char *target_parse_constraint(TCGArgConstraint *ct,
207 const char *ct_str, TCGType type)
209 switch(*ct_str++) {
210 case 'a':
211 ct->ct |= TCG_CT_REG;
212 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
213 break;
214 case 'b':
215 ct->ct |= TCG_CT_REG;
216 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
217 break;
218 case 'c':
219 ct->ct |= TCG_CT_REG;
220 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
221 break;
222 case 'd':
223 ct->ct |= TCG_CT_REG;
224 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
225 break;
226 case 'S':
227 ct->ct |= TCG_CT_REG;
228 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
229 break;
230 case 'D':
231 ct->ct |= TCG_CT_REG;
232 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
233 break;
234 case 'q':
235 /* A register that can be used as a byte operand. */
236 ct->ct |= TCG_CT_REG;
237 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
238 break;
239 case 'Q':
240 /* A register with an addressable second byte (e.g. %ah). */
241 ct->ct |= TCG_CT_REG;
242 ct->u.regs = 0xf;
243 break;
244 case 'r':
245 /* A general register. */
246 ct->ct |= TCG_CT_REG;
247 ct->u.regs |= ALL_GENERAL_REGS;
248 break;
249 case 'W':
250 /* With TZCNT/LZCNT, we can have operand-size as an input. */
251 ct->ct |= TCG_CT_CONST_WSZ;
252 break;
253 case 'x':
254 /* A vector register. */
255 ct->ct |= TCG_CT_REG;
256 ct->u.regs |= ALL_VECTOR_REGS;
257 break;
259 /* qemu_ld/st address constraint */
260 case 'L':
261 ct->ct |= TCG_CT_REG;
262 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
263 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
264 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
265 break;
267 case 'e':
268 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
269 break;
270 case 'Z':
271 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
272 break;
273 case 'I':
274 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
275 break;
277 default:
278 return NULL;
280 return ct_str;
283 /* test if a constant matches the constraint */
284 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
285 const TCGArgConstraint *arg_ct)
287 int ct = arg_ct->ct;
288 if (ct & TCG_CT_CONST) {
289 return 1;
291 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
292 return 1;
294 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
295 return 1;
297 if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
298 return 1;
300 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
301 return 1;
303 return 0;
306 # define LOWREGMASK(x) ((x) & 7)
308 #define P_EXT 0x100 /* 0x0f opcode prefix */
309 #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
310 #define P_DATA16 0x400 /* 0x66 opcode prefix */
311 #if TCG_TARGET_REG_BITS == 64
312 # define P_REXW 0x1000 /* Set REX.W = 1 */
313 # define P_REXB_R 0x2000 /* REG field as byte register */
314 # define P_REXB_RM 0x4000 /* R/M field as byte register */
315 # define P_GS 0x8000 /* gs segment override */
316 #else
317 # define P_REXW 0
318 # define P_REXB_R 0
319 # define P_REXB_RM 0
320 # define P_GS 0
321 #endif
322 #define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */
323 #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */
324 #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */
325 #define P_VEXL 0x80000 /* Set VEX.L = 1 */
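/* The P_* flags above live in the high bits of the opcode value and are
   translated back into the corresponding prefix bytes by tcg_out_opc and
   tcg_out_vex_opc below.  For example, an opcode defined as
   (0x6f | P_EXT | P_DATA16), when emitted through tcg_out_opc, becomes
   66 0f 6f: the 0x66 operand-size prefix, the 0x0f escape byte, then the
   opcode itself.  */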
327 #define OPC_ARITH_EvIz (0x81)
328 #define OPC_ARITH_EvIb (0x83)
329 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
330 #define OPC_ANDN (0xf2 | P_EXT38)
331 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
332 #define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16)
333 #define OPC_BSF (0xbc | P_EXT)
334 #define OPC_BSR (0xbd | P_EXT)
335 #define OPC_BSWAP (0xc8 | P_EXT)
336 #define OPC_CALL_Jz (0xe8)
337 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
338 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
339 #define OPC_DEC_r32 (0x48)
340 #define OPC_IMUL_GvEv (0xaf | P_EXT)
341 #define OPC_IMUL_GvEvIb (0x6b)
342 #define OPC_IMUL_GvEvIz (0x69)
343 #define OPC_INC_r32 (0x40)
344 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
345 #define OPC_JCC_short (0x70) /* ... plus condition code */
346 #define OPC_JMP_long (0xe9)
347 #define OPC_JMP_short (0xeb)
348 #define OPC_LEA (0x8d)
349 #define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3)
350 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
351 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
352 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
353 #define OPC_MOVB_EvIz (0xc6)
354 #define OPC_MOVL_EvIz (0xc7)
355 #define OPC_MOVL_Iv (0xb8)
356 #define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
357 #define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
358 #define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16)
359 #define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16)
360 #define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2)
361 #define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
362 #define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
363 #define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
364 #define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
365 #define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3)
366 #define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16)
367 #define OPC_MOVSBL (0xbe | P_EXT)
368 #define OPC_MOVSWL (0xbf | P_EXT)
369 #define OPC_MOVSLQ (0x63 | P_REXW)
370 #define OPC_MOVZBL (0xb6 | P_EXT)
371 #define OPC_MOVZWL (0xb7 | P_EXT)
372 #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
373 #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
374 #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
375 #define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16)
376 #define OPC_PADDB (0xfc | P_EXT | P_DATA16)
377 #define OPC_PADDW (0xfd | P_EXT | P_DATA16)
378 #define OPC_PADDD (0xfe | P_EXT | P_DATA16)
379 #define OPC_PADDQ (0xd4 | P_EXT | P_DATA16)
380 #define OPC_PAND (0xdb | P_EXT | P_DATA16)
381 #define OPC_PANDN (0xdf | P_EXT | P_DATA16)
382 #define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16)
383 #define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16)
384 #define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16)
385 #define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16)
386 #define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16)
387 #define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16)
388 #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16)
389 #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16)
390 #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16)
391 #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
392 #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
393 #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
394 #define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16)
395 #define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16)
396 #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
397 #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
398 #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
399 #define OPC_POR (0xeb | P_EXT | P_DATA16)
400 #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
401 #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
402 #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2)
403 #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3)
404 #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
405 #define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
406 #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
407 #define OPC_PSUBB (0xf8 | P_EXT | P_DATA16)
408 #define OPC_PSUBW (0xf9 | P_EXT | P_DATA16)
409 #define OPC_PSUBD (0xfa | P_EXT | P_DATA16)
410 #define OPC_PSUBQ (0xfb | P_EXT | P_DATA16)
411 #define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16)
412 #define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16)
413 #define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16)
414 #define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16)
415 #define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16)
416 #define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16)
417 #define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16)
418 #define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16)
419 #define OPC_PXOR (0xef | P_EXT | P_DATA16)
420 #define OPC_POP_r32 (0x58)
421 #define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3)
422 #define OPC_PUSH_r32 (0x50)
423 #define OPC_PUSH_Iv (0x68)
424 #define OPC_PUSH_Ib (0x6a)
425 #define OPC_RET (0xc3)
426 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
427 #define OPC_SHIFT_1 (0xd1)
428 #define OPC_SHIFT_Ib (0xc1)
429 #define OPC_SHIFT_cl (0xd3)
430 #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
431 #define OPC_SHUFPS (0xc6 | P_EXT)
432 #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
433 #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
434 #define OPC_TESTL (0x85)
435 #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
436 #define OPC_UD2 (0x0b | P_EXT)
437 #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
438 #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
439 #define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
440 #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
441 #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
442 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
443 #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
444 #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
445 #define OPC_VZEROUPPER (0x77 | P_EXT)
446 #define OPC_XCHG_ax_r32 (0x90)
448 #define OPC_GRP3_Ev (0xf7)
449 #define OPC_GRP5 (0xff)
450 #define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
452 /* Group 1 opcode extensions for 0x80-0x83.
453 These are also used as modifiers for OPC_ARITH. */
454 #define ARITH_ADD 0
455 #define ARITH_OR 1
456 #define ARITH_ADC 2
457 #define ARITH_SBB 3
458 #define ARITH_AND 4
459 #define ARITH_SUB 5
460 #define ARITH_XOR 6
461 #define ARITH_CMP 7
463 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
464 #define SHIFT_ROL 0
465 #define SHIFT_ROR 1
466 #define SHIFT_SHL 4
467 #define SHIFT_SHR 5
468 #define SHIFT_SAR 7
470 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
471 #define EXT3_NOT 2
472 #define EXT3_NEG 3
473 #define EXT3_MUL 4
474 #define EXT3_IMUL 5
475 #define EXT3_DIV 6
476 #define EXT3_IDIV 7
478 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
479 #define EXT5_INC_Ev 0
480 #define EXT5_DEC_Ev 1
481 #define EXT5_CALLN_Ev 2
482 #define EXT5_JMPN_Ev 4
484 /* Condition codes to be added to OPC_JCC_{long,short}. */
485 #define JCC_JMP (-1)
486 #define JCC_JO 0x0
487 #define JCC_JNO 0x1
488 #define JCC_JB 0x2
489 #define JCC_JAE 0x3
490 #define JCC_JE 0x4
491 #define JCC_JNE 0x5
492 #define JCC_JBE 0x6
493 #define JCC_JA 0x7
494 #define JCC_JS 0x8
495 #define JCC_JNS 0x9
496 #define JCC_JP 0xa
497 #define JCC_JNP 0xb
498 #define JCC_JL 0xc
499 #define JCC_JGE 0xd
500 #define JCC_JLE 0xe
501 #define JCC_JG 0xf
503 static const uint8_t tcg_cond_to_jcc[] = {
504 [TCG_COND_EQ] = JCC_JE,
505 [TCG_COND_NE] = JCC_JNE,
506 [TCG_COND_LT] = JCC_JL,
507 [TCG_COND_GE] = JCC_JGE,
508 [TCG_COND_LE] = JCC_JLE,
509 [TCG_COND_GT] = JCC_JG,
510 [TCG_COND_LTU] = JCC_JB,
511 [TCG_COND_GEU] = JCC_JAE,
512 [TCG_COND_LEU] = JCC_JBE,
513 [TCG_COND_GTU] = JCC_JA,
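/* Signed TCG conditions map to the JL/JLE/JG/JGE family and unsigned ones
   to JB/JBE/JA/JAE, matching the flag semantics of CMP.  */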
516 #if TCG_TARGET_REG_BITS == 64
517 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
519 int rex;
521 if (opc & P_GS) {
522 tcg_out8(s, 0x65);
524 if (opc & P_DATA16) {
525 /* We should never be asking for both 16 and 64-bit operation. */
526 tcg_debug_assert((opc & P_REXW) == 0);
527 tcg_out8(s, 0x66);
529 if (opc & P_SIMDF3) {
530 tcg_out8(s, 0xf3);
531 } else if (opc & P_SIMDF2) {
532 tcg_out8(s, 0xf2);
535 rex = 0;
536 rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
537 rex |= (r & 8) >> 1; /* REX.R */
538 rex |= (x & 8) >> 2; /* REX.X */
539 rex |= (rm & 8) >> 3; /* REX.B */
541 /* P_REXB_{R,RM} indicates that the given register is the low byte.
542 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
543 as otherwise the encoding indicates %[abcd]h. Note that the values
544 that are ORed in merely indicate that the REX byte must be present;
545 those bits get discarded in output. */
546 rex |= opc & (r >= 4 ? P_REXB_R : 0);
547 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
549 if (rex) {
550 tcg_out8(s, (uint8_t)(rex | 0x40));
553 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
554 tcg_out8(s, 0x0f);
555 if (opc & P_EXT38) {
556 tcg_out8(s, 0x38);
557 } else if (opc & P_EXT3A) {
558 tcg_out8(s, 0x3a);
562 tcg_out8(s, opc);
564 #else
565 static void tcg_out_opc(TCGContext *s, int opc)
567 if (opc & P_DATA16) {
568 tcg_out8(s, 0x66);
570 if (opc & P_SIMDF3) {
571 tcg_out8(s, 0xf3);
572 } else if (opc & P_SIMDF2) {
573 tcg_out8(s, 0xf2);
575 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
576 tcg_out8(s, 0x0f);
577 if (opc & P_EXT38) {
578 tcg_out8(s, 0x38);
579 } else if (opc & P_EXT3A) {
580 tcg_out8(s, 0x3a);
583 tcg_out8(s, opc);
585 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
586 the 32-bit compilation paths. This method works with all versions of gcc,
587 whereas relying on optimization may not be able to exclude them. */
588 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
589 #endif
591 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
593 tcg_out_opc(s, opc, r, rm, 0);
594 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
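/* As a concrete example of the encoding produced here:
   tcg_out_modrm(s, OPC_ADD_GvEv + P_REXW, TCG_REG_RAX, TCG_REG_R8)
   emits 49 03 c0, i.e. "addq %r8, %rax": a REX prefix with W and B set,
   the 0x03 opcode, then a ModRM byte with mod=11, reg=rax, rm=r8.  */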
597 static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
598 int rm, int index)
600 int tmp;
602 /* Use the two byte form if possible, which cannot encode
603 VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
604 if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
605 && ((rm | index) & 8) == 0) {
606 /* Two byte VEX prefix. */
607 tcg_out8(s, 0xc5);
609 tmp = (r & 8 ? 0 : 0x80); /* VEX.R */
610 } else {
611 /* Three byte VEX prefix. */
612 tcg_out8(s, 0xc4);
614 /* VEX.m-mmmm */
615 if (opc & P_EXT3A) {
616 tmp = 3;
617 } else if (opc & P_EXT38) {
618 tmp = 2;
619 } else if (opc & P_EXT) {
620 tmp = 1;
621 } else {
622 g_assert_not_reached();
624 tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */
625 tmp |= (index & 8 ? 0 : 0x40); /* VEX.X */
626 tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
627 tcg_out8(s, tmp);
629 tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
632 tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
633 /* VEX.pp */
634 if (opc & P_DATA16) {
635 tmp |= 1; /* 0x66 */
636 } else if (opc & P_SIMDF3) {
637 tmp |= 2; /* 0xf3 */
638 } else if (opc & P_SIMDF2) {
639 tmp |= 3; /* 0xf2 */
641 tmp |= (~v & 15) << 3; /* VEX.vvvv */
642 tcg_out8(s, tmp);
643 tcg_out8(s, opc);
646 static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
648 tcg_out_vex_opc(s, opc, r, v, rm, 0);
649 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
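/* VEX prefix layout, for reference: the two-byte form is c5 [R' vvvv L pp];
   the three-byte form is c4 [R' X' B' m-mmmm] [W vvvv L pp].  R'/X'/B' are
   the complements of REX.R/X/B, m-mmmm selects the 0f, 0f 38 or 0f 3a
   opcode map, vvvv is the complemented second source register, L selects a
   256-bit operation, and pp encodes the 66/f3/f2 prefix -- exactly the
   fields assembled by tcg_out_vex_opc above.  */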
652 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
 653    Either RM or INDEX may be omitted by passing a negative value.  In 64-bit
654 mode for absolute addresses, ~RM is the size of the immediate operand
655 that will follow the instruction. */
657 static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
658 int shift, intptr_t offset)
660 int mod, len;
662 if (index < 0 && rm < 0) {
663 if (TCG_TARGET_REG_BITS == 64) {
664 /* Try for a rip-relative addressing mode. This has replaced
665 the 32-bit-mode absolute addressing encoding. */
666 intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
667 intptr_t disp = offset - pc;
668 if (disp == (int32_t)disp) {
669 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
670 tcg_out32(s, disp);
671 return;
674 /* Try for an absolute address encoding. This requires the
675 use of the MODRM+SIB encoding and is therefore larger than
676 rip-relative addressing. */
677 if (offset == (int32_t)offset) {
678 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
679 tcg_out8(s, (4 << 3) | 5);
680 tcg_out32(s, offset);
681 return;
684 /* ??? The memory isn't directly addressable. */
685 g_assert_not_reached();
686 } else {
687 /* Absolute address. */
688 tcg_out8(s, (r << 3) | 5);
689 tcg_out32(s, offset);
690 return;
694 /* Find the length of the immediate addend. Note that the encoding
695 that would be used for (%ebp) indicates absolute addressing. */
696 if (rm < 0) {
697 mod = 0, len = 4, rm = 5;
698 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
699 mod = 0, len = 0;
700 } else if (offset == (int8_t)offset) {
701 mod = 0x40, len = 1;
702 } else {
703 mod = 0x80, len = 4;
706 /* Use a single byte MODRM format if possible. Note that the encoding
707 that would be used for %esp is the escape to the two byte form. */
708 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
709 /* Single byte MODRM format. */
710 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
711 } else {
712 /* Two byte MODRM+SIB format. */
714 /* Note that the encoding that would place %esp into the index
715 field indicates no index register. In 64-bit mode, the REX.X
716 bit counts, so %r12 can be used as the index. */
717 if (index < 0) {
718 index = 4;
719 } else {
720 tcg_debug_assert(index != TCG_REG_ESP);
723 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
724 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
727 if (len == 1) {
728 tcg_out8(s, offset);
729 } else if (len == 4) {
730 tcg_out32(s, offset);
734 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
735 int index, int shift, intptr_t offset)
737 tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
738 tcg_out_sib_offset(s, r, rm, index, shift, offset);
741 static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
742 int rm, int index, int shift,
743 intptr_t offset)
745 tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
746 tcg_out_sib_offset(s, r, rm, index, shift, offset);
749 /* A simplification of the above with no index or shift. */
750 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
751 int rm, intptr_t offset)
753 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
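/* For example, a simple base+disp8 access such as
   tcg_out_modrm_offset(s, OPC_MOVL_GvEv, TCG_REG_EAX, TCG_REG_EBX, 8)
   comes out as 8b 43 08 on i386, i.e. "movl 8(%ebx), %eax": the opcode,
   a ModRM byte with mod=01 (disp8), reg=eax, rm=ebx, then the 8-bit
   displacement.  */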
756 static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
757 int v, int rm, intptr_t offset)
759 tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
762 /* Output an opcode with an expected reference to the constant pool. */
763 static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
765 tcg_out_opc(s, opc, r, 0, 0);
766 /* Absolute for 32-bit, pc-relative for 64-bit. */
767 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
768 tcg_out32(s, 0);
771 /* Output an opcode with an expected reference to the constant pool. */
772 static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
774 tcg_out_vex_opc(s, opc, r, 0, 0, 0);
775 /* Absolute for 32-bit, pc-relative for 64-bit. */
776 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
777 tcg_out32(s, 0);
780 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
781 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
783 /* Propagate an opcode prefix, such as P_REXW. */
784 int ext = subop & ~0x7;
785 subop &= 0x7;
787 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
790 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
792 int rexw = 0;
794 if (arg == ret) {
795 return;
797 switch (type) {
798 case TCG_TYPE_I64:
799 rexw = P_REXW;
800 /* fallthru */
801 case TCG_TYPE_I32:
802 if (ret < 16) {
803 if (arg < 16) {
804 tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
805 } else {
806 tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
808 } else {
809 if (arg < 16) {
810 tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
811 } else {
812 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
815 break;
817 case TCG_TYPE_V64:
818 tcg_debug_assert(ret >= 16 && arg >= 16);
819 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
820 break;
821 case TCG_TYPE_V128:
822 tcg_debug_assert(ret >= 16 && arg >= 16);
823 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
824 break;
825 case TCG_TYPE_V256:
826 tcg_debug_assert(ret >= 16 && arg >= 16);
827 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
828 break;
830 default:
831 g_assert_not_reached();
835 static void tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
836 TCGReg r, TCGReg a)
838 if (have_avx2) {
839 static const int dup_insn[4] = {
840 OPC_VPBROADCASTB, OPC_VPBROADCASTW,
841 OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
843 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
844 tcg_out_vex_modrm(s, dup_insn[vece] + vex_l, r, 0, a);
845 } else {
846 switch (vece) {
847 case MO_8:
848 /* ??? With zero in a register, use PSHUFB. */
849 tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
850 a = r;
851 /* FALLTHRU */
852 case MO_16:
853 tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
854 a = r;
855 /* FALLTHRU */
856 case MO_32:
857 tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
858 /* imm8 operand: all output lanes selected from input lane 0. */
859 tcg_out8(s, 0);
860 break;
861 case MO_64:
862 tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
863 break;
864 default:
865 g_assert_not_reached();
870 static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
871 TCGReg ret, tcg_target_long arg)
873 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
875 if (arg == 0) {
876 tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
877 return;
879 if (arg == -1) {
880 tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
881 return;
884 if (TCG_TARGET_REG_BITS == 64) {
885 if (type == TCG_TYPE_V64) {
886 tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
887 } else if (have_avx2) {
888 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
889 } else {
890 tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
892 new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
893 } else if (have_avx2) {
894 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
895 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
896 } else {
897 tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy, ret);
898 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
899 tcg_out_dup_vec(s, type, MO_32, ret, ret);
903 static void tcg_out_movi(TCGContext *s, TCGType type,
904 TCGReg ret, tcg_target_long arg)
906 tcg_target_long diff;
908 switch (type) {
909 case TCG_TYPE_I32:
910 #if TCG_TARGET_REG_BITS == 64
911 case TCG_TYPE_I64:
912 #endif
913 if (ret < 16) {
914 break;
916 /* fallthru */
917 case TCG_TYPE_V64:
918 case TCG_TYPE_V128:
919 case TCG_TYPE_V256:
920 tcg_debug_assert(ret >= 16);
921 tcg_out_dupi_vec(s, type, ret, arg);
922 return;
923 default:
924 g_assert_not_reached();
927 if (arg == 0) {
928 tgen_arithr(s, ARITH_XOR, ret, ret);
929 return;
931 if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
932 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
933 tcg_out32(s, arg);
934 return;
936 if (arg == (int32_t)arg) {
937 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
938 tcg_out32(s, arg);
939 return;
942 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
943 diff = arg - ((uintptr_t)s->code_ptr + 7);
944 if (diff == (int32_t)diff) {
945 tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
946 tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
947 tcg_out32(s, diff);
948 return;
951 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
952 tcg_out64(s, arg);
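/* Thus a constant is emitted in the cheapest available form: XOR for zero,
   a zero-extending movl when the value fits in 32 bits, a sign-extending
   movq with a 32-bit immediate, the 7-byte pc-relative lea tried above,
   and only then the full 10-byte movabsq.  */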
955 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
957 if (val == (int8_t)val) {
958 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
959 tcg_out8(s, val);
960 } else if (val == (int32_t)val) {
961 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
962 tcg_out32(s, val);
963 } else {
964 tcg_abort();
968 static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
 970     /* Given the strength of x86 memory ordering, we only need to care about
971 store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
972 faster than "mfence", so don't bother with the sse insn. */
973 if (a0 & TCG_MO_ST_LD) {
974 tcg_out8(s, 0xf0);
975 tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
976 tcg_out8(s, 0);
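        /* The sequence emitted here is f0 83 0c 24 00, i.e.
           "lock orl $0, (%esp)": lock prefix, 0x83 group-1 opcode,
           ModRM+SIB selecting (%esp), then the zero immediate.  */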
980 static inline void tcg_out_push(TCGContext *s, int reg)
982 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
985 static inline void tcg_out_pop(TCGContext *s, int reg)
987 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
990 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
991 TCGReg arg1, intptr_t arg2)
993 switch (type) {
994 case TCG_TYPE_I32:
995 if (ret < 16) {
996 tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
997 } else {
998 tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
1000 break;
1001 case TCG_TYPE_I64:
1002 if (ret < 16) {
1003 tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
1004 break;
1006 /* FALLTHRU */
1007 case TCG_TYPE_V64:
1008 tcg_debug_assert(ret >= 16);
1009 tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
1010 break;
1011 case TCG_TYPE_V128:
1012 tcg_debug_assert(ret >= 16);
1013 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx, ret, 0, arg1, arg2);
1014 break;
1015 case TCG_TYPE_V256:
1016 tcg_debug_assert(ret >= 16);
1017 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
1018 ret, 0, arg1, arg2);
1019 break;
1020 default:
1021 g_assert_not_reached();
1025 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1026 TCGReg arg1, intptr_t arg2)
1028 switch (type) {
1029 case TCG_TYPE_I32:
1030 if (arg < 16) {
1031 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
1032 } else {
1033 tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
1035 break;
1036 case TCG_TYPE_I64:
1037 if (arg < 16) {
1038 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
1039 break;
1041 /* FALLTHRU */
1042 case TCG_TYPE_V64:
1043 tcg_debug_assert(arg >= 16);
1044 tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
1045 break;
1046 case TCG_TYPE_V128:
1047 tcg_debug_assert(arg >= 16);
1048 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx, arg, 0, arg1, arg2);
1049 break;
1050 case TCG_TYPE_V256:
1051 tcg_debug_assert(arg >= 16);
1052 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
1053 arg, 0, arg1, arg2);
1054 break;
1055 default:
1056 g_assert_not_reached();
1060 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1061 TCGReg base, intptr_t ofs)
1063 int rexw = 0;
1064 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
1065 if (val != (int32_t)val) {
1066 return false;
1068 rexw = P_REXW;
1069 } else if (type != TCG_TYPE_I32) {
1070 return false;
1072 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
1073 tcg_out32(s, val);
1074 return true;
1077 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
1079 /* Propagate an opcode prefix, such as P_DATA16. */
1080 int ext = subopc & ~0x7;
1081 subopc &= 0x7;
1083 if (count == 1) {
1084 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
1085 } else {
1086 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
1087 tcg_out8(s, count);
1091 static inline void tcg_out_bswap32(TCGContext *s, int reg)
1093 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
1096 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
1098 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
1101 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
1103 /* movzbl */
1104 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1105 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
1108 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
1110 /* movsbl */
1111 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1112 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
1115 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
1117 /* movzwl */
1118 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
1121 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
1123 /* movsw[lq] */
1124 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
1127 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
1129 /* 32-bit mov zero extends. */
1130 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
1133 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
1135 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
1138 static inline void tcg_out_bswap64(TCGContext *s, int reg)
1140 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
1143 static void tgen_arithi(TCGContext *s, int c, int r0,
1144 tcg_target_long val, int cf)
1146 int rexw = 0;
1148 if (TCG_TARGET_REG_BITS == 64) {
1149 rexw = c & -8;
1150 c &= 7;
1153 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
1154 partial flags update stalls on Pentium4 and are not recommended
1155 by current Intel optimization manuals. */
1156 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
1157 int is_inc = (c == ARITH_ADD) ^ (val < 0);
1158 if (TCG_TARGET_REG_BITS == 64) {
1159 /* The single-byte increment encodings are re-tasked as the
1160 REX prefixes. Use the MODRM encoding. */
1161 tcg_out_modrm(s, OPC_GRP5 + rexw,
1162 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
1163 } else {
1164 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
1166 return;
1169 if (c == ARITH_AND) {
1170 if (TCG_TARGET_REG_BITS == 64) {
1171 if (val == 0xffffffffu) {
1172 tcg_out_ext32u(s, r0, r0);
1173 return;
1175 if (val == (uint32_t)val) {
1176 /* AND with no high bits set can use a 32-bit operation. */
1177 rexw = 0;
1180 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
1181 tcg_out_ext8u(s, r0, r0);
1182 return;
1184 if (val == 0xffffu) {
1185 tcg_out_ext16u(s, r0, r0);
1186 return;
1190 if (val == (int8_t)val) {
1191 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
1192 tcg_out8(s, val);
1193 return;
1195 if (rexw == 0 || val == (int32_t)val) {
1196 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
1197 tcg_out32(s, val);
1198 return;
1201 tcg_abort();
1204 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1206 if (val != 0) {
1207 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
1211 /* Use SMALL != 0 to force a short forward branch. */
1212 static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
1214 int32_t val, val1;
1216 if (l->has_value) {
1217 val = tcg_pcrel_diff(s, l->u.value_ptr);
1218 val1 = val - 2;
1219 if ((int8_t)val1 == val1) {
1220 if (opc == -1) {
1221 tcg_out8(s, OPC_JMP_short);
1222 } else {
1223 tcg_out8(s, OPC_JCC_short + opc);
1225 tcg_out8(s, val1);
1226 } else {
1227 if (small) {
1228 tcg_abort();
1230 if (opc == -1) {
1231 tcg_out8(s, OPC_JMP_long);
1232 tcg_out32(s, val - 5);
1233 } else {
1234 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1235 tcg_out32(s, val - 6);
1238 } else if (small) {
1239 if (opc == -1) {
1240 tcg_out8(s, OPC_JMP_short);
1241 } else {
1242 tcg_out8(s, OPC_JCC_short + opc);
1244 tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
1245 s->code_ptr += 1;
1246 } else {
1247 if (opc == -1) {
1248 tcg_out8(s, OPC_JMP_long);
1249 } else {
1250 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1252 tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
1253 s->code_ptr += 4;
1257 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
1258 int const_arg2, int rexw)
1260 if (const_arg2) {
1261 if (arg2 == 0) {
1262 /* test r, r */
1263 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
1264 } else {
1265 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
1267 } else {
1268 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
1272 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
1273 TCGArg arg1, TCGArg arg2, int const_arg2,
1274 TCGLabel *label, int small)
1276 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1277 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1280 #if TCG_TARGET_REG_BITS == 64
1281 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
1282 TCGArg arg1, TCGArg arg2, int const_arg2,
1283 TCGLabel *label, int small)
1285 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1286 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1288 #else
1289 /* XXX: we implement it at the target level to avoid having to
1290 handle cross basic blocks temporaries */
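/* The double-word comparison below is decomposed into 32-bit pieces: EQ/NE
   simply test both halves for (in)equality, while an ordering test such as
   LT branches to the target when the high words compare LT (signed), jumps
   past the low-word test when they are unequal, and otherwise decides on an
   unsigned comparison of the low words.  */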
1291 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1292 const int *const_args, int small)
1294 TCGLabel *label_next = gen_new_label();
1295 TCGLabel *label_this = arg_label(args[5]);
1297 switch(args[4]) {
1298 case TCG_COND_EQ:
1299 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1300 label_next, 1);
1301 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
1302 label_this, small);
1303 break;
1304 case TCG_COND_NE:
1305 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1306 label_this, small);
1307 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
1308 label_this, small);
1309 break;
1310 case TCG_COND_LT:
1311 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1312 label_this, small);
1313 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1314 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1315 label_this, small);
1316 break;
1317 case TCG_COND_LE:
1318 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1319 label_this, small);
1320 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1321 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1322 label_this, small);
1323 break;
1324 case TCG_COND_GT:
1325 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1326 label_this, small);
1327 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1328 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1329 label_this, small);
1330 break;
1331 case TCG_COND_GE:
1332 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1333 label_this, small);
1334 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1335 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1336 label_this, small);
1337 break;
1338 case TCG_COND_LTU:
1339 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1340 label_this, small);
1341 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1342 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1343 label_this, small);
1344 break;
1345 case TCG_COND_LEU:
1346 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1347 label_this, small);
1348 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1349 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1350 label_this, small);
1351 break;
1352 case TCG_COND_GTU:
1353 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1354 label_this, small);
1355 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1356 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1357 label_this, small);
1358 break;
1359 case TCG_COND_GEU:
1360 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1361 label_this, small);
1362 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1363 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1364 label_this, small);
1365 break;
1366 default:
1367 tcg_abort();
1369 tcg_out_label(s, label_next, s->code_ptr);
1371 #endif
1373 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
1374 TCGArg arg1, TCGArg arg2, int const_arg2)
1376 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1377 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1378 tcg_out_ext8u(s, dest, dest);
1381 #if TCG_TARGET_REG_BITS == 64
1382 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
1383 TCGArg arg1, TCGArg arg2, int const_arg2)
1385 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1386 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1387 tcg_out_ext8u(s, dest, dest);
1389 #else
1390 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1391 const int *const_args)
1393 TCGArg new_args[6];
1394 TCGLabel *label_true, *label_over;
1396 memcpy(new_args, args+1, 5*sizeof(TCGArg));
1398 if (args[0] == args[1] || args[0] == args[2]
1399 || (!const_args[3] && args[0] == args[3])
1400 || (!const_args[4] && args[0] == args[4])) {
1401 /* When the destination overlaps with one of the argument
1402 registers, don't do anything tricky. */
1403 label_true = gen_new_label();
1404 label_over = gen_new_label();
1406 new_args[5] = label_arg(label_true);
1407 tcg_out_brcond2(s, new_args, const_args+1, 1);
1409 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1410 tcg_out_jxx(s, JCC_JMP, label_over, 1);
1411 tcg_out_label(s, label_true, s->code_ptr);
1413 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
1414 tcg_out_label(s, label_over, s->code_ptr);
1415 } else {
1416 /* When the destination does not overlap one of the arguments,
1417 clear the destination first, jump if cond false, and emit an
1418 increment in the true case. This results in smaller code. */
1420 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1422 label_over = gen_new_label();
1423 new_args[4] = tcg_invert_cond(new_args[4]);
1424 new_args[5] = label_arg(label_over);
1425 tcg_out_brcond2(s, new_args, const_args+1, 1);
1427 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
1428 tcg_out_label(s, label_over, s->code_ptr);
1431 #endif
1433 static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
1434 TCGReg dest, TCGReg v1)
1436 if (have_cmov) {
1437 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
1438 } else {
1439 TCGLabel *over = gen_new_label();
1440 tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
1441 tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
1442 tcg_out_label(s, over, s->code_ptr);
1446 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
1447 TCGReg c1, TCGArg c2, int const_c2,
1448 TCGReg v1)
1450 tcg_out_cmp(s, c1, c2, const_c2, 0);
1451 tcg_out_cmov(s, cond, 0, dest, v1);
1454 #if TCG_TARGET_REG_BITS == 64
1455 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
1456 TCGReg c1, TCGArg c2, int const_c2,
1457 TCGReg v1)
1459 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
1460 tcg_out_cmov(s, cond, P_REXW, dest, v1);
1462 #endif
1464 static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1465 TCGArg arg2, bool const_a2)
1467 if (have_bmi1) {
1468 tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
1469 if (const_a2) {
1470 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1471 } else {
1472 tcg_debug_assert(dest != arg2);
1473 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1475 } else {
1476 tcg_debug_assert(dest != arg2);
1477 tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
1478 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1482 static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1483 TCGArg arg2, bool const_a2)
1485 if (have_lzcnt) {
1486 tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
1487 if (const_a2) {
1488 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1489 } else {
1490 tcg_debug_assert(dest != arg2);
1491 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1493 } else {
1494 tcg_debug_assert(!const_a2);
1495 tcg_debug_assert(dest != arg1);
1496 tcg_debug_assert(dest != arg2);
1498 /* Recall that the output of BSR is the index not the count. */
1499 tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
1500 tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
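        /* For 0 <= x <= width-1, x ^ (width-1) == (width-1) - x, which
           turns the bit index returned by BSR into a leading-zero count. */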
1502 /* Since we have destroyed the flags from BSR, we have to re-test. */
1503 tcg_out_cmp(s, arg1, 0, 1, rexw);
1504 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1508 static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
1510 intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
1512 if (disp == (int32_t)disp) {
1513 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1514 tcg_out32(s, disp);
1515 } else {
1516 /* rip-relative addressing into the constant pool.
 1517            This is 6 + 8 = 14 bytes, as compared to using an
 1518            immediate load 10 + 6 = 16 bytes, plus we may
1519 be able to re-use the pool constant for more calls. */
1520 tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
1521 tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
1522 new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
1523 tcg_out32(s, 0);
1527 static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1529 tcg_out_branch(s, 1, dest);
1532 static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
1534 tcg_out_branch(s, 0, dest);
1537 static void tcg_out_nopn(TCGContext *s, int n)
1539 int i;
1540 /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
1541 * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
1542 * duplicate prefix, and all of the interesting recent cores can
1543 * decode and discard the duplicates in a single cycle.
1545 tcg_debug_assert(n >= 1);
1546 for (i = 1; i < n; ++i) {
1547 tcg_out8(s, 0x66);
1549 tcg_out8(s, 0x90);
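    /* For example, tcg_out_nopn(s, 3) emits 66 66 90, a three-byte nop.  */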
1552 #if defined(CONFIG_SOFTMMU)
1553 #include "tcg-ldst.inc.c"
1555 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1556 * int mmu_idx, uintptr_t ra)
1558 static void * const qemu_ld_helpers[16] = {
1559 [MO_UB] = helper_ret_ldub_mmu,
1560 [MO_LEUW] = helper_le_lduw_mmu,
1561 [MO_LEUL] = helper_le_ldul_mmu,
1562 [MO_LEQ] = helper_le_ldq_mmu,
1563 [MO_BEUW] = helper_be_lduw_mmu,
1564 [MO_BEUL] = helper_be_ldul_mmu,
1565 [MO_BEQ] = helper_be_ldq_mmu,
1568 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1569 * uintxx_t val, int mmu_idx, uintptr_t ra)
1571 static void * const qemu_st_helpers[16] = {
1572 [MO_UB] = helper_ret_stb_mmu,
1573 [MO_LEUW] = helper_le_stw_mmu,
1574 [MO_LEUL] = helper_le_stl_mmu,
1575 [MO_LEQ] = helper_le_stq_mmu,
1576 [MO_BEUW] = helper_be_stw_mmu,
1577 [MO_BEUL] = helper_be_stl_mmu,
1578 [MO_BEQ] = helper_be_stq_mmu,
1581 /* Perform the TLB load and compare.
1583 Inputs:
1584 ADDRLO and ADDRHI contain the low and high part of the address.
1586 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1588 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1589 This should be offsetof addr_read or addr_write.
1591 Outputs:
1592 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1593 positions of the displacements of forward jumps to the TLB miss case.
1595 Second argument register is loaded with the low part of the address.
1596 In the TLB hit case, it has been adjusted as indicated by the TLB
1597 and so is a host address. In the TLB miss case, it continues to
1598 hold a guest address.
1600 First argument register is clobbered. */
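/* Roughly, the fast path emitted below is: copy the guest address into r0;
   shift r0 right by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS and AND it with
   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS to index the TLB; in parallel
   copy (or LEA-adjust, when the access may cross a page) the address into
   r1 and AND it with TARGET_PAGE_MASK | a_mask; LEA the CPUTLBEntry
   address into r0 from env + r0 + the tlb_table offset; compare r1 with
   the addr_read/addr_write slot; reload r1 with the full address and
   branch to the slow path on mismatch; on a hit, add the entry's addend to
   r1 to form the host address.  */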
1602 static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1603 int mem_index, TCGMemOp opc,
1604 tcg_insn_unit **label_ptr, int which)
1606 const TCGReg r0 = TCG_REG_L0;
1607 const TCGReg r1 = TCG_REG_L1;
1608 TCGType ttype = TCG_TYPE_I32;
1609 TCGType tlbtype = TCG_TYPE_I32;
1610 int trexw = 0, hrexw = 0, tlbrexw = 0;
1611 unsigned a_bits = get_alignment_bits(opc);
1612 unsigned s_bits = opc & MO_SIZE;
1613 unsigned a_mask = (1 << a_bits) - 1;
1614 unsigned s_mask = (1 << s_bits) - 1;
1615 target_ulong tlb_mask;
1617 if (TCG_TARGET_REG_BITS == 64) {
1618 if (TARGET_LONG_BITS == 64) {
1619 ttype = TCG_TYPE_I64;
1620 trexw = P_REXW;
1622 if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1623 hrexw = P_REXW;
1624 if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
1625 tlbtype = TCG_TYPE_I64;
1626 tlbrexw = P_REXW;
1631 tcg_out_mov(s, tlbtype, r0, addrlo);
1632 /* If the required alignment is at least as large as the access, simply
1633 copy the address and mask. For lesser alignments, check that we don't
1634 cross pages for the complete access. */
1635 if (a_bits >= s_bits) {
1636 tcg_out_mov(s, ttype, r1, addrlo);
1637 } else {
1638 tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
1640 tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
1642 tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
1643 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1645 tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
1646 tgen_arithi(s, ARITH_AND + tlbrexw, r0,
1647 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
1649 tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
1650 offsetof(CPUArchState, tlb_table[mem_index][0])
1651 + which);
1653 /* cmp 0(r0), r1 */
1654 tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
1656 /* Prepare for both the fast path add of the tlb addend, and the slow
1657 path function argument setup. */
1658 tcg_out_mov(s, ttype, r1, addrlo);
1660 /* jne slow_path */
1661 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1662 label_ptr[0] = s->code_ptr;
1663 s->code_ptr += 4;
1665 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1666 /* cmp 4(r0), addrhi */
1667 tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
1669 /* jne slow_path */
1670 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1671 label_ptr[1] = s->code_ptr;
1672 s->code_ptr += 4;
1675 /* TLB Hit. */
1677 /* add addend(r0), r1 */
1678 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1679 offsetof(CPUTLBEntry, addend) - which);
1683 * Record the context of a call to the out of line helper code for the slow path
1684 * for a load or store, so that we can later generate the correct helper code
1686 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1687 TCGMemOpIdx oi,
1688 TCGReg datalo, TCGReg datahi,
1689 TCGReg addrlo, TCGReg addrhi,
1690 tcg_insn_unit *raddr,
1691 tcg_insn_unit **label_ptr)
1693 TCGLabelQemuLdst *label = new_ldst_label(s);
1695 label->is_ld = is_ld;
1696 label->oi = oi;
1697 label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1698 label->datalo_reg = datalo;
1699 label->datahi_reg = datahi;
1700 label->addrlo_reg = addrlo;
1701 label->addrhi_reg = addrhi;
1702 label->raddr = raddr;
1703 label->label_ptr[0] = label_ptr[0];
1704 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1705 label->label_ptr[1] = label_ptr[1];
1710 * Generate code for the slow path for a load at the end of block
1712 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1714 TCGMemOpIdx oi = l->oi;
1715 TCGMemOp opc = get_memop(oi);
1716 TCGReg data_reg;
1717 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1718 int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
1720 /* resolve label address */
1721 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1722 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1723 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1726 if (TCG_TARGET_REG_BITS == 32) {
1727 int ofs = 0;
1729 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1730 ofs += 4;
1732 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1733 ofs += 4;
1735 if (TARGET_LONG_BITS == 64) {
1736 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1737 ofs += 4;
1740 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1741 ofs += 4;
1743 tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
1744 } else {
1745 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1746 /* The second argument is already loaded with addrlo. */
1747 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
1748 tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1749 (uintptr_t)l->raddr);
1752 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1754 data_reg = l->datalo_reg;
1755 switch (opc & MO_SSIZE) {
1756 case MO_SB:
1757 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
1758 break;
1759 case MO_SW:
1760 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
1761 break;
1762 #if TCG_TARGET_REG_BITS == 64
1763 case MO_SL:
1764 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1765 break;
1766 #endif
1767 case MO_UB:
1768 case MO_UW:
1769 /* Note that the helpers have zero-extended to tcg_target_long. */
1770 case MO_UL:
1771 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1772 break;
1773 case MO_Q:
1774 if (TCG_TARGET_REG_BITS == 64) {
1775 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1776 } else if (data_reg == TCG_REG_EDX) {
1777 /* xchg %edx, %eax */
1778 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1779 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1780 } else {
1781 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1782 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1784 break;
1785 default:
1786 tcg_abort();
 1789     /* Jump to the code corresponding to next IR of qemu_ld */
1790 tcg_out_jmp(s, l->raddr);
1794 * Generate code for the slow path for a store at the end of block
1796 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1798 TCGMemOpIdx oi = l->oi;
1799 TCGMemOp opc = get_memop(oi);
1800 TCGMemOp s_bits = opc & MO_SIZE;
1801 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1802 TCGReg retaddr;
1804 /* resolve label address */
1805 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1806 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1807 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1810 if (TCG_TARGET_REG_BITS == 32) {
1811 int ofs = 0;
1813 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1814 ofs += 4;
1816 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1817 ofs += 4;
1819 if (TARGET_LONG_BITS == 64) {
1820 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1821 ofs += 4;
1824 tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
1825 ofs += 4;
1827 if (s_bits == MO_64) {
1828 tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
1829 ofs += 4;
1832 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1833 ofs += 4;
1835 retaddr = TCG_REG_EAX;
1836 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1837 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
1838 } else {
1839 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1840 /* The second argument is already loaded with addrlo. */
1841 tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1842 tcg_target_call_iarg_regs[2], l->datalo_reg);
1843 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
1845 if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
1846 retaddr = tcg_target_call_iarg_regs[4];
1847 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1848 } else {
1849 retaddr = TCG_REG_RAX;
1850 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1851 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
1852 TCG_TARGET_CALL_STACK_OFFSET);
1856 /* "Tail call" to the helper, with the return address back inline. */
1857 tcg_out_push(s, retaddr);
1858 tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1860 #elif TCG_TARGET_REG_BITS == 32
1861 # define x86_guest_base_seg 0
1862 # define x86_guest_base_index -1
1863 # define x86_guest_base_offset guest_base
1864 #else
1865 static int x86_guest_base_seg;
1866 static int x86_guest_base_index = -1;
1867 static int32_t x86_guest_base_offset;
1868 # if defined(__x86_64__) && defined(__linux__)
1869 # include <asm/prctl.h>
1870 # include <sys/prctl.h>
1871 int arch_prctl(int code, unsigned long addr);
1872 static inline int setup_guest_base_seg(void)
1874 if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
1875 return P_GS;
1877 return 0;
1879 # elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
1880 # include <machine/sysarch.h>
1881 static inline int setup_guest_base_seg(void)
1883 if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
1884 return P_GS;
1886 return 0;
1888 # else
1889 static inline int setup_guest_base_seg(void)
1891 return 0;
1893 # endif
1894 #endif /* SOFTMMU */
1896 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1897 TCGReg base, int index, intptr_t ofs,
1898 int seg, bool is64, TCGMemOp memop)
1900 const TCGMemOp real_bswap = memop & MO_BSWAP;
1901 TCGMemOp bswap = real_bswap;
1902 int rexw = is64 * P_REXW;
1903 int movop = OPC_MOVL_GvEv;
1905 if (have_movbe && real_bswap) {
1906 bswap = 0;
1907 movop = OPC_MOVBE_GyMy;
1910 switch (memop & MO_SSIZE) {
1911 case MO_UB:
1912 tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
1913 base, index, 0, ofs);
1914 break;
1915 case MO_SB:
1916 tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
1917 base, index, 0, ofs);
1918 break;
1919 case MO_UW:
1920 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1921 base, index, 0, ofs);
1922 if (real_bswap) {
1923 tcg_out_rolw_8(s, datalo);
1925 break;
1926 case MO_SW:
1927 if (real_bswap) {
1928 if (have_movbe) {
1929 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
1930 datalo, base, index, 0, ofs);
1931 } else {
1932 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1933 base, index, 0, ofs);
1934 tcg_out_rolw_8(s, datalo);
1936 tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo);
1937 } else {
1938 tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
1939 datalo, base, index, 0, ofs);
1941 break;
1942 case MO_UL:
1943 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
1944 if (bswap) {
1945 tcg_out_bswap32(s, datalo);
1947 break;
1948 #if TCG_TARGET_REG_BITS == 64
1949 case MO_SL:
1950 if (real_bswap) {
1951 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1952 base, index, 0, ofs);
1953 if (bswap) {
1954 tcg_out_bswap32(s, datalo);
1956 tcg_out_ext32s(s, datalo, datalo);
1957 } else {
1958 tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
1959 base, index, 0, ofs);
1961 break;
1962 #endif
1963 case MO_Q:
1964 if (TCG_TARGET_REG_BITS == 64) {
1965 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
1966 base, index, 0, ofs);
1967 if (bswap) {
1968 tcg_out_bswap64(s, datalo);
1970 } else {
1971 if (real_bswap) {
1972 int t = datalo;
1973 datalo = datahi;
1974 datahi = t;
1976 if (base != datalo) {
1977 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1978 base, index, 0, ofs);
1979 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1980 base, index, 0, ofs + 4);
1981 } else {
1982 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1983 base, index, 0, ofs + 4);
1984 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1985 base, index, 0, ofs);
1987 if (bswap) {
1988 tcg_out_bswap32(s, datalo);
1989 tcg_out_bswap32(s, datahi);
1992 break;
1993 default:
1994 tcg_abort();
1998 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1999    EAX. It will be useful once fixed-register globals are less
2000 common. */
2001 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
2003 TCGReg datalo, datahi, addrlo;
2004 TCGReg addrhi __attribute__((unused));
2005 TCGMemOpIdx oi;
2006 TCGMemOp opc;
2007 #if defined(CONFIG_SOFTMMU)
2008 int mem_index;
2009 tcg_insn_unit *label_ptr[2];
2010 #endif
2012 datalo = *args++;
2013 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2014 addrlo = *args++;
2015 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2016 oi = *args++;
2017 opc = get_memop(oi);
2019 #if defined(CONFIG_SOFTMMU)
2020 mem_index = get_mmuidx(oi);
2022 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2023 label_ptr, offsetof(CPUTLBEntry, addr_read));
2025 /* TLB Hit. */
2026 tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
2028 /* Record the current context of a load into ldst label */
2029 add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
2030 s->code_ptr, label_ptr);
2031 #else
2032 tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2033 x86_guest_base_offset, x86_guest_base_seg,
2034 is64, opc);
2035 #endif
2038 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2039 TCGReg base, int index, intptr_t ofs,
2040 int seg, TCGMemOp memop)
2042 /* ??? Ideally we wouldn't need a scratch register. For user-only,
2043 we could perform the bswap twice to restore the original value
2044 instead of moving to the scratch. But as it is, the L constraint
2045 means that TCG_REG_L0 is definitely free here. */
2046 const TCGReg scratch = TCG_REG_L0;
2047 const TCGMemOp real_bswap = memop & MO_BSWAP;
2048 TCGMemOp bswap = real_bswap;
2049 int movop = OPC_MOVL_EvGv;
2051 if (have_movbe && real_bswap) {
2052 bswap = 0;
2053 movop = OPC_MOVBE_MyGy;
2056 switch (memop & MO_SIZE) {
2057 case MO_8:
2058 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
2059 Use the scratch register if necessary. */
2060 if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
2061 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2062 datalo = scratch;
2064 tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
2065 datalo, base, index, 0, ofs);
2066 break;
2067 case MO_16:
2068 if (bswap) {
2069 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2070 tcg_out_rolw_8(s, scratch);
2071 datalo = scratch;
2073 tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
2074 base, index, 0, ofs);
2075 break;
2076 case MO_32:
2077 if (bswap) {
2078 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2079 tcg_out_bswap32(s, scratch);
2080 datalo = scratch;
2082 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2083 break;
2084 case MO_64:
2085 if (TCG_TARGET_REG_BITS == 64) {
2086 if (bswap) {
2087 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
2088 tcg_out_bswap64(s, scratch);
2089 datalo = scratch;
2091 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2092 base, index, 0, ofs);
2093 } else if (bswap) {
2094 tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
2095 tcg_out_bswap32(s, scratch);
2096 tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
2097 base, index, 0, ofs);
2098 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2099 tcg_out_bswap32(s, scratch);
2100 tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
2101 base, index, 0, ofs + 4);
2102 } else {
2103 if (real_bswap) {
2104 int t = datalo;
2105 datalo = datahi;
2106 datahi = t;
2108 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2109 base, index, 0, ofs);
2110 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2111 base, index, 0, ofs + 4);
2113 break;
2114 default:
2115 tcg_abort();
2119 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
2121 TCGReg datalo, datahi, addrlo;
2122 TCGReg addrhi __attribute__((unused));
2123 TCGMemOpIdx oi;
2124 TCGMemOp opc;
2125 #if defined(CONFIG_SOFTMMU)
2126 int mem_index;
2127 tcg_insn_unit *label_ptr[2];
2128 #endif
2130 datalo = *args++;
2131 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2132 addrlo = *args++;
2133 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2134 oi = *args++;
2135 opc = get_memop(oi);
2137 #if defined(CONFIG_SOFTMMU)
2138 mem_index = get_mmuidx(oi);
2140 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2141 label_ptr, offsetof(CPUTLBEntry, addr_write));
2143 /* TLB Hit. */
2144 tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
2146 /* Record the current context of a store into ldst label */
2147 add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
2148 s->code_ptr, label_ptr);
2149 #else
2150 tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2151 x86_guest_base_offset, x86_guest_base_seg, opc);
2152 #endif
2155 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2156 const TCGArg *args, const int *const_args)
2158 TCGArg a0, a1, a2;
2159 int c, const_a2, vexop, rexw = 0;
2161 #if TCG_TARGET_REG_BITS == 64
2162 # define OP_32_64(x) \
2163 case glue(glue(INDEX_op_, x), _i64): \
2164 rexw = P_REXW; /* FALLTHRU */ \
2165 case glue(glue(INDEX_op_, x), _i32)
2166 #else
2167 # define OP_32_64(x) \
2168 case glue(glue(INDEX_op_, x), _i32)
2169 #endif
2171 /* Hoist the loads of the most common arguments. */
2172 a0 = args[0];
2173 a1 = args[1];
2174 a2 = args[2];
2175 const_a2 = const_args[2];
2177 switch (opc) {
2178 case INDEX_op_exit_tb:
2179 /* Reuse the zeroing that exists for goto_ptr. */
2180 if (a0 == 0) {
2181 tcg_out_jmp(s, s->code_gen_epilogue);
2182 } else {
2183 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
2184 tcg_out_jmp(s, tb_ret_addr);
2186 break;
2187 case INDEX_op_goto_tb:
2188 if (s->tb_jmp_insn_offset) {
2189 /* direct jump method */
2190 int gap;
2191 /* jump displacement must be aligned for atomic patching;
2192          * see if we need to add extra nops before the jump.  */
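            /* The 32-bit displacement begins one byte past the JMP opcode
               (s->code_ptr + 1); padding with nops aligns that displacement
               so a later retranslation can rewrite it with one aligned
               32-bit store.  */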
2194 gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
2195 if (gap != 1) {
2196 tcg_out_nopn(s, gap - 1);
2198 tcg_out8(s, OPC_JMP_long); /* jmp im */
2199 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
2200 tcg_out32(s, 0);
2201 } else {
2202 /* indirect jump method */
2203 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
2204 (intptr_t)(s->tb_jmp_target_addr + a0));
2206 set_jmp_reset_offset(s, a0);
2207 break;
2208 case INDEX_op_goto_ptr:
2209 /* jmp to the given host address (could be epilogue) */
2210 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
2211 break;
2212 case INDEX_op_br:
2213 tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
2214 break;
2215 OP_32_64(ld8u):
2216 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2217 tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
2218 break;
2219 OP_32_64(ld8s):
2220 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
2221 break;
2222 OP_32_64(ld16u):
2223 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2224 tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
2225 break;
2226 OP_32_64(ld16s):
2227 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
2228 break;
2229 #if TCG_TARGET_REG_BITS == 64
2230 case INDEX_op_ld32u_i64:
2231 #endif
2232 case INDEX_op_ld_i32:
2233 tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
2234 break;
2236 OP_32_64(st8):
2237 if (const_args[0]) {
2238 tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
2239 tcg_out8(s, a0);
2240 } else {
2241 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
2243 break;
2244 OP_32_64(st16):
2245 if (const_args[0]) {
2246 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
2247 tcg_out16(s, a0);
2248 } else {
2249 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
2251 break;
2252 #if TCG_TARGET_REG_BITS == 64
2253 case INDEX_op_st32_i64:
2254 #endif
2255 case INDEX_op_st_i32:
2256 if (const_args[0]) {
2257 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
2258 tcg_out32(s, a0);
2259 } else {
2260 tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
2262 break;
2264 OP_32_64(add):
2265 /* For 3-operand addition, use LEA. */
2266 if (a0 != a1) {
2267 TCGArg c3 = 0;
2268 if (const_a2) {
2269 c3 = a2, a2 = -1;
2270 } else if (a0 == a2) {
2271 /* Watch out for dest = src + dest, since we've removed
2272 the matching constraint on the add. */
2273 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
2274 break;
2277 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
2278 break;
2280 c = ARITH_ADD;
2281 goto gen_arith;
2282 OP_32_64(sub):
2283 c = ARITH_SUB;
2284 goto gen_arith;
2285 OP_32_64(and):
2286 c = ARITH_AND;
2287 goto gen_arith;
2288 OP_32_64(or):
2289 c = ARITH_OR;
2290 goto gen_arith;
2291 OP_32_64(xor):
2292 c = ARITH_XOR;
2293 goto gen_arith;
2294 gen_arith:
2295 if (const_a2) {
2296 tgen_arithi(s, c + rexw, a0, a2, 0);
2297 } else {
2298 tgen_arithr(s, c + rexw, a0, a2);
2300 break;
2302 OP_32_64(andc):
2303 if (const_a2) {
2304 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2305 tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
2306 } else {
2307 tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
2309 break;
2311 OP_32_64(mul):
2312 if (const_a2) {
2313 int32_t val;
2314 val = a2;
2315 if (val == (int8_t)val) {
2316 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
2317 tcg_out8(s, val);
2318 } else {
2319 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
2320 tcg_out32(s, val);
2322 } else {
2323 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
2325 break;
2327 OP_32_64(div2):
2328 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
2329 break;
2330 OP_32_64(divu2):
2331 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
2332 break;
2334 OP_32_64(shl):
2335 /* For small constant 3-operand shift, use LEA. */
2336 if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
2337 if (a2 - 1 == 0) {
2338 /* shl $1,a1,a0 -> lea (a1,a1),a0 */
2339 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
2340 } else {
2341 /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
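                /* The SIB scale field holds a2 directly (scale = 1 << a2),
                   so e.g. shl $3,a1,a0 becomes lea 0(,a1,8),a0; only shift
                   counts 1..3 can be encoded this way.  */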
2342 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
2344 break;
2346 c = SHIFT_SHL;
2347 vexop = OPC_SHLX;
2348 goto gen_shift_maybe_vex;
2349 OP_32_64(shr):
2350 c = SHIFT_SHR;
2351 vexop = OPC_SHRX;
2352 goto gen_shift_maybe_vex;
2353 OP_32_64(sar):
2354 c = SHIFT_SAR;
2355 vexop = OPC_SARX;
2356 goto gen_shift_maybe_vex;
2357 OP_32_64(rotl):
2358 c = SHIFT_ROL;
2359 goto gen_shift;
2360 OP_32_64(rotr):
2361 c = SHIFT_ROR;
2362 goto gen_shift;
2363 gen_shift_maybe_vex:
2364 if (have_bmi2) {
2365 if (!const_a2) {
2366 tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
2367 break;
2369 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2371 /* FALLTHRU */
2372 gen_shift:
2373 if (const_a2) {
2374 tcg_out_shifti(s, c + rexw, a0, a2);
2375 } else {
2376 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
2378 break;
2380 OP_32_64(ctz):
2381 tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
2382 break;
2383 OP_32_64(clz):
2384 tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
2385 break;
2386 OP_32_64(ctpop):
2387 tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
2388 break;
2390 case INDEX_op_brcond_i32:
2391 tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2392 break;
2393 case INDEX_op_setcond_i32:
2394 tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
2395 break;
2396 case INDEX_op_movcond_i32:
2397 tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
2398 break;
2400 OP_32_64(bswap16):
2401 tcg_out_rolw_8(s, a0);
2402 break;
2403 OP_32_64(bswap32):
2404 tcg_out_bswap32(s, a0);
2405 break;
2407 OP_32_64(neg):
2408 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
2409 break;
2410 OP_32_64(not):
2411 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
2412 break;
2414 OP_32_64(ext8s):
2415 tcg_out_ext8s(s, a0, a1, rexw);
2416 break;
2417 OP_32_64(ext16s):
2418 tcg_out_ext16s(s, a0, a1, rexw);
2419 break;
2420 OP_32_64(ext8u):
2421 tcg_out_ext8u(s, a0, a1);
2422 break;
2423 OP_32_64(ext16u):
2424 tcg_out_ext16u(s, a0, a1);
2425 break;
2427 case INDEX_op_qemu_ld_i32:
2428 tcg_out_qemu_ld(s, args, 0);
2429 break;
2430 case INDEX_op_qemu_ld_i64:
2431 tcg_out_qemu_ld(s, args, 1);
2432 break;
2433 case INDEX_op_qemu_st_i32:
2434 tcg_out_qemu_st(s, args, 0);
2435 break;
2436 case INDEX_op_qemu_st_i64:
2437 tcg_out_qemu_st(s, args, 1);
2438 break;
2440 OP_32_64(mulu2):
2441 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2442 break;
2443 OP_32_64(muls2):
2444 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2445 break;
2446 OP_32_64(add2):
2447 if (const_args[4]) {
2448 tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
2449 } else {
2450 tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
2452 if (const_args[5]) {
2453 tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
2454 } else {
2455 tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
2457 break;
2458 OP_32_64(sub2):
2459 if (const_args[4]) {
2460 tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
2461 } else {
2462 tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
2464 if (const_args[5]) {
2465 tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
2466 } else {
2467 tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
2469 break;
2471 #if TCG_TARGET_REG_BITS == 32
2472 case INDEX_op_brcond2_i32:
2473 tcg_out_brcond2(s, args, const_args, 0);
2474 break;
2475 case INDEX_op_setcond2_i32:
2476 tcg_out_setcond2(s, args, const_args);
2477 break;
2478 #else /* TCG_TARGET_REG_BITS == 64 */
2479 case INDEX_op_ld32s_i64:
2480 tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
2481 break;
2482 case INDEX_op_ld_i64:
2483 tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
2484 break;
2485 case INDEX_op_st_i64:
2486 if (const_args[0]) {
2487 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
2488 tcg_out32(s, a0);
2489 } else {
2490 tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
2492 break;
2494 case INDEX_op_brcond_i64:
2495 tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2496 break;
2497 case INDEX_op_setcond_i64:
2498 tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
2499 break;
2500 case INDEX_op_movcond_i64:
2501 tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
2502 break;
2504 case INDEX_op_bswap64_i64:
2505 tcg_out_bswap64(s, a0);
2506 break;
2507 case INDEX_op_extu_i32_i64:
2508 case INDEX_op_ext32u_i64:
2509 case INDEX_op_extrl_i64_i32:
2510 tcg_out_ext32u(s, a0, a1);
2511 break;
2512 case INDEX_op_ext_i32_i64:
2513 case INDEX_op_ext32s_i64:
2514 tcg_out_ext32s(s, a0, a1);
2515 break;
2516 case INDEX_op_extrh_i64_i32:
2517 tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
2518 break;
2519 #endif
2521 OP_32_64(deposit):
2522 if (args[3] == 0 && args[4] == 8) {
2523 /* load bits 0..7 */
2524 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
2525 } else if (args[3] == 8 && args[4] == 8) {
2526 /* load bits 8..15 */
2527 tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
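            /* Without a REX prefix, modrm register codes 4..7 select
               %ah/%ch/%dh/%bh, so a0 + 4 names the high byte of a0; the
               "Q" constraint keeps both operands within %eax..%ebx.  */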
2528 } else if (args[3] == 0 && args[4] == 16) {
2529 /* load bits 0..15 */
2530 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
2531 } else {
2532 tcg_abort();
2534 break;
2536 case INDEX_op_extract_i64:
2537 if (a2 + args[3] == 32) {
2538 /* This is a 32-bit zero-extending right shift. */
2539 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2540 tcg_out_shifti(s, SHIFT_SHR, a0, a2);
2541 break;
2543 /* FALLTHRU */
2544 case INDEX_op_extract_i32:
2545 /* On the off-chance that we can use the high-byte registers.
2546 Otherwise we emit the same ext16 + shift pattern that we
2547 would have gotten from the normal tcg-op.c expansion. */
2548 tcg_debug_assert(a2 == 8 && args[3] == 8);
2549 if (a1 < 4 && a0 < 8) {
2550 tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
2551 } else {
2552 tcg_out_ext16u(s, a0, a1);
2553 tcg_out_shifti(s, SHIFT_SHR, a0, 8);
2555 break;
2557 case INDEX_op_sextract_i32:
2558 /* We don't implement sextract_i64, as we cannot sign-extend to
2559 64-bits without using the REX prefix that explicitly excludes
2560 access to the high-byte registers. */
2561 tcg_debug_assert(a2 == 8 && args[3] == 8);
2562 if (a1 < 4 && a0 < 8) {
2563 tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
2564 } else {
2565 tcg_out_ext16s(s, a0, a1, 0);
2566 tcg_out_shifti(s, SHIFT_SAR, a0, 8);
2568 break;
2570 case INDEX_op_mb:
2571 tcg_out_mb(s, a0);
2572 break;
2573 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2574 case INDEX_op_mov_i64:
2575 case INDEX_op_mov_vec:
2576 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2577 case INDEX_op_movi_i64:
2578 case INDEX_op_dupi_vec:
2579 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2580 default:
2581 tcg_abort();
2584 #undef OP_32_64
2587 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2588 unsigned vecl, unsigned vece,
2589 const TCGArg *args, const int *const_args)
2591 static int const add_insn[4] = {
2592 OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
2594 static int const sub_insn[4] = {
2595 OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
2597 static int const mul_insn[4] = {
2598 OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
2600 static int const shift_imm_insn[4] = {
2601 OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
2603 static int const cmpeq_insn[4] = {
2604 OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
2606 static int const cmpgt_insn[4] = {
2607 OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
2609 static int const punpckl_insn[4] = {
2610 OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
2612 static int const punpckh_insn[4] = {
2613 OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
2615 static int const packss_insn[4] = {
2616 OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
2618 static int const packus_insn[4] = {
2619 OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
2622 TCGType type = vecl + TCG_TYPE_V64;
2623 int insn, sub;
2624 TCGArg a0, a1, a2;
2626 a0 = args[0];
2627 a1 = args[1];
2628 a2 = args[2];
2630 switch (opc) {
2631 case INDEX_op_add_vec:
2632 insn = add_insn[vece];
2633 goto gen_simd;
2634 case INDEX_op_sub_vec:
2635 insn = sub_insn[vece];
2636 goto gen_simd;
2637 case INDEX_op_mul_vec:
2638 insn = mul_insn[vece];
2639 goto gen_simd;
2640 case INDEX_op_and_vec:
2641 insn = OPC_PAND;
2642 goto gen_simd;
2643 case INDEX_op_or_vec:
2644 insn = OPC_POR;
2645 goto gen_simd;
2646 case INDEX_op_xor_vec:
2647 insn = OPC_PXOR;
2648 goto gen_simd;
2649 case INDEX_op_x86_punpckl_vec:
2650 insn = punpckl_insn[vece];
2651 goto gen_simd;
2652 case INDEX_op_x86_punpckh_vec:
2653 insn = punpckh_insn[vece];
2654 goto gen_simd;
2655 case INDEX_op_x86_packss_vec:
2656 insn = packss_insn[vece];
2657 goto gen_simd;
2658 case INDEX_op_x86_packus_vec:
2659 insn = packus_insn[vece];
2660 goto gen_simd;
2661 #if TCG_TARGET_REG_BITS == 32
2662 case INDEX_op_dup2_vec:
2663 /* Constraints have already placed both 32-bit inputs in xmm regs. */
2664 insn = OPC_PUNPCKLDQ;
2665 goto gen_simd;
2666 #endif
2667 gen_simd:
2668 tcg_debug_assert(insn != OPC_UD2);
2669 if (type == TCG_TYPE_V256) {
2670 insn |= P_VEXL;
2672 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2673 break;
2675 case INDEX_op_cmp_vec:
2676 sub = args[3];
2677 if (sub == TCG_COND_EQ) {
2678 insn = cmpeq_insn[vece];
2679 } else if (sub == TCG_COND_GT) {
2680 insn = cmpgt_insn[vece];
2681 } else {
2682 g_assert_not_reached();
2684 goto gen_simd;
2686 case INDEX_op_andc_vec:
2687 insn = OPC_PANDN;
2688 if (type == TCG_TYPE_V256) {
2689 insn |= P_VEXL;
2691 tcg_out_vex_modrm(s, insn, a0, a2, a1);
2692 break;
2694 case INDEX_op_shli_vec:
2695 sub = 6;
2696 goto gen_shift;
2697 case INDEX_op_shri_vec:
2698 sub = 2;
2699 goto gen_shift;
2700 case INDEX_op_sari_vec:
2701 tcg_debug_assert(vece != MO_64);
2702 sub = 4;
2703 gen_shift:
2704 tcg_debug_assert(vece != MO_8);
2705 insn = shift_imm_insn[vece];
2706 if (type == TCG_TYPE_V256) {
2707 insn |= P_VEXL;
2709 tcg_out_vex_modrm(s, insn, sub, a0, a1);
2710 tcg_out8(s, a2);
2711 break;
2713 case INDEX_op_ld_vec:
2714 tcg_out_ld(s, type, a0, a1, a2);
2715 break;
2716 case INDEX_op_st_vec:
2717 tcg_out_st(s, type, a0, a1, a2);
2718 break;
2719 case INDEX_op_dup_vec:
2720 tcg_out_dup_vec(s, type, vece, a0, a1);
2721 break;
2723 case INDEX_op_x86_shufps_vec:
2724 insn = OPC_SHUFPS;
2725 sub = args[3];
2726 goto gen_simd_imm8;
2727 case INDEX_op_x86_blend_vec:
2728 if (vece == MO_16) {
2729 insn = OPC_PBLENDW;
2730 } else if (vece == MO_32) {
2731 insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
2732 } else {
2733 g_assert_not_reached();
2735 sub = args[3];
2736 goto gen_simd_imm8;
2737 case INDEX_op_x86_vperm2i128_vec:
2738 insn = OPC_VPERM2I128;
2739 sub = args[3];
2740 goto gen_simd_imm8;
2741 gen_simd_imm8:
2742 if (type == TCG_TYPE_V256) {
2743 insn |= P_VEXL;
2745 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2746 tcg_out8(s, sub);
2747 break;
2749 case INDEX_op_x86_vpblendvb_vec:
2750 insn = OPC_VPBLENDVB;
2751 if (type == TCG_TYPE_V256) {
2752 insn |= P_VEXL;
2754 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2755 tcg_out8(s, args[3] << 4);
2756 break;
2758 case INDEX_op_x86_psrldq_vec:
2759 tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
2760 tcg_out8(s, a2);
2761 break;
2763 default:
2764 g_assert_not_reached();
2768 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2770 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
2771 static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
2772 static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
2773 static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
2774 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
2775 static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
2776 static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
2777 static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
2778 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
2779 static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
2780 static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
2781 static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
2782 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
2783 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
2784 static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
2785 static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
2786 static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
2787 static const TCGTargetOpDef r_r_L_L
2788 = { .args_ct_str = { "r", "r", "L", "L" } };
2789 static const TCGTargetOpDef L_L_L_L
2790 = { .args_ct_str = { "L", "L", "L", "L" } };
2791 static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
2792 static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
2793 static const TCGTargetOpDef x_x_x_x
2794 = { .args_ct_str = { "x", "x", "x", "x" } };
2795 static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
2797 switch (op) {
2798 case INDEX_op_goto_ptr:
2799 return &r;
2801 case INDEX_op_ld8u_i32:
2802 case INDEX_op_ld8u_i64:
2803 case INDEX_op_ld8s_i32:
2804 case INDEX_op_ld8s_i64:
2805 case INDEX_op_ld16u_i32:
2806 case INDEX_op_ld16u_i64:
2807 case INDEX_op_ld16s_i32:
2808 case INDEX_op_ld16s_i64:
2809 case INDEX_op_ld_i32:
2810 case INDEX_op_ld32u_i64:
2811 case INDEX_op_ld32s_i64:
2812 case INDEX_op_ld_i64:
2813 return &r_r;
2815 case INDEX_op_st8_i32:
2816 case INDEX_op_st8_i64:
2817 return &qi_r;
2818 case INDEX_op_st16_i32:
2819 case INDEX_op_st16_i64:
2820 case INDEX_op_st_i32:
2821 case INDEX_op_st32_i64:
2822 return &ri_r;
2823 case INDEX_op_st_i64:
2824 return &re_r;
2826 case INDEX_op_add_i32:
2827 case INDEX_op_add_i64:
2828 return &r_r_re;
2829 case INDEX_op_sub_i32:
2830 case INDEX_op_sub_i64:
2831 case INDEX_op_mul_i32:
2832 case INDEX_op_mul_i64:
2833 case INDEX_op_or_i32:
2834 case INDEX_op_or_i64:
2835 case INDEX_op_xor_i32:
2836 case INDEX_op_xor_i64:
2837 return &r_0_re;
2839 case INDEX_op_and_i32:
2840 case INDEX_op_and_i64:
2842 static const TCGTargetOpDef and
2843 = { .args_ct_str = { "r", "0", "reZ" } };
2844 return &and;
2846 break;
2847 case INDEX_op_andc_i32:
2848 case INDEX_op_andc_i64:
2850 static const TCGTargetOpDef andc
2851 = { .args_ct_str = { "r", "r", "rI" } };
2852 return &andc;
2854 break;
2856 case INDEX_op_shl_i32:
2857 case INDEX_op_shl_i64:
2858 case INDEX_op_shr_i32:
2859 case INDEX_op_shr_i64:
2860 case INDEX_op_sar_i32:
2861 case INDEX_op_sar_i64:
2862 return have_bmi2 ? &r_r_ri : &r_0_ci;
2863 case INDEX_op_rotl_i32:
2864 case INDEX_op_rotl_i64:
2865 case INDEX_op_rotr_i32:
2866 case INDEX_op_rotr_i64:
2867 return &r_0_ci;
2869 case INDEX_op_brcond_i32:
2870 case INDEX_op_brcond_i64:
2871 return &r_re;
2873 case INDEX_op_bswap16_i32:
2874 case INDEX_op_bswap16_i64:
2875 case INDEX_op_bswap32_i32:
2876 case INDEX_op_bswap32_i64:
2877 case INDEX_op_bswap64_i64:
2878 case INDEX_op_neg_i32:
2879 case INDEX_op_neg_i64:
2880 case INDEX_op_not_i32:
2881 case INDEX_op_not_i64:
2882 case INDEX_op_extrh_i64_i32:
2883 return &r_0;
2885 case INDEX_op_ext8s_i32:
2886 case INDEX_op_ext8s_i64:
2887 case INDEX_op_ext8u_i32:
2888 case INDEX_op_ext8u_i64:
2889 return &r_q;
2890 case INDEX_op_ext16s_i32:
2891 case INDEX_op_ext16s_i64:
2892 case INDEX_op_ext16u_i32:
2893 case INDEX_op_ext16u_i64:
2894 case INDEX_op_ext32s_i64:
2895 case INDEX_op_ext32u_i64:
2896 case INDEX_op_ext_i32_i64:
2897 case INDEX_op_extu_i32_i64:
2898 case INDEX_op_extrl_i64_i32:
2899 case INDEX_op_extract_i32:
2900 case INDEX_op_extract_i64:
2901 case INDEX_op_sextract_i32:
2902 case INDEX_op_ctpop_i32:
2903 case INDEX_op_ctpop_i64:
2904 return &r_r;
2906 case INDEX_op_deposit_i32:
2907 case INDEX_op_deposit_i64:
2909 static const TCGTargetOpDef dep
2910 = { .args_ct_str = { "Q", "0", "Q" } };
2911 return &dep;
2913 case INDEX_op_setcond_i32:
2914 case INDEX_op_setcond_i64:
2916 static const TCGTargetOpDef setc
2917 = { .args_ct_str = { "q", "r", "re" } };
2918 return &setc;
2920 case INDEX_op_movcond_i32:
2921 case INDEX_op_movcond_i64:
2923 static const TCGTargetOpDef movc
2924 = { .args_ct_str = { "r", "r", "re", "r", "0" } };
2925 return &movc;
2927 case INDEX_op_div2_i32:
2928 case INDEX_op_div2_i64:
2929 case INDEX_op_divu2_i32:
2930 case INDEX_op_divu2_i64:
2932 static const TCGTargetOpDef div2
2933 = { .args_ct_str = { "a", "d", "0", "1", "r" } };
2934 return &div2;
2936 case INDEX_op_mulu2_i32:
2937 case INDEX_op_mulu2_i64:
2938 case INDEX_op_muls2_i32:
2939 case INDEX_op_muls2_i64:
2941 static const TCGTargetOpDef mul2
2942 = { .args_ct_str = { "a", "d", "a", "r" } };
2943 return &mul2;
2945 case INDEX_op_add2_i32:
2946 case INDEX_op_add2_i64:
2947 case INDEX_op_sub2_i32:
2948 case INDEX_op_sub2_i64:
2950 static const TCGTargetOpDef arith2
2951 = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
2952 return &arith2;
2954 case INDEX_op_ctz_i32:
2955 case INDEX_op_ctz_i64:
2957 static const TCGTargetOpDef ctz[2] = {
2958 { .args_ct_str = { "&r", "r", "r" } },
2959 { .args_ct_str = { "&r", "r", "rW" } },
2961 return &ctz[have_bmi1];
2963 case INDEX_op_clz_i32:
2964 case INDEX_op_clz_i64:
2966 static const TCGTargetOpDef clz[2] = {
2967 { .args_ct_str = { "&r", "r", "r" } },
2968 { .args_ct_str = { "&r", "r", "rW" } },
2970 return &clz[have_lzcnt];
2973 case INDEX_op_qemu_ld_i32:
2974 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
2975 case INDEX_op_qemu_st_i32:
2976 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
2977 case INDEX_op_qemu_ld_i64:
2978 return (TCG_TARGET_REG_BITS == 64 ? &r_L
2979 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
2980 : &r_r_L_L);
2981 case INDEX_op_qemu_st_i64:
2982 return (TCG_TARGET_REG_BITS == 64 ? &L_L
2983 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
2984 : &L_L_L_L);
2986 case INDEX_op_brcond2_i32:
2988 static const TCGTargetOpDef b2
2989 = { .args_ct_str = { "r", "r", "ri", "ri" } };
2990 return &b2;
2992 case INDEX_op_setcond2_i32:
2994 static const TCGTargetOpDef s2
2995 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
2996 return &s2;
2999 case INDEX_op_ld_vec:
3000 case INDEX_op_st_vec:
3001 return &x_r;
3003 case INDEX_op_add_vec:
3004 case INDEX_op_sub_vec:
3005 case INDEX_op_mul_vec:
3006 case INDEX_op_and_vec:
3007 case INDEX_op_or_vec:
3008 case INDEX_op_xor_vec:
3009 case INDEX_op_andc_vec:
3010 case INDEX_op_cmp_vec:
3011 case INDEX_op_x86_shufps_vec:
3012 case INDEX_op_x86_blend_vec:
3013 case INDEX_op_x86_packss_vec:
3014 case INDEX_op_x86_packus_vec:
3015 case INDEX_op_x86_vperm2i128_vec:
3016 case INDEX_op_x86_punpckl_vec:
3017 case INDEX_op_x86_punpckh_vec:
3018 #if TCG_TARGET_REG_BITS == 32
3019 case INDEX_op_dup2_vec:
3020 #endif
3021 return &x_x_x;
3022 case INDEX_op_dup_vec:
3023 case INDEX_op_shli_vec:
3024 case INDEX_op_shri_vec:
3025 case INDEX_op_sari_vec:
3026 case INDEX_op_x86_psrldq_vec:
3027 return &x_x;
3028 case INDEX_op_x86_vpblendvb_vec:
3029 return &x_x_x_x;
3031 default:
3032 break;
3034 return NULL;
3037 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3039 switch (opc) {
3040 case INDEX_op_add_vec:
3041 case INDEX_op_sub_vec:
3042 case INDEX_op_and_vec:
3043 case INDEX_op_or_vec:
3044 case INDEX_op_xor_vec:
3045 case INDEX_op_andc_vec:
3046 return 1;
3047 case INDEX_op_cmp_vec:
3048 return -1;
3050 case INDEX_op_shli_vec:
3051 case INDEX_op_shri_vec:
3052 /* We must expand the operation for MO_8. */
3053 return vece == MO_8 ? -1 : 1;
3055 case INDEX_op_sari_vec:
3056 /* We must expand the operation for MO_8. */
3057 if (vece == MO_8) {
3058 return -1;
3060 /* We can emulate this for MO_64, but it does not pay off
3061 unless we're producing at least 4 values. */
3062 if (vece == MO_64) {
3063 return type >= TCG_TYPE_V256 ? -1 : 0;
3065 return 1;
3067 case INDEX_op_mul_vec:
3068 if (vece == MO_8) {
3069 /* We can expand the operation for MO_8. */
3070 return -1;
3072 if (vece == MO_64) {
3073 return 0;
3075 return 1;
3077 default:
3078 return 0;
3082 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3083 TCGArg a0, ...)
3085 va_list va;
3086 TCGArg a1, a2;
3087 TCGv_vec v0, t1, t2, t3, t4;
3089 va_start(va, a0);
3090 v0 = temp_tcgv_vec(arg_temp(a0));
3092 switch (opc) {
3093 case INDEX_op_shli_vec:
3094 case INDEX_op_shri_vec:
3095 tcg_debug_assert(vece == MO_8);
3096 a1 = va_arg(va, TCGArg);
3097 a2 = va_arg(va, TCGArg);
3098 /* Unpack to W, shift, and repack. Tricky bits:
3099 (1) Use punpck*bw x,x to produce DDCCBBAA,
3100 i.e. duplicate in other half of the 16-bit lane.
3101 (2) For right-shift, add 8 so that the high half of
3102 the lane becomes zero. For left-shift, we must
3103 shift up and down again.
3104 (3) Step 2 leaves high half zero such that PACKUSWB
3105 (pack with unsigned saturation) does not modify
3106 the quantity. */
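        /* For instance, with a shift count of 3 the right-shift path shifts
           each widened 16-bit lane by 11, and the left-shift path shifts up
           by 11 and back down by 8; either way the shifted byte ends up in
           the low half of the lane with the high half zero, ready for
           PACKUSWB.  */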
3107 t1 = tcg_temp_new_vec(type);
3108 t2 = tcg_temp_new_vec(type);
3109 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3110 tcgv_vec_arg(t1), a1, a1);
3111 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3112 tcgv_vec_arg(t2), a1, a1);
3113 if (opc == INDEX_op_shri_vec) {
3114 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3115 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3116 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3117 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3118 } else {
3119 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3120 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3121 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3122 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3123 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3124 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 8);
3125 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3126 tcgv_vec_arg(t2), tcgv_vec_arg(t2), 8);
3128 vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3129 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3130 tcg_temp_free_vec(t1);
3131 tcg_temp_free_vec(t2);
3132 break;
3134 case INDEX_op_sari_vec:
3135 a1 = va_arg(va, TCGArg);
3136 a2 = va_arg(va, TCGArg);
3137 if (vece == MO_8) {
3138 /* Unpack to W, shift, and repack, as above. */
3139 t1 = tcg_temp_new_vec(type);
3140 t2 = tcg_temp_new_vec(type);
3141 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3142 tcgv_vec_arg(t1), a1, a1);
3143 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3144 tcgv_vec_arg(t2), a1, a1);
3145 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3146 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3147 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3148 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3149 vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
3150 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3151 tcg_temp_free_vec(t1);
3152 tcg_temp_free_vec(t2);
3153 break;
3155 tcg_debug_assert(vece == MO_64);
3156 /* MO_64: If the shift is <= 32, we can emulate the sign extend by
3157 performing an arithmetic 32-bit shift and overwriting the high
3158 half of the result (note that the ISA says shift of 32 is valid). */
3159 if (a2 <= 32) {
3160 t1 = tcg_temp_new_vec(type);
3161 vec_gen_3(INDEX_op_sari_vec, type, MO_32, tcgv_vec_arg(t1), a1, a2);
3162 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3163 vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
3164 a0, a0, tcgv_vec_arg(t1), 0xaa);
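            /* The 0xaa immediate selects the odd-numbered 32-bit elements,
               i.e. the high half of every 64-bit lane, from t1 (the
               arithmetically shifted copy), keeping the logically shifted
               low halves already in a0.  */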
3165 tcg_temp_free_vec(t1);
3166 break;
3168 /* Otherwise we will need to use a compare vs 0 to produce the
3169 sign-extend, shift and merge. */
3170 t1 = tcg_temp_new_vec(type);
3171 t2 = tcg_const_zeros_vec(type);
3172 vec_gen_4(INDEX_op_cmp_vec, type, MO_64,
3173 tcgv_vec_arg(t1), tcgv_vec_arg(t2), a1, TCG_COND_GT);
3174 tcg_temp_free_vec(t2);
3175 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3176 vec_gen_3(INDEX_op_shli_vec, type, MO_64,
3177 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 64 - a2);
3178 vec_gen_3(INDEX_op_or_vec, type, MO_64, a0, a0, tcgv_vec_arg(t1));
3179 tcg_temp_free_vec(t1);
3180 break;
3182 case INDEX_op_mul_vec:
3183 tcg_debug_assert(vece == MO_8);
3184 a1 = va_arg(va, TCGArg);
3185 a2 = va_arg(va, TCGArg);
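    /* All three cases below follow the same scheme: interleave one operand
       with zero so its bytes sit in the low half of 16-bit lanes, interleave
       zero with the other operand so its bytes sit in the high half,
       multiply, shift the 16-bit products right by 8 (leaving the byte
       product in the low half and zero in the high half), and repack with
       unsigned saturation, which cannot trigger because the high halves
       are zero.  */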
3186 switch (type) {
3187 case TCG_TYPE_V64:
3188 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3189 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3190 tcg_gen_dup16i_vec(t2, 0);
3191 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3192 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t2));
3193 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3194 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2);
3195 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3196 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3197 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3198 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t1));
3199 tcg_temp_free_vec(t1);
3200 tcg_temp_free_vec(t2);
3201 break;
3203 case TCG_TYPE_V128:
3204 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3205 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3206 t3 = tcg_temp_new_vec(TCG_TYPE_V128);
3207 t4 = tcg_temp_new_vec(TCG_TYPE_V128);
3208 tcg_gen_dup16i_vec(t4, 0);
3209 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3210 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3211 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3212 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3213 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3214 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3215 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3216 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3217 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3218 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3219 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3220 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3221 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3222 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3223 tcg_temp_free_vec(t1);
3224 tcg_temp_free_vec(t2);
3225 tcg_temp_free_vec(t3);
3226 tcg_temp_free_vec(t4);
3227 break;
3229 case TCG_TYPE_V256:
3230 t1 = tcg_temp_new_vec(TCG_TYPE_V256);
3231 t2 = tcg_temp_new_vec(TCG_TYPE_V256);
3232 t3 = tcg_temp_new_vec(TCG_TYPE_V256);
3233 t4 = tcg_temp_new_vec(TCG_TYPE_V256);
3234 tcg_gen_dup16i_vec(t4, 0);
3235 /* a1: A[0-7] ... D[0-7]; a2: W[0-7] ... Z[0-7]
3236 t1: extends of B[0-7], D[0-7]
3237 t2: extends of X[0-7], Z[0-7]
3238 t3: extends of A[0-7], C[0-7]
3239 t4: extends of W[0-7], Y[0-7]. */
3240 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3241 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3242 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3243 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3244 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3245 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3246 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3247 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3248 /* t1: BX DZ; t2: AW CY. */
3249 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3250 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3251 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3252 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3253 /* a0: AW BX CY DZ. */
3254 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V256, MO_8,
3255 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3256 tcg_temp_free_vec(t1);
3257 tcg_temp_free_vec(t2);
3258 tcg_temp_free_vec(t3);
3259 tcg_temp_free_vec(t4);
3260 break;
3262 default:
3263 g_assert_not_reached();
3265 break;
3267 case INDEX_op_cmp_vec:
3269 enum {
3270 NEED_SWAP = 1,
3271 NEED_INV = 2,
3272 NEED_BIAS = 4
3274 static const uint8_t fixups[16] = {
3275 [0 ... 15] = -1,
3276 [TCG_COND_EQ] = 0,
3277 [TCG_COND_NE] = NEED_INV,
3278 [TCG_COND_GT] = 0,
3279 [TCG_COND_LT] = NEED_SWAP,
3280 [TCG_COND_LE] = NEED_INV,
3281 [TCG_COND_GE] = NEED_SWAP | NEED_INV,
3282 [TCG_COND_GTU] = NEED_BIAS,
3283 [TCG_COND_LTU] = NEED_BIAS | NEED_SWAP,
3284 [TCG_COND_LEU] = NEED_BIAS | NEED_INV,
3285 [TCG_COND_GEU] = NEED_BIAS | NEED_SWAP | NEED_INV,
3288 TCGCond cond;
3289 uint8_t fixup;
3291 a1 = va_arg(va, TCGArg);
3292 a2 = va_arg(va, TCGArg);
3293 cond = va_arg(va, TCGArg);
3294 fixup = fixups[cond & 15];
3295 tcg_debug_assert(fixup != 0xff);
3297 if (fixup & NEED_INV) {
3298 cond = tcg_invert_cond(cond);
3300 if (fixup & NEED_SWAP) {
3301 TCGArg t;
3302 t = a1, a1 = a2, a2 = t;
3303 cond = tcg_swap_cond(cond);
3306 t1 = t2 = NULL;
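        /* SSE/AVX provide only equality and signed greater-than compares
           (PCMPEQ*, PCMPGT*).  NEED_BIAS handles the unsigned conditions by
           subtracting the sign bit (1 << (width - 1)) from both operands,
           which maps unsigned ordering onto signed ordering and lets the
           comparison proceed as its signed counterpart.  */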
3307 if (fixup & NEED_BIAS) {
3308 t1 = tcg_temp_new_vec(type);
3309 t2 = tcg_temp_new_vec(type);
3310 tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
3311 tcg_gen_sub_vec(vece, t1, temp_tcgv_vec(arg_temp(a1)), t2);
3312 tcg_gen_sub_vec(vece, t2, temp_tcgv_vec(arg_temp(a2)), t2);
3313 a1 = tcgv_vec_arg(t1);
3314 a2 = tcgv_vec_arg(t2);
3315 cond = tcg_signed_cond(cond);
3318 tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
3319 vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);
3321 if (fixup & NEED_BIAS) {
3322 tcg_temp_free_vec(t1);
3323 tcg_temp_free_vec(t2);
3325 if (fixup & NEED_INV) {
3326 tcg_gen_not_vec(vece, v0, v0);
3329 break;
3331 default:
3332 break;
3335 va_end(va);
3338 static const int tcg_target_callee_save_regs[] = {
3339 #if TCG_TARGET_REG_BITS == 64
3340 TCG_REG_RBP,
3341 TCG_REG_RBX,
3342 #if defined(_WIN64)
3343 TCG_REG_RDI,
3344 TCG_REG_RSI,
3345 #endif
3346 TCG_REG_R12,
3347 TCG_REG_R13,
3348 TCG_REG_R14, /* Currently used for the global env. */
3349 TCG_REG_R15,
3350 #else
3351 TCG_REG_EBP, /* Currently used for the global env. */
3352 TCG_REG_EBX,
3353 TCG_REG_ESI,
3354 TCG_REG_EDI,
3355 #endif
3358 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3359 and tcg_register_jit. */
3361 #define PUSH_SIZE \
3362 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
3363 * (TCG_TARGET_REG_BITS / 8))
3365 #define FRAME_SIZE \
3366 ((PUSH_SIZE \
3367 + TCG_STATIC_CALL_ARGS_SIZE \
3368 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3369 + TCG_TARGET_STACK_ALIGN - 1) \
3370 & ~(TCG_TARGET_STACK_ALIGN - 1))
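/* The extra slot in PUSH_SIZE is the return address pushed by the call into
   the prologue; the final mask rounds the total up to
   TCG_TARGET_STACK_ALIGN.  As an illustration only, assuming the usual
   values TCG_STATIC_CALL_ARGS_SIZE == 128, CPU_TEMP_BUF_NLONGS == 128 and a
   16-byte stack alignment on a 64-bit non-Windows host (6 callee-saved
   registers): PUSH_SIZE = (1 + 6) * 8 = 56 and
   FRAME_SIZE = (56 + 128 + 128 * 8 + 15) & ~15 = 1216.  */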
3372 /* Generate global QEMU prologue and epilogue code */
3373 static void tcg_target_qemu_prologue(TCGContext *s)
3375 int i, stack_addend;
3377 /* TB prologue */
3379 /* Reserve some stack space, also for TCG temps. */
3380 stack_addend = FRAME_SIZE - PUSH_SIZE;
3381 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3382 CPU_TEMP_BUF_NLONGS * sizeof(long));
3384 /* Save all callee saved registers. */
3385 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
3386 tcg_out_push(s, tcg_target_callee_save_regs[i]);
3389 #if TCG_TARGET_REG_BITS == 32
3390 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
3391 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
3392 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3393 /* jmp *tb. */
3394 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
3395 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
3396 + stack_addend);
3397 #else
3398 # if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
3399 if (guest_base) {
3400 int seg = setup_guest_base_seg();
3401 if (seg != 0) {
3402 x86_guest_base_seg = seg;
3403 } else if (guest_base == (int32_t)guest_base) {
3404 x86_guest_base_offset = guest_base;
3405 } else {
3406 /* Choose R12 because, as a base, it requires a SIB byte. */
3407 x86_guest_base_index = TCG_REG_R12;
3408 tcg_out_mov(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
3409 tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
3412 # endif
3413 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3414 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3415 /* jmp *tb. */
3416 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
3417 #endif
3420 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3421 * and fall through to the rest of the epilogue.
3423 s->code_gen_epilogue = s->code_ptr;
3424 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
3426 /* TB epilogue */
3427 tb_ret_addr = s->code_ptr;
3429 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
3431 if (have_avx2) {
3432 tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
3434 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
3435 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
3437 tcg_out_opc(s, OPC_RET, 0, 0, 0);
3440 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3442 memset(p, 0x90, count);
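    /* 0x90 is the one-byte x86 NOP.  */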
3445 static void tcg_target_init(TCGContext *s)
3447 #ifdef CONFIG_CPUID_H
3448 unsigned a, b, c, d, b7 = 0;
3449 int max = __get_cpuid_max(0, 0);
3451 if (max >= 7) {
3452 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
3453 __cpuid_count(7, 0, a, b7, c, d);
3454 have_bmi1 = (b7 & bit_BMI) != 0;
3455 have_bmi2 = (b7 & bit_BMI2) != 0;
3458 if (max >= 1) {
3459 __cpuid(1, a, b, c, d);
3460 #ifndef have_cmov
3461 /* For 32-bit, 99% certainty that we're running on hardware that
3462 supports cmov, but we still need to check. In case cmov is not
3463 available, we'll use a small forward branch. */
3464 have_cmov = (d & bit_CMOV) != 0;
3465 #endif
3467 /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
3468 need to probe for it. */
3469 have_movbe = (c & bit_MOVBE) != 0;
3470 have_popcnt = (c & bit_POPCNT) != 0;
3472        /* There are a number of things we must check before we can be
3473           sure we will not hit an invalid opcode.  */
3474 if (c & bit_OSXSAVE) {
3475 unsigned xcrl, xcrh;
3476 /* The xgetbv instruction is not available to older versions of
3477 * the assembler, so we encode the instruction manually.
3479 asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
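            /* 0x0f 0x01 0xd0 is XGETBV; with %ecx = 0 it reads XCR0.
               Bit 1 covers SSE (%xmm) state and bit 2 AVX (%ymm) state,
               so (xcrl & 6) == 6 means the OS saves and restores both,
               which is required before AVX instructions may be used.  */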
3480 if ((xcrl & 6) == 6) {
3481 have_avx1 = (c & bit_AVX) != 0;
3482 have_avx2 = (b7 & bit_AVX2) != 0;
3487    max = __get_cpuid_max(0x80000000, 0);
3488 if (max >= 1) {
3489 __cpuid(0x80000001, a, b, c, d);
3490 /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
3491 have_lzcnt = (c & bit_LZCNT) != 0;
3493 #endif /* CONFIG_CPUID_H */
3495 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
3496 if (TCG_TARGET_REG_BITS == 64) {
3497 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
3499 if (have_avx1) {
3500 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3501 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
3503 if (have_avx2) {
3504 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
3507 tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
3508 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
3509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
3510 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
3511 if (TCG_TARGET_REG_BITS == 64) {
3512 #if !defined(_WIN64)
3513 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
3514 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
3515 #endif
3516 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3517 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3518 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3519 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3522 s->reserved_regs = 0;
3523 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3526 typedef struct {
3527 DebugFrameHeader h;
3528 uint8_t fde_def_cfa[4];
3529 uint8_t fde_reg_ofs[14];
3530 } DebugFrame;
3532 /* We're expecting a 2 byte uleb128 encoded value. */
3533 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
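/* ULEB128 stores seven value bits per byte, least significant group first,
   with the top bit set on every byte except the last; hence
   (FRAME_SIZE & 0x7f) | 0x80 followed by FRAME_SIZE >> 7 below encodes any
   frame size below 1 << 14 in exactly two bytes.  */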
3535 #if !defined(__ELF__)
3536 /* Host machine without ELF. */
3537 #elif TCG_TARGET_REG_BITS == 64
3538 #define ELF_HOST_MACHINE EM_X86_64
3539 static const DebugFrame debug_frame = {
3540 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3541 .h.cie.id = -1,
3542 .h.cie.version = 1,
3543 .h.cie.code_align = 1,
3544 .h.cie.data_align = 0x78, /* sleb128 -8 */
3545 .h.cie.return_column = 16,
3547 /* Total FDE size does not include the "len" member. */
3548 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3550 .fde_def_cfa = {
3551 12, 7, /* DW_CFA_def_cfa %rsp, ... */
3552 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3553 (FRAME_SIZE >> 7)
3555 .fde_reg_ofs = {
3556 0x90, 1, /* DW_CFA_offset, %rip, -8 */
3557 /* The following ordering must match tcg_target_callee_save_regs. */
3558 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
3559 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
3560 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
3561 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
3562 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
3563 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
3566 #else
3567 #define ELF_HOST_MACHINE EM_386
3568 static const DebugFrame debug_frame = {
3569 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3570 .h.cie.id = -1,
3571 .h.cie.version = 1,
3572 .h.cie.code_align = 1,
3573 .h.cie.data_align = 0x7c, /* sleb128 -4 */
3574 .h.cie.return_column = 8,
3576 /* Total FDE size does not include the "len" member. */
3577 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3579 .fde_def_cfa = {
3580 12, 4, /* DW_CFA_def_cfa %esp, ... */
3581 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3582 (FRAME_SIZE >> 7)
3584 .fde_reg_ofs = {
3585 0x88, 1, /* DW_CFA_offset, %eip, -4 */
3586 /* The following ordering must match tcg_target_callee_save_regs. */
3587 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
3588 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
3589 0x86, 4, /* DW_CFA_offset, %esi, -16 */
3590 0x87, 5, /* DW_CFA_offset, %edi, -20 */
3593 #endif
3595 #if defined(ELF_HOST_MACHINE)
3596 void tcg_register_jit(void *buf, size_t buf_size)
3598 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3600 #endif