tcg/i386: Assume 32-bit values are zero-extended
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "tcg-pool.inc.c"
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 #if TCG_TARGET_REG_BITS == 64
30 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
31 #else
32 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
33 #endif
34 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
35 "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
36 #if TCG_TARGET_REG_BITS == 64
37 "%xmm8", "%xmm9", "%xmm10", "%xmm11",
38 "%xmm12", "%xmm13", "%xmm14", "%xmm15",
39 #endif
41 #endif
43 static const int tcg_target_reg_alloc_order[] = {
44 #if TCG_TARGET_REG_BITS == 64
45 TCG_REG_RBP,
46 TCG_REG_RBX,
47 TCG_REG_R12,
48 TCG_REG_R13,
49 TCG_REG_R14,
50 TCG_REG_R15,
51 TCG_REG_R10,
52 TCG_REG_R11,
53 TCG_REG_R9,
54 TCG_REG_R8,
55 TCG_REG_RCX,
56 TCG_REG_RDX,
57 TCG_REG_RSI,
58 TCG_REG_RDI,
59 TCG_REG_RAX,
60 #else
61 TCG_REG_EBX,
62 TCG_REG_ESI,
63 TCG_REG_EDI,
64 TCG_REG_EBP,
65 TCG_REG_ECX,
66 TCG_REG_EDX,
67 TCG_REG_EAX,
68 #endif
69 TCG_REG_XMM0,
70 TCG_REG_XMM1,
71 TCG_REG_XMM2,
72 TCG_REG_XMM3,
73 TCG_REG_XMM4,
74 TCG_REG_XMM5,
75 #ifndef _WIN64
76 /* The Win64 ABI has xmm6-xmm15 as callee-saves, and we do not save
77 any of them. Therefore only allow xmm0-xmm5 to be allocated. */
78 TCG_REG_XMM6,
79 TCG_REG_XMM7,
80 #if TCG_TARGET_REG_BITS == 64
81 TCG_REG_XMM8,
82 TCG_REG_XMM9,
83 TCG_REG_XMM10,
84 TCG_REG_XMM11,
85 TCG_REG_XMM12,
86 TCG_REG_XMM13,
87 TCG_REG_XMM14,
88 TCG_REG_XMM15,
89 #endif
90 #endif
93 static const int tcg_target_call_iarg_regs[] = {
94 #if TCG_TARGET_REG_BITS == 64
95 #if defined(_WIN64)
96 TCG_REG_RCX,
97 TCG_REG_RDX,
98 #else
99 TCG_REG_RDI,
100 TCG_REG_RSI,
101 TCG_REG_RDX,
102 TCG_REG_RCX,
103 #endif
104 TCG_REG_R8,
105 TCG_REG_R9,
106 #else
107 /* 32 bit mode uses stack based calling convention (GCC default). */
108 #endif
111 static const int tcg_target_call_oarg_regs[] = {
112 TCG_REG_EAX,
113 #if TCG_TARGET_REG_BITS == 32
114 TCG_REG_EDX
115 #endif
118 /* Constants we accept. */
119 #define TCG_CT_CONST_S32 0x100
120 #define TCG_CT_CONST_U32 0x200
121 #define TCG_CT_CONST_I32 0x400
122 #define TCG_CT_CONST_WSZ 0x800
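/* These flags map to the constraint letters handled in
   target_parse_constraint() and tcg_target_const_match() below:
   S32 ('e'): constants representable as a sign-extended 32-bit immediate,
   U32 ('Z'): constants representable as a zero-extended 32-bit immediate,
   I32 ('I'): constants whose bitwise complement fits in 32 bits,
   WSZ ('W'): the operand-size constant itself, i.e. 32 or 64.  */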
124 /* Registers used with L constraint, which are the first argument
125 registers on x86_64, and two random call clobbered registers on
126 i386. */
127 #if TCG_TARGET_REG_BITS == 64
128 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
129 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
130 #else
131 # define TCG_REG_L0 TCG_REG_EAX
132 # define TCG_REG_L1 TCG_REG_EDX
133 #endif
135 /* The host compiler should supply <cpuid.h> to enable runtime feature
136 detection, since we do not go so far as to write the cpuid inline
137 assembly ourselves. If it is not available, default values will be assumed. */
138 #if defined(CONFIG_CPUID_H)
139 #include "qemu/cpuid.h"
140 #endif
142 /* For 64-bit, we always know that CMOV is available. */
143 #if TCG_TARGET_REG_BITS == 64
144 # define have_cmov 1
145 #elif defined(CONFIG_CPUID_H)
146 static bool have_cmov;
147 #else
148 # define have_cmov 0
149 #endif
151 /* We need these symbols in tcg-target.h, and we can't properly conditionalize
152 it there. Therefore we always define the variable. */
153 bool have_bmi1;
154 bool have_popcnt;
155 bool have_avx1;
156 bool have_avx2;
158 #ifdef CONFIG_CPUID_H
159 static bool have_movbe;
160 static bool have_bmi2;
161 static bool have_lzcnt;
162 #else
163 # define have_movbe 0
164 # define have_bmi2 0
165 # define have_lzcnt 0
166 #endif
168 static tcg_insn_unit *tb_ret_addr;
170 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
171 intptr_t value, intptr_t addend)
173 value += addend;
174 switch(type) {
175 case R_386_PC32:
176 value -= (uintptr_t)code_ptr;
177 if (value != (int32_t)value) {
178 return false;
180 /* FALLTHRU */
181 case R_386_32:
182 tcg_patch32(code_ptr, value);
183 break;
184 case R_386_PC8:
185 value -= (uintptr_t)code_ptr;
186 if (value != (int8_t)value) {
187 return false;
189 tcg_patch8(code_ptr, value);
190 break;
191 default:
192 tcg_abort();
194 return true;
197 #if TCG_TARGET_REG_BITS == 64
198 #define ALL_GENERAL_REGS 0x0000ffffu
199 #define ALL_VECTOR_REGS 0xffff0000u
200 #else
201 #define ALL_GENERAL_REGS 0x000000ffu
202 #define ALL_VECTOR_REGS 0x00ff0000u
203 #endif
205 /* parse target specific constraints */
206 static const char *target_parse_constraint(TCGArgConstraint *ct,
207 const char *ct_str, TCGType type)
209 switch(*ct_str++) {
210 case 'a':
211 ct->ct |= TCG_CT_REG;
212 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
213 break;
214 case 'b':
215 ct->ct |= TCG_CT_REG;
216 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
217 break;
218 case 'c':
219 ct->ct |= TCG_CT_REG;
220 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
221 break;
222 case 'd':
223 ct->ct |= TCG_CT_REG;
224 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
225 break;
226 case 'S':
227 ct->ct |= TCG_CT_REG;
228 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
229 break;
230 case 'D':
231 ct->ct |= TCG_CT_REG;
232 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
233 break;
234 case 'q':
235 /* A register that can be used as a byte operand. */
236 ct->ct |= TCG_CT_REG;
237 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
238 break;
239 case 'Q':
240 /* A register with an addressable second byte (e.g. %ah). */
241 ct->ct |= TCG_CT_REG;
242 ct->u.regs = 0xf;
243 break;
244 case 'r':
245 /* A general register. */
246 ct->ct |= TCG_CT_REG;
247 ct->u.regs |= ALL_GENERAL_REGS;
248 break;
249 case 'W':
250 /* With TZCNT/LZCNT, we can have operand-size as an input. */
251 ct->ct |= TCG_CT_CONST_WSZ;
252 break;
253 case 'x':
254 /* A vector register. */
255 ct->ct |= TCG_CT_REG;
256 ct->u.regs |= ALL_VECTOR_REGS;
257 break;
259 /* qemu_ld/st address constraint */
260 case 'L':
261 ct->ct |= TCG_CT_REG;
262 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
263 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
264 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
265 break;
267 case 'e':
268 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
269 break;
270 case 'Z':
271 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
272 break;
273 case 'I':
274 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
275 break;
277 default:
278 return NULL;
280 return ct_str;
283 /* test if a constant matches the constraint */
284 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
285 const TCGArgConstraint *arg_ct)
287 int ct = arg_ct->ct;
288 if (ct & TCG_CT_CONST) {
289 return 1;
291 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
292 return 1;
294 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
295 return 1;
297 if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
298 return 1;
300 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
301 return 1;
303 return 0;
306 # define LOWREGMASK(x) ((x) & 7)
308 #define P_EXT 0x100 /* 0x0f opcode prefix */
309 #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
310 #define P_DATA16 0x400 /* 0x66 opcode prefix */
311 #if TCG_TARGET_REG_BITS == 64
312 # define P_REXW 0x1000 /* Set REX.W = 1 */
313 # define P_REXB_R 0x2000 /* REG field as byte register */
314 # define P_REXB_RM 0x4000 /* R/M field as byte register */
315 # define P_GS 0x8000 /* gs segment override */
316 #else
317 # define P_REXW 0
318 # define P_REXB_R 0
319 # define P_REXB_RM 0
320 # define P_GS 0
321 #endif
322 #define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */
323 #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */
324 #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */
325 #define P_VEXL 0x80000 /* Set VEX.L = 1 */
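/* The OPC_* values below are the primary opcode byte ORed with the prefix
   flags above; tcg_out_opc() peels the flags back off when emitting.  For
   example, OPC_MOVZWL (0xb7 | P_EXT) is emitted as 0f b7, and a P_DATA16
   flag would additionally prepend the 0x66 operand-size prefix.  */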
327 #define OPC_ARITH_EvIz (0x81)
328 #define OPC_ARITH_EvIb (0x83)
329 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
330 #define OPC_ANDN (0xf2 | P_EXT38)
331 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
332 #define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16)
333 #define OPC_BSF (0xbc | P_EXT)
334 #define OPC_BSR (0xbd | P_EXT)
335 #define OPC_BSWAP (0xc8 | P_EXT)
336 #define OPC_CALL_Jz (0xe8)
337 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
338 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
339 #define OPC_DEC_r32 (0x48)
340 #define OPC_IMUL_GvEv (0xaf | P_EXT)
341 #define OPC_IMUL_GvEvIb (0x6b)
342 #define OPC_IMUL_GvEvIz (0x69)
343 #define OPC_INC_r32 (0x40)
344 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
345 #define OPC_JCC_short (0x70) /* ... plus condition code */
346 #define OPC_JMP_long (0xe9)
347 #define OPC_JMP_short (0xeb)
348 #define OPC_LEA (0x8d)
349 #define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3)
350 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
351 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
352 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
353 #define OPC_MOVB_EvIz (0xc6)
354 #define OPC_MOVL_EvIz (0xc7)
355 #define OPC_MOVL_Iv (0xb8)
356 #define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
357 #define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
358 #define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16)
359 #define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16)
360 #define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2)
361 #define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
362 #define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
363 #define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
364 #define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
365 #define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3)
366 #define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16)
367 #define OPC_MOVSBL (0xbe | P_EXT)
368 #define OPC_MOVSWL (0xbf | P_EXT)
369 #define OPC_MOVSLQ (0x63 | P_REXW)
370 #define OPC_MOVZBL (0xb6 | P_EXT)
371 #define OPC_MOVZWL (0xb7 | P_EXT)
372 #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
373 #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
374 #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
375 #define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16)
376 #define OPC_PADDB (0xfc | P_EXT | P_DATA16)
377 #define OPC_PADDW (0xfd | P_EXT | P_DATA16)
378 #define OPC_PADDD (0xfe | P_EXT | P_DATA16)
379 #define OPC_PADDQ (0xd4 | P_EXT | P_DATA16)
380 #define OPC_PAND (0xdb | P_EXT | P_DATA16)
381 #define OPC_PANDN (0xdf | P_EXT | P_DATA16)
382 #define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16)
383 #define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16)
384 #define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16)
385 #define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16)
386 #define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16)
387 #define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16)
388 #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16)
389 #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16)
390 #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16)
391 #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
392 #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
393 #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
394 #define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16)
395 #define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16)
396 #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
397 #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
398 #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
399 #define OPC_POR (0xeb | P_EXT | P_DATA16)
400 #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
401 #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
402 #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2)
403 #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3)
404 #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
405 #define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
406 #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
407 #define OPC_PSUBB (0xf8 | P_EXT | P_DATA16)
408 #define OPC_PSUBW (0xf9 | P_EXT | P_DATA16)
409 #define OPC_PSUBD (0xfa | P_EXT | P_DATA16)
410 #define OPC_PSUBQ (0xfb | P_EXT | P_DATA16)
411 #define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16)
412 #define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16)
413 #define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16)
414 #define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16)
415 #define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16)
416 #define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16)
417 #define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16)
418 #define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16)
419 #define OPC_PXOR (0xef | P_EXT | P_DATA16)
420 #define OPC_POP_r32 (0x58)
421 #define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3)
422 #define OPC_PUSH_r32 (0x50)
423 #define OPC_PUSH_Iv (0x68)
424 #define OPC_PUSH_Ib (0x6a)
425 #define OPC_RET (0xc3)
426 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
427 #define OPC_SHIFT_1 (0xd1)
428 #define OPC_SHIFT_Ib (0xc1)
429 #define OPC_SHIFT_cl (0xd3)
430 #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
431 #define OPC_SHUFPS (0xc6 | P_EXT)
432 #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
433 #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
434 #define OPC_TESTL (0x85)
435 #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
436 #define OPC_UD2 (0x0b | P_EXT)
437 #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
438 #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
439 #define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
440 #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
441 #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
442 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
443 #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
444 #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
445 #define OPC_VZEROUPPER (0x77 | P_EXT)
446 #define OPC_XCHG_ax_r32 (0x90)
448 #define OPC_GRP3_Ev (0xf7)
449 #define OPC_GRP5 (0xff)
450 #define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
452 /* Group 1 opcode extensions for 0x80-0x83.
453 These are also used as modifiers for OPC_ARITH. */
454 #define ARITH_ADD 0
455 #define ARITH_OR 1
456 #define ARITH_ADC 2
457 #define ARITH_SBB 3
458 #define ARITH_AND 4
459 #define ARITH_SUB 5
460 #define ARITH_XOR 6
461 #define ARITH_CMP 7
463 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
464 #define SHIFT_ROL 0
465 #define SHIFT_ROR 1
466 #define SHIFT_SHL 4
467 #define SHIFT_SHR 5
468 #define SHIFT_SAR 7
470 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
471 #define EXT3_NOT 2
472 #define EXT3_NEG 3
473 #define EXT3_MUL 4
474 #define EXT3_IMUL 5
475 #define EXT3_DIV 6
476 #define EXT3_IDIV 7
478 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
479 #define EXT5_INC_Ev 0
480 #define EXT5_DEC_Ev 1
481 #define EXT5_CALLN_Ev 2
482 #define EXT5_JMPN_Ev 4
484 /* Condition codes to be added to OPC_JCC_{long,short}. */
485 #define JCC_JMP (-1)
486 #define JCC_JO 0x0
487 #define JCC_JNO 0x1
488 #define JCC_JB 0x2
489 #define JCC_JAE 0x3
490 #define JCC_JE 0x4
491 #define JCC_JNE 0x5
492 #define JCC_JBE 0x6
493 #define JCC_JA 0x7
494 #define JCC_JS 0x8
495 #define JCC_JNS 0x9
496 #define JCC_JP 0xa
497 #define JCC_JNP 0xb
498 #define JCC_JL 0xc
499 #define JCC_JGE 0xd
500 #define JCC_JLE 0xe
501 #define JCC_JG 0xf
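/* For example, OPC_JCC_long + JCC_JNE emits 0f 85 <rel32>, while
   OPC_JCC_short + JCC_JNE emits 75 <rel8>.  */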
503 static const uint8_t tcg_cond_to_jcc[] = {
504 [TCG_COND_EQ] = JCC_JE,
505 [TCG_COND_NE] = JCC_JNE,
506 [TCG_COND_LT] = JCC_JL,
507 [TCG_COND_GE] = JCC_JGE,
508 [TCG_COND_LE] = JCC_JLE,
509 [TCG_COND_GT] = JCC_JG,
510 [TCG_COND_LTU] = JCC_JB,
511 [TCG_COND_GEU] = JCC_JAE,
512 [TCG_COND_LEU] = JCC_JBE,
513 [TCG_COND_GTU] = JCC_JA,
516 #if TCG_TARGET_REG_BITS == 64
517 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
519 int rex;
521 if (opc & P_GS) {
522 tcg_out8(s, 0x65);
524 if (opc & P_DATA16) {
525 /* We should never be asking for both 16 and 64-bit operation. */
526 tcg_debug_assert((opc & P_REXW) == 0);
527 tcg_out8(s, 0x66);
529 if (opc & P_SIMDF3) {
530 tcg_out8(s, 0xf3);
531 } else if (opc & P_SIMDF2) {
532 tcg_out8(s, 0xf2);
535 rex = 0;
536 rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
537 rex |= (r & 8) >> 1; /* REX.R */
538 rex |= (x & 8) >> 2; /* REX.X */
539 rex |= (rm & 8) >> 3; /* REX.B */
541 /* P_REXB_{R,RM} indicates that the given register is the low byte.
542 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
543 as otherwise the encoding indicates %[abcd]h. Note that the values
544 that are ORed in merely indicate that the REX byte must be present;
545 those bits get discarded in output. */
546 rex |= opc & (r >= 4 ? P_REXB_R : 0);
547 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
549 if (rex) {
550 tcg_out8(s, (uint8_t)(rex | 0x40));
553 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
554 tcg_out8(s, 0x0f);
555 if (opc & P_EXT38) {
556 tcg_out8(s, 0x38);
557 } else if (opc & P_EXT3A) {
558 tcg_out8(s, 0x3a);
562 tcg_out8(s, opc);
564 #else
565 static void tcg_out_opc(TCGContext *s, int opc)
567 if (opc & P_DATA16) {
568 tcg_out8(s, 0x66);
570 if (opc & P_SIMDF3) {
571 tcg_out8(s, 0xf3);
572 } else if (opc & P_SIMDF2) {
573 tcg_out8(s, 0xf2);
575 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
576 tcg_out8(s, 0x0f);
577 if (opc & P_EXT38) {
578 tcg_out8(s, 0x38);
579 } else if (opc & P_EXT3A) {
580 tcg_out8(s, 0x3a);
583 tcg_out8(s, opc);
585 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
586 the 32-bit compilation paths. This method works with all versions of gcc,
587 whereas relying on optimization may not be able to exclude them. */
588 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
589 #endif
591 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
593 tcg_out_opc(s, opc, r, rm, 0);
594 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
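/* For example, on a 64-bit host,
   tcg_out_modrm(s, OPC_MOVL_GvEv | P_REXW, TCG_REG_RAX, TCG_REG_R12)
   emits 49 8b c4, i.e. movq %r12, %rax.  */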
597 static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
598 int rm, int index)
600 int tmp;
602 /* Use the two byte form if possible, which cannot encode
603 VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
604 if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
605 && ((rm | index) & 8) == 0) {
606 /* Two byte VEX prefix. */
607 tcg_out8(s, 0xc5);
609 tmp = (r & 8 ? 0 : 0x80); /* VEX.R */
610 } else {
611 /* Three byte VEX prefix. */
612 tcg_out8(s, 0xc4);
614 /* VEX.m-mmmm */
615 if (opc & P_EXT3A) {
616 tmp = 3;
617 } else if (opc & P_EXT38) {
618 tmp = 2;
619 } else if (opc & P_EXT) {
620 tmp = 1;
621 } else {
622 g_assert_not_reached();
624 tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */
625 tmp |= (index & 8 ? 0 : 0x40); /* VEX.X */
626 tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
627 tcg_out8(s, tmp);
629 tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
632 tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
633 /* VEX.pp */
634 if (opc & P_DATA16) {
635 tmp |= 1; /* 0x66 */
636 } else if (opc & P_SIMDF3) {
637 tmp |= 2; /* 0xf3 */
638 } else if (opc & P_SIMDF2) {
639 tmp |= 3; /* 0xf2 */
641 tmp |= (~v & 15) << 3; /* VEX.vvvv */
642 tcg_out8(s, tmp);
643 tcg_out8(s, opc);
646 static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
648 tcg_out_vex_opc(s, opc, r, v, rm, 0);
649 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
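/* For example, tcg_out_vex_modrm(s, OPC_PXOR, TCG_REG_XMM0, TCG_REG_XMM0,
   TCG_REG_XMM0) emits c5 f9 ef c0, i.e. vpxor %xmm0, %xmm0, %xmm0.  */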
652 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
653 We handle either RM and INDEX missing with a negative value. In 64-bit
654 mode for absolute addresses, ~RM is the size of the immediate operand
655 that will follow the instruction. */
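/* For example, tcg_out_modrm_offset(s, OPC_MOVL_GvEv, TCG_REG_EAX,
   TCG_REG_EBX, 0x10) emits 8b 43 10 (movl 0x10(%ebx), %eax), while adding
   index TCG_REG_ECX with shift 2 forces the SIB form 8b 44 8b 10
   (movl 0x10(%ebx,%ecx,4), %eax).  */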
657 static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
658 int shift, intptr_t offset)
660 int mod, len;
662 if (index < 0 && rm < 0) {
663 if (TCG_TARGET_REG_BITS == 64) {
664 /* Try for a rip-relative addressing mode. This has replaced
665 the 32-bit-mode absolute addressing encoding. */
666 intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
667 intptr_t disp = offset - pc;
668 if (disp == (int32_t)disp) {
669 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
670 tcg_out32(s, disp);
671 return;
674 /* Try for an absolute address encoding. This requires the
675 use of the MODRM+SIB encoding and is therefore larger than
676 rip-relative addressing. */
677 if (offset == (int32_t)offset) {
678 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
679 tcg_out8(s, (4 << 3) | 5);
680 tcg_out32(s, offset);
681 return;
684 /* ??? The memory isn't directly addressable. */
685 g_assert_not_reached();
686 } else {
687 /* Absolute address. */
688 tcg_out8(s, (r << 3) | 5);
689 tcg_out32(s, offset);
690 return;
694 /* Find the length of the immediate addend. Note that the encoding
695 that would be used for (%ebp) indicates absolute addressing. */
696 if (rm < 0) {
697 mod = 0, len = 4, rm = 5;
698 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
699 mod = 0, len = 0;
700 } else if (offset == (int8_t)offset) {
701 mod = 0x40, len = 1;
702 } else {
703 mod = 0x80, len = 4;
706 /* Use a single byte MODRM format if possible. Note that the encoding
707 that would be used for %esp is the escape to the two byte form. */
708 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
709 /* Single byte MODRM format. */
710 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
711 } else {
712 /* Two byte MODRM+SIB format. */
714 /* Note that the encoding that would place %esp into the index
715 field indicates no index register. In 64-bit mode, the REX.X
716 bit counts, so %r12 can be used as the index. */
717 if (index < 0) {
718 index = 4;
719 } else {
720 tcg_debug_assert(index != TCG_REG_ESP);
723 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
724 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
727 if (len == 1) {
728 tcg_out8(s, offset);
729 } else if (len == 4) {
730 tcg_out32(s, offset);
734 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
735 int index, int shift, intptr_t offset)
737 tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
738 tcg_out_sib_offset(s, r, rm, index, shift, offset);
741 static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
742 int rm, int index, int shift,
743 intptr_t offset)
745 tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
746 tcg_out_sib_offset(s, r, rm, index, shift, offset);
749 /* A simplification of the above with no index or shift. */
750 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
751 int rm, intptr_t offset)
753 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
756 static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
757 int v, int rm, intptr_t offset)
759 tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
762 /* Output an opcode with an expected reference to the constant pool. */
763 static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
765 tcg_out_opc(s, opc, r, 0, 0);
766 /* Absolute for 32-bit, pc-relative for 64-bit. */
767 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
768 tcg_out32(s, 0);
771 /* Output an opcode with an expected reference to the constant pool. */
772 static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
774 tcg_out_vex_opc(s, opc, r, 0, 0, 0);
775 /* Absolute for 32-bit, pc-relative for 64-bit. */
776 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
777 tcg_out32(s, 0);
780 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
781 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
783 /* Propagate an opcode prefix, such as P_REXW. */
784 int ext = subop & ~0x7;
785 subop &= 0x7;
787 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
790 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
792 int rexw = 0;
794 if (arg == ret) {
795 return;
797 switch (type) {
798 case TCG_TYPE_I64:
799 rexw = P_REXW;
800 /* fallthru */
801 case TCG_TYPE_I32:
802 if (ret < 16) {
803 if (arg < 16) {
804 tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
805 } else {
806 tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
808 } else {
809 if (arg < 16) {
810 tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
811 } else {
812 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
815 break;
817 case TCG_TYPE_V64:
818 tcg_debug_assert(ret >= 16 && arg >= 16);
819 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
820 break;
821 case TCG_TYPE_V128:
822 tcg_debug_assert(ret >= 16 && arg >= 16);
823 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
824 break;
825 case TCG_TYPE_V256:
826 tcg_debug_assert(ret >= 16 && arg >= 16);
827 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
828 break;
830 default:
831 g_assert_not_reached();
835 static void tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
836 TCGReg r, TCGReg a)
838 if (have_avx2) {
839 static const int dup_insn[4] = {
840 OPC_VPBROADCASTB, OPC_VPBROADCASTW,
841 OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
843 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
844 tcg_out_vex_modrm(s, dup_insn[vece] + vex_l, r, 0, a);
845 } else {
846 switch (vece) {
847 case MO_8:
848 /* ??? With zero in a register, use PSHUFB. */
849 tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
850 a = r;
851 /* FALLTHRU */
852 case MO_16:
853 tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
854 a = r;
855 /* FALLTHRU */
856 case MO_32:
857 tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
858 /* imm8 operand: all output lanes selected from input lane 0. */
859 tcg_out8(s, 0);
860 break;
861 case MO_64:
862 tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
863 break;
864 default:
865 g_assert_not_reached();
870 static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
871 TCGReg ret, tcg_target_long arg)
873 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
875 if (arg == 0) {
876 tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
877 return;
879 if (arg == -1) {
880 tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
881 return;
884 if (TCG_TARGET_REG_BITS == 64) {
885 if (type == TCG_TYPE_V64) {
886 tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
887 } else if (have_avx2) {
888 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
889 } else {
890 tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
892 new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
893 } else if (have_avx2) {
894 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
895 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
896 } else {
897 tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy, ret);
898 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
899 tcg_out_dup_vec(s, type, MO_32, ret, ret);
903 static void tcg_out_movi(TCGContext *s, TCGType type,
904 TCGReg ret, tcg_target_long arg)
906 tcg_target_long diff;
908 switch (type) {
909 case TCG_TYPE_I32:
910 #if TCG_TARGET_REG_BITS == 64
911 case TCG_TYPE_I64:
912 #endif
913 if (ret < 16) {
914 break;
916 /* fallthru */
917 case TCG_TYPE_V64:
918 case TCG_TYPE_V128:
919 case TCG_TYPE_V256:
920 tcg_debug_assert(ret >= 16);
921 tcg_out_dupi_vec(s, type, ret, arg);
922 return;
923 default:
924 g_assert_not_reached();
927 if (arg == 0) {
928 tgen_arithr(s, ARITH_XOR, ret, ret);
929 return;
931 if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
932 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
933 tcg_out32(s, arg);
934 return;
936 if (arg == (int32_t)arg) {
937 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
938 tcg_out32(s, arg);
939 return;
942 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
943 diff = arg - ((uintptr_t)s->code_ptr + 7);
944 if (diff == (int32_t)diff) {
945 tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
946 tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
947 tcg_out32(s, diff);
948 return;
951 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
952 tcg_out64(s, arg);
955 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
957 if (val == (int8_t)val) {
958 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
959 tcg_out8(s, val);
960 } else if (val == (int32_t)val) {
961 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
962 tcg_out32(s, val);
963 } else {
964 tcg_abort();
968 static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
970 /* Given the strength of x86 memory ordering, we need only care about
971 store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
972 faster than "mfence", so don't bother with the SSE insn. */
973 if (a0 & TCG_MO_ST_LD) {
974 tcg_out8(s, 0xf0);
975 tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
976 tcg_out8(s, 0);
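        /* The three outputs above assemble to f0 83 0c 24 00,
           i.e. lock orl $0, (%esp).  */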
980 static inline void tcg_out_push(TCGContext *s, int reg)
982 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
985 static inline void tcg_out_pop(TCGContext *s, int reg)
987 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
990 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
991 TCGReg arg1, intptr_t arg2)
993 switch (type) {
994 case TCG_TYPE_I32:
995 if (ret < 16) {
996 tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
997 } else {
998 tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
1000 break;
1001 case TCG_TYPE_I64:
1002 if (ret < 16) {
1003 tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
1004 break;
1006 /* FALLTHRU */
1007 case TCG_TYPE_V64:
1008 tcg_debug_assert(ret >= 16);
1009 tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
1010 break;
1011 case TCG_TYPE_V128:
1012 tcg_debug_assert(ret >= 16);
1013 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx, ret, 0, arg1, arg2);
1014 break;
1015 case TCG_TYPE_V256:
1016 tcg_debug_assert(ret >= 16);
1017 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
1018 ret, 0, arg1, arg2);
1019 break;
1020 default:
1021 g_assert_not_reached();
1025 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1026 TCGReg arg1, intptr_t arg2)
1028 switch (type) {
1029 case TCG_TYPE_I32:
1030 if (arg < 16) {
1031 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
1032 } else {
1033 tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
1035 break;
1036 case TCG_TYPE_I64:
1037 if (arg < 16) {
1038 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
1039 break;
1041 /* FALLTHRU */
1042 case TCG_TYPE_V64:
1043 tcg_debug_assert(arg >= 16);
1044 tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
1045 break;
1046 case TCG_TYPE_V128:
1047 tcg_debug_assert(arg >= 16);
1048 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx, arg, 0, arg1, arg2);
1049 break;
1050 case TCG_TYPE_V256:
1051 tcg_debug_assert(arg >= 16);
1052 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
1053 arg, 0, arg1, arg2);
1054 break;
1055 default:
1056 g_assert_not_reached();
1060 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1061 TCGReg base, intptr_t ofs)
1063 int rexw = 0;
1064 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
1065 if (val != (int32_t)val) {
1066 return false;
1068 rexw = P_REXW;
1069 } else if (type != TCG_TYPE_I32) {
1070 return false;
1072 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
1073 tcg_out32(s, val);
1074 return true;
1077 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
1079 /* Propagate an opcode prefix, such as P_DATA16. */
1080 int ext = subopc & ~0x7;
1081 subopc &= 0x7;
1083 if (count == 1) {
1084 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
1085 } else {
1086 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
1087 tcg_out8(s, count);
1091 static inline void tcg_out_bswap32(TCGContext *s, int reg)
1093 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
1096 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
1098 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
1101 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
1103 /* movzbl */
1104 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1105 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
1108 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
1110 /* movsbl */
1111 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1112 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
1115 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
1117 /* movzwl */
1118 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
1121 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
1123 /* movsw[lq] */
1124 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
1127 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
1129 /* 32-bit mov zero extends. */
1130 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
1133 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
1135 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
1138 static inline void tcg_out_bswap64(TCGContext *s, int reg)
1140 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
1143 static void tgen_arithi(TCGContext *s, int c, int r0,
1144 tcg_target_long val, int cf)
1146 int rexw = 0;
1148 if (TCG_TARGET_REG_BITS == 64) {
1149 rexw = c & -8;
1150 c &= 7;
1153 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
1154 partial flags update stalls on Pentium4 and are not recommended
1155 by current Intel optimization manuals. */
1156 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
1157 int is_inc = (c == ARITH_ADD) ^ (val < 0);
1158 if (TCG_TARGET_REG_BITS == 64) {
1159 /* The single-byte increment encodings are re-tasked as the
1160 REX prefixes. Use the MODRM encoding. */
1161 tcg_out_modrm(s, OPC_GRP5 + rexw,
1162 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
1163 } else {
1164 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
1166 return;
1169 if (c == ARITH_AND) {
1170 if (TCG_TARGET_REG_BITS == 64) {
1171 if (val == 0xffffffffu) {
1172 tcg_out_ext32u(s, r0, r0);
1173 return;
1175 if (val == (uint32_t)val) {
1176 /* AND with no high bits set can use a 32-bit operation. */
1177 rexw = 0;
1180 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
1181 tcg_out_ext8u(s, r0, r0);
1182 return;
1184 if (val == 0xffffu) {
1185 tcg_out_ext16u(s, r0, r0);
1186 return;
1190 if (val == (int8_t)val) {
1191 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
1192 tcg_out8(s, val);
1193 return;
1195 if (rexw == 0 || val == (int32_t)val) {
1196 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
1197 tcg_out32(s, val);
1198 return;
1201 tcg_abort();
1204 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1206 if (val != 0) {
1207 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
1211 /* Use SMALL != 0 to force a short forward branch. */
1212 static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
1214 int32_t val, val1;
1216 if (l->has_value) {
1217 val = tcg_pcrel_diff(s, l->u.value_ptr);
1218 val1 = val - 2;
1219 if ((int8_t)val1 == val1) {
1220 if (opc == -1) {
1221 tcg_out8(s, OPC_JMP_short);
1222 } else {
1223 tcg_out8(s, OPC_JCC_short + opc);
1225 tcg_out8(s, val1);
1226 } else {
1227 if (small) {
1228 tcg_abort();
1230 if (opc == -1) {
1231 tcg_out8(s, OPC_JMP_long);
1232 tcg_out32(s, val - 5);
1233 } else {
1234 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1235 tcg_out32(s, val - 6);
1238 } else if (small) {
1239 if (opc == -1) {
1240 tcg_out8(s, OPC_JMP_short);
1241 } else {
1242 tcg_out8(s, OPC_JCC_short + opc);
1244 tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
1245 s->code_ptr += 1;
1246 } else {
1247 if (opc == -1) {
1248 tcg_out8(s, OPC_JMP_long);
1249 } else {
1250 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1252 tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
1253 s->code_ptr += 4;
1257 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
1258 int const_arg2, int rexw)
1260 if (const_arg2) {
1261 if (arg2 == 0) {
1262 /* test r, r */
1263 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
1264 } else {
1265 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
1267 } else {
1268 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
1272 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
1273 TCGArg arg1, TCGArg arg2, int const_arg2,
1274 TCGLabel *label, int small)
1276 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1277 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1280 #if TCG_TARGET_REG_BITS == 64
1281 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
1282 TCGArg arg1, TCGArg arg2, int const_arg2,
1283 TCGLabel *label, int small)
1285 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1286 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1288 #else
1289 /* XXX: we implement it at the target level to avoid having to
1290 handle cross basic blocks temporaries */
1291 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1292 const int *const_args, int small)
1294 TCGLabel *label_next = gen_new_label();
1295 TCGLabel *label_this = arg_label(args[5]);
1297 switch(args[4]) {
1298 case TCG_COND_EQ:
1299 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1300 label_next, 1);
1301 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
1302 label_this, small);
1303 break;
1304 case TCG_COND_NE:
1305 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1306 label_this, small);
1307 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
1308 label_this, small);
1309 break;
1310 case TCG_COND_LT:
1311 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1312 label_this, small);
1313 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1314 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1315 label_this, small);
1316 break;
1317 case TCG_COND_LE:
1318 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1319 label_this, small);
1320 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1321 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1322 label_this, small);
1323 break;
1324 case TCG_COND_GT:
1325 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1326 label_this, small);
1327 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1328 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1329 label_this, small);
1330 break;
1331 case TCG_COND_GE:
1332 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1333 label_this, small);
1334 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1335 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1336 label_this, small);
1337 break;
1338 case TCG_COND_LTU:
1339 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1340 label_this, small);
1341 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1342 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1343 label_this, small);
1344 break;
1345 case TCG_COND_LEU:
1346 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1347 label_this, small);
1348 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1349 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1350 label_this, small);
1351 break;
1352 case TCG_COND_GTU:
1353 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1354 label_this, small);
1355 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1356 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1357 label_this, small);
1358 break;
1359 case TCG_COND_GEU:
1360 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1361 label_this, small);
1362 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1363 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1364 label_this, small);
1365 break;
1366 default:
1367 tcg_abort();
1369 tcg_out_label(s, label_next, s->code_ptr);
1371 #endif
1373 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
1374 TCGArg arg1, TCGArg arg2, int const_arg2)
1376 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1377 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1378 tcg_out_ext8u(s, dest, dest);
1381 #if TCG_TARGET_REG_BITS == 64
1382 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
1383 TCGArg arg1, TCGArg arg2, int const_arg2)
1385 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1386 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1387 tcg_out_ext8u(s, dest, dest);
1389 #else
1390 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1391 const int *const_args)
1393 TCGArg new_args[6];
1394 TCGLabel *label_true, *label_over;
1396 memcpy(new_args, args+1, 5*sizeof(TCGArg));
1398 if (args[0] == args[1] || args[0] == args[2]
1399 || (!const_args[3] && args[0] == args[3])
1400 || (!const_args[4] && args[0] == args[4])) {
1401 /* When the destination overlaps with one of the argument
1402 registers, don't do anything tricky. */
1403 label_true = gen_new_label();
1404 label_over = gen_new_label();
1406 new_args[5] = label_arg(label_true);
1407 tcg_out_brcond2(s, new_args, const_args+1, 1);
1409 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1410 tcg_out_jxx(s, JCC_JMP, label_over, 1);
1411 tcg_out_label(s, label_true, s->code_ptr);
1413 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
1414 tcg_out_label(s, label_over, s->code_ptr);
1415 } else {
1416 /* When the destination does not overlap one of the arguments,
1417 clear the destination first, jump if cond false, and emit an
1418 increment in the true case. This results in smaller code. */
1420 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1422 label_over = gen_new_label();
1423 new_args[4] = tcg_invert_cond(new_args[4]);
1424 new_args[5] = label_arg(label_over);
1425 tcg_out_brcond2(s, new_args, const_args+1, 1);
1427 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
1428 tcg_out_label(s, label_over, s->code_ptr);
1431 #endif
1433 static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
1434 TCGReg dest, TCGReg v1)
1436 if (have_cmov) {
1437 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
1438 } else {
1439 TCGLabel *over = gen_new_label();
1440 tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
1441 tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
1442 tcg_out_label(s, over, s->code_ptr);
1446 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
1447 TCGReg c1, TCGArg c2, int const_c2,
1448 TCGReg v1)
1450 tcg_out_cmp(s, c1, c2, const_c2, 0);
1451 tcg_out_cmov(s, cond, 0, dest, v1);
1454 #if TCG_TARGET_REG_BITS == 64
1455 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
1456 TCGReg c1, TCGArg c2, int const_c2,
1457 TCGReg v1)
1459 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
1460 tcg_out_cmov(s, cond, P_REXW, dest, v1);
1462 #endif
1464 static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1465 TCGArg arg2, bool const_a2)
1467 if (have_bmi1) {
1468 tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
1469 if (const_a2) {
1470 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1471 } else {
1472 tcg_debug_assert(dest != arg2);
1473 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1475 } else {
1476 tcg_debug_assert(dest != arg2);
1477 tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
1478 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1482 static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1483 TCGArg arg2, bool const_a2)
1485 if (have_lzcnt) {
1486 tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
1487 if (const_a2) {
1488 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1489 } else {
1490 tcg_debug_assert(dest != arg2);
1491 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1493 } else {
1494 tcg_debug_assert(!const_a2);
1495 tcg_debug_assert(dest != arg1);
1496 tcg_debug_assert(dest != arg2);
1498 /* Recall that the output of BSR is the index not the count. */
1499 tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
1500 tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
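        /* For 0 <= index < bits, XOR with bits-1 is the same as computing
           (bits-1) - index, which turns BSR's bit index into the count of
           leading zeros.  */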
1502 /* Since we have destroyed the flags from BSR, we have to re-test. */
1503 tcg_out_cmp(s, arg1, 0, 1, rexw);
1504 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1508 static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
1510 intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
1512 if (disp == (int32_t)disp) {
1513 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1514 tcg_out32(s, disp);
1515 } else {
1516 /* rip-relative addressing into the constant pool.
1517 This is 6 + 8 = 14 bytes, as compared to using
1518 an immediate load 10 + 6 = 16 bytes, plus we may
1519 be able to re-use the pool constant for more calls. */
1520 tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
1521 tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
1522 new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
1523 tcg_out32(s, 0);
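        /* That is ff 15 <rel32> (call) or ff 25 <rel32> (jmp), an indirect
           branch through a rip-relative 8-byte pool slot holding DEST.  */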
1527 static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1529 tcg_out_branch(s, 1, dest);
1532 static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
1534 tcg_out_branch(s, 0, dest);
1537 static void tcg_out_nopn(TCGContext *s, int n)
1539 int i;
1540 /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
1541 * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
1542 * duplicate prefix, and all of the interesting recent cores can
1543 * decode and discard the duplicates in a single cycle.
1544 */
1545 tcg_debug_assert(n >= 1);
1546 for (i = 1; i < n; ++i) {
1547 tcg_out8(s, 0x66);
1549 tcg_out8(s, 0x90);
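    /* For example, tcg_out_nopn(s, 3) emits 66 66 90.  */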
1552 #if defined(CONFIG_SOFTMMU)
1553 #include "tcg-ldst.inc.c"
1555 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1556 * int mmu_idx, uintptr_t ra)
1557 */
1558 static void * const qemu_ld_helpers[16] = {
1559 [MO_UB] = helper_ret_ldub_mmu,
1560 [MO_LEUW] = helper_le_lduw_mmu,
1561 [MO_LEUL] = helper_le_ldul_mmu,
1562 [MO_LEQ] = helper_le_ldq_mmu,
1563 [MO_BEUW] = helper_be_lduw_mmu,
1564 [MO_BEUL] = helper_be_ldul_mmu,
1565 [MO_BEQ] = helper_be_ldq_mmu,
1568 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1569 * uintxx_t val, int mmu_idx, uintptr_t ra)
1570 */
1571 static void * const qemu_st_helpers[16] = {
1572 [MO_UB] = helper_ret_stb_mmu,
1573 [MO_LEUW] = helper_le_stw_mmu,
1574 [MO_LEUL] = helper_le_stl_mmu,
1575 [MO_LEQ] = helper_le_stq_mmu,
1576 [MO_BEUW] = helper_be_stw_mmu,
1577 [MO_BEUL] = helper_be_stl_mmu,
1578 [MO_BEQ] = helper_be_stq_mmu,
1581 /* Perform the TLB load and compare.
1583 Inputs:
1584 ADDRLO and ADDRHI contain the low and high part of the address.
1586 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1588 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1589 This should be offsetof addr_read or addr_write.
1591 Outputs:
1592 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1593 positions of the displacements of forward jumps to the TLB miss case.
1595 Second argument register is loaded with the low part of the address.
1596 In the TLB hit case, it has been adjusted as indicated by the TLB
1597 and so is a host address. In the TLB miss case, it continues to
1598 hold a guest address.
1600 First argument register is clobbered. */
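/* Roughly, for a 64-bit guest on a 64-bit host the sequence emitted below is:
       mov   addrlo, r0
       lea   (s_mask - a_mask)(addrlo), r1      # or mov, if a_bits >= s_bits
       shr   $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), r0
       and   $(TARGET_PAGE_MASK | a_mask), r1
       and   $((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), r0
       lea   tlb_table[mem_index]+which(env, r0), r0
       cmp   0(r0), r1
       mov   addrlo, r1
       jne   slow_path
       add   addend-which(r0), r1               # r1 is now the host address
   */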
1602 static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1603 int mem_index, TCGMemOp opc,
1604 tcg_insn_unit **label_ptr, int which)
1606 const TCGReg r0 = TCG_REG_L0;
1607 const TCGReg r1 = TCG_REG_L1;
1608 TCGType ttype = TCG_TYPE_I32;
1609 TCGType tlbtype = TCG_TYPE_I32;
1610 int trexw = 0, hrexw = 0, tlbrexw = 0;
1611 unsigned a_bits = get_alignment_bits(opc);
1612 unsigned s_bits = opc & MO_SIZE;
1613 unsigned a_mask = (1 << a_bits) - 1;
1614 unsigned s_mask = (1 << s_bits) - 1;
1615 target_ulong tlb_mask;
1617 if (TCG_TARGET_REG_BITS == 64) {
1618 if (TARGET_LONG_BITS == 64) {
1619 ttype = TCG_TYPE_I64;
1620 trexw = P_REXW;
1622 if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1623 hrexw = P_REXW;
1624 if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
1625 tlbtype = TCG_TYPE_I64;
1626 tlbrexw = P_REXW;
1631 tcg_out_mov(s, tlbtype, r0, addrlo);
1632 /* If the required alignment is at least as large as the access, simply
1633 copy the address and mask. For lesser alignments, check that we don't
1634 cross pages for the complete access. */
1635 if (a_bits >= s_bits) {
1636 tcg_out_mov(s, ttype, r1, addrlo);
1637 } else {
1638 tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
1640 tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
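    /* When a_bits < s_bits, the LEA above added s_mask - a_mask, so an access
       that would cross a page boundary lands on the next page and the masked
       compare against the TLB tag fails; an address that is not a_bits-aligned
       leaves nonzero bits under a_mask, with the same effect.  */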
1642 tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
1643 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1645 tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
1646 tgen_arithi(s, ARITH_AND + tlbrexw, r0,
1647 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
1649 tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
1650 offsetof(CPUArchState, tlb_table[mem_index][0])
1651 + which);
1653 /* cmp 0(r0), r1 */
1654 tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
1656 /* Prepare for both the fast path add of the tlb addend, and the slow
1657 path function argument setup. */
1658 tcg_out_mov(s, ttype, r1, addrlo);
1660 /* jne slow_path */
1661 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1662 label_ptr[0] = s->code_ptr;
1663 s->code_ptr += 4;
1665 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1666 /* cmp 4(r0), addrhi */
1667 tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
1669 /* jne slow_path */
1670 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1671 label_ptr[1] = s->code_ptr;
1672 s->code_ptr += 4;
1675 /* TLB Hit. */
1677 /* add addend(r0), r1 */
1678 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1679 offsetof(CPUTLBEntry, addend) - which);
1682 /*
1683 * Record the context of a call to the out of line helper code for the slow path
1684 * for a load or store, so that we can later generate the correct helper code
1685 */
1686 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1687 TCGMemOpIdx oi,
1688 TCGReg datalo, TCGReg datahi,
1689 TCGReg addrlo, TCGReg addrhi,
1690 tcg_insn_unit *raddr,
1691 tcg_insn_unit **label_ptr)
1693 TCGLabelQemuLdst *label = new_ldst_label(s);
1695 label->is_ld = is_ld;
1696 label->oi = oi;
1697 label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1698 label->datalo_reg = datalo;
1699 label->datahi_reg = datahi;
1700 label->addrlo_reg = addrlo;
1701 label->addrhi_reg = addrhi;
1702 label->raddr = raddr;
1703 label->label_ptr[0] = label_ptr[0];
1704 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1705 label->label_ptr[1] = label_ptr[1];
1709 /*
1710 * Generate code for the slow path for a load at the end of block
1711 */
1712 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1714 TCGMemOpIdx oi = l->oi;
1715 TCGMemOp opc = get_memop(oi);
1716 TCGReg data_reg;
1717 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1718 int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
1720 /* resolve label address */
1721 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1722 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1723 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1726 if (TCG_TARGET_REG_BITS == 32) {
1727 int ofs = 0;
1729 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1730 ofs += 4;
1732 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1733 ofs += 4;
1735 if (TARGET_LONG_BITS == 64) {
1736 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1737 ofs += 4;
1740 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1741 ofs += 4;
1743 tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
1744 } else {
1745 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1746 /* The second argument is already loaded with addrlo. */
1747 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
1748 tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1749 (uintptr_t)l->raddr);
1752 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1754 data_reg = l->datalo_reg;
1755 switch (opc & MO_SSIZE) {
1756 case MO_SB:
1757 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
1758 break;
1759 case MO_SW:
1760 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
1761 break;
1762 #if TCG_TARGET_REG_BITS == 64
1763 case MO_SL:
1764 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1765 break;
1766 #endif
1767 case MO_UB:
1768 case MO_UW:
1769 /* Note that the helpers have zero-extended to tcg_target_long. */
1770 case MO_UL:
1771 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1772 break;
1773 case MO_Q:
1774 if (TCG_TARGET_REG_BITS == 64) {
1775 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1776 } else if (data_reg == TCG_REG_EDX) {
1777 /* xchg %edx, %eax */
1778 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1779 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1780 } else {
1781 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1782 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1784 break;
1785 default:
1786 tcg_abort();
1789 /* Jump to the code corresponding to the next IR of qemu_ld. */
1790 tcg_out_jmp(s, l->raddr);
1793 /*
1794 * Generate code for the slow path for a store at the end of block
1795 */
1796 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1798 TCGMemOpIdx oi = l->oi;
1799 TCGMemOp opc = get_memop(oi);
1800 TCGMemOp s_bits = opc & MO_SIZE;
1801 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1802 TCGReg retaddr;
1804 /* resolve label address */
1805 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1806 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1807 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1810 if (TCG_TARGET_REG_BITS == 32) {
1811 int ofs = 0;
1813 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1814 ofs += 4;
1816 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1817 ofs += 4;
1819 if (TARGET_LONG_BITS == 64) {
1820 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1821 ofs += 4;
1824 tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
1825 ofs += 4;
1827 if (s_bits == MO_64) {
1828 tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
1829 ofs += 4;
1832 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1833 ofs += 4;
1835 retaddr = TCG_REG_EAX;
1836 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1837 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
1838 } else {
1839 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1840 /* The second argument is already loaded with addrlo. */
1841 tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1842 tcg_target_call_iarg_regs[2], l->datalo_reg);
1843 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
1845 if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
1846 retaddr = tcg_target_call_iarg_regs[4];
1847 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1848 } else {
1849 retaddr = TCG_REG_RAX;
1850 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1851 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
1852 TCG_TARGET_CALL_STACK_OFFSET);
1856 /* "Tail call" to the helper, with the return address back inline. */
1857 tcg_out_push(s, retaddr);
1858 tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1860 #elif defined(__x86_64__) && defined(__linux__)
1861 # include <asm/prctl.h>
1862 # include <sys/prctl.h>
1864 int arch_prctl(int code, unsigned long addr);
1866 static int guest_base_flags;
1867 static inline void setup_guest_base_seg(void)
1869 if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
1870 guest_base_flags = P_GS;
1873 #else
1874 # define guest_base_flags 0
1875 static inline void setup_guest_base_seg(void) { }
1876 #endif /* SOFTMMU */
1878 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1879 TCGReg base, int index, intptr_t ofs,
1880 int seg, bool is64, TCGMemOp memop)
1882 const TCGMemOp real_bswap = memop & MO_BSWAP;
1883 TCGMemOp bswap = real_bswap;
1884 int rexw = is64 * P_REXW;
1885 int movop = OPC_MOVL_GvEv;
1887 if (have_movbe && real_bswap) {
1888 bswap = 0;
1889 movop = OPC_MOVBE_GyMy;
1892 switch (memop & MO_SSIZE) {
1893 case MO_UB:
1894 tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
1895 base, index, 0, ofs);
1896 break;
1897 case MO_SB:
1898 tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
1899 base, index, 0, ofs);
1900 break;
1901 case MO_UW:
1902 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1903 base, index, 0, ofs);
1904 if (real_bswap) {
1905 tcg_out_rolw_8(s, datalo);
1907 break;
1908 case MO_SW:
1909 if (real_bswap) {
1910 if (have_movbe) {
1911 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
1912 datalo, base, index, 0, ofs);
1913 } else {
1914 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1915 base, index, 0, ofs);
1916 tcg_out_rolw_8(s, datalo);
1918 tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo);
1919 } else {
1920 tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
1921 datalo, base, index, 0, ofs);
1923 break;
1924 case MO_UL:
1925 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
1926 if (bswap) {
1927 tcg_out_bswap32(s, datalo);
1929 break;
1930 #if TCG_TARGET_REG_BITS == 64
1931 case MO_SL:
1932 if (real_bswap) {
1933 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1934 base, index, 0, ofs);
1935 if (bswap) {
1936 tcg_out_bswap32(s, datalo);
1938 tcg_out_ext32s(s, datalo, datalo);
1939 } else {
1940 tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
1941 base, index, 0, ofs);
1943 break;
1944 #endif
1945 case MO_Q:
1946 if (TCG_TARGET_REG_BITS == 64) {
1947 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
1948 base, index, 0, ofs);
1949 if (bswap) {
1950 tcg_out_bswap64(s, datalo);
1952 } else {
1953 if (real_bswap) {
1954 int t = datalo;
1955 datalo = datahi;
1956 datahi = t;
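/* If the address register doubles as datalo, load the high word first
   so the base is not clobbered before the second load. */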
1958 if (base != datalo) {
1959 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1960 base, index, 0, ofs);
1961 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1962 base, index, 0, ofs + 4);
1963 } else {
1964 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1965 base, index, 0, ofs + 4);
1966 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1967 base, index, 0, ofs);
1969 if (bswap) {
1970 tcg_out_bswap32(s, datalo);
1971 tcg_out_bswap32(s, datahi);
1974 break;
1975 default:
1976 tcg_abort();
1980 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1981 EAX. That will become useful once fixed-register globals are less
1982 common. */
1983 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1985 TCGReg datalo, datahi, addrlo;
1986 TCGReg addrhi __attribute__((unused));
1987 TCGMemOpIdx oi;
1988 TCGMemOp opc;
1989 #if defined(CONFIG_SOFTMMU)
1990 int mem_index;
1991 tcg_insn_unit *label_ptr[2];
1992 #endif
1994 datalo = *args++;
1995 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
1996 addrlo = *args++;
1997 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
1998 oi = *args++;
1999 opc = get_memop(oi);
2001 #if defined(CONFIG_SOFTMMU)
2002 mem_index = get_mmuidx(oi);
2004 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2005 label_ptr, offsetof(CPUTLBEntry, addr_read));
2007 /* TLB Hit. */
2008 tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
2010 /* Record the current context of a load into ldst label */
2011 add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
2012 s->code_ptr, label_ptr);
2013 #else
2015 int32_t offset = guest_base;
2016 int index = -1;
2017 int seg = 0;
2020 * Recall we store 32-bit values zero-extended. No need for
2021 * further manual extension or an addr32 (0x67) prefix.
2023 if (guest_base == 0 || guest_base_flags) {
2024 seg = guest_base_flags;
2025 offset = 0;
2026 } else if (TCG_TARGET_REG_BITS == 64 && offset != guest_base) {
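/* offset was narrowed to int32_t above; if it no longer equals
   guest_base, the base does not fit in a 32-bit displacement, so
   materialize it in TCG_REG_L1 and use that as the index register. */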
2027 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
2028 index = TCG_REG_L1;
2029 offset = 0;
2032 tcg_out_qemu_ld_direct(s, datalo, datahi,
2033 addrlo, index, offset, seg, is64, opc);
2035 #endif
2038 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2039 TCGReg base, int index, intptr_t ofs,
2040 int seg, TCGMemOp memop)
2042 /* ??? Ideally we wouldn't need a scratch register. For user-only,
2043 we could perform the bswap twice to restore the original value
2044 instead of moving to the scratch. But as it is, the L constraint
2045 means that TCG_REG_L0 is definitely free here. */
2046 const TCGReg scratch = TCG_REG_L0;
2047 const TCGMemOp real_bswap = memop & MO_BSWAP;
2048 TCGMemOp bswap = real_bswap;
2049 int movop = OPC_MOVL_EvGv;
2051 if (have_movbe && real_bswap) {
2052 bswap = 0;
2053 movop = OPC_MOVBE_MyGy;
2056 switch (memop & MO_SIZE) {
2057 case MO_8:
2058 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
2059 Use the scratch register if necessary. */
2060 if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
2061 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2062 datalo = scratch;
2064 tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
2065 datalo, base, index, 0, ofs);
2066 break;
2067 case MO_16:
2068 if (bswap) {
2069 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2070 tcg_out_rolw_8(s, scratch);
2071 datalo = scratch;
2073 tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
2074 base, index, 0, ofs);
2075 break;
2076 case MO_32:
2077 if (bswap) {
2078 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2079 tcg_out_bswap32(s, scratch);
2080 datalo = scratch;
2082 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2083 break;
2084 case MO_64:
2085 if (TCG_TARGET_REG_BITS == 64) {
2086 if (bswap) {
2087 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
2088 tcg_out_bswap64(s, scratch);
2089 datalo = scratch;
2091 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2092 base, index, 0, ofs);
2093 } else if (bswap) {
2094 tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
2095 tcg_out_bswap32(s, scratch);
2096 tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
2097 base, index, 0, ofs);
2098 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2099 tcg_out_bswap32(s, scratch);
2100 tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
2101 base, index, 0, ofs + 4);
2102 } else {
2103 if (real_bswap) {
2104 int t = datalo;
2105 datalo = datahi;
2106 datahi = t;
2108 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2109 base, index, 0, ofs);
2110 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2111 base, index, 0, ofs + 4);
2113 break;
2114 default:
2115 tcg_abort();
2119 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
2121 TCGReg datalo, datahi, addrlo;
2122 TCGReg addrhi __attribute__((unused));
2123 TCGMemOpIdx oi;
2124 TCGMemOp opc;
2125 #if defined(CONFIG_SOFTMMU)
2126 int mem_index;
2127 tcg_insn_unit *label_ptr[2];
2128 #endif
2130 datalo = *args++;
2131 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2132 addrlo = *args++;
2133 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2134 oi = *args++;
2135 opc = get_memop(oi);
2137 #if defined(CONFIG_SOFTMMU)
2138 mem_index = get_mmuidx(oi);
2140 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2141 label_ptr, offsetof(CPUTLBEntry, addr_write));
2143 /* TLB Hit. */
2144 tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
2146 /* Record the current context of a store into ldst label */
2147 add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
2148 s->code_ptr, label_ptr);
2149 #else
2151 int32_t offset = guest_base;
2152 int index = -1;
2153 int seg = 0;
2156 * Recall we store 32-bit values zero-extended. No need for
2157 * further manual extension or an addr32 (0x67) prefix.
2159 if (guest_base == 0 || guest_base_flags) {
2160 seg = guest_base_flags;
2161 offset = 0;
2162 } else if (TCG_TARGET_REG_BITS == 64 && offset != guest_base) {
2163 /* ??? Note that we require L0 free for bswap. */
2164 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
2165 index = TCG_REG_L1;
2166 offset = 0;
2169 tcg_out_qemu_st_direct(s, datalo, datahi,
2170 addrlo, index, offset, seg, opc);
2172 #endif
2175 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2176 const TCGArg *args, const int *const_args)
2178 TCGArg a0, a1, a2;
2179 int c, const_a2, vexop, rexw = 0;
2181 #if TCG_TARGET_REG_BITS == 64
2182 # define OP_32_64(x) \
2183 case glue(glue(INDEX_op_, x), _i64): \
2184 rexw = P_REXW; /* FALLTHRU */ \
2185 case glue(glue(INDEX_op_, x), _i32)
2186 #else
2187 # define OP_32_64(x) \
2188 case glue(glue(INDEX_op_, x), _i32)
2189 #endif
2191 /* Hoist the loads of the most common arguments. */
2192 a0 = args[0];
2193 a1 = args[1];
2194 a2 = args[2];
2195 const_a2 = const_args[2];
2197 switch (opc) {
2198 case INDEX_op_exit_tb:
2199 /* Reuse the zeroing that exists for goto_ptr. */
2200 if (a0 == 0) {
2201 tcg_out_jmp(s, s->code_gen_epilogue);
2202 } else {
2203 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
2204 tcg_out_jmp(s, tb_ret_addr);
2206 break;
2207 case INDEX_op_goto_tb:
2208 if (s->tb_jmp_insn_offset) {
2209 /* direct jump method */
2210 int gap;
2211 /* jump displacement must be aligned for atomic patching;
2212 * see if we need to add extra nops before jump
2214 gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
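/* The 32-bit displacement begins one byte after the JMP opcode, so pad
   with NOPs until code_ptr + 1 is 4-byte aligned and the displacement
   can be patched atomically. */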
2215 if (gap != 1) {
2216 tcg_out_nopn(s, gap - 1);
2218 tcg_out8(s, OPC_JMP_long); /* jmp im */
2219 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
2220 tcg_out32(s, 0);
2221 } else {
2222 /* indirect jump method */
2223 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
2224 (intptr_t)(s->tb_jmp_target_addr + a0));
2226 set_jmp_reset_offset(s, a0);
2227 break;
2228 case INDEX_op_goto_ptr:
2229 /* jmp to the given host address (could be epilogue) */
2230 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
2231 break;
2232 case INDEX_op_br:
2233 tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
2234 break;
2235 OP_32_64(ld8u):
2236 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2237 tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
2238 break;
2239 OP_32_64(ld8s):
2240 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
2241 break;
2242 OP_32_64(ld16u):
2243 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2244 tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
2245 break;
2246 OP_32_64(ld16s):
2247 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
2248 break;
2249 #if TCG_TARGET_REG_BITS == 64
2250 case INDEX_op_ld32u_i64:
2251 #endif
2252 case INDEX_op_ld_i32:
2253 tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
2254 break;
2256 OP_32_64(st8):
2257 if (const_args[0]) {
2258 tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
2259 tcg_out8(s, a0);
2260 } else {
2261 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
2263 break;
2264 OP_32_64(st16):
2265 if (const_args[0]) {
2266 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
2267 tcg_out16(s, a0);
2268 } else {
2269 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
2271 break;
2272 #if TCG_TARGET_REG_BITS == 64
2273 case INDEX_op_st32_i64:
2274 #endif
2275 case INDEX_op_st_i32:
2276 if (const_args[0]) {
2277 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
2278 tcg_out32(s, a0);
2279 } else {
2280 tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
2282 break;
2284 OP_32_64(add):
2285 /* For 3-operand addition, use LEA. */
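/* e.g. add d,a,b becomes lea (a,b),d and add d,a,$imm becomes
   lea $imm(a),d, leaving both sources intact. */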
2286 if (a0 != a1) {
2287 TCGArg c3 = 0;
2288 if (const_a2) {
2289 c3 = a2, a2 = -1;
2290 } else if (a0 == a2) {
2291 /* Watch out for dest = src + dest, since we've removed
2292 the matching constraint on the add. */
2293 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
2294 break;
2297 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
2298 break;
2300 c = ARITH_ADD;
2301 goto gen_arith;
2302 OP_32_64(sub):
2303 c = ARITH_SUB;
2304 goto gen_arith;
2305 OP_32_64(and):
2306 c = ARITH_AND;
2307 goto gen_arith;
2308 OP_32_64(or):
2309 c = ARITH_OR;
2310 goto gen_arith;
2311 OP_32_64(xor):
2312 c = ARITH_XOR;
2313 goto gen_arith;
2314 gen_arith:
2315 if (const_a2) {
2316 tgen_arithi(s, c + rexw, a0, a2, 0);
2317 } else {
2318 tgen_arithr(s, c + rexw, a0, a2);
2320 break;
2322 OP_32_64(andc):
2323 if (const_a2) {
2324 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2325 tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
2326 } else {
2327 tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
2329 break;
2331 OP_32_64(mul):
2332 if (const_a2) {
2333 int32_t val;
2334 val = a2;
2335 if (val == (int8_t)val) {
2336 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
2337 tcg_out8(s, val);
2338 } else {
2339 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
2340 tcg_out32(s, val);
2342 } else {
2343 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
2345 break;
2347 OP_32_64(div2):
2348 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
2349 break;
2350 OP_32_64(divu2):
2351 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
2352 break;
2354 OP_32_64(shl):
2355 /* For small constant 3-operand shift, use LEA. */
2356 if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
2357 if (a2 - 1 == 0) {
2358 /* shl $1,a1,a0 -> lea (a1,a1),a0 */
2359 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
2360 } else {
2361 /* shl $n,a1,a0 -> lea 0(,a1,1<<n),a0 */
2362 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
2364 break;
2366 c = SHIFT_SHL;
2367 vexop = OPC_SHLX;
2368 goto gen_shift_maybe_vex;
2369 OP_32_64(shr):
2370 c = SHIFT_SHR;
2371 vexop = OPC_SHRX;
2372 goto gen_shift_maybe_vex;
2373 OP_32_64(sar):
2374 c = SHIFT_SAR;
2375 vexop = OPC_SARX;
2376 goto gen_shift_maybe_vex;
2377 OP_32_64(rotl):
2378 c = SHIFT_ROL;
2379 goto gen_shift;
2380 OP_32_64(rotr):
2381 c = SHIFT_ROR;
2382 goto gen_shift;
2383 gen_shift_maybe_vex:
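/* The BMI2 SHLX/SHRX/SARX forms are non-destructive three-operand
   shifts that take the count in any register (not just %cl) and do
   not modify the flags. */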
2384 if (have_bmi2) {
2385 if (!const_a2) {
2386 tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
2387 break;
2389 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2391 /* FALLTHRU */
2392 gen_shift:
2393 if (const_a2) {
2394 tcg_out_shifti(s, c + rexw, a0, a2);
2395 } else {
2396 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
2398 break;
2400 OP_32_64(ctz):
2401 tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
2402 break;
2403 OP_32_64(clz):
2404 tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
2405 break;
2406 OP_32_64(ctpop):
2407 tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
2408 break;
2410 case INDEX_op_brcond_i32:
2411 tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2412 break;
2413 case INDEX_op_setcond_i32:
2414 tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
2415 break;
2416 case INDEX_op_movcond_i32:
2417 tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
2418 break;
2420 OP_32_64(bswap16):
2421 tcg_out_rolw_8(s, a0);
2422 break;
2423 OP_32_64(bswap32):
2424 tcg_out_bswap32(s, a0);
2425 break;
2427 OP_32_64(neg):
2428 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
2429 break;
2430 OP_32_64(not):
2431 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
2432 break;
2434 OP_32_64(ext8s):
2435 tcg_out_ext8s(s, a0, a1, rexw);
2436 break;
2437 OP_32_64(ext16s):
2438 tcg_out_ext16s(s, a0, a1, rexw);
2439 break;
2440 OP_32_64(ext8u):
2441 tcg_out_ext8u(s, a0, a1);
2442 break;
2443 OP_32_64(ext16u):
2444 tcg_out_ext16u(s, a0, a1);
2445 break;
2447 case INDEX_op_qemu_ld_i32:
2448 tcg_out_qemu_ld(s, args, 0);
2449 break;
2450 case INDEX_op_qemu_ld_i64:
2451 tcg_out_qemu_ld(s, args, 1);
2452 break;
2453 case INDEX_op_qemu_st_i32:
2454 tcg_out_qemu_st(s, args, 0);
2455 break;
2456 case INDEX_op_qemu_st_i64:
2457 tcg_out_qemu_st(s, args, 1);
2458 break;
2460 OP_32_64(mulu2):
2461 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2462 break;
2463 OP_32_64(muls2):
2464 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2465 break;
2466 OP_32_64(add2):
2467 if (const_args[4]) {
2468 tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
2469 } else {
2470 tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
2472 if (const_args[5]) {
2473 tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
2474 } else {
2475 tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
2477 break;
2478 OP_32_64(sub2):
2479 if (const_args[4]) {
2480 tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
2481 } else {
2482 tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
2484 if (const_args[5]) {
2485 tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
2486 } else {
2487 tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
2489 break;
2491 #if TCG_TARGET_REG_BITS == 32
2492 case INDEX_op_brcond2_i32:
2493 tcg_out_brcond2(s, args, const_args, 0);
2494 break;
2495 case INDEX_op_setcond2_i32:
2496 tcg_out_setcond2(s, args, const_args);
2497 break;
2498 #else /* TCG_TARGET_REG_BITS == 64 */
2499 case INDEX_op_ld32s_i64:
2500 tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
2501 break;
2502 case INDEX_op_ld_i64:
2503 tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
2504 break;
2505 case INDEX_op_st_i64:
2506 if (const_args[0]) {
2507 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
2508 tcg_out32(s, a0);
2509 } else {
2510 tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
2512 break;
2514 case INDEX_op_brcond_i64:
2515 tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2516 break;
2517 case INDEX_op_setcond_i64:
2518 tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
2519 break;
2520 case INDEX_op_movcond_i64:
2521 tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
2522 break;
2524 case INDEX_op_bswap64_i64:
2525 tcg_out_bswap64(s, a0);
2526 break;
2527 case INDEX_op_extu_i32_i64:
2528 case INDEX_op_ext32u_i64:
2529 case INDEX_op_extrl_i64_i32:
2530 tcg_out_ext32u(s, a0, a1);
2531 break;
2532 case INDEX_op_ext_i32_i64:
2533 case INDEX_op_ext32s_i64:
2534 tcg_out_ext32s(s, a0, a1);
2535 break;
2536 case INDEX_op_extrh_i64_i32:
2537 tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
2538 break;
2539 #endif
2541 OP_32_64(deposit):
2542 if (args[3] == 0 && args[4] == 8) {
2543 /* load bits 0..7 */
2544 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
2545 } else if (args[3] == 8 && args[4] == 8) {
2546 /* load bits 8..15 */
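/* The high-byte registers %ah/%ch/%dh/%bh are encoded as 4..7,
   hence a0 + 4; the 'Q' constraint keeps a0 within %eax..%ebx. */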
2547 tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
2548 } else if (args[3] == 0 && args[4] == 16) {
2549 /* load bits 0..15 */
2550 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
2551 } else {
2552 tcg_abort();
2554 break;
2556 case INDEX_op_extract_i64:
2557 if (a2 + args[3] == 32) {
2558 /* This is a 32-bit zero-extending right shift. */
2559 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2560 tcg_out_shifti(s, SHIFT_SHR, a0, a2);
2561 break;
2563 /* FALLTHRU */
2564 case INDEX_op_extract_i32:
2565 /* If possible, use the high-byte registers. Otherwise we emit the
2566 same ext16 + shift pattern that we would have gotten from the
2567 normal tcg-op.c expansion. */
2568 tcg_debug_assert(a2 == 8 && args[3] == 8);
2569 if (a1 < 4 && a0 < 8) {
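/* a1 < 4 means a1 has a high-byte sibling encoded as a1 + 4; a0 < 8
   avoids a REX prefix, which would change encodings 4..7 to mean
   %spl..%dil instead of %ah..%bh. */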
2570 tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
2571 } else {
2572 tcg_out_ext16u(s, a0, a1);
2573 tcg_out_shifti(s, SHIFT_SHR, a0, 8);
2575 break;
2577 case INDEX_op_sextract_i32:
2578 /* We don't implement sextract_i64, as we cannot sign-extend to
2579 64-bits without using the REX prefix that explicitly excludes
2580 access to the high-byte registers. */
2581 tcg_debug_assert(a2 == 8 && args[3] == 8);
2582 if (a1 < 4 && a0 < 8) {
2583 tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
2584 } else {
2585 tcg_out_ext16s(s, a0, a1, 0);
2586 tcg_out_shifti(s, SHIFT_SAR, a0, 8);
2588 break;
2590 case INDEX_op_mb:
2591 tcg_out_mb(s, a0);
2592 break;
2593 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2594 case INDEX_op_mov_i64:
2595 case INDEX_op_mov_vec:
2596 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2597 case INDEX_op_movi_i64:
2598 case INDEX_op_dupi_vec:
2599 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2600 default:
2601 tcg_abort();
2604 #undef OP_32_64
2607 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2608 unsigned vecl, unsigned vece,
2609 const TCGArg *args, const int *const_args)
2611 static int const add_insn[4] = {
2612 OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
2614 static int const sub_insn[4] = {
2615 OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
2617 static int const mul_insn[4] = {
2618 OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
2620 static int const shift_imm_insn[4] = {
2621 OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
2623 static int const cmpeq_insn[4] = {
2624 OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
2626 static int const cmpgt_insn[4] = {
2627 OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
2629 static int const punpckl_insn[4] = {
2630 OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
2632 static int const punpckh_insn[4] = {
2633 OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
2635 static int const packss_insn[4] = {
2636 OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
2638 static int const packus_insn[4] = {
2639 OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
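/* OPC_UD2 marks element sizes for which no single SSE/AVX2 instruction
   exists (e.g. there is no byte multiply and no pre-AVX-512 PMULLQ);
   those cases are rejected by tcg_can_emit_vec_op or expanded in
   tcg_expand_vec_op, and gen_simd asserts they never reach here. */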
2642 TCGType type = vecl + TCG_TYPE_V64;
2643 int insn, sub;
2644 TCGArg a0, a1, a2;
2646 a0 = args[0];
2647 a1 = args[1];
2648 a2 = args[2];
2650 switch (opc) {
2651 case INDEX_op_add_vec:
2652 insn = add_insn[vece];
2653 goto gen_simd;
2654 case INDEX_op_sub_vec:
2655 insn = sub_insn[vece];
2656 goto gen_simd;
2657 case INDEX_op_mul_vec:
2658 insn = mul_insn[vece];
2659 goto gen_simd;
2660 case INDEX_op_and_vec:
2661 insn = OPC_PAND;
2662 goto gen_simd;
2663 case INDEX_op_or_vec:
2664 insn = OPC_POR;
2665 goto gen_simd;
2666 case INDEX_op_xor_vec:
2667 insn = OPC_PXOR;
2668 goto gen_simd;
2669 case INDEX_op_x86_punpckl_vec:
2670 insn = punpckl_insn[vece];
2671 goto gen_simd;
2672 case INDEX_op_x86_punpckh_vec:
2673 insn = punpckh_insn[vece];
2674 goto gen_simd;
2675 case INDEX_op_x86_packss_vec:
2676 insn = packss_insn[vece];
2677 goto gen_simd;
2678 case INDEX_op_x86_packus_vec:
2679 insn = packus_insn[vece];
2680 goto gen_simd;
2681 #if TCG_TARGET_REG_BITS == 32
2682 case INDEX_op_dup2_vec:
2683 /* Constraints have already placed both 32-bit inputs in xmm regs. */
2684 insn = OPC_PUNPCKLDQ;
2685 goto gen_simd;
2686 #endif
2687 gen_simd:
2688 tcg_debug_assert(insn != OPC_UD2);
2689 if (type == TCG_TYPE_V256) {
2690 insn |= P_VEXL;
2692 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2693 break;
2695 case INDEX_op_cmp_vec:
2696 sub = args[3];
2697 if (sub == TCG_COND_EQ) {
2698 insn = cmpeq_insn[vece];
2699 } else if (sub == TCG_COND_GT) {
2700 insn = cmpgt_insn[vece];
2701 } else {
2702 g_assert_not_reached();
2704 goto gen_simd;
2706 case INDEX_op_andc_vec:
2707 insn = OPC_PANDN;
2708 if (type == TCG_TYPE_V256) {
2709 insn |= P_VEXL;
2711 tcg_out_vex_modrm(s, insn, a0, a2, a1);
2712 break;
2714 case INDEX_op_shli_vec:
2715 sub = 6;
2716 goto gen_shift;
2717 case INDEX_op_shri_vec:
2718 sub = 2;
2719 goto gen_shift;
2720 case INDEX_op_sari_vec:
2721 tcg_debug_assert(vece != MO_64);
2722 sub = 4;
2723 gen_shift:
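/* 'sub' is the ModRM.reg extension of the immediate shift group:
   /2 = logical right, /4 = arithmetic right, /6 = left. */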
2724 tcg_debug_assert(vece != MO_8);
2725 insn = shift_imm_insn[vece];
2726 if (type == TCG_TYPE_V256) {
2727 insn |= P_VEXL;
2729 tcg_out_vex_modrm(s, insn, sub, a0, a1);
2730 tcg_out8(s, a2);
2731 break;
2733 case INDEX_op_ld_vec:
2734 tcg_out_ld(s, type, a0, a1, a2);
2735 break;
2736 case INDEX_op_st_vec:
2737 tcg_out_st(s, type, a0, a1, a2);
2738 break;
2739 case INDEX_op_dup_vec:
2740 tcg_out_dup_vec(s, type, vece, a0, a1);
2741 break;
2743 case INDEX_op_x86_shufps_vec:
2744 insn = OPC_SHUFPS;
2745 sub = args[3];
2746 goto gen_simd_imm8;
2747 case INDEX_op_x86_blend_vec:
2748 if (vece == MO_16) {
2749 insn = OPC_PBLENDW;
2750 } else if (vece == MO_32) {
2751 insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
2752 } else {
2753 g_assert_not_reached();
2755 sub = args[3];
2756 goto gen_simd_imm8;
2757 case INDEX_op_x86_vperm2i128_vec:
2758 insn = OPC_VPERM2I128;
2759 sub = args[3];
2760 goto gen_simd_imm8;
2761 gen_simd_imm8:
2762 if (type == TCG_TYPE_V256) {
2763 insn |= P_VEXL;
2765 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2766 tcg_out8(s, sub);
2767 break;
2769 case INDEX_op_x86_vpblendvb_vec:
2770 insn = OPC_VPBLENDVB;
2771 if (type == TCG_TYPE_V256) {
2772 insn |= P_VEXL;
2774 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2775 tcg_out8(s, args[3] << 4);
2776 break;
2778 case INDEX_op_x86_psrldq_vec:
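/* GRP14 /3 is PSRLDQ: shift the whole register right by a2 bytes. */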
2779 tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
2780 tcg_out8(s, a2);
2781 break;
2783 default:
2784 g_assert_not_reached();
2788 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2790 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
2791 static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
2792 static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
2793 static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
2794 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
2795 static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
2796 static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
2797 static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
2798 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
2799 static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
2800 static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
2801 static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
2802 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
2803 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
2804 static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
2805 static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
2806 static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
2807 static const TCGTargetOpDef r_r_L_L
2808 = { .args_ct_str = { "r", "r", "L", "L" } };
2809 static const TCGTargetOpDef L_L_L_L
2810 = { .args_ct_str = { "L", "L", "L", "L" } };
2811 static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
2812 static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
2813 static const TCGTargetOpDef x_x_x_x
2814 = { .args_ct_str = { "x", "x", "x", "x" } };
2815 static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
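/* A rough key to the constraint letters used below, as parsed by this
   backend (definitions not shown in this file): 'r' any general
   register; 'q' a register with a low-byte subregister (any on 64-bit,
   %eax..%ebx on 32-bit); 'Q' a register with a high-byte subregister
   (%eax..%ebx); 'L' a general register usable for qemu_ld/st operands;
   'x' a vector register; 'a'/'c'/'d' fixed %eax/%ecx/%edx; '0'/'1' an
   alias of output operand 0/1; '&' an early-clobber output; 'i' any
   immediate; 'e'/'Z' a sign-/zero-extended 32-bit immediate. */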
2817 switch (op) {
2818 case INDEX_op_goto_ptr:
2819 return &r;
2821 case INDEX_op_ld8u_i32:
2822 case INDEX_op_ld8u_i64:
2823 case INDEX_op_ld8s_i32:
2824 case INDEX_op_ld8s_i64:
2825 case INDEX_op_ld16u_i32:
2826 case INDEX_op_ld16u_i64:
2827 case INDEX_op_ld16s_i32:
2828 case INDEX_op_ld16s_i64:
2829 case INDEX_op_ld_i32:
2830 case INDEX_op_ld32u_i64:
2831 case INDEX_op_ld32s_i64:
2832 case INDEX_op_ld_i64:
2833 return &r_r;
2835 case INDEX_op_st8_i32:
2836 case INDEX_op_st8_i64:
2837 return &qi_r;
2838 case INDEX_op_st16_i32:
2839 case INDEX_op_st16_i64:
2840 case INDEX_op_st_i32:
2841 case INDEX_op_st32_i64:
2842 return &ri_r;
2843 case INDEX_op_st_i64:
2844 return &re_r;
2846 case INDEX_op_add_i32:
2847 case INDEX_op_add_i64:
2848 return &r_r_re;
2849 case INDEX_op_sub_i32:
2850 case INDEX_op_sub_i64:
2851 case INDEX_op_mul_i32:
2852 case INDEX_op_mul_i64:
2853 case INDEX_op_or_i32:
2854 case INDEX_op_or_i64:
2855 case INDEX_op_xor_i32:
2856 case INDEX_op_xor_i64:
2857 return &r_0_re;
2859 case INDEX_op_and_i32:
2860 case INDEX_op_and_i64:
2862 static const TCGTargetOpDef and
2863 = { .args_ct_str = { "r", "0", "reZ" } };
2864 return &and;
2866 break;
2867 case INDEX_op_andc_i32:
2868 case INDEX_op_andc_i64:
2870 static const TCGTargetOpDef andc
2871 = { .args_ct_str = { "r", "r", "rI" } };
2872 return &andc;
2874 break;
2876 case INDEX_op_shl_i32:
2877 case INDEX_op_shl_i64:
2878 case INDEX_op_shr_i32:
2879 case INDEX_op_shr_i64:
2880 case INDEX_op_sar_i32:
2881 case INDEX_op_sar_i64:
2882 return have_bmi2 ? &r_r_ri : &r_0_ci;
2883 case INDEX_op_rotl_i32:
2884 case INDEX_op_rotl_i64:
2885 case INDEX_op_rotr_i32:
2886 case INDEX_op_rotr_i64:
2887 return &r_0_ci;
2889 case INDEX_op_brcond_i32:
2890 case INDEX_op_brcond_i64:
2891 return &r_re;
2893 case INDEX_op_bswap16_i32:
2894 case INDEX_op_bswap16_i64:
2895 case INDEX_op_bswap32_i32:
2896 case INDEX_op_bswap32_i64:
2897 case INDEX_op_bswap64_i64:
2898 case INDEX_op_neg_i32:
2899 case INDEX_op_neg_i64:
2900 case INDEX_op_not_i32:
2901 case INDEX_op_not_i64:
2902 case INDEX_op_extrh_i64_i32:
2903 return &r_0;
2905 case INDEX_op_ext8s_i32:
2906 case INDEX_op_ext8s_i64:
2907 case INDEX_op_ext8u_i32:
2908 case INDEX_op_ext8u_i64:
2909 return &r_q;
2910 case INDEX_op_ext16s_i32:
2911 case INDEX_op_ext16s_i64:
2912 case INDEX_op_ext16u_i32:
2913 case INDEX_op_ext16u_i64:
2914 case INDEX_op_ext32s_i64:
2915 case INDEX_op_ext32u_i64:
2916 case INDEX_op_ext_i32_i64:
2917 case INDEX_op_extu_i32_i64:
2918 case INDEX_op_extrl_i64_i32:
2919 case INDEX_op_extract_i32:
2920 case INDEX_op_extract_i64:
2921 case INDEX_op_sextract_i32:
2922 case INDEX_op_ctpop_i32:
2923 case INDEX_op_ctpop_i64:
2924 return &r_r;
2926 case INDEX_op_deposit_i32:
2927 case INDEX_op_deposit_i64:
2929 static const TCGTargetOpDef dep
2930 = { .args_ct_str = { "Q", "0", "Q" } };
2931 return &dep;
2933 case INDEX_op_setcond_i32:
2934 case INDEX_op_setcond_i64:
2936 static const TCGTargetOpDef setc
2937 = { .args_ct_str = { "q", "r", "re" } };
2938 return &setc;
2940 case INDEX_op_movcond_i32:
2941 case INDEX_op_movcond_i64:
2943 static const TCGTargetOpDef movc
2944 = { .args_ct_str = { "r", "r", "re", "r", "0" } };
2945 return &movc;
2947 case INDEX_op_div2_i32:
2948 case INDEX_op_div2_i64:
2949 case INDEX_op_divu2_i32:
2950 case INDEX_op_divu2_i64:
2952 static const TCGTargetOpDef div2
2953 = { .args_ct_str = { "a", "d", "0", "1", "r" } };
2954 return &div2;
2956 case INDEX_op_mulu2_i32:
2957 case INDEX_op_mulu2_i64:
2958 case INDEX_op_muls2_i32:
2959 case INDEX_op_muls2_i64:
2961 static const TCGTargetOpDef mul2
2962 = { .args_ct_str = { "a", "d", "a", "r" } };
2963 return &mul2;
2965 case INDEX_op_add2_i32:
2966 case INDEX_op_add2_i64:
2967 case INDEX_op_sub2_i32:
2968 case INDEX_op_sub2_i64:
2970 static const TCGTargetOpDef arith2
2971 = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
2972 return &arith2;
2974 case INDEX_op_ctz_i32:
2975 case INDEX_op_ctz_i64:
2977 static const TCGTargetOpDef ctz[2] = {
2978 { .args_ct_str = { "&r", "r", "r" } },
2979 { .args_ct_str = { "&r", "r", "rW" } },
2981 return &ctz[have_bmi1];
2983 case INDEX_op_clz_i32:
2984 case INDEX_op_clz_i64:
2986 static const TCGTargetOpDef clz[2] = {
2987 { .args_ct_str = { "&r", "r", "r" } },
2988 { .args_ct_str = { "&r", "r", "rW" } },
2990 return &clz[have_lzcnt];
2993 case INDEX_op_qemu_ld_i32:
2994 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
2995 case INDEX_op_qemu_st_i32:
2996 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
2997 case INDEX_op_qemu_ld_i64:
2998 return (TCG_TARGET_REG_BITS == 64 ? &r_L
2999 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
3000 : &r_r_L_L);
3001 case INDEX_op_qemu_st_i64:
3002 return (TCG_TARGET_REG_BITS == 64 ? &L_L
3003 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
3004 : &L_L_L_L);
3006 case INDEX_op_brcond2_i32:
3008 static const TCGTargetOpDef b2
3009 = { .args_ct_str = { "r", "r", "ri", "ri" } };
3010 return &b2;
3012 case INDEX_op_setcond2_i32:
3014 static const TCGTargetOpDef s2
3015 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
3016 return &s2;
3019 case INDEX_op_ld_vec:
3020 case INDEX_op_st_vec:
3021 return &x_r;
3023 case INDEX_op_add_vec:
3024 case INDEX_op_sub_vec:
3025 case INDEX_op_mul_vec:
3026 case INDEX_op_and_vec:
3027 case INDEX_op_or_vec:
3028 case INDEX_op_xor_vec:
3029 case INDEX_op_andc_vec:
3030 case INDEX_op_cmp_vec:
3031 case INDEX_op_x86_shufps_vec:
3032 case INDEX_op_x86_blend_vec:
3033 case INDEX_op_x86_packss_vec:
3034 case INDEX_op_x86_packus_vec:
3035 case INDEX_op_x86_vperm2i128_vec:
3036 case INDEX_op_x86_punpckl_vec:
3037 case INDEX_op_x86_punpckh_vec:
3038 #if TCG_TARGET_REG_BITS == 32
3039 case INDEX_op_dup2_vec:
3040 #endif
3041 return &x_x_x;
3042 case INDEX_op_dup_vec:
3043 case INDEX_op_shli_vec:
3044 case INDEX_op_shri_vec:
3045 case INDEX_op_sari_vec:
3046 case INDEX_op_x86_psrldq_vec:
3047 return &x_x;
3048 case INDEX_op_x86_vpblendvb_vec:
3049 return &x_x_x_x;
3051 default:
3052 break;
3054 return NULL;
3057 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
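/* Return 1 if the opcode is supported directly, 0 if it is not
   supported at all, and -1 if it can be implemented by expansion in
   tcg_expand_vec_op below. */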
3059 switch (opc) {
3060 case INDEX_op_add_vec:
3061 case INDEX_op_sub_vec:
3062 case INDEX_op_and_vec:
3063 case INDEX_op_or_vec:
3064 case INDEX_op_xor_vec:
3065 case INDEX_op_andc_vec:
3066 return 1;
3067 case INDEX_op_cmp_vec:
3068 return -1;
3070 case INDEX_op_shli_vec:
3071 case INDEX_op_shri_vec:
3072 /* We must expand the operation for MO_8. */
3073 return vece == MO_8 ? -1 : 1;
3075 case INDEX_op_sari_vec:
3076 /* We must expand the operation for MO_8. */
3077 if (vece == MO_8) {
3078 return -1;
3080 /* We can emulate this for MO_64, but it does not pay off
3081 unless we're producing at least 4 values. */
3082 if (vece == MO_64) {
3083 return type >= TCG_TYPE_V256 ? -1 : 0;
3085 return 1;
3087 case INDEX_op_mul_vec:
3088 if (vece == MO_8) {
3089 /* We can expand the operation for MO_8. */
3090 return -1;
3092 if (vece == MO_64) {
3093 return 0;
3095 return 1;
3097 default:
3098 return 0;
3102 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3103 TCGArg a0, ...)
3105 va_list va;
3106 TCGArg a1, a2;
3107 TCGv_vec v0, t1, t2, t3, t4;
3109 va_start(va, a0);
3110 v0 = temp_tcgv_vec(arg_temp(a0));
3112 switch (opc) {
3113 case INDEX_op_shli_vec:
3114 case INDEX_op_shri_vec:
3115 tcg_debug_assert(vece == MO_8);
3116 a1 = va_arg(va, TCGArg);
3117 a2 = va_arg(va, TCGArg);
3118 /* Unpack to W, shift, and repack. Tricky bits:
3119 (1) Use punpck*bw x,x to produce DDCCBBAA,
3120 i.e. duplicate in other half of the 16-bit lane.
3121 (2) For right-shift, add 8 so that the high half of
3122 the lane becomes zero. For left-shift, we must
3123 shift up and down again.
3124 (3) Step 2 leaves high half zero such that PACKUSWB
3125 (pack with unsigned saturation) does not modify
3126 the quantity. */
3127 t1 = tcg_temp_new_vec(type);
3128 t2 = tcg_temp_new_vec(type);
3129 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3130 tcgv_vec_arg(t1), a1, a1);
3131 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3132 tcgv_vec_arg(t2), a1, a1);
3133 if (opc == INDEX_op_shri_vec) {
3134 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3135 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3136 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3137 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3138 } else {
3139 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3140 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3141 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3142 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3143 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3144 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 8);
3145 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3146 tcgv_vec_arg(t2), tcgv_vec_arg(t2), 8);
3148 vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3149 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3150 tcg_temp_free_vec(t1);
3151 tcg_temp_free_vec(t2);
3152 break;
3154 case INDEX_op_sari_vec:
3155 a1 = va_arg(va, TCGArg);
3156 a2 = va_arg(va, TCGArg);
3157 if (vece == MO_8) {
3158 /* Unpack to W, shift, and repack, as above. */
3159 t1 = tcg_temp_new_vec(type);
3160 t2 = tcg_temp_new_vec(type);
3161 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3162 tcgv_vec_arg(t1), a1, a1);
3163 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3164 tcgv_vec_arg(t2), a1, a1);
3165 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3166 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3167 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3168 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3169 vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
3170 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3171 tcg_temp_free_vec(t1);
3172 tcg_temp_free_vec(t2);
3173 break;
3175 tcg_debug_assert(vece == MO_64);
3176 /* MO_64: If the shift is <= 32, we can emulate the sign extend by
3177 performing an arithmetic 32-bit shift and overwriting the high
3178 half of the result (note that the ISA says shift of 32 is valid). */
3179 if (a2 <= 32) {
3180 t1 = tcg_temp_new_vec(type);
3181 vec_gen_3(INDEX_op_sari_vec, type, MO_32, tcgv_vec_arg(t1), a1, a2);
3182 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3183 vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
3184 a0, a0, tcgv_vec_arg(t1), 0xaa);
3185 tcg_temp_free_vec(t1);
3186 break;
3188 /* Otherwise we will need to use a compare vs 0 to produce the
3189 sign-extend, shift and merge. */
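/* t1 = (0 > a1) yields all-ones in each negative lane; shifting it
   left by (64 - a2) recreates exactly the bits that the logical right
   shift cleared, and the OR merges them back in. */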
3190 t1 = tcg_temp_new_vec(type);
3191 t2 = tcg_const_zeros_vec(type);
3192 vec_gen_4(INDEX_op_cmp_vec, type, MO_64,
3193 tcgv_vec_arg(t1), tcgv_vec_arg(t2), a1, TCG_COND_GT);
3194 tcg_temp_free_vec(t2);
3195 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3196 vec_gen_3(INDEX_op_shli_vec, type, MO_64,
3197 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 64 - a2);
3198 vec_gen_3(INDEX_op_or_vec, type, MO_64, a0, a0, tcgv_vec_arg(t1));
3199 tcg_temp_free_vec(t1);
3200 break;
3202 case INDEX_op_mul_vec:
3203 tcg_debug_assert(vece == MO_8);
3204 a1 = va_arg(va, TCGArg);
3205 a2 = va_arg(va, TCGArg);
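/* There is no byte multiply, so interleave with zero to widen one
   operand into 16-bit lanes and place the other in the high byte of
   its lanes; the 16-bit product then holds (a * b) & 0xff in its high
   byte, which a logical shift right by 8 and an unsigned-saturating
   pack turn back into byte results. */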
3206 switch (type) {
3207 case TCG_TYPE_V64:
3208 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3209 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3210 tcg_gen_dup16i_vec(t2, 0);
3211 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3212 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t2));
3213 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3214 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2);
3215 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3216 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3217 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3218 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t1));
3219 tcg_temp_free_vec(t1);
3220 tcg_temp_free_vec(t2);
3221 break;
3223 case TCG_TYPE_V128:
3224 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3225 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3226 t3 = tcg_temp_new_vec(TCG_TYPE_V128);
3227 t4 = tcg_temp_new_vec(TCG_TYPE_V128);
3228 tcg_gen_dup16i_vec(t4, 0);
3229 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3230 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3231 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3232 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3233 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3234 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3235 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3236 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3237 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3238 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3239 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3240 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3241 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3242 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3243 tcg_temp_free_vec(t1);
3244 tcg_temp_free_vec(t2);
3245 tcg_temp_free_vec(t3);
3246 tcg_temp_free_vec(t4);
3247 break;
3249 case TCG_TYPE_V256:
3250 t1 = tcg_temp_new_vec(TCG_TYPE_V256);
3251 t2 = tcg_temp_new_vec(TCG_TYPE_V256);
3252 t3 = tcg_temp_new_vec(TCG_TYPE_V256);
3253 t4 = tcg_temp_new_vec(TCG_TYPE_V256);
3254 tcg_gen_dup16i_vec(t4, 0);
3255 /* a1: A[0-7] ... D[0-7]; a2: W[0-7] ... Z[0-7]
3256 t1: extends of B[0-7], D[0-7]
3257 t2: extends of X[0-7], Z[0-7]
3258 t3: extends of A[0-7], C[0-7]
3259 t4: extends of W[0-7], Y[0-7]. */
3260 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3261 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3262 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3263 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3264 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3265 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3266 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3267 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3268 /* t1: BX DZ; t3: AW CY. */
3269 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3270 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3271 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3272 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3273 /* a0: AW BX CY DZ. */
3274 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V256, MO_8,
3275 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3276 tcg_temp_free_vec(t1);
3277 tcg_temp_free_vec(t2);
3278 tcg_temp_free_vec(t3);
3279 tcg_temp_free_vec(t4);
3280 break;
3282 default:
3283 g_assert_not_reached();
3285 break;
3287 case INDEX_op_cmp_vec:
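/* SSE/AVX2 integer compares only provide EQ and signed GT, so the
   remaining conditions are synthesized by swapping the operands,
   inverting the result, and/or biasing both operands by the sign bit
   so that an unsigned comparison becomes a signed one. */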
3289 enum {
3290 NEED_SWAP = 1,
3291 NEED_INV = 2,
3292 NEED_BIAS = 4
3294 static const uint8_t fixups[16] = {
3295 [0 ... 15] = -1,
3296 [TCG_COND_EQ] = 0,
3297 [TCG_COND_NE] = NEED_INV,
3298 [TCG_COND_GT] = 0,
3299 [TCG_COND_LT] = NEED_SWAP,
3300 [TCG_COND_LE] = NEED_INV,
3301 [TCG_COND_GE] = NEED_SWAP | NEED_INV,
3302 [TCG_COND_GTU] = NEED_BIAS,
3303 [TCG_COND_LTU] = NEED_BIAS | NEED_SWAP,
3304 [TCG_COND_LEU] = NEED_BIAS | NEED_INV,
3305 [TCG_COND_GEU] = NEED_BIAS | NEED_SWAP | NEED_INV,
3308 TCGCond cond;
3309 uint8_t fixup;
3311 a1 = va_arg(va, TCGArg);
3312 a2 = va_arg(va, TCGArg);
3313 cond = va_arg(va, TCGArg);
3314 fixup = fixups[cond & 15];
3315 tcg_debug_assert(fixup != 0xff);
3317 if (fixup & NEED_INV) {
3318 cond = tcg_invert_cond(cond);
3320 if (fixup & NEED_SWAP) {
3321 TCGArg t;
3322 t = a1, a1 = a2, a2 = t;
3323 cond = tcg_swap_cond(cond);
3326 t1 = t2 = NULL;
3327 if (fixup & NEED_BIAS) {
3328 t1 = tcg_temp_new_vec(type);
3329 t2 = tcg_temp_new_vec(type);
3330 tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
3331 tcg_gen_sub_vec(vece, t1, temp_tcgv_vec(arg_temp(a1)), t2);
3332 tcg_gen_sub_vec(vece, t2, temp_tcgv_vec(arg_temp(a2)), t2);
3333 a1 = tcgv_vec_arg(t1);
3334 a2 = tcgv_vec_arg(t2);
3335 cond = tcg_signed_cond(cond);
3338 tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
3339 vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);
3341 if (fixup & NEED_BIAS) {
3342 tcg_temp_free_vec(t1);
3343 tcg_temp_free_vec(t2);
3345 if (fixup & NEED_INV) {
3346 tcg_gen_not_vec(vece, v0, v0);
3349 break;
3351 default:
3352 break;
3355 va_end(va);
3358 static const int tcg_target_callee_save_regs[] = {
3359 #if TCG_TARGET_REG_BITS == 64
3360 TCG_REG_RBP,
3361 TCG_REG_RBX,
3362 #if defined(_WIN64)
3363 TCG_REG_RDI,
3364 TCG_REG_RSI,
3365 #endif
3366 TCG_REG_R12,
3367 TCG_REG_R13,
3368 TCG_REG_R14, /* Currently used for the global env. */
3369 TCG_REG_R15,
3370 #else
3371 TCG_REG_EBP, /* Currently used for the global env. */
3372 TCG_REG_EBX,
3373 TCG_REG_ESI,
3374 TCG_REG_EDI,
3375 #endif
3378 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3379 and tcg_register_jit. */
3381 #define PUSH_SIZE \
3382 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
3383 * (TCG_TARGET_REG_BITS / 8))
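/* The leading '1 +' counts the return-address slot pushed by our
   caller, so FRAME_SIZE matches the CFA distance described by the
   debug_frame data below. */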
3385 #define FRAME_SIZE \
3386 ((PUSH_SIZE \
3387 + TCG_STATIC_CALL_ARGS_SIZE \
3388 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3389 + TCG_TARGET_STACK_ALIGN - 1) \
3390 & ~(TCG_TARGET_STACK_ALIGN - 1))
3392 /* Generate global QEMU prologue and epilogue code */
3393 static void tcg_target_qemu_prologue(TCGContext *s)
3395 int i, stack_addend;
3397 /* TB prologue */
3399 /* Reserve some stack space, also for TCG temps. */
3400 stack_addend = FRAME_SIZE - PUSH_SIZE;
3401 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3402 CPU_TEMP_BUF_NLONGS * sizeof(long));
3404 /* Save all callee saved registers. */
3405 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
3406 tcg_out_push(s, tcg_target_callee_save_regs[i]);
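/* The prologue receives (env, tb_ptr).  On 32-bit hosts both arrive on
   the stack: env is loaded into TCG_AREG0 from above the registers just
   pushed, and tb_ptr is jumped to from its stack slot (whose offset
   also accounts for the stack_addend subtracted below).  On 64-bit
   hosts they arrive in the first two argument registers. */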
3409 #if TCG_TARGET_REG_BITS == 32
3410 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
3411 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
3412 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3413 /* jmp *tb. */
3414 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
3415 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
3416 + stack_addend);
3417 #else
3418 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3419 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3420 /* jmp *tb. */
3421 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
3422 #endif
3425 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3426 * and fall through to the rest of the epilogue.
3428 s->code_gen_epilogue = s->code_ptr;
3429 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
3431 /* TB epilogue */
3432 tb_ret_addr = s->code_ptr;
3434 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
3436 if (have_avx2) {
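/* VZEROUPPER avoids AVX-to-SSE transition penalties in the (possibly
   SSE-only) code we are about to return to. */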
3437 tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
3439 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
3440 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
3442 tcg_out_opc(s, OPC_RET, 0, 0, 0);
3444 #if !defined(CONFIG_SOFTMMU)
3445 /* Try to set up a segment register to point to guest_base. */
3446 if (guest_base) {
3447 setup_guest_base_seg();
3449 #endif
3452 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3454 memset(p, 0x90, count);
3457 static void tcg_target_init(TCGContext *s)
3459 #ifdef CONFIG_CPUID_H
3460 unsigned a, b, c, d, b7 = 0;
3461 int max = __get_cpuid_max(0, 0);
3463 if (max >= 7) {
3464 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
3465 __cpuid_count(7, 0, a, b7, c, d);
3466 have_bmi1 = (b7 & bit_BMI) != 0;
3467 have_bmi2 = (b7 & bit_BMI2) != 0;
3470 if (max >= 1) {
3471 __cpuid(1, a, b, c, d);
3472 #ifndef have_cmov
3473 /* For 32-bit, 99% certainty that we're running on hardware that
3474 supports cmov, but we still need to check. In case cmov is not
3475 available, we'll use a small forward branch. */
3476 have_cmov = (d & bit_CMOV) != 0;
3477 #endif
3479 /* MOVBE is not available on all CPUs (it first appeared on Intel Atom
3480 and later on Haswell), so we need to probe for it. */
3481 have_movbe = (c & bit_MOVBE) != 0;
3482 have_popcnt = (c & bit_POPCNT) != 0;
3484 /* There are a number of things we must check before we can be
3485 sure of not hitting an invalid opcode. */
3486 if (c & bit_OSXSAVE) {
3487 unsigned xcrl, xcrh;
3488 /* The xgetbv instruction is not available to older versions of
3489 * the assembler, so we encode the instruction manually.
3491 asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
3492 if ((xcrl & 6) == 6) {
3493 have_avx1 = (c & bit_AVX) != 0;
3494 have_avx2 = (b7 & bit_AVX2) != 0;
3499 max = __get_cpuid_max(0x80000000, 0);
3500 if (max >= 1) {
3501 __cpuid(0x80000001, a, b, c, d);
3502 /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
3503 have_lzcnt = (c & bit_LZCNT) != 0;
3505 #endif /* CONFIG_CPUID_H */
3507 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
3508 if (TCG_TARGET_REG_BITS == 64) {
3509 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
3511 if (have_avx1) {
3512 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3513 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
3515 if (have_avx2) {
3516 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
3519 tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
3520 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
3521 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
3522 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
3523 if (TCG_TARGET_REG_BITS == 64) {
3524 #if !defined(_WIN64)
3525 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
3526 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
3527 #endif
3528 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3529 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3530 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3531 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3534 s->reserved_regs = 0;
3535 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3538 typedef struct {
3539 DebugFrameHeader h;
3540 uint8_t fde_def_cfa[4];
3541 uint8_t fde_reg_ofs[14];
3542 } DebugFrame;
3544 /* We're expecting a 2 byte uleb128 encoded value. */
3545 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3547 #if !defined(__ELF__)
3548 /* Host machine without ELF. */
3549 #elif TCG_TARGET_REG_BITS == 64
3550 #define ELF_HOST_MACHINE EM_X86_64
3551 static const DebugFrame debug_frame = {
3552 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3553 .h.cie.id = -1,
3554 .h.cie.version = 1,
3555 .h.cie.code_align = 1,
3556 .h.cie.data_align = 0x78, /* sleb128 -8 */
3557 .h.cie.return_column = 16,
3559 /* Total FDE size does not include the "len" member. */
3560 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3562 .fde_def_cfa = {
3563 12, 7, /* DW_CFA_def_cfa %rsp, ... */
3564 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3565 (FRAME_SIZE >> 7)
3567 .fde_reg_ofs = {
3568 0x90, 1, /* DW_CFA_offset, %rip, -8 */
3569 /* The following ordering must match tcg_target_callee_save_regs. */
3570 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
3571 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
3572 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
3573 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
3574 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
3575 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
3578 #else
3579 #define ELF_HOST_MACHINE EM_386
3580 static const DebugFrame debug_frame = {
3581 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3582 .h.cie.id = -1,
3583 .h.cie.version = 1,
3584 .h.cie.code_align = 1,
3585 .h.cie.data_align = 0x7c, /* sleb128 -4 */
3586 .h.cie.return_column = 8,
3588 /* Total FDE size does not include the "len" member. */
3589 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3591 .fde_def_cfa = {
3592 12, 4, /* DW_CFA_def_cfa %esp, ... */
3593 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3594 (FRAME_SIZE >> 7)
3596 .fde_reg_ofs = {
3597 0x88, 1, /* DW_CFA_offset, %eip, -4 */
3598 /* The following ordering must match tcg_target_callee_save_regs. */
3599 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
3600 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
3601 0x86, 4, /* DW_CFA_offset, %esi, -16 */
3602 0x87, 5, /* DW_CFA_offset, %edi, -20 */
3605 #endif
3607 #if defined(ELF_HOST_MACHINE)
3608 void tcg_register_jit(void *buf, size_t buf_size)
3610 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3612 #endif