tcg/i386: Return false on failure from patch_reloc
[qemu/ar7.git] tcg/i386/tcg-target.inc.c
blob 28192f460833544255e437c1af35e9fbb0fab3b5
1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "tcg-pool.inc.c"
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 #if TCG_TARGET_REG_BITS == 64
30 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
31 #else
32 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
33 #endif
34 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
35 "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
36 #if TCG_TARGET_REG_BITS == 64
37 "%xmm8", "%xmm9", "%xmm10", "%xmm11",
38 "%xmm12", "%xmm13", "%xmm14", "%xmm15",
39 #endif
40 };
41 #endif
43 static const int tcg_target_reg_alloc_order[] = {
44 #if TCG_TARGET_REG_BITS == 64
45 TCG_REG_RBP,
46 TCG_REG_RBX,
47 TCG_REG_R12,
48 TCG_REG_R13,
49 TCG_REG_R14,
50 TCG_REG_R15,
51 TCG_REG_R10,
52 TCG_REG_R11,
53 TCG_REG_R9,
54 TCG_REG_R8,
55 TCG_REG_RCX,
56 TCG_REG_RDX,
57 TCG_REG_RSI,
58 TCG_REG_RDI,
59 TCG_REG_RAX,
60 #else
61 TCG_REG_EBX,
62 TCG_REG_ESI,
63 TCG_REG_EDI,
64 TCG_REG_EBP,
65 TCG_REG_ECX,
66 TCG_REG_EDX,
67 TCG_REG_EAX,
68 #endif
69 TCG_REG_XMM0,
70 TCG_REG_XMM1,
71 TCG_REG_XMM2,
72 TCG_REG_XMM3,
73 TCG_REG_XMM4,
74 TCG_REG_XMM5,
75 #ifndef _WIN64
76 /* The Win64 ABI has xmm6-xmm15 as caller-saves, and we do not save
77 any of them. Therefore only allow xmm0-xmm5 to be allocated. */
78 TCG_REG_XMM6,
79 TCG_REG_XMM7,
80 #if TCG_TARGET_REG_BITS == 64
81 TCG_REG_XMM8,
82 TCG_REG_XMM9,
83 TCG_REG_XMM10,
84 TCG_REG_XMM11,
85 TCG_REG_XMM12,
86 TCG_REG_XMM13,
87 TCG_REG_XMM14,
88 TCG_REG_XMM15,
89 #endif
90 #endif
91 };
93 static const int tcg_target_call_iarg_regs[] = {
94 #if TCG_TARGET_REG_BITS == 64
95 #if defined(_WIN64)
96 TCG_REG_RCX,
97 TCG_REG_RDX,
98 #else
99 TCG_REG_RDI,
100 TCG_REG_RSI,
101 TCG_REG_RDX,
102 TCG_REG_RCX,
103 #endif
104 TCG_REG_R8,
105 TCG_REG_R9,
106 #else
107 /* 32 bit mode uses stack based calling convention (GCC default). */
108 #endif
109 };
111 static const int tcg_target_call_oarg_regs[] = {
112 TCG_REG_EAX,
113 #if TCG_TARGET_REG_BITS == 32
114 TCG_REG_EDX
115 #endif
116 };
118 /* Constants we accept. */
119 #define TCG_CT_CONST_S32 0x100
120 #define TCG_CT_CONST_U32 0x200
121 #define TCG_CT_CONST_I32 0x400
122 #define TCG_CT_CONST_WSZ 0x800
124 /* Registers used with L constraint, which are the first argument
125 registers on x86_64, and two random call clobbered registers on
126 i386. */
127 #if TCG_TARGET_REG_BITS == 64
128 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
129 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
130 #else
131 # define TCG_REG_L0 TCG_REG_EAX
132 # define TCG_REG_L1 TCG_REG_EDX
133 #endif
135 /* The host compiler should supply <cpuid.h> to enable runtime features
136 detection, as we're not going to go so far as our own inline assembly.
137 If not available, default values will be assumed. */
138 #if defined(CONFIG_CPUID_H)
139 #include "qemu/cpuid.h"
140 #endif
142 /* For 64-bit, we always know that CMOV is available. */
143 #if TCG_TARGET_REG_BITS == 64
144 # define have_cmov 1
145 #elif defined(CONFIG_CPUID_H)
146 static bool have_cmov;
147 #else
148 # define have_cmov 0
149 #endif
151 /* We need these symbols in tcg-target.h, and we can't properly conditionalize
152 it there. Therefore we always define the variable. */
153 bool have_bmi1;
154 bool have_popcnt;
155 bool have_avx1;
156 bool have_avx2;
158 #ifdef CONFIG_CPUID_H
159 static bool have_movbe;
160 static bool have_bmi2;
161 static bool have_lzcnt;
162 #else
163 # define have_movbe 0
164 # define have_bmi2 0
165 # define have_lzcnt 0
166 #endif
168 static tcg_insn_unit *tb_ret_addr;
170 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
171                         intptr_t value, intptr_t addend)
172 {
173     value += addend;
174     switch(type) {
175     case R_386_PC32:
176         value -= (uintptr_t)code_ptr;
177         if (value != (int32_t)value) {
178             return false;
179         }
180         /* FALLTHRU */
181     case R_386_32:
182         tcg_patch32(code_ptr, value);
183         break;
184     case R_386_PC8:
185         value -= (uintptr_t)code_ptr;
186         if (value != (int8_t)value) {
187             return false;
188         }
189         tcg_patch8(code_ptr, value);
190         break;
191     default:
192         tcg_abort();
193     }
194     return true;
195 }
197 #if TCG_TARGET_REG_BITS == 64
198 #define ALL_GENERAL_REGS 0x0000ffffu
199 #define ALL_VECTOR_REGS 0xffff0000u
200 #else
201 #define ALL_GENERAL_REGS 0x000000ffu
202 #define ALL_VECTOR_REGS 0x00ff0000u
203 #endif
205 /* parse target specific constraints */
206 static const char *target_parse_constraint(TCGArgConstraint *ct,
207 const char *ct_str, TCGType type)
209 switch(*ct_str++) {
210 case 'a':
211 ct->ct |= TCG_CT_REG;
212 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
213 break;
214 case 'b':
215 ct->ct |= TCG_CT_REG;
216 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
217 break;
218 case 'c':
219 ct->ct |= TCG_CT_REG;
220 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
221 break;
222 case 'd':
223 ct->ct |= TCG_CT_REG;
224 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
225 break;
226 case 'S':
227 ct->ct |= TCG_CT_REG;
228 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
229 break;
230 case 'D':
231 ct->ct |= TCG_CT_REG;
232 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
233 break;
234 case 'q':
235 /* A register that can be used as a byte operand. */
236 ct->ct |= TCG_CT_REG;
237 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
238 break;
239 case 'Q':
240 /* A register with an addressable second byte (e.g. %ah). */
241 ct->ct |= TCG_CT_REG;
242 ct->u.regs = 0xf;
243 break;
244 case 'r':
245 /* A general register. */
246 ct->ct |= TCG_CT_REG;
247 ct->u.regs |= ALL_GENERAL_REGS;
248 break;
249 case 'W':
250 /* With TZCNT/LZCNT, we can have operand-size as an input. */
251 ct->ct |= TCG_CT_CONST_WSZ;
252 break;
253 case 'x':
254 /* A vector register. */
255 ct->ct |= TCG_CT_REG;
256 ct->u.regs |= ALL_VECTOR_REGS;
257 break;
259 /* qemu_ld/st address constraint */
260 case 'L':
261 ct->ct |= TCG_CT_REG;
262 ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
263 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
264 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
265 break;
267 case 'e':
268 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
269 break;
270 case 'Z':
271 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
272 break;
273 case 'I':
274 ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
275 break;
277 default:
278 return NULL;
280 return ct_str;
283 /* test if a constant matches the constraint */
284 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
285 const TCGArgConstraint *arg_ct)
287 int ct = arg_ct->ct;
288 if (ct & TCG_CT_CONST) {
289 return 1;
291 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
292 return 1;
294 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
295 return 1;
297 if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
298 return 1;
300 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
301 return 1;
303 return 0;
306 # define LOWREGMASK(x) ((x) & 7)
308 #define P_EXT 0x100 /* 0x0f opcode prefix */
309 #define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
310 #define P_DATA16 0x400 /* 0x66 opcode prefix */
311 #if TCG_TARGET_REG_BITS == 64
312 # define P_ADDR32 0x800 /* 0x67 opcode prefix */
313 # define P_REXW 0x1000 /* Set REX.W = 1 */
314 # define P_REXB_R 0x2000 /* REG field as byte register */
315 # define P_REXB_RM 0x4000 /* R/M field as byte register */
316 # define P_GS 0x8000 /* gs segment override */
317 #else
318 # define P_ADDR32 0
319 # define P_REXW 0
320 # define P_REXB_R 0
321 # define P_REXB_RM 0
322 # define P_GS 0
323 #endif
324 #define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */
325 #define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */
326 #define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */
327 #define P_VEXL 0x80000 /* Set VEX.L = 1 */
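/* For reference: tcg_out_opc() expands these flag bits into prefix bytes.
   For example, an opcode defined as (0x74 | P_EXT | P_DATA16) is emitted
   as 66 0f 74 -- the 0x66 operand-size prefix, the 0x0f escape, then the
   opcode byte -- plus a REX byte when one is required. */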
329 #define OPC_ARITH_EvIz (0x81)
330 #define OPC_ARITH_EvIb (0x83)
331 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
332 #define OPC_ANDN (0xf2 | P_EXT38)
333 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
334 #define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16)
335 #define OPC_BSF (0xbc | P_EXT)
336 #define OPC_BSR (0xbd | P_EXT)
337 #define OPC_BSWAP (0xc8 | P_EXT)
338 #define OPC_CALL_Jz (0xe8)
339 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
340 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
341 #define OPC_DEC_r32 (0x48)
342 #define OPC_IMUL_GvEv (0xaf | P_EXT)
343 #define OPC_IMUL_GvEvIb (0x6b)
344 #define OPC_IMUL_GvEvIz (0x69)
345 #define OPC_INC_r32 (0x40)
346 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
347 #define OPC_JCC_short (0x70) /* ... plus condition code */
348 #define OPC_JMP_long (0xe9)
349 #define OPC_JMP_short (0xeb)
350 #define OPC_LEA (0x8d)
351 #define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3)
352 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
353 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
354 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
355 #define OPC_MOVB_EvIz (0xc6)
356 #define OPC_MOVL_EvIz (0xc7)
357 #define OPC_MOVL_Iv (0xb8)
358 #define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
359 #define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
360 #define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16)
361 #define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16)
362 #define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2)
363 #define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
364 #define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
365 #define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
366 #define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
367 #define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3)
368 #define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16)
369 #define OPC_MOVSBL (0xbe | P_EXT)
370 #define OPC_MOVSWL (0xbf | P_EXT)
371 #define OPC_MOVSLQ (0x63 | P_REXW)
372 #define OPC_MOVZBL (0xb6 | P_EXT)
373 #define OPC_MOVZWL (0xb7 | P_EXT)
374 #define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
375 #define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
376 #define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
377 #define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16)
378 #define OPC_PADDB (0xfc | P_EXT | P_DATA16)
379 #define OPC_PADDW (0xfd | P_EXT | P_DATA16)
380 #define OPC_PADDD (0xfe | P_EXT | P_DATA16)
381 #define OPC_PADDQ (0xd4 | P_EXT | P_DATA16)
382 #define OPC_PAND (0xdb | P_EXT | P_DATA16)
383 #define OPC_PANDN (0xdf | P_EXT | P_DATA16)
384 #define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16)
385 #define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16)
386 #define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16)
387 #define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16)
388 #define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16)
389 #define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16)
390 #define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16)
391 #define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16)
392 #define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16)
393 #define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
394 #define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
395 #define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
396 #define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16)
397 #define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16)
398 #define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
399 #define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
400 #define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
401 #define OPC_POR (0xeb | P_EXT | P_DATA16)
402 #define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
403 #define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
404 #define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2)
405 #define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3)
406 #define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
407 #define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
408 #define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
409 #define OPC_PSUBB (0xf8 | P_EXT | P_DATA16)
410 #define OPC_PSUBW (0xf9 | P_EXT | P_DATA16)
411 #define OPC_PSUBD (0xfa | P_EXT | P_DATA16)
412 #define OPC_PSUBQ (0xfb | P_EXT | P_DATA16)
413 #define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16)
414 #define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16)
415 #define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16)
416 #define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16)
417 #define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16)
418 #define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16)
419 #define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16)
420 #define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16)
421 #define OPC_PXOR (0xef | P_EXT | P_DATA16)
422 #define OPC_POP_r32 (0x58)
423 #define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3)
424 #define OPC_PUSH_r32 (0x50)
425 #define OPC_PUSH_Iv (0x68)
426 #define OPC_PUSH_Ib (0x6a)
427 #define OPC_RET (0xc3)
428 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
429 #define OPC_SHIFT_1 (0xd1)
430 #define OPC_SHIFT_Ib (0xc1)
431 #define OPC_SHIFT_cl (0xd3)
432 #define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3)
433 #define OPC_SHUFPS (0xc6 | P_EXT)
434 #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
435 #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
436 #define OPC_TESTL (0x85)
437 #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
438 #define OPC_UD2 (0x0b | P_EXT)
439 #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16)
440 #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16)
441 #define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
442 #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
443 #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
444 #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
445 #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
446 #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
447 #define OPC_VZEROUPPER (0x77 | P_EXT)
448 #define OPC_XCHG_ax_r32 (0x90)
450 #define OPC_GRP3_Ev (0xf7)
451 #define OPC_GRP5 (0xff)
452 #define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
454 /* Group 1 opcode extensions for 0x80-0x83.
455 These are also used as modifiers for OPC_ARITH. */
456 #define ARITH_ADD 0
457 #define ARITH_OR 1
458 #define ARITH_ADC 2
459 #define ARITH_SBB 3
460 #define ARITH_AND 4
461 #define ARITH_SUB 5
462 #define ARITH_XOR 6
463 #define ARITH_CMP 7
465 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
466 #define SHIFT_ROL 0
467 #define SHIFT_ROR 1
468 #define SHIFT_SHL 4
469 #define SHIFT_SHR 5
470 #define SHIFT_SAR 7
472 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
473 #define EXT3_NOT 2
474 #define EXT3_NEG 3
475 #define EXT3_MUL 4
476 #define EXT3_IMUL 5
477 #define EXT3_DIV 6
478 #define EXT3_IDIV 7
480 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
481 #define EXT5_INC_Ev 0
482 #define EXT5_DEC_Ev 1
483 #define EXT5_CALLN_Ev 2
484 #define EXT5_JMPN_Ev 4
486 /* Condition codes to be added to OPC_JCC_{long,short}. */
487 #define JCC_JMP (-1)
488 #define JCC_JO 0x0
489 #define JCC_JNO 0x1
490 #define JCC_JB 0x2
491 #define JCC_JAE 0x3
492 #define JCC_JE 0x4
493 #define JCC_JNE 0x5
494 #define JCC_JBE 0x6
495 #define JCC_JA 0x7
496 #define JCC_JS 0x8
497 #define JCC_JNS 0x9
498 #define JCC_JP 0xa
499 #define JCC_JNP 0xb
500 #define JCC_JL 0xc
501 #define JCC_JGE 0xd
502 #define JCC_JLE 0xe
503 #define JCC_JG 0xf
505 static const uint8_t tcg_cond_to_jcc[] = {
506 [TCG_COND_EQ] = JCC_JE,
507 [TCG_COND_NE] = JCC_JNE,
508 [TCG_COND_LT] = JCC_JL,
509 [TCG_COND_GE] = JCC_JGE,
510 [TCG_COND_LE] = JCC_JLE,
511 [TCG_COND_GT] = JCC_JG,
512 [TCG_COND_LTU] = JCC_JB,
513 [TCG_COND_GEU] = JCC_JAE,
514 [TCG_COND_LEU] = JCC_JBE,
515 [TCG_COND_GTU] = JCC_JA,
516 };
518 #if TCG_TARGET_REG_BITS == 64
519 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
521 int rex;
523 if (opc & P_GS) {
524 tcg_out8(s, 0x65);
526 if (opc & P_DATA16) {
527 /* We should never be asking for both 16 and 64-bit operation. */
528 tcg_debug_assert((opc & P_REXW) == 0);
529 tcg_out8(s, 0x66);
531 if (opc & P_ADDR32) {
532 tcg_out8(s, 0x67);
534 if (opc & P_SIMDF3) {
535 tcg_out8(s, 0xf3);
536 } else if (opc & P_SIMDF2) {
537 tcg_out8(s, 0xf2);
540 rex = 0;
541 rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
542 rex |= (r & 8) >> 1; /* REX.R */
543 rex |= (x & 8) >> 2; /* REX.X */
544 rex |= (rm & 8) >> 3; /* REX.B */
546 /* P_REXB_{R,RM} indicates that the given register is the low byte.
547 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
548 as otherwise the encoding indicates %[abcd]h. Note that the values
549 that are ORed in merely indicate that the REX byte must be present;
550 those bits get discarded in output. */
551 rex |= opc & (r >= 4 ? P_REXB_R : 0);
552 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
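    /* For example, "movzbl %sil, %eax" must carry a bare REX prefix
       (40 0f b6 c6); without the REX byte, r/m encoding 6 would name
       %dh rather than %sil. */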
554 if (rex) {
555 tcg_out8(s, (uint8_t)(rex | 0x40));
558 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
559 tcg_out8(s, 0x0f);
560 if (opc & P_EXT38) {
561 tcg_out8(s, 0x38);
562 } else if (opc & P_EXT3A) {
563 tcg_out8(s, 0x3a);
567 tcg_out8(s, opc);
569 #else
570 static void tcg_out_opc(TCGContext *s, int opc)
572 if (opc & P_DATA16) {
573 tcg_out8(s, 0x66);
575 if (opc & P_SIMDF3) {
576 tcg_out8(s, 0xf3);
577 } else if (opc & P_SIMDF2) {
578 tcg_out8(s, 0xf2);
580 if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
581 tcg_out8(s, 0x0f);
582 if (opc & P_EXT38) {
583 tcg_out8(s, 0x38);
584 } else if (opc & P_EXT3A) {
585 tcg_out8(s, 0x3a);
588 tcg_out8(s, opc);
590 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
591 the 32-bit compilation paths. This method works with all versions of gcc,
592 whereas relying on optimization may not be able to exclude them. */
593 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
594 #endif
596 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
598 tcg_out_opc(s, opc, r, rm, 0);
599 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
602 static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
603 int rm, int index)
605 int tmp;
607 /* Use the two byte form if possible, which cannot encode
608 VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
609 if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
610 && ((rm | index) & 8) == 0) {
611 /* Two byte VEX prefix. */
612 tcg_out8(s, 0xc5);
614 tmp = (r & 8 ? 0 : 0x80); /* VEX.R */
615 } else {
616 /* Three byte VEX prefix. */
617 tcg_out8(s, 0xc4);
619 /* VEX.m-mmmm */
620 if (opc & P_EXT3A) {
621 tmp = 3;
622 } else if (opc & P_EXT38) {
623 tmp = 2;
624 } else if (opc & P_EXT) {
625 tmp = 1;
626 } else {
627 g_assert_not_reached();
629 tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */
630 tmp |= (index & 8 ? 0 : 0x40); /* VEX.X */
631 tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
632 tcg_out8(s, tmp);
634 tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
637 tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
638 /* VEX.pp */
639 if (opc & P_DATA16) {
640 tmp |= 1; /* 0x66 */
641 } else if (opc & P_SIMDF3) {
642 tmp |= 2; /* 0xf3 */
643 } else if (opc & P_SIMDF2) {
644 tmp |= 3; /* 0xf2 */
646 tmp |= (~v & 15) << 3; /* VEX.vvvv */
647 tcg_out8(s, tmp);
648 tcg_out8(s, opc);
651 static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
653 tcg_out_vex_opc(s, opc, r, v, rm, 0);
654 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
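/* Worked example: tcg_out_vex_modrm(s, OPC_PXOR, 2, 1, 0), i.e.
   "vpxor %xmm0,%xmm1,%xmm2", takes the two-byte path above and emits
   c5 f1 ef d0: 0xc5, then 0xf1 (inverted VEX.R, vvvv=~1, L=0, pp=66),
   the 0xef opcode, and modrm 0xd0. */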
657 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
658 We handle either RM and INDEX missing with a negative value. In 64-bit
659 mode for absolute addresses, ~RM is the size of the immediate operand
660 that will follow the instruction. */
662 static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
663 int shift, intptr_t offset)
665 int mod, len;
667 if (index < 0 && rm < 0) {
668 if (TCG_TARGET_REG_BITS == 64) {
669 /* Try for a rip-relative addressing mode. This has replaced
670 the 32-bit-mode absolute addressing encoding. */
671 intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
672 intptr_t disp = offset - pc;
673 if (disp == (int32_t)disp) {
674 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
675 tcg_out32(s, disp);
676 return;
679 /* Try for an absolute address encoding. This requires the
680 use of the MODRM+SIB encoding and is therefore larger than
681 rip-relative addressing. */
682 if (offset == (int32_t)offset) {
683 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
684 tcg_out8(s, (4 << 3) | 5);
685 tcg_out32(s, offset);
686 return;
689 /* ??? The memory isn't directly addressable. */
690 g_assert_not_reached();
691 } else {
692 /* Absolute address. */
693 tcg_out8(s, (r << 3) | 5);
694 tcg_out32(s, offset);
695 return;
699 /* Find the length of the immediate addend. Note that the encoding
700 that would be used for (%ebp) indicates absolute addressing. */
701 if (rm < 0) {
702 mod = 0, len = 4, rm = 5;
703 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
704 mod = 0, len = 0;
705 } else if (offset == (int8_t)offset) {
706 mod = 0x40, len = 1;
707 } else {
708 mod = 0x80, len = 4;
711 /* Use a single byte MODRM format if possible. Note that the encoding
712 that would be used for %esp is the escape to the two byte form. */
713 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
714 /* Single byte MODRM format. */
715 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
716 } else {
717 /* Two byte MODRM+SIB format. */
719 /* Note that the encoding that would place %esp into the index
720 field indicates no index register. In 64-bit mode, the REX.X
721 bit counts, so %r12 can be used as the index. */
722 if (index < 0) {
723 index = 4;
724 } else {
725 tcg_debug_assert(index != TCG_REG_ESP);
728 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
729 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
732 if (len == 1) {
733 tcg_out8(s, offset);
734 } else if (len == 4) {
735 tcg_out32(s, offset);
739 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
740 int index, int shift, intptr_t offset)
742 tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
743 tcg_out_sib_offset(s, r, rm, index, shift, offset);
746 static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
747 int rm, int index, int shift,
748 intptr_t offset)
750 tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
751 tcg_out_sib_offset(s, r, rm, index, shift, offset);
754 /* A simplification of the above with no index or shift. */
755 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
756 int rm, intptr_t offset)
758 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
761 static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
762 int v, int rm, intptr_t offset)
764 tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
767 /* Output an opcode with an expected reference to the constant pool. */
768 static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
770 tcg_out_opc(s, opc, r, 0, 0);
771 /* Absolute for 32-bit, pc-relative for 64-bit. */
772 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
773 tcg_out32(s, 0);
776 /* Output an opcode with an expected reference to the constant pool. */
777 static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
779 tcg_out_vex_opc(s, opc, r, 0, 0, 0);
780 /* Absolute for 32-bit, pc-relative for 64-bit. */
781 tcg_out8(s, LOWREGMASK(r) << 3 | 5);
782 tcg_out32(s, 0);
785 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
786 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
788 /* Propagate an opcode prefix, such as P_REXW. */
789 int ext = subop & ~0x7;
790 subop &= 0x7;
792 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
795 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
797 int rexw = 0;
799 if (arg == ret) {
800 return;
802 switch (type) {
803 case TCG_TYPE_I64:
804 rexw = P_REXW;
805 /* fallthru */
806 case TCG_TYPE_I32:
807 if (ret < 16) {
808 if (arg < 16) {
809 tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
810 } else {
811 tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
813 } else {
814 if (arg < 16) {
815 tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
816 } else {
817 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
820 break;
822 case TCG_TYPE_V64:
823 tcg_debug_assert(ret >= 16 && arg >= 16);
824 tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
825 break;
826 case TCG_TYPE_V128:
827 tcg_debug_assert(ret >= 16 && arg >= 16);
828 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
829 break;
830 case TCG_TYPE_V256:
831 tcg_debug_assert(ret >= 16 && arg >= 16);
832 tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
833 break;
835 default:
836 g_assert_not_reached();
840 static void tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
841 TCGReg r, TCGReg a)
843 if (have_avx2) {
844 static const int dup_insn[4] = {
845 OPC_VPBROADCASTB, OPC_VPBROADCASTW,
846 OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
848 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
849 tcg_out_vex_modrm(s, dup_insn[vece] + vex_l, r, 0, a);
850 } else {
851 switch (vece) {
852 case MO_8:
853 /* ??? With zero in a register, use PSHUFB. */
854 tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
855 a = r;
856 /* FALLTHRU */
857 case MO_16:
858 tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
859 a = r;
860 /* FALLTHRU */
861 case MO_32:
862 tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
863 /* imm8 operand: all output lanes selected from input lane 0. */
864 tcg_out8(s, 0);
865 break;
866 case MO_64:
867 tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
868 break;
869 default:
870 g_assert_not_reached();
875 static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
876 TCGReg ret, tcg_target_long arg)
878 int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
880 if (arg == 0) {
881 tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
882 return;
884 if (arg == -1) {
885 tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
886 return;
889 if (TCG_TARGET_REG_BITS == 64) {
890 if (type == TCG_TYPE_V64) {
891 tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
892 } else if (have_avx2) {
893 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
894 } else {
895 tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
897 new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
898 } else if (have_avx2) {
899 tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
900 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
901 } else {
902 tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy, ret);
903 new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
904 tcg_out_dup_vec(s, type, MO_32, ret, ret);
908 static void tcg_out_movi(TCGContext *s, TCGType type,
909 TCGReg ret, tcg_target_long arg)
911 tcg_target_long diff;
913 switch (type) {
914 case TCG_TYPE_I32:
915 #if TCG_TARGET_REG_BITS == 64
916 case TCG_TYPE_I64:
917 #endif
918 if (ret < 16) {
919 break;
921 /* fallthru */
922 case TCG_TYPE_V64:
923 case TCG_TYPE_V128:
924 case TCG_TYPE_V256:
925 tcg_debug_assert(ret >= 16);
926 tcg_out_dupi_vec(s, type, ret, arg);
927 return;
928 default:
929 g_assert_not_reached();
932 if (arg == 0) {
933 tgen_arithr(s, ARITH_XOR, ret, ret);
934 return;
936 if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
937 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
938 tcg_out32(s, arg);
939 return;
941 if (arg == (int32_t)arg) {
942 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
943 tcg_out32(s, arg);
944 return;
947 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
948 diff = arg - ((uintptr_t)s->code_ptr + 7);
949 if (diff == (int32_t)diff) {
950 tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
951 tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
952 tcg_out32(s, diff);
953 return;
956 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
957 tcg_out64(s, arg);
960 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
962 if (val == (int8_t)val) {
963 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
964 tcg_out8(s, val);
965 } else if (val == (int32_t)val) {
966 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
967 tcg_out32(s, val);
968 } else {
969 tcg_abort();
973 static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
975 /* Given the strength of x86 memory ordering, we only need care for
976 store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
977 faster than "mfence", so don't bother with the sse insn. */
978 if (a0 & TCG_MO_ST_LD) {
979 tcg_out8(s, 0xf0);
980 tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
981 tcg_out8(s, 0);
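    /* For reference, the bytes emitted here are f0 83 0c 24 00,
       i.e. "lock orl $0,(%esp)". */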
985 static inline void tcg_out_push(TCGContext *s, int reg)
987 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
990 static inline void tcg_out_pop(TCGContext *s, int reg)
992 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
995 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
996 TCGReg arg1, intptr_t arg2)
998 switch (type) {
999 case TCG_TYPE_I32:
1000 if (ret < 16) {
1001 tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
1002 } else {
1003 tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
1005 break;
1006 case TCG_TYPE_I64:
1007 if (ret < 16) {
1008 tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
1009 break;
1011 /* FALLTHRU */
1012 case TCG_TYPE_V64:
1013 tcg_debug_assert(ret >= 16);
1014 tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
1015 break;
1016 case TCG_TYPE_V128:
1017 tcg_debug_assert(ret >= 16);
1018 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx, ret, 0, arg1, arg2);
1019 break;
1020 case TCG_TYPE_V256:
1021 tcg_debug_assert(ret >= 16);
1022 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
1023 ret, 0, arg1, arg2);
1024 break;
1025 default:
1026 g_assert_not_reached();
1030 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1031 TCGReg arg1, intptr_t arg2)
1033 switch (type) {
1034 case TCG_TYPE_I32:
1035 if (arg < 16) {
1036 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
1037 } else {
1038 tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
1040 break;
1041 case TCG_TYPE_I64:
1042 if (arg < 16) {
1043 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
1044 break;
1046 /* FALLTHRU */
1047 case TCG_TYPE_V64:
1048 tcg_debug_assert(arg >= 16);
1049 tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
1050 break;
1051 case TCG_TYPE_V128:
1052 tcg_debug_assert(arg >= 16);
1053 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx, arg, 0, arg1, arg2);
1054 break;
1055 case TCG_TYPE_V256:
1056 tcg_debug_assert(arg >= 16);
1057 tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
1058 arg, 0, arg1, arg2);
1059 break;
1060 default:
1061 g_assert_not_reached();
1065 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1066 TCGReg base, intptr_t ofs)
1068 int rexw = 0;
1069 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
1070 if (val != (int32_t)val) {
1071 return false;
1073 rexw = P_REXW;
1074 } else if (type != TCG_TYPE_I32) {
1075 return false;
1077 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
1078 tcg_out32(s, val);
1079 return true;
1082 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
1084 /* Propagate an opcode prefix, such as P_DATA16. */
1085 int ext = subopc & ~0x7;
1086 subopc &= 0x7;
1088 if (count == 1) {
1089 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
1090 } else {
1091 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
1092 tcg_out8(s, count);
1096 static inline void tcg_out_bswap32(TCGContext *s, int reg)
1098 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
1101 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
1103 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
1106 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
1108 /* movzbl */
1109 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1110 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
1113 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
1115 /* movsbl */
1116 tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
1117 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
1120 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
1122 /* movzwl */
1123 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
1126 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
1128 /* movsw[lq] */
1129 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
1132 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
1134 /* 32-bit mov zero extends. */
1135 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
1138 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
1140 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
1143 static inline void tcg_out_bswap64(TCGContext *s, int reg)
1145 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
1148 static void tgen_arithi(TCGContext *s, int c, int r0,
1149 tcg_target_long val, int cf)
1151 int rexw = 0;
1153 if (TCG_TARGET_REG_BITS == 64) {
1154 rexw = c & -8;
1155 c &= 7;
1158 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
1159 partial flags update stalls on Pentium4 and are not recommended
1160 by current Intel optimization manuals. */
1161 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
1162 int is_inc = (c == ARITH_ADD) ^ (val < 0);
1163 if (TCG_TARGET_REG_BITS == 64) {
1164 /* The single-byte increment encodings are re-tasked as the
1165 REX prefixes. Use the MODRM encoding. */
1166 tcg_out_modrm(s, OPC_GRP5 + rexw,
1167 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
1168 } else {
1169 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
1171 return;
1174 if (c == ARITH_AND) {
1175 if (TCG_TARGET_REG_BITS == 64) {
1176 if (val == 0xffffffffu) {
1177 tcg_out_ext32u(s, r0, r0);
1178 return;
1180 if (val == (uint32_t)val) {
1181 /* AND with no high bits set can use a 32-bit operation. */
1182 rexw = 0;
1185 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
1186 tcg_out_ext8u(s, r0, r0);
1187 return;
1189 if (val == 0xffffu) {
1190 tcg_out_ext16u(s, r0, r0);
1191 return;
1195 if (val == (int8_t)val) {
1196 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
1197 tcg_out8(s, val);
1198 return;
1200 if (rexw == 0 || val == (int32_t)val) {
1201 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
1202 tcg_out32(s, val);
1203 return;
1206 tcg_abort();
1209 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1211 if (val != 0) {
1212 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
1216 /* Use SMALL != 0 to force a short forward branch. */
1217 static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
1219 int32_t val, val1;
1221 if (l->has_value) {
1222 val = tcg_pcrel_diff(s, l->u.value_ptr);
1223 val1 = val - 2;
1224 if ((int8_t)val1 == val1) {
1225 if (opc == -1) {
1226 tcg_out8(s, OPC_JMP_short);
1227 } else {
1228 tcg_out8(s, OPC_JCC_short + opc);
1230 tcg_out8(s, val1);
1231 } else {
1232 if (small) {
1233 tcg_abort();
1235 if (opc == -1) {
1236 tcg_out8(s, OPC_JMP_long);
1237 tcg_out32(s, val - 5);
1238 } else {
1239 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1240 tcg_out32(s, val - 6);
1243 } else if (small) {
1244 if (opc == -1) {
1245 tcg_out8(s, OPC_JMP_short);
1246 } else {
1247 tcg_out8(s, OPC_JCC_short + opc);
1249 tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
1250 s->code_ptr += 1;
1251 } else {
1252 if (opc == -1) {
1253 tcg_out8(s, OPC_JMP_long);
1254 } else {
1255 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
1257 tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
1258 s->code_ptr += 4;
1262 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
1263 int const_arg2, int rexw)
1265 if (const_arg2) {
1266 if (arg2 == 0) {
1267 /* test r, r */
1268 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
1269 } else {
1270 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
1272 } else {
1273 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
1277 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
1278 TCGArg arg1, TCGArg arg2, int const_arg2,
1279 TCGLabel *label, int small)
1281 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1282 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1285 #if TCG_TARGET_REG_BITS == 64
1286 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
1287 TCGArg arg1, TCGArg arg2, int const_arg2,
1288 TCGLabel *label, int small)
1290 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1291 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
1293 #else
1294 /* XXX: we implement it at the target level to avoid having to
1295 handle cross basic blocks temporaries */
1296 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1297 const int *const_args, int small)
1299 TCGLabel *label_next = gen_new_label();
1300 TCGLabel *label_this = arg_label(args[5]);
1302 switch(args[4]) {
1303 case TCG_COND_EQ:
1304 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1305 label_next, 1);
1306 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
1307 label_this, small);
1308 break;
1309 case TCG_COND_NE:
1310 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1311 label_this, small);
1312 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
1313 label_this, small);
1314 break;
1315 case TCG_COND_LT:
1316 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1317 label_this, small);
1318 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1319 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1320 label_this, small);
1321 break;
1322 case TCG_COND_LE:
1323 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1324 label_this, small);
1325 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1326 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1327 label_this, small);
1328 break;
1329 case TCG_COND_GT:
1330 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1331 label_this, small);
1332 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1333 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1334 label_this, small);
1335 break;
1336 case TCG_COND_GE:
1337 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1338 label_this, small);
1339 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1340 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1341 label_this, small);
1342 break;
1343 case TCG_COND_LTU:
1344 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1345 label_this, small);
1346 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1347 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1348 label_this, small);
1349 break;
1350 case TCG_COND_LEU:
1351 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1352 label_this, small);
1353 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1354 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1355 label_this, small);
1356 break;
1357 case TCG_COND_GTU:
1358 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1359 label_this, small);
1360 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1361 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1362 label_this, small);
1363 break;
1364 case TCG_COND_GEU:
1365 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1366 label_this, small);
1367 tcg_out_jxx(s, JCC_JNE, label_next, 1);
1368 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1369 label_this, small);
1370 break;
1371 default:
1372 tcg_abort();
1374 tcg_out_label(s, label_next, s->code_ptr);
1376 #endif
1378 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
1379 TCGArg arg1, TCGArg arg2, int const_arg2)
1381 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1382 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1383 tcg_out_ext8u(s, dest, dest);
1386 #if TCG_TARGET_REG_BITS == 64
1387 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
1388 TCGArg arg1, TCGArg arg2, int const_arg2)
1390 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1391 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1392 tcg_out_ext8u(s, dest, dest);
1394 #else
1395 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1396 const int *const_args)
1398 TCGArg new_args[6];
1399 TCGLabel *label_true, *label_over;
1401 memcpy(new_args, args+1, 5*sizeof(TCGArg));
1403 if (args[0] == args[1] || args[0] == args[2]
1404 || (!const_args[3] && args[0] == args[3])
1405 || (!const_args[4] && args[0] == args[4])) {
1406 /* When the destination overlaps with one of the argument
1407 registers, don't do anything tricky. */
1408 label_true = gen_new_label();
1409 label_over = gen_new_label();
1411 new_args[5] = label_arg(label_true);
1412 tcg_out_brcond2(s, new_args, const_args+1, 1);
1414 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1415 tcg_out_jxx(s, JCC_JMP, label_over, 1);
1416 tcg_out_label(s, label_true, s->code_ptr);
1418 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
1419 tcg_out_label(s, label_over, s->code_ptr);
1420 } else {
1421 /* When the destination does not overlap one of the arguments,
1422 clear the destination first, jump if cond false, and emit an
1423 increment in the true case. This results in smaller code. */
1425 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1427 label_over = gen_new_label();
1428 new_args[4] = tcg_invert_cond(new_args[4]);
1429 new_args[5] = label_arg(label_over);
1430 tcg_out_brcond2(s, new_args, const_args+1, 1);
1432 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
1433 tcg_out_label(s, label_over, s->code_ptr);
1436 #endif
1438 static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
1439 TCGReg dest, TCGReg v1)
1441 if (have_cmov) {
1442 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
1443 } else {
1444 TCGLabel *over = gen_new_label();
1445 tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
1446 tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
1447 tcg_out_label(s, over, s->code_ptr);
1451 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
1452 TCGReg c1, TCGArg c2, int const_c2,
1453 TCGReg v1)
1455 tcg_out_cmp(s, c1, c2, const_c2, 0);
1456 tcg_out_cmov(s, cond, 0, dest, v1);
1459 #if TCG_TARGET_REG_BITS == 64
1460 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
1461 TCGReg c1, TCGArg c2, int const_c2,
1462 TCGReg v1)
1464 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
1465 tcg_out_cmov(s, cond, P_REXW, dest, v1);
1467 #endif
1469 static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1470 TCGArg arg2, bool const_a2)
1472 if (have_bmi1) {
1473 tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
1474 if (const_a2) {
1475 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1476 } else {
1477 tcg_debug_assert(dest != arg2);
1478 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1480 } else {
1481 tcg_debug_assert(dest != arg2);
1482 tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
1483 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1487 static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1488 TCGArg arg2, bool const_a2)
1490 if (have_lzcnt) {
1491 tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
1492 if (const_a2) {
1493 tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1494 } else {
1495 tcg_debug_assert(dest != arg2);
1496 tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1498 } else {
1499 tcg_debug_assert(!const_a2);
1500 tcg_debug_assert(dest != arg1);
1501 tcg_debug_assert(dest != arg2);
1503 /* Recall that the output of BSR is the index not the count. */
1504 tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
1505 tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
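    /* E.g. for a 32-bit input of 1, BSR writes index 0 and the XOR with 31
       yields 31, the leading-zero count (clz(x) == 31 - bsr(x)). */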
1507 /* Since we have destroyed the flags from BSR, we have to re-test. */
1508 tcg_out_cmp(s, arg1, 0, 1, rexw);
1509 tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1513 static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
1515 intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
1517 if (disp == (int32_t)disp) {
1518 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1519 tcg_out32(s, disp);
1520 } else {
1521 /* rip-relative addressing into the constant pool.
1522 This is 6 + 8 = 14 bytes, as compared to using an
1523 immediate load, 10 + 6 = 16 bytes, plus we may
1524 be able to re-use the pool constant for more calls. */
1525 tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
1526 tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
1527 new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
1528 tcg_out32(s, 0);
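    /* The indirect form above is "call/jmp *disp32(%rip)" (ff /2 or ff /4,
       6 bytes), with the 8-byte target address stored in the constant pool. */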
1532 static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1534 tcg_out_branch(s, 1, dest);
1537 static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
1539 tcg_out_branch(s, 0, dest);
1542 static void tcg_out_nopn(TCGContext *s, int n)
1544 int i;
1545 /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
1546 * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
1547 * duplicate prefix, and all of the interesting recent cores can
1548 * decode and discard the duplicates in a single cycle.
1549 */
1550 tcg_debug_assert(n >= 1);
1551 for (i = 1; i < n; ++i) {
1552 tcg_out8(s, 0x66);
1554 tcg_out8(s, 0x90);
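    /* E.g. tcg_out_nopn(s, 3) emits 66 66 90. */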
1557 #if defined(CONFIG_SOFTMMU)
1558 #include "tcg-ldst.inc.c"
1560 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1561 * int mmu_idx, uintptr_t ra)
1562 */
1563 static void * const qemu_ld_helpers[16] = {
1564 [MO_UB] = helper_ret_ldub_mmu,
1565 [MO_LEUW] = helper_le_lduw_mmu,
1566 [MO_LEUL] = helper_le_ldul_mmu,
1567 [MO_LEQ] = helper_le_ldq_mmu,
1568 [MO_BEUW] = helper_be_lduw_mmu,
1569 [MO_BEUL] = helper_be_ldul_mmu,
1570 [MO_BEQ] = helper_be_ldq_mmu,
1571 };
1573 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1574 * uintxx_t val, int mmu_idx, uintptr_t ra)
1575 */
1576 static void * const qemu_st_helpers[16] = {
1577 [MO_UB] = helper_ret_stb_mmu,
1578 [MO_LEUW] = helper_le_stw_mmu,
1579 [MO_LEUL] = helper_le_stl_mmu,
1580 [MO_LEQ] = helper_le_stq_mmu,
1581 [MO_BEUW] = helper_be_stw_mmu,
1582 [MO_BEUL] = helper_be_stl_mmu,
1583 [MO_BEQ] = helper_be_stq_mmu,
1584 };
1586 /* Perform the TLB load and compare.
1588 Inputs:
1589 ADDRLO and ADDRHI contain the low and high part of the address.
1591 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1593 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1594 This should be offsetof addr_read or addr_write.
1596 Outputs:
1597 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1598 positions of the displacements of forward jumps to the TLB miss case.
1600 Second argument register is loaded with the low part of the address.
1601 In the TLB hit case, it has been adjusted as indicated by the TLB
1602 and so is a host address. In the TLB miss case, it continues to
1603 hold a guest address.
1605 First argument register is clobbered. */
1607 static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1608 int mem_index, TCGMemOp opc,
1609 tcg_insn_unit **label_ptr, int which)
1611 const TCGReg r0 = TCG_REG_L0;
1612 const TCGReg r1 = TCG_REG_L1;
1613 TCGType ttype = TCG_TYPE_I32;
1614 TCGType tlbtype = TCG_TYPE_I32;
1615 int trexw = 0, hrexw = 0, tlbrexw = 0;
1616 unsigned a_bits = get_alignment_bits(opc);
1617 unsigned s_bits = opc & MO_SIZE;
1618 unsigned a_mask = (1 << a_bits) - 1;
1619 unsigned s_mask = (1 << s_bits) - 1;
1620 target_ulong tlb_mask;
1622 if (TCG_TARGET_REG_BITS == 64) {
1623 if (TARGET_LONG_BITS == 64) {
1624 ttype = TCG_TYPE_I64;
1625 trexw = P_REXW;
1627 if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1628 hrexw = P_REXW;
1629 if (TARGET_PAGE_BITS + CPU_TLB_BITS > 32) {
1630 tlbtype = TCG_TYPE_I64;
1631 tlbrexw = P_REXW;
1636 tcg_out_mov(s, tlbtype, r0, addrlo);
1637 /* If the required alignment is at least as large as the access, simply
1638 copy the address and mask. For lesser alignments, check that we don't
1639 cross pages for the complete access. */
1640 if (a_bits >= s_bits) {
1641 tcg_out_mov(s, ttype, r1, addrlo);
1642 } else {
1643 tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
1645 tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
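    /* For instance, an 8-byte access with only byte alignment required
       (a_bits == 0): the LEA above adds s_mask - a_mask = 7, so an access
       that crosses a page boundary masks to the next page and fails the
       TLB compare, forcing the slow path. */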
1647 tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
1648 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1650 tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
1651 tgen_arithi(s, ARITH_AND + tlbrexw, r0,
1652 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
1654 tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
1655 offsetof(CPUArchState, tlb_table[mem_index][0])
1656 + which);
1658 /* cmp 0(r0), r1 */
1659 tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
1661 /* Prepare for both the fast path add of the tlb addend, and the slow
1662 path function argument setup. There are two cases worth note:
1663 For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
1664 before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
1665 copies the entire guest address for the slow path, while truncation
1666 for the 32-bit host happens with the fastpath ADDL below. */
1667 tcg_out_mov(s, ttype, r1, addrlo);
1669 /* jne slow_path */
1670 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1671 label_ptr[0] = s->code_ptr;
1672 s->code_ptr += 4;
1674 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1675 /* cmp 4(r0), addrhi */
1676 tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);
1678 /* jne slow_path */
1679 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1680 label_ptr[1] = s->code_ptr;
1681 s->code_ptr += 4;
1684 /* TLB Hit. */
1686 /* add addend(r0), r1 */
1687 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1688 offsetof(CPUTLBEntry, addend) - which);
1689 }
1691 /*
1692 * Record the context of a call to the out of line helper code for the slow path
1693 * for a load or store, so that we can later generate the correct helper code
1694 */
1695 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1696 TCGReg datalo, TCGReg datahi,
1697 TCGReg addrlo, TCGReg addrhi,
1698 tcg_insn_unit *raddr,
1699 tcg_insn_unit **label_ptr)
1701 TCGLabelQemuLdst *label = new_ldst_label(s);
1703 label->is_ld = is_ld;
1704 label->oi = oi;
1705 label->datalo_reg = datalo;
1706 label->datahi_reg = datahi;
1707 label->addrlo_reg = addrlo;
1708 label->addrhi_reg = addrhi;
1709 label->raddr = raddr;
1710 label->label_ptr[0] = label_ptr[0];
1711 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1712 label->label_ptr[1] = label_ptr[1];
1713 }
1714 }
1716 /*
1717 * Generate code for the slow path for a load at the end of block
1718 */
1719 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1721 TCGMemOpIdx oi = l->oi;
1722 TCGMemOp opc = get_memop(oi);
1723 TCGReg data_reg;
1724 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1726 /* resolve label address */
1727 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1728 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1729 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1732 if (TCG_TARGET_REG_BITS == 32) {
1733 int ofs = 0;
1735 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1736 ofs += 4;
1738 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1739 ofs += 4;
1741 if (TARGET_LONG_BITS == 64) {
1742 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1743 ofs += 4;
1746 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1747 ofs += 4;
1749 tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
1750 } else {
1751 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1752 /* The second argument is already loaded with addrlo. */
1753 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
1754 tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1755 (uintptr_t)l->raddr);
1758 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1760 data_reg = l->datalo_reg;
1761 switch (opc & MO_SSIZE) {
1762 case MO_SB:
1763 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
1764 break;
1765 case MO_SW:
1766 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
1767 break;
1768 #if TCG_TARGET_REG_BITS == 64
1769 case MO_SL:
1770 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1771 break;
1772 #endif
1773 case MO_UB:
1774 case MO_UW:
1775 /* Note that the helpers have zero-extended to tcg_target_long. */
1776 case MO_UL:
1777 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1778 break;
1779 case MO_Q:
1780 if (TCG_TARGET_REG_BITS == 64) {
1781 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1782 } else if (data_reg == TCG_REG_EDX) {
1783 /* xchg %edx, %eax */
1784 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1785 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1786 } else {
1787 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1788 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1790 break;
1791 default:
1792 tcg_abort();
1795 /* Jump to the code corresponding to next IR of qemu_ld */
1796 tcg_out_jmp(s, l->raddr);
1797 }
1799 /*
1800 * Generate code for the slow path for a store at the end of block
1801 */
1802 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1804 TCGMemOpIdx oi = l->oi;
1805 TCGMemOp opc = get_memop(oi);
1806 TCGMemOp s_bits = opc & MO_SIZE;
1807 tcg_insn_unit **label_ptr = &l->label_ptr[0];
1808 TCGReg retaddr;
1810 /* resolve label address */
1811 tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1812 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1813 tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1816 if (TCG_TARGET_REG_BITS == 32) {
1817 int ofs = 0;
1819 tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1820 ofs += 4;
1822 tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1823 ofs += 4;
1825 if (TARGET_LONG_BITS == 64) {
1826 tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1827 ofs += 4;
1830 tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
1831 ofs += 4;
1833 if (s_bits == MO_64) {
1834 tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
1835 ofs += 4;
1838 tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1839 ofs += 4;
1841 retaddr = TCG_REG_EAX;
1842 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1843 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
1844 } else {
1845 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1846 /* The second argument is already loaded with addrlo. */
1847 tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1848 tcg_target_call_iarg_regs[2], l->datalo_reg);
1849 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
1851 if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
1852 retaddr = tcg_target_call_iarg_regs[4];
1853 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1854 } else {
1855 retaddr = TCG_REG_RAX;
1856 tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
1857 tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
1858 TCG_TARGET_CALL_STACK_OFFSET);
1862 /* "Tail call" to the helper, with the return address back inline. */
1863 tcg_out_push(s, retaddr);
1864 tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1866 #elif defined(__x86_64__) && defined(__linux__)
1867 # include <asm/prctl.h>
1868 # include <sys/prctl.h>
1870 int arch_prctl(int code, unsigned long addr);
1872 static int guest_base_flags;
1873 static inline void setup_guest_base_seg(void)
1875 if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
1876 guest_base_flags = P_GS;
1877 }
1878 }
1879 #else
1880 # define guest_base_flags 0
1881 static inline void setup_guest_base_seg(void) { }
1882 #endif /* SOFTMMU */
1884 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
1885 TCGReg base, int index, intptr_t ofs,
1886 int seg, TCGMemOp memop)
1888 const TCGMemOp real_bswap = memop & MO_BSWAP;
1889 TCGMemOp bswap = real_bswap;
1890 int movop = OPC_MOVL_GvEv;
1892 if (have_movbe && real_bswap) {
1893 bswap = 0;
1894 movop = OPC_MOVBE_GyMy;
1897 switch (memop & MO_SSIZE) {
1898 case MO_UB:
1899 tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
1900 base, index, 0, ofs);
1901 break;
1902 case MO_SB:
1903 tcg_out_modrm_sib_offset(s, OPC_MOVSBL + P_REXW + seg, datalo,
1904 base, index, 0, ofs);
1905 break;
1906 case MO_UW:
1907 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1908 base, index, 0, ofs);
1909 if (real_bswap) {
1910 tcg_out_rolw_8(s, datalo);
1912 break;
1913 case MO_SW:
1914 if (real_bswap) {
1915 if (have_movbe) {
1916 tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
1917 datalo, base, index, 0, ofs);
1918 } else {
1919 tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
1920 base, index, 0, ofs);
1921 tcg_out_rolw_8(s, datalo);
1923 tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
1924 } else {
1925 tcg_out_modrm_sib_offset(s, OPC_MOVSWL + P_REXW + seg,
1926 datalo, base, index, 0, ofs);
1928 break;
1929 case MO_UL:
1930 tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
1931 if (bswap) {
1932 tcg_out_bswap32(s, datalo);
1934 break;
1935 #if TCG_TARGET_REG_BITS == 64
1936 case MO_SL:
1937 if (real_bswap) {
1938 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1939 base, index, 0, ofs);
1940 if (bswap) {
1941 tcg_out_bswap32(s, datalo);
1943 tcg_out_ext32s(s, datalo, datalo);
1944 } else {
1945 tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
1946 base, index, 0, ofs);
1948 break;
1949 #endif
1950 case MO_Q:
1951 if (TCG_TARGET_REG_BITS == 64) {
1952 tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
1953 base, index, 0, ofs);
1954 if (bswap) {
1955 tcg_out_bswap64(s, datalo);
1957 } else {
1958 if (real_bswap) {
1959 int t = datalo;
1960 datalo = datahi;
1961 datahi = t;
1963 if (base != datalo) {
1964 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1965 base, index, 0, ofs);
1966 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1967 base, index, 0, ofs + 4);
1968 } else {
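/* The base register is the same as datalo: load the high half first
   so the address is not clobbered before the second load. */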
1969 tcg_out_modrm_sib_offset(s, movop + seg, datahi,
1970 base, index, 0, ofs + 4);
1971 tcg_out_modrm_sib_offset(s, movop + seg, datalo,
1972 base, index, 0, ofs);
1974 if (bswap) {
1975 tcg_out_bswap32(s, datalo);
1976 tcg_out_bswap32(s, datahi);
1977 }
1978 }
1979 break;
1980 default:
1981 tcg_abort();
1982 }
1983 }
1984
1985 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1986 EAX. It will be useful once fixed-register globals are less
1987 common. */
1988 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1989 {
1990 TCGReg datalo, datahi, addrlo;
1991 TCGReg addrhi __attribute__((unused));
1992 TCGMemOpIdx oi;
1993 TCGMemOp opc;
1994 #if defined(CONFIG_SOFTMMU)
1995 int mem_index;
1996 tcg_insn_unit *label_ptr[2];
1997 #endif
1999 datalo = *args++;
2000 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2001 addrlo = *args++;
2002 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2003 oi = *args++;
2004 opc = get_memop(oi);
2006 #if defined(CONFIG_SOFTMMU)
2007 mem_index = get_mmuidx(oi);
2009 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2010 label_ptr, offsetof(CPUTLBEntry, addr_read));
2012 /* TLB Hit. */
2013 tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
2015 /* Record the current context of a load into ldst label */
2016 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2017 s->code_ptr, label_ptr);
2018 #else
2020 int32_t offset = guest_base;
2021 TCGReg base = addrlo;
2022 int index = -1;
2023 int seg = 0;
2025 /* For a 32-bit guest, the high 32 bits may contain garbage.
2026 We can do this with the ADDR32 prefix if we're not using
2027 a guest base, or when using segmentation. Otherwise we
2028 need to zero-extend manually. */
2029 if (guest_base == 0 || guest_base_flags) {
2030 seg = guest_base_flags;
2031 offset = 0;
2032 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2033 seg |= P_ADDR32;
2035 } else if (TCG_TARGET_REG_BITS == 64) {
2036 if (TARGET_LONG_BITS == 32) {
2037 tcg_out_ext32u(s, TCG_REG_L0, base);
2038 base = TCG_REG_L0;
2040 if (offset != guest_base) {
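/* guest_base did not fit in the signed 32-bit displacement;
   materialize it in L1 and supply it as the SIB index instead. */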
2041 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
2042 index = TCG_REG_L1;
2043 offset = 0;
2047 tcg_out_qemu_ld_direct(s, datalo, datahi,
2048 base, index, offset, seg, opc);
2050 #endif
2053 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2054 TCGReg base, intptr_t ofs, int seg,
2055 TCGMemOp memop)
2056 {
2057 /* ??? Ideally we wouldn't need a scratch register. For user-only,
2058 we could perform the bswap twice to restore the original value
2059 instead of moving to the scratch. But as it is, the L constraint
2060 means that TCG_REG_L0 is definitely free here. */
2061 const TCGReg scratch = TCG_REG_L0;
2062 const TCGMemOp real_bswap = memop & MO_BSWAP;
2063 TCGMemOp bswap = real_bswap;
2064 int movop = OPC_MOVL_EvGv;
2066 if (have_movbe && real_bswap) {
2067 bswap = 0;
2068 movop = OPC_MOVBE_MyGy;
2071 switch (memop & MO_SIZE) {
2072 case MO_8:
2073 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
2074 Use the scratch register if necessary. */
2075 if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
2076 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2077 datalo = scratch;
2079 tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
2080 datalo, base, ofs);
2081 break;
2082 case MO_16:
2083 if (bswap) {
2084 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2085 tcg_out_rolw_8(s, scratch);
2086 datalo = scratch;
2088 tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
2089 break;
2090 case MO_32:
2091 if (bswap) {
2092 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2093 tcg_out_bswap32(s, scratch);
2094 datalo = scratch;
2096 tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
2097 break;
2098 case MO_64:
2099 if (TCG_TARGET_REG_BITS == 64) {
2100 if (bswap) {
2101 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
2102 tcg_out_bswap64(s, scratch);
2103 datalo = scratch;
2105 tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
2106 } else if (bswap) {
2107 tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
2108 tcg_out_bswap32(s, scratch);
2109 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
2110 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
2111 tcg_out_bswap32(s, scratch);
2112 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
2113 } else {
2114 if (real_bswap) {
2115 int t = datalo;
2116 datalo = datahi;
2117 datahi = t;
2119 tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
2120 tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
2122 break;
2123 default:
2124 tcg_abort();
2128 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
2129 {
2130 TCGReg datalo, datahi, addrlo;
2131 TCGReg addrhi __attribute__((unused));
2132 TCGMemOpIdx oi;
2133 TCGMemOp opc;
2134 #if defined(CONFIG_SOFTMMU)
2135 int mem_index;
2136 tcg_insn_unit *label_ptr[2];
2137 #endif
2139 datalo = *args++;
2140 datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2141 addrlo = *args++;
2142 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2143 oi = *args++;
2144 opc = get_memop(oi);
2146 #if defined(CONFIG_SOFTMMU)
2147 mem_index = get_mmuidx(oi);
2149 tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2150 label_ptr, offsetof(CPUTLBEntry, addr_write));
2152 /* TLB Hit. */
2153 tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
2155 /* Record the current context of a store into ldst label */
2156 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2157 s->code_ptr, label_ptr);
2158 #else
2160 int32_t offset = guest_base;
2161 TCGReg base = addrlo;
2162 int seg = 0;
2164 /* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */
2165 if (guest_base == 0 || guest_base_flags) {
2166 seg = guest_base_flags;
2167 offset = 0;
2168 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2169 seg |= P_ADDR32;
2171 } else if (TCG_TARGET_REG_BITS == 64) {
2172 /* ??? Note that we can't use the same SIB addressing scheme
2173 as for loads, since we require L0 free for bswap. */
2174 if (offset != guest_base) {
2175 if (TARGET_LONG_BITS == 32) {
2176 tcg_out_ext32u(s, TCG_REG_L0, base);
2177 base = TCG_REG_L0;
2179 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base);
2180 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
2181 base = TCG_REG_L1;
2182 offset = 0;
2183 } else if (TARGET_LONG_BITS == 32) {
2184 tcg_out_ext32u(s, TCG_REG_L1, base);
2185 base = TCG_REG_L1;
2189 tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
2191 #endif
2194 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2195 const TCGArg *args, const int *const_args)
2196 {
2197 TCGArg a0, a1, a2;
2198 int c, const_a2, vexop, rexw = 0;
2200 #if TCG_TARGET_REG_BITS == 64
2201 # define OP_32_64(x) \
2202 case glue(glue(INDEX_op_, x), _i64): \
2203 rexw = P_REXW; /* FALLTHRU */ \
2204 case glue(glue(INDEX_op_, x), _i32)
2205 #else
2206 # define OP_32_64(x) \
2207 case glue(glue(INDEX_op_, x), _i32)
2208 #endif
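/* OP_32_64(x) groups the _i64 and _i32 flavours of an op: on 64-bit
   hosts the _i64 case sets REXW and falls through into the _i32 case. */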
2210 /* Hoist the loads of the most common arguments. */
2211 a0 = args[0];
2212 a1 = args[1];
2213 a2 = args[2];
2214 const_a2 = const_args[2];
2216 switch (opc) {
2217 case INDEX_op_exit_tb:
2218 /* Reuse the zeroing that exists for goto_ptr. */
2219 if (a0 == 0) {
2220 tcg_out_jmp(s, s->code_gen_epilogue);
2221 } else {
2222 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
2223 tcg_out_jmp(s, tb_ret_addr);
2225 break;
2226 case INDEX_op_goto_tb:
2227 if (s->tb_jmp_insn_offset) {
2228 /* direct jump method */
2229 int gap;
2230 /* jump displacement must be aligned for atomic patching;
2231 * see if we need to add extra nops before jump
2232 */
2233 gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
2234 if (gap != 1) {
2235 tcg_out_nopn(s, gap - 1);
2237 tcg_out8(s, OPC_JMP_long); /* jmp im */
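/* Record where the 32-bit displacement lives and emit a placeholder;
   the jump is patched later, once the destination TB is known. */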
2238 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
2239 tcg_out32(s, 0);
2240 } else {
2241 /* indirect jump method */
2242 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
2243 (intptr_t)(s->tb_jmp_target_addr + a0));
2245 set_jmp_reset_offset(s, a0);
2246 break;
2247 case INDEX_op_goto_ptr:
2248 /* jmp to the given host address (could be epilogue) */
2249 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
2250 break;
2251 case INDEX_op_br:
2252 tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
2253 break;
2254 OP_32_64(ld8u):
2255 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2256 tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
2257 break;
2258 OP_32_64(ld8s):
2259 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
2260 break;
2261 OP_32_64(ld16u):
2262 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
2263 tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
2264 break;
2265 OP_32_64(ld16s):
2266 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
2267 break;
2268 #if TCG_TARGET_REG_BITS == 64
2269 case INDEX_op_ld32u_i64:
2270 #endif
2271 case INDEX_op_ld_i32:
2272 tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
2273 break;
2275 OP_32_64(st8):
2276 if (const_args[0]) {
2277 tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
2278 tcg_out8(s, a0);
2279 } else {
2280 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
2282 break;
2283 OP_32_64(st16):
2284 if (const_args[0]) {
2285 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
2286 tcg_out16(s, a0);
2287 } else {
2288 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
2290 break;
2291 #if TCG_TARGET_REG_BITS == 64
2292 case INDEX_op_st32_i64:
2293 #endif
2294 case INDEX_op_st_i32:
2295 if (const_args[0]) {
2296 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
2297 tcg_out32(s, a0);
2298 } else {
2299 tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
2301 break;
2303 OP_32_64(add):
2304 /* For 3-operand addition, use LEA. */
2305 if (a0 != a1) {
2306 TCGArg c3 = 0;
2307 if (const_a2) {
2308 c3 = a2, a2 = -1;
2309 } else if (a0 == a2) {
2310 /* Watch out for dest = src + dest, since we've removed
2311 the matching constraint on the add. */
2312 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
2313 break;
2316 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
2317 break;
2319 c = ARITH_ADD;
2320 goto gen_arith;
2321 OP_32_64(sub):
2322 c = ARITH_SUB;
2323 goto gen_arith;
2324 OP_32_64(and):
2325 c = ARITH_AND;
2326 goto gen_arith;
2327 OP_32_64(or):
2328 c = ARITH_OR;
2329 goto gen_arith;
2330 OP_32_64(xor):
2331 c = ARITH_XOR;
2332 goto gen_arith;
2333 gen_arith:
2334 if (const_a2) {
2335 tgen_arithi(s, c + rexw, a0, a2, 0);
2336 } else {
2337 tgen_arithr(s, c + rexw, a0, a2);
2339 break;
2341 OP_32_64(andc):
2342 if (const_a2) {
2343 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2344 tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
2345 } else {
2346 tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
2348 break;
2350 OP_32_64(mul):
2351 if (const_a2) {
2352 int32_t val;
2353 val = a2;
2354 if (val == (int8_t)val) {
2355 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
2356 tcg_out8(s, val);
2357 } else {
2358 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
2359 tcg_out32(s, val);
2361 } else {
2362 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
2364 break;
2366 OP_32_64(div2):
2367 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
2368 break;
2369 OP_32_64(divu2):
2370 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
2371 break;
2373 OP_32_64(shl):
2374 /* For small constant 3-operand shift, use LEA. */
2375 if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
2376 if (a2 - 1 == 0) {
2377 /* shl $1,a1,a0 -> lea (a1,a1),a0 */
2378 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
2379 } else {
2380 /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
2381 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
2383 break;
2385 c = SHIFT_SHL;
2386 vexop = OPC_SHLX;
2387 goto gen_shift_maybe_vex;
2388 OP_32_64(shr):
2389 c = SHIFT_SHR;
2390 vexop = OPC_SHRX;
2391 goto gen_shift_maybe_vex;
2392 OP_32_64(sar):
2393 c = SHIFT_SAR;
2394 vexop = OPC_SARX;
2395 goto gen_shift_maybe_vex;
2396 OP_32_64(rotl):
2397 c = SHIFT_ROL;
2398 goto gen_shift;
2399 OP_32_64(rotr):
2400 c = SHIFT_ROR;
2401 goto gen_shift;
2402 gen_shift_maybe_vex:
2403 if (have_bmi2) {
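/* BMI2 SHLX/SHRX/SARX take the count in an arbitrary register and do
   not touch the flags, so no copy through %cl is needed. */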
2404 if (!const_a2) {
2405 tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
2406 break;
2408 tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2410 /* FALLTHRU */
2411 gen_shift:
2412 if (const_a2) {
2413 tcg_out_shifti(s, c + rexw, a0, a2);
2414 } else {
2415 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
2417 break;
2419 OP_32_64(ctz):
2420 tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
2421 break;
2422 OP_32_64(clz):
2423 tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
2424 break;
2425 OP_32_64(ctpop):
2426 tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
2427 break;
2429 case INDEX_op_brcond_i32:
2430 tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2431 break;
2432 case INDEX_op_setcond_i32:
2433 tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
2434 break;
2435 case INDEX_op_movcond_i32:
2436 tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
2437 break;
2439 OP_32_64(bswap16):
2440 tcg_out_rolw_8(s, a0);
2441 break;
2442 OP_32_64(bswap32):
2443 tcg_out_bswap32(s, a0);
2444 break;
2446 OP_32_64(neg):
2447 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
2448 break;
2449 OP_32_64(not):
2450 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
2451 break;
2453 OP_32_64(ext8s):
2454 tcg_out_ext8s(s, a0, a1, rexw);
2455 break;
2456 OP_32_64(ext16s):
2457 tcg_out_ext16s(s, a0, a1, rexw);
2458 break;
2459 OP_32_64(ext8u):
2460 tcg_out_ext8u(s, a0, a1);
2461 break;
2462 OP_32_64(ext16u):
2463 tcg_out_ext16u(s, a0, a1);
2464 break;
2466 case INDEX_op_qemu_ld_i32:
2467 tcg_out_qemu_ld(s, args, 0);
2468 break;
2469 case INDEX_op_qemu_ld_i64:
2470 tcg_out_qemu_ld(s, args, 1);
2471 break;
2472 case INDEX_op_qemu_st_i32:
2473 tcg_out_qemu_st(s, args, 0);
2474 break;
2475 case INDEX_op_qemu_st_i64:
2476 tcg_out_qemu_st(s, args, 1);
2477 break;
2479 OP_32_64(mulu2):
2480 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2481 break;
2482 OP_32_64(muls2):
2483 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2484 break;
2485 OP_32_64(add2):
2486 if (const_args[4]) {
2487 tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
2488 } else {
2489 tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
2491 if (const_args[5]) {
2492 tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
2493 } else {
2494 tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
2496 break;
2497 OP_32_64(sub2):
2498 if (const_args[4]) {
2499 tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
2500 } else {
2501 tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
2503 if (const_args[5]) {
2504 tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
2505 } else {
2506 tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
2508 break;
2510 #if TCG_TARGET_REG_BITS == 32
2511 case INDEX_op_brcond2_i32:
2512 tcg_out_brcond2(s, args, const_args, 0);
2513 break;
2514 case INDEX_op_setcond2_i32:
2515 tcg_out_setcond2(s, args, const_args);
2516 break;
2517 #else /* TCG_TARGET_REG_BITS == 64 */
2518 case INDEX_op_ld32s_i64:
2519 tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
2520 break;
2521 case INDEX_op_ld_i64:
2522 tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
2523 break;
2524 case INDEX_op_st_i64:
2525 if (const_args[0]) {
2526 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
2527 tcg_out32(s, a0);
2528 } else {
2529 tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
2531 break;
2533 case INDEX_op_brcond_i64:
2534 tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2535 break;
2536 case INDEX_op_setcond_i64:
2537 tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
2538 break;
2539 case INDEX_op_movcond_i64:
2540 tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
2541 break;
2543 case INDEX_op_bswap64_i64:
2544 tcg_out_bswap64(s, a0);
2545 break;
2546 case INDEX_op_extu_i32_i64:
2547 case INDEX_op_ext32u_i64:
2548 tcg_out_ext32u(s, a0, a1);
2549 break;
2550 case INDEX_op_ext_i32_i64:
2551 case INDEX_op_ext32s_i64:
2552 tcg_out_ext32s(s, a0, a1);
2553 break;
2554 #endif
2556 OP_32_64(deposit):
2557 if (args[3] == 0 && args[4] == 8) {
2558 /* load bits 0..7 */
2559 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
2560 } else if (args[3] == 8 && args[4] == 8) {
2561 /* load bits 8..15 */
2562 tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
2563 } else if (args[3] == 0 && args[4] == 16) {
2564 /* load bits 0..15 */
2565 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
2566 } else {
2567 tcg_abort();
2569 break;
2571 case INDEX_op_extract_i64:
2572 if (a2 + args[3] == 32) {
2573 /* This is a 32-bit zero-extending right shift. */
2574 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2575 tcg_out_shifti(s, SHIFT_SHR, a0, a2);
2576 break;
2578 /* FALLTHRU */
2579 case INDEX_op_extract_i32:
2580 /* On the off-chance that we can use the high-byte registers.
2581 Otherwise we emit the same ext16 + shift pattern that we
2582 would have gotten from the normal tcg-op.c expansion. */
2583 tcg_debug_assert(a2 == 8 && args[3] == 8);
2584 if (a1 < 4 && a0 < 8) {
2585 tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
2586 } else {
2587 tcg_out_ext16u(s, a0, a1);
2588 tcg_out_shifti(s, SHIFT_SHR, a0, 8);
2590 break;
2592 case INDEX_op_sextract_i32:
2593 /* We don't implement sextract_i64, as we cannot sign-extend to
2594 64-bits without using the REX prefix that explicitly excludes
2595 access to the high-byte registers. */
2596 tcg_debug_assert(a2 == 8 && args[3] == 8);
2597 if (a1 < 4 && a0 < 8) {
2598 tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
2599 } else {
2600 tcg_out_ext16s(s, a0, a1, 0);
2601 tcg_out_shifti(s, SHIFT_SAR, a0, 8);
2603 break;
2605 case INDEX_op_mb:
2606 tcg_out_mb(s, a0);
2607 break;
2608 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2609 case INDEX_op_mov_i64:
2610 case INDEX_op_mov_vec:
2611 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2612 case INDEX_op_movi_i64:
2613 case INDEX_op_dupi_vec:
2614 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2615 default:
2616 tcg_abort();
2619 #undef OP_32_64
2622 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2623 unsigned vecl, unsigned vece,
2624 const TCGArg *args, const int *const_args)
2625 {
2626 static int const add_insn[4] = {
2627 OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
2629 static int const sub_insn[4] = {
2630 OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
2632 static int const mul_insn[4] = {
2633 OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
2635 static int const shift_imm_insn[4] = {
2636 OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
2638 static int const cmpeq_insn[4] = {
2639 OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
2641 static int const cmpgt_insn[4] = {
2642 OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
2644 static int const punpckl_insn[4] = {
2645 OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
2647 static int const punpckh_insn[4] = {
2648 OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
2650 static int const packss_insn[4] = {
2651 OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
2653 static int const packus_insn[4] = {
2654 OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
2657 TCGType type = vecl + TCG_TYPE_V64;
2658 int insn, sub;
2659 TCGArg a0, a1, a2;
2661 a0 = args[0];
2662 a1 = args[1];
2663 a2 = args[2];
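/* In the tables above, OPC_UD2 marks element sizes that have no single
   instruction; gen_simd asserts such an entry is never selected. */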
2665 switch (opc) {
2666 case INDEX_op_add_vec:
2667 insn = add_insn[vece];
2668 goto gen_simd;
2669 case INDEX_op_sub_vec:
2670 insn = sub_insn[vece];
2671 goto gen_simd;
2672 case INDEX_op_mul_vec:
2673 insn = mul_insn[vece];
2674 goto gen_simd;
2675 case INDEX_op_and_vec:
2676 insn = OPC_PAND;
2677 goto gen_simd;
2678 case INDEX_op_or_vec:
2679 insn = OPC_POR;
2680 goto gen_simd;
2681 case INDEX_op_xor_vec:
2682 insn = OPC_PXOR;
2683 goto gen_simd;
2684 case INDEX_op_x86_punpckl_vec:
2685 insn = punpckl_insn[vece];
2686 goto gen_simd;
2687 case INDEX_op_x86_punpckh_vec:
2688 insn = punpckh_insn[vece];
2689 goto gen_simd;
2690 case INDEX_op_x86_packss_vec:
2691 insn = packss_insn[vece];
2692 goto gen_simd;
2693 case INDEX_op_x86_packus_vec:
2694 insn = packus_insn[vece];
2695 goto gen_simd;
2696 #if TCG_TARGET_REG_BITS == 32
2697 case INDEX_op_dup2_vec:
2698 /* Constraints have already placed both 32-bit inputs in xmm regs. */
2699 insn = OPC_PUNPCKLDQ;
2700 goto gen_simd;
2701 #endif
2702 gen_simd:
2703 tcg_debug_assert(insn != OPC_UD2);
2704 if (type == TCG_TYPE_V256) {
2705 insn |= P_VEXL;
2707 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2708 break;
2710 case INDEX_op_cmp_vec:
2711 sub = args[3];
2712 if (sub == TCG_COND_EQ) {
2713 insn = cmpeq_insn[vece];
2714 } else if (sub == TCG_COND_GT) {
2715 insn = cmpgt_insn[vece];
2716 } else {
2717 g_assert_not_reached();
2719 goto gen_simd;
2721 case INDEX_op_andc_vec:
2722 insn = OPC_PANDN;
2723 if (type == TCG_TYPE_V256) {
2724 insn |= P_VEXL;
2726 tcg_out_vex_modrm(s, insn, a0, a2, a1);
2727 break;
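/* The immediate-count vector shifts share one opcode group; the ModRM
   reg field (sub) selects the operation: /6 shift left, /2 logical
   right, /4 arithmetic right. */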
2729 case INDEX_op_shli_vec:
2730 sub = 6;
2731 goto gen_shift;
2732 case INDEX_op_shri_vec:
2733 sub = 2;
2734 goto gen_shift;
2735 case INDEX_op_sari_vec:
2736 tcg_debug_assert(vece != MO_64);
2737 sub = 4;
2738 gen_shift:
2739 tcg_debug_assert(vece != MO_8);
2740 insn = shift_imm_insn[vece];
2741 if (type == TCG_TYPE_V256) {
2742 insn |= P_VEXL;
2744 tcg_out_vex_modrm(s, insn, sub, a0, a1);
2745 tcg_out8(s, a2);
2746 break;
2748 case INDEX_op_ld_vec:
2749 tcg_out_ld(s, type, a0, a1, a2);
2750 break;
2751 case INDEX_op_st_vec:
2752 tcg_out_st(s, type, a0, a1, a2);
2753 break;
2754 case INDEX_op_dup_vec:
2755 tcg_out_dup_vec(s, type, vece, a0, a1);
2756 break;
2758 case INDEX_op_x86_shufps_vec:
2759 insn = OPC_SHUFPS;
2760 sub = args[3];
2761 goto gen_simd_imm8;
2762 case INDEX_op_x86_blend_vec:
2763 if (vece == MO_16) {
2764 insn = OPC_PBLENDW;
2765 } else if (vece == MO_32) {
2766 insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
2767 } else {
2768 g_assert_not_reached();
2770 sub = args[3];
2771 goto gen_simd_imm8;
2772 case INDEX_op_x86_vperm2i128_vec:
2773 insn = OPC_VPERM2I128;
2774 sub = args[3];
2775 goto gen_simd_imm8;
2776 gen_simd_imm8:
2777 if (type == TCG_TYPE_V256) {
2778 insn |= P_VEXL;
2780 tcg_out_vex_modrm(s, insn, a0, a1, a2);
2781 tcg_out8(s, sub);
2782 break;
2784 case INDEX_op_x86_vpblendvb_vec:
2785 insn = OPC_VPBLENDVB;
2786 if (type == TCG_TYPE_V256) {
2787 insn |= P_VEXL;
2789 tcg_out_vex_modrm(s, insn, a0, a1, a2);
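/* VPBLENDVB encodes its mask register in the high nibble of the
   trailing immediate byte. */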
2790 tcg_out8(s, args[3] << 4);
2791 break;
2793 case INDEX_op_x86_psrldq_vec:
2794 tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
2795 tcg_out8(s, a2);
2796 break;
2798 default:
2799 g_assert_not_reached();
2803 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
2804 {
2805 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
2806 static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
2807 static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
2808 static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
2809 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
2810 static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
2811 static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
2812 static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
2813 static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
2814 static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
2815 static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
2816 static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
2817 static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
2818 static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
2819 static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
2820 static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
2821 static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
2822 static const TCGTargetOpDef r_r_L_L
2823 = { .args_ct_str = { "r", "r", "L", "L" } };
2824 static const TCGTargetOpDef L_L_L_L
2825 = { .args_ct_str = { "L", "L", "L", "L" } };
2826 static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
2827 static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
2828 static const TCGTargetOpDef x_x_x_x
2829 = { .args_ct_str = { "x", "x", "x", "x" } };
2830 static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
2832 switch (op) {
2833 case INDEX_op_goto_ptr:
2834 return &r;
2836 case INDEX_op_ld8u_i32:
2837 case INDEX_op_ld8u_i64:
2838 case INDEX_op_ld8s_i32:
2839 case INDEX_op_ld8s_i64:
2840 case INDEX_op_ld16u_i32:
2841 case INDEX_op_ld16u_i64:
2842 case INDEX_op_ld16s_i32:
2843 case INDEX_op_ld16s_i64:
2844 case INDEX_op_ld_i32:
2845 case INDEX_op_ld32u_i64:
2846 case INDEX_op_ld32s_i64:
2847 case INDEX_op_ld_i64:
2848 return &r_r;
2850 case INDEX_op_st8_i32:
2851 case INDEX_op_st8_i64:
2852 return &qi_r;
2853 case INDEX_op_st16_i32:
2854 case INDEX_op_st16_i64:
2855 case INDEX_op_st_i32:
2856 case INDEX_op_st32_i64:
2857 return &ri_r;
2858 case INDEX_op_st_i64:
2859 return &re_r;
2861 case INDEX_op_add_i32:
2862 case INDEX_op_add_i64:
2863 return &r_r_re;
2864 case INDEX_op_sub_i32:
2865 case INDEX_op_sub_i64:
2866 case INDEX_op_mul_i32:
2867 case INDEX_op_mul_i64:
2868 case INDEX_op_or_i32:
2869 case INDEX_op_or_i64:
2870 case INDEX_op_xor_i32:
2871 case INDEX_op_xor_i64:
2872 return &r_0_re;
2874 case INDEX_op_and_i32:
2875 case INDEX_op_and_i64:
2877 static const TCGTargetOpDef and
2878 = { .args_ct_str = { "r", "0", "reZ" } };
2879 return &and;
2881 break;
2882 case INDEX_op_andc_i32:
2883 case INDEX_op_andc_i64:
2885 static const TCGTargetOpDef andc
2886 = { .args_ct_str = { "r", "r", "rI" } };
2887 return &andc;
2889 break;
2891 case INDEX_op_shl_i32:
2892 case INDEX_op_shl_i64:
2893 case INDEX_op_shr_i32:
2894 case INDEX_op_shr_i64:
2895 case INDEX_op_sar_i32:
2896 case INDEX_op_sar_i64:
2897 return have_bmi2 ? &r_r_ri : &r_0_ci;
2898 case INDEX_op_rotl_i32:
2899 case INDEX_op_rotl_i64:
2900 case INDEX_op_rotr_i32:
2901 case INDEX_op_rotr_i64:
2902 return &r_0_ci;
2904 case INDEX_op_brcond_i32:
2905 case INDEX_op_brcond_i64:
2906 return &r_re;
2908 case INDEX_op_bswap16_i32:
2909 case INDEX_op_bswap16_i64:
2910 case INDEX_op_bswap32_i32:
2911 case INDEX_op_bswap32_i64:
2912 case INDEX_op_bswap64_i64:
2913 case INDEX_op_neg_i32:
2914 case INDEX_op_neg_i64:
2915 case INDEX_op_not_i32:
2916 case INDEX_op_not_i64:
2917 return &r_0;
2919 case INDEX_op_ext8s_i32:
2920 case INDEX_op_ext8s_i64:
2921 case INDEX_op_ext8u_i32:
2922 case INDEX_op_ext8u_i64:
2923 return &r_q;
2924 case INDEX_op_ext16s_i32:
2925 case INDEX_op_ext16s_i64:
2926 case INDEX_op_ext16u_i32:
2927 case INDEX_op_ext16u_i64:
2928 case INDEX_op_ext32s_i64:
2929 case INDEX_op_ext32u_i64:
2930 case INDEX_op_ext_i32_i64:
2931 case INDEX_op_extu_i32_i64:
2932 case INDEX_op_extract_i32:
2933 case INDEX_op_extract_i64:
2934 case INDEX_op_sextract_i32:
2935 case INDEX_op_ctpop_i32:
2936 case INDEX_op_ctpop_i64:
2937 return &r_r;
2939 case INDEX_op_deposit_i32:
2940 case INDEX_op_deposit_i64:
2942 static const TCGTargetOpDef dep
2943 = { .args_ct_str = { "Q", "0", "Q" } };
2944 return &dep;
2946 case INDEX_op_setcond_i32:
2947 case INDEX_op_setcond_i64:
2949 static const TCGTargetOpDef setc
2950 = { .args_ct_str = { "q", "r", "re" } };
2951 return &setc;
2953 case INDEX_op_movcond_i32:
2954 case INDEX_op_movcond_i64:
2956 static const TCGTargetOpDef movc
2957 = { .args_ct_str = { "r", "r", "re", "r", "0" } };
2958 return &movc;
2960 case INDEX_op_div2_i32:
2961 case INDEX_op_div2_i64:
2962 case INDEX_op_divu2_i32:
2963 case INDEX_op_divu2_i64:
2965 static const TCGTargetOpDef div2
2966 = { .args_ct_str = { "a", "d", "0", "1", "r" } };
2967 return &div2;
2969 case INDEX_op_mulu2_i32:
2970 case INDEX_op_mulu2_i64:
2971 case INDEX_op_muls2_i32:
2972 case INDEX_op_muls2_i64:
2974 static const TCGTargetOpDef mul2
2975 = { .args_ct_str = { "a", "d", "a", "r" } };
2976 return &mul2;
2978 case INDEX_op_add2_i32:
2979 case INDEX_op_add2_i64:
2980 case INDEX_op_sub2_i32:
2981 case INDEX_op_sub2_i64:
2983 static const TCGTargetOpDef arith2
2984 = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
2985 return &arith2;
2987 case INDEX_op_ctz_i32:
2988 case INDEX_op_ctz_i64:
2990 static const TCGTargetOpDef ctz[2] = {
2991 { .args_ct_str = { "&r", "r", "r" } },
2992 { .args_ct_str = { "&r", "r", "rW" } },
2994 return &ctz[have_bmi1];
2996 case INDEX_op_clz_i32:
2997 case INDEX_op_clz_i64:
2999 static const TCGTargetOpDef clz[2] = {
3000 { .args_ct_str = { "&r", "r", "r" } },
3001 { .args_ct_str = { "&r", "r", "rW" } },
3003 return &clz[have_lzcnt];
3006 case INDEX_op_qemu_ld_i32:
3007 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
3008 case INDEX_op_qemu_st_i32:
3009 return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
3010 case INDEX_op_qemu_ld_i64:
3011 return (TCG_TARGET_REG_BITS == 64 ? &r_L
3012 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
3013 : &r_r_L_L);
3014 case INDEX_op_qemu_st_i64:
3015 return (TCG_TARGET_REG_BITS == 64 ? &L_L
3016 : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
3017 : &L_L_L_L);
3019 case INDEX_op_brcond2_i32:
3021 static const TCGTargetOpDef b2
3022 = { .args_ct_str = { "r", "r", "ri", "ri" } };
3023 return &b2;
3025 case INDEX_op_setcond2_i32:
3027 static const TCGTargetOpDef s2
3028 = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
3029 return &s2;
3032 case INDEX_op_ld_vec:
3033 case INDEX_op_st_vec:
3034 return &x_r;
3036 case INDEX_op_add_vec:
3037 case INDEX_op_sub_vec:
3038 case INDEX_op_mul_vec:
3039 case INDEX_op_and_vec:
3040 case INDEX_op_or_vec:
3041 case INDEX_op_xor_vec:
3042 case INDEX_op_andc_vec:
3043 case INDEX_op_cmp_vec:
3044 case INDEX_op_x86_shufps_vec:
3045 case INDEX_op_x86_blend_vec:
3046 case INDEX_op_x86_packss_vec:
3047 case INDEX_op_x86_packus_vec:
3048 case INDEX_op_x86_vperm2i128_vec:
3049 case INDEX_op_x86_punpckl_vec:
3050 case INDEX_op_x86_punpckh_vec:
3051 #if TCG_TARGET_REG_BITS == 32
3052 case INDEX_op_dup2_vec:
3053 #endif
3054 return &x_x_x;
3055 case INDEX_op_dup_vec:
3056 case INDEX_op_shli_vec:
3057 case INDEX_op_shri_vec:
3058 case INDEX_op_sari_vec:
3059 case INDEX_op_x86_psrldq_vec:
3060 return &x_x;
3061 case INDEX_op_x86_vpblendvb_vec:
3062 return &x_x_x_x;
3064 default:
3065 break;
3067 return NULL;
3070 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3071 {
3072 switch (opc) {
3073 case INDEX_op_add_vec:
3074 case INDEX_op_sub_vec:
3075 case INDEX_op_and_vec:
3076 case INDEX_op_or_vec:
3077 case INDEX_op_xor_vec:
3078 case INDEX_op_andc_vec:
3079 return 1;
3080 case INDEX_op_cmp_vec:
3081 return -1;
3083 case INDEX_op_shli_vec:
3084 case INDEX_op_shri_vec:
3085 /* We must expand the operation for MO_8. */
3086 return vece == MO_8 ? -1 : 1;
3088 case INDEX_op_sari_vec:
3089 /* We must expand the operation for MO_8. */
3090 if (vece == MO_8) {
3091 return -1;
3093 /* We can emulate this for MO_64, but it does not pay off
3094 unless we're producing at least 4 values. */
3095 if (vece == MO_64) {
3096 return type >= TCG_TYPE_V256 ? -1 : 0;
3098 return 1;
3100 case INDEX_op_mul_vec:
3101 if (vece == MO_8) {
3102 /* We can expand the operation for MO_8. */
3103 return -1;
3105 if (vece == MO_64) {
3106 return 0;
3108 return 1;
3110 default:
3111 return 0;
3115 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3116 TCGArg a0, ...)
3117 {
3118 va_list va;
3119 TCGArg a1, a2;
3120 TCGv_vec v0, t1, t2, t3, t4;
3122 va_start(va, a0);
3123 v0 = temp_tcgv_vec(arg_temp(a0));
3125 switch (opc) {
3126 case INDEX_op_shli_vec:
3127 case INDEX_op_shri_vec:
3128 tcg_debug_assert(vece == MO_8);
3129 a1 = va_arg(va, TCGArg);
3130 a2 = va_arg(va, TCGArg);
3131 /* Unpack to W, shift, and repack. Tricky bits:
3132 (1) Use punpck*bw x,x to produce DDCCBBAA,
3133 i.e. duplicate in other half of the 16-bit lane.
3134 (2) For right-shift, add 8 so that the high half of
3135 the lane becomes zero. For left-shift, we must
3136 shift up and down again.
3137 (3) Step 2 leaves high half zero such that PACKUSWB
3138 (pack with unsigned saturation) does not modify
3139 the quantity. */
3140 t1 = tcg_temp_new_vec(type);
3141 t2 = tcg_temp_new_vec(type);
3142 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3143 tcgv_vec_arg(t1), a1, a1);
3144 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3145 tcgv_vec_arg(t2), a1, a1);
3146 if (opc == INDEX_op_shri_vec) {
3147 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3148 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3149 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3150 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3151 } else {
3152 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3153 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3154 vec_gen_3(INDEX_op_shli_vec, type, MO_16,
3155 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3156 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3157 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 8);
3158 vec_gen_3(INDEX_op_shri_vec, type, MO_16,
3159 tcgv_vec_arg(t2), tcgv_vec_arg(t2), 8);
3161 vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3162 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3163 tcg_temp_free_vec(t1);
3164 tcg_temp_free_vec(t2);
3165 break;
3167 case INDEX_op_sari_vec:
3168 a1 = va_arg(va, TCGArg);
3169 a2 = va_arg(va, TCGArg);
3170 if (vece == MO_8) {
3171 /* Unpack to W, shift, and repack, as above. */
3172 t1 = tcg_temp_new_vec(type);
3173 t2 = tcg_temp_new_vec(type);
3174 vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3175 tcgv_vec_arg(t1), a1, a1);
3176 vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3177 tcgv_vec_arg(t2), a1, a1);
3178 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3179 tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
3180 vec_gen_3(INDEX_op_sari_vec, type, MO_16,
3181 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
3182 vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
3183 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3184 tcg_temp_free_vec(t1);
3185 tcg_temp_free_vec(t2);
3186 break;
3188 tcg_debug_assert(vece == MO_64);
3189 /* MO_64: If the shift is <= 32, we can emulate the sign extend by
3190 performing an arithmetic 32-bit shift and overwriting the high
3191 half of the result (note that the ISA says shift of 32 is valid). */
3192 if (a2 <= 32) {
3193 t1 = tcg_temp_new_vec(type);
3194 vec_gen_3(INDEX_op_sari_vec, type, MO_32, tcgv_vec_arg(t1), a1, a2);
3195 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3196 vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
3197 a0, a0, tcgv_vec_arg(t1), 0xaa);
3198 tcg_temp_free_vec(t1);
3199 break;
3201 /* Otherwise we will need to use a compare vs 0 to produce the
3202 sign-extend, shift and merge. */
3203 t1 = tcg_temp_new_vec(type);
3204 t2 = tcg_const_zeros_vec(type);
3205 vec_gen_4(INDEX_op_cmp_vec, type, MO_64,
3206 tcgv_vec_arg(t1), tcgv_vec_arg(t2), a1, TCG_COND_GT);
3207 tcg_temp_free_vec(t2);
3208 vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
3209 vec_gen_3(INDEX_op_shli_vec, type, MO_64,
3210 tcgv_vec_arg(t1), tcgv_vec_arg(t1), 64 - a2);
3211 vec_gen_3(INDEX_op_or_vec, type, MO_64, a0, a0, tcgv_vec_arg(t1));
3212 tcg_temp_free_vec(t1);
3213 break;
3215 case INDEX_op_mul_vec:
3216 tcg_debug_assert(vece == MO_8);
3217 a1 = va_arg(va, TCGArg);
3218 a2 = va_arg(va, TCGArg);
3219 switch (type) {
3220 case TCG_TYPE_V64:
3221 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3222 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3223 tcg_gen_dup16i_vec(t2, 0);
3224 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3225 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t2));
3226 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3227 tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2);
3228 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3229 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3230 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3231 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t1));
3232 tcg_temp_free_vec(t1);
3233 tcg_temp_free_vec(t2);
3234 break;
3236 case TCG_TYPE_V128:
3237 t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3238 t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3239 t3 = tcg_temp_new_vec(TCG_TYPE_V128);
3240 t4 = tcg_temp_new_vec(TCG_TYPE_V128);
3241 tcg_gen_dup16i_vec(t4, 0);
3242 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3243 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3244 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3245 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3246 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3247 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3248 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
3249 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3250 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3251 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3252 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3253 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3254 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3255 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3256 tcg_temp_free_vec(t1);
3257 tcg_temp_free_vec(t2);
3258 tcg_temp_free_vec(t3);
3259 tcg_temp_free_vec(t4);
3260 break;
3262 case TCG_TYPE_V256:
3263 t1 = tcg_temp_new_vec(TCG_TYPE_V256);
3264 t2 = tcg_temp_new_vec(TCG_TYPE_V256);
3265 t3 = tcg_temp_new_vec(TCG_TYPE_V256);
3266 t4 = tcg_temp_new_vec(TCG_TYPE_V256);
3267 tcg_gen_dup16i_vec(t4, 0);
3268 /* a1: A[0-7] ... D[0-7]; a2: W[0-7] ... Z[0-7]
3269 t1: extends of B[0-7], D[0-7]
3270 t2: extends of X[0-7], Z[0-7]
3271 t3: extends of A[0-7], C[0-7]
3272 t4: extends of W[0-7], Y[0-7]. */
3273 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3274 tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
3275 vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
3276 tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
3277 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3278 tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
3279 vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
3280 tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
3281 /* t1: BX DZ; t2: AW CY. */
3282 tcg_gen_mul_vec(MO_16, t1, t1, t2);
3283 tcg_gen_mul_vec(MO_16, t3, t3, t4);
3284 tcg_gen_shri_vec(MO_16, t1, t1, 8);
3285 tcg_gen_shri_vec(MO_16, t3, t3, 8);
3286 /* a0: AW BX CY DZ. */
3287 vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V256, MO_8,
3288 a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3289 tcg_temp_free_vec(t1);
3290 tcg_temp_free_vec(t2);
3291 tcg_temp_free_vec(t3);
3292 tcg_temp_free_vec(t4);
3293 break;
3295 default:
3296 g_assert_not_reached();
3298 break;
3300 case INDEX_op_cmp_vec:
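/* x86 only provides equality and signed greater-than vector compares
   (PCMPEQ/PCMPGT); the other conditions are synthesized by swapping
   operands, inverting the result, and/or biasing the operands so an
   unsigned compare becomes a signed one. */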
3302 enum {
3303 NEED_SWAP = 1,
3304 NEED_INV = 2,
3305 NEED_BIAS = 4
3307 static const uint8_t fixups[16] = {
3308 [0 ... 15] = -1,
3309 [TCG_COND_EQ] = 0,
3310 [TCG_COND_NE] = NEED_INV,
3311 [TCG_COND_GT] = 0,
3312 [TCG_COND_LT] = NEED_SWAP,
3313 [TCG_COND_LE] = NEED_INV,
3314 [TCG_COND_GE] = NEED_SWAP | NEED_INV,
3315 [TCG_COND_GTU] = NEED_BIAS,
3316 [TCG_COND_LTU] = NEED_BIAS | NEED_SWAP,
3317 [TCG_COND_LEU] = NEED_BIAS | NEED_INV,
3318 [TCG_COND_GEU] = NEED_BIAS | NEED_SWAP | NEED_INV,
3321 TCGCond cond;
3322 uint8_t fixup;
3324 a1 = va_arg(va, TCGArg);
3325 a2 = va_arg(va, TCGArg);
3326 cond = va_arg(va, TCGArg);
3327 fixup = fixups[cond & 15];
3328 tcg_debug_assert(fixup != 0xff);
3330 if (fixup & NEED_INV) {
3331 cond = tcg_invert_cond(cond);
3333 if (fixup & NEED_SWAP) {
3334 TCGArg t;
3335 t = a1, a1 = a2, a2 = t;
3336 cond = tcg_swap_cond(cond);
3339 t1 = t2 = NULL;
3340 if (fixup & NEED_BIAS) {
3341 t1 = tcg_temp_new_vec(type);
3342 t2 = tcg_temp_new_vec(type);
3343 tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
3344 tcg_gen_sub_vec(vece, t1, temp_tcgv_vec(arg_temp(a1)), t2);
3345 tcg_gen_sub_vec(vece, t2, temp_tcgv_vec(arg_temp(a2)), t2);
3346 a1 = tcgv_vec_arg(t1);
3347 a2 = tcgv_vec_arg(t2);
3348 cond = tcg_signed_cond(cond);
3351 tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
3352 vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);
3354 if (fixup & NEED_BIAS) {
3355 tcg_temp_free_vec(t1);
3356 tcg_temp_free_vec(t2);
3358 if (fixup & NEED_INV) {
3359 tcg_gen_not_vec(vece, v0, v0);
3362 break;
3364 default:
3365 break;
3368 va_end(va);
3371 static const int tcg_target_callee_save_regs[] = {
3372 #if TCG_TARGET_REG_BITS == 64
3373 TCG_REG_RBP,
3374 TCG_REG_RBX,
3375 #if defined(_WIN64)
3376 TCG_REG_RDI,
3377 TCG_REG_RSI,
3378 #endif
3379 TCG_REG_R12,
3380 TCG_REG_R13,
3381 TCG_REG_R14, /* Currently used for the global env. */
3382 TCG_REG_R15,
3383 #else
3384 TCG_REG_EBP, /* Currently used for the global env. */
3385 TCG_REG_EBX,
3386 TCG_REG_ESI,
3387 TCG_REG_EDI,
3388 #endif
3391 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3392 and tcg_register_jit. */
3394 #define PUSH_SIZE \
3395 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
3396 * (TCG_TARGET_REG_BITS / 8))
3398 #define FRAME_SIZE \
3399 ((PUSH_SIZE \
3400 + TCG_STATIC_CALL_ARGS_SIZE \
3401 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3402 + TCG_TARGET_STACK_ALIGN - 1) \
3403 & ~(TCG_TARGET_STACK_ALIGN - 1))
3405 /* Generate global QEMU prologue and epilogue code */
3406 static void tcg_target_qemu_prologue(TCGContext *s)
3407 {
3408 int i, stack_addend;
3410 /* TB prologue */
3412 /* Reserve some stack space, also for TCG temps. */
3413 stack_addend = FRAME_SIZE - PUSH_SIZE;
3414 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3415 CPU_TEMP_BUF_NLONGS * sizeof(long));
3417 /* Save all callee saved registers. */
3418 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
3419 tcg_out_push(s, tcg_target_callee_save_regs[i]);
3422 #if TCG_TARGET_REG_BITS == 32
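/* On 32-bit hosts the env pointer and the TB pointer arrive on the
   stack, above the return address and the registers pushed above. */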
3423 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
3424 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
3425 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3426 /* jmp *tb. */
3427 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
3428 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
3429 + stack_addend);
3430 #else
3431 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3432 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
3433 /* jmp *tb. */
3434 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
3435 #endif
3437 /*
3438 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3439 * and fall through to the rest of the epilogue.
3440 */
3441 s->code_gen_epilogue = s->code_ptr;
3442 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
3444 /* TB epilogue */
3445 tb_ret_addr = s->code_ptr;
3447 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
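/* VZEROUPPER avoids the AVX-to-SSE transition penalty before
   returning to code that may use legacy SSE. */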
3449 if (have_avx2) {
3450 tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
3452 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
3453 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
3455 tcg_out_opc(s, OPC_RET, 0, 0, 0);
3457 #if !defined(CONFIG_SOFTMMU)
3458 /* Try to set up a segment register to point to guest_base. */
3459 if (guest_base) {
3460 setup_guest_base_seg();
3462 #endif
3465 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3466 {
3467 memset(p, 0x90, count);
3468 }
3469
3470 static void tcg_target_init(TCGContext *s)
3471 {
3472 #ifdef CONFIG_CPUID_H
3473 unsigned a, b, c, d, b7 = 0;
3474 int max = __get_cpuid_max(0, 0);
3476 if (max >= 7) {
3477 /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
3478 __cpuid_count(7, 0, a, b7, c, d);
3479 have_bmi1 = (b7 & bit_BMI) != 0;
3480 have_bmi2 = (b7 & bit_BMI2) != 0;
3483 if (max >= 1) {
3484 __cpuid(1, a, b, c, d);
3485 #ifndef have_cmov
3486 /* For 32-bit, 99% certainty that we're running on hardware that
3487 supports cmov, but we still need to check. In case cmov is not
3488 available, we'll use a small forward branch. */
3489 have_cmov = (d & bit_CMOV) != 0;
3490 #endif
3492 /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
3493 need to probe for it. */
3494 have_movbe = (c & bit_MOVBE) != 0;
3495 have_popcnt = (c & bit_POPCNT) != 0;
3497 /* There are a number of things we must check before we can be
3498 sure of not hitting invalid opcode. */
3499 if (c & bit_OSXSAVE) {
3500 unsigned xcrl, xcrh;
3501 /* The xgetbv instruction is not available to older versions of
3502 * the assembler, so we encode the instruction manually.
3503 */
3504 asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
3505 if ((xcrl & 6) == 6) {
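/* XCR0 bits 1 and 2 set means the OS saves/restores XMM and YMM
   state, so AVX instructions are safe to execute. */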
3506 have_avx1 = (c & bit_AVX) != 0;
3507 have_avx2 = (b7 & bit_AVX2) != 0;
3512 max = __get_cpuid_max(0x8000000, 0);
3513 if (max >= 1) {
3514 __cpuid(0x80000001, a, b, c, d);
3515 /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
3516 have_lzcnt = (c & bit_LZCNT) != 0;
3518 #endif /* CONFIG_CPUID_H */
3520 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
3521 if (TCG_TARGET_REG_BITS == 64) {
3522 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
3524 if (have_avx1) {
3525 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3526 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
3528 if (have_avx2) {
3529 tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
3532 tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
3533 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
3534 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
3535 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
3536 if (TCG_TARGET_REG_BITS == 64) {
3537 #if !defined(_WIN64)
3538 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
3539 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
3540 #endif
3541 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3542 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3543 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3544 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3547 s->reserved_regs = 0;
3548 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
3551 typedef struct {
3552 DebugFrameHeader h;
3553 uint8_t fde_def_cfa[4];
3554 uint8_t fde_reg_ofs[14];
3555 } DebugFrame;
3557 /* We're expecting a 2 byte uleb128 encoded value. */
3558 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3560 #if !defined(__ELF__)
3561 /* Host machine without ELF. */
3562 #elif TCG_TARGET_REG_BITS == 64
3563 #define ELF_HOST_MACHINE EM_X86_64
3564 static const DebugFrame debug_frame = {
3565 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3566 .h.cie.id = -1,
3567 .h.cie.version = 1,
3568 .h.cie.code_align = 1,
3569 .h.cie.data_align = 0x78, /* sleb128 -8 */
3570 .h.cie.return_column = 16,
3572 /* Total FDE size does not include the "len" member. */
3573 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3575 .fde_def_cfa = {
3576 12, 7, /* DW_CFA_def_cfa %rsp, ... */
3577 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3578 (FRAME_SIZE >> 7)
3580 .fde_reg_ofs = {
3581 0x90, 1, /* DW_CFA_offset, %rip, -8 */
3582 /* The following ordering must match tcg_target_callee_save_regs. */
3583 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
3584 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
3585 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
3586 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
3587 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
3588 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
3591 #else
3592 #define ELF_HOST_MACHINE EM_386
3593 static const DebugFrame debug_frame = {
3594 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3595 .h.cie.id = -1,
3596 .h.cie.version = 1,
3597 .h.cie.code_align = 1,
3598 .h.cie.data_align = 0x7c, /* sleb128 -4 */
3599 .h.cie.return_column = 8,
3601 /* Total FDE size does not include the "len" member. */
3602 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3604 .fde_def_cfa = {
3605 12, 4, /* DW_CFA_def_cfa %esp, ... */
3606 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3607 (FRAME_SIZE >> 7)
3609 .fde_reg_ofs = {
3610 0x88, 1, /* DW_CFA_offset, %eip, -4 */
3611 /* The following ordering must match tcg_target_callee_save_regs. */
3612 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
3613 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
3614 0x86, 4, /* DW_CFA_offset, %esi, -16 */
3615 0x87, 5, /* DW_CFA_offset, %edi, -20 */
3618 #endif
3620 #if defined(ELF_HOST_MACHINE)
3621 void tcg_register_jit(void *buf, size_t buf_size)
3622 {
3623 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3624 }
3625 #endif