/*---------------------------------------------------------------*/
/*--- begin                                   host_x86_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
34 #ifndef __VEX_HOST_X86_DEFS_H
35 #define __VEX_HOST_X86_DEFS_H
37 #include "libvex_basictypes.h"
38 #include "libvex.h" // VexArch
39 #include "host_generic_regs.h" // HReg
41 /* --------- Registers. --------- */
43 /* The usual HReg abstraction. There are 8 real int regs,
44 6 real float regs, and 8 real vector regs.
47 #define ST_IN static inline
48 ST_IN HReg hregX86_EBX ( void ) { return mkHReg(False, HRcInt32, 3, 0); }
49 ST_IN HReg hregX86_ESI ( void ) { return mkHReg(False, HRcInt32, 6, 1); }
50 ST_IN HReg hregX86_EDI ( void ) { return mkHReg(False, HRcInt32, 7, 2); }
51 ST_IN HReg hregX86_EAX ( void ) { return mkHReg(False, HRcInt32, 0, 3); }
52 ST_IN HReg hregX86_ECX ( void ) { return mkHReg(False, HRcInt32, 1, 4); }
53 ST_IN HReg hregX86_EDX ( void ) { return mkHReg(False, HRcInt32, 2, 5); }
55 ST_IN HReg hregX86_FAKE0 ( void ) { return mkHReg(False, HRcFlt64, 0, 6); }
56 ST_IN HReg hregX86_FAKE1 ( void ) { return mkHReg(False, HRcFlt64, 1, 7); }
57 ST_IN HReg hregX86_FAKE2 ( void ) { return mkHReg(False, HRcFlt64, 2, 8); }
58 ST_IN HReg hregX86_FAKE3 ( void ) { return mkHReg(False, HRcFlt64, 3, 9); }
59 ST_IN HReg hregX86_FAKE4 ( void ) { return mkHReg(False, HRcFlt64, 4, 10); }
60 ST_IN HReg hregX86_FAKE5 ( void ) { return mkHReg(False, HRcFlt64, 5, 11); }
62 ST_IN HReg hregX86_XMM0 ( void ) { return mkHReg(False, HRcVec128, 0, 12); }
63 ST_IN HReg hregX86_XMM1 ( void ) { return mkHReg(False, HRcVec128, 1, 13); }
64 ST_IN HReg hregX86_XMM2 ( void ) { return mkHReg(False, HRcVec128, 2, 14); }
65 ST_IN HReg hregX86_XMM3 ( void ) { return mkHReg(False, HRcVec128, 3, 15); }
66 ST_IN HReg hregX86_XMM4 ( void ) { return mkHReg(False, HRcVec128, 4, 16); }
67 ST_IN HReg hregX86_XMM5 ( void ) { return mkHReg(False, HRcVec128, 5, 17); }
68 ST_IN HReg hregX86_XMM6 ( void ) { return mkHReg(False, HRcVec128, 6, 18); }
69 ST_IN HReg hregX86_XMM7 ( void ) { return mkHReg(False, HRcVec128, 7, 19); }
71 ST_IN HReg hregX86_ESP ( void ) { return mkHReg(False, HRcInt32, 4, 20); }
72 ST_IN HReg hregX86_EBP ( void ) { return mkHReg(False, HRcInt32, 5, 21); }
73 #undef ST_IN
75 extern UInt ppHRegX86 ( HReg );
78 /* --------- Condition codes, Intel encoding. --------- */
80 typedef
81 enum {
82 Xcc_O = 0, /* overflow */
83 Xcc_NO = 1, /* no overflow */
85 Xcc_B = 2, /* below */
86 Xcc_NB = 3, /* not below */
88 Xcc_Z = 4, /* zero */
89 Xcc_NZ = 5, /* not zero */
91 Xcc_BE = 6, /* below or equal */
92 Xcc_NBE = 7, /* not below or equal */
94 Xcc_S = 8, /* negative */
95 Xcc_NS = 9, /* not negative */
97 Xcc_P = 10, /* parity even */
98 Xcc_NP = 11, /* not parity even */
100 Xcc_L = 12, /* jump less */
101 Xcc_NL = 13, /* not less */
103 Xcc_LE = 14, /* less or equal */
104 Xcc_NLE = 15, /* not less or equal */
106 Xcc_ALWAYS = 16 /* the usual hack */
108 X86CondCode;
110 extern const HChar* showX86CondCode ( X86CondCode );
113 /* --------- Memory address expressions (amodes). --------- */
115 typedef
116 enum {
117 Xam_IR, /* Immediate + Reg */
118 Xam_IRRS /* Immediate + Reg1 + (Reg2 << Shift) */
120 X86AModeTag;
122 typedef
123 struct {
124 X86AModeTag tag;
125 union {
126 struct {
127 UInt imm;
128 HReg reg;
129 } IR;
130 struct {
131 UInt imm;
132 HReg base;
133 HReg index;
134 Int shift; /* 0, 1, 2 or 3 only */
135 } IRRS;
136 } Xam;
138 X86AMode;
140 extern X86AMode* X86AMode_IR ( UInt, HReg );
141 extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );
143 extern X86AMode* dopyX86AMode ( X86AMode* );
145 extern void ppX86AMode ( X86AMode* );
148 /* --------- Operand, which can be reg, immediate or memory. --------- */
150 typedef
151 enum {
152 Xrmi_Imm,
153 Xrmi_Reg,
154 Xrmi_Mem
156 X86RMITag;
158 typedef
159 struct {
160 X86RMITag tag;
161 union {
162 struct {
163 UInt imm32;
164 } Imm;
165 struct {
166 HReg reg;
167 } Reg;
168 struct {
169 X86AMode* am;
170 } Mem;
172 Xrmi;
174 X86RMI;
176 extern X86RMI* X86RMI_Imm ( UInt );
177 extern X86RMI* X86RMI_Reg ( HReg );
178 extern X86RMI* X86RMI_Mem ( X86AMode* );
180 extern void ppX86RMI ( X86RMI* );
183 /* --------- Operand, which can be reg or immediate only. --------- */
185 typedef
186 enum {
187 Xri_Imm,
188 Xri_Reg
190 X86RITag;
192 typedef
193 struct {
194 X86RITag tag;
195 union {
196 struct {
197 UInt imm32;
198 } Imm;
199 struct {
200 HReg reg;
201 } Reg;
203 Xri;
205 X86RI;
207 extern X86RI* X86RI_Imm ( UInt );
208 extern X86RI* X86RI_Reg ( HReg );
210 extern void ppX86RI ( X86RI* );
213 /* --------- Operand, which can be reg or memory only. --------- */
215 typedef
216 enum {
217 Xrm_Reg,
218 Xrm_Mem
220 X86RMTag;
222 typedef
223 struct {
224 X86RMTag tag;
225 union {
226 struct {
227 HReg reg;
228 } Reg;
229 struct {
230 X86AMode* am;
231 } Mem;
233 Xrm;
235 X86RM;
237 extern X86RM* X86RM_Reg ( HReg );
238 extern X86RM* X86RM_Mem ( X86AMode* );
240 extern void ppX86RM ( X86RM* );
243 /* --------- Instructions. --------- */
245 /* --------- */
246 typedef
247 enum {
248 Xun_NEG,
249 Xun_NOT
251 X86UnaryOp;
253 extern const HChar* showX86UnaryOp ( X86UnaryOp );
256 /* --------- */
257 typedef
258 enum {
259 Xalu_INVALID,
260 Xalu_MOV,
261 Xalu_CMP,
262 Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB,
263 Xalu_AND, Xalu_OR, Xalu_XOR,
264 Xalu_MUL
266 X86AluOp;
268 extern const HChar* showX86AluOp ( X86AluOp );
271 /* --------- */
272 typedef
273 enum {
274 Xsh_INVALID,
275 Xsh_SHL, Xsh_SHR, Xsh_SAR
277 X86ShiftOp;
279 extern const HChar* showX86ShiftOp ( X86ShiftOp );
282 /* --------- */
283 typedef
284 enum {
285 Xfp_INVALID,
286 /* Binary */
287 Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
288 Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
289 /* Unary */
290 Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN,
291 Xfp_ROUND, Xfp_2XM1
293 X86FpOp;
295 extern const HChar* showX86FpOp ( X86FpOp );
298 /* --------- */
299 typedef
300 enum {
301 Xsse_INVALID,
302 /* mov */
303 Xsse_MOV,
304 /* Floating point binary */
305 Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
306 Xsse_MAXF, Xsse_MINF,
307 Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
308 /* Floating point unary */
309 Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
310 /* Bitwise */
311 Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
312 /* Integer binary */
313 Xsse_ADD8, Xsse_ADD16, Xsse_ADD32, Xsse_ADD64,
314 Xsse_QADD8U, Xsse_QADD16U,
315 Xsse_QADD8S, Xsse_QADD16S,
316 Xsse_SUB8, Xsse_SUB16, Xsse_SUB32, Xsse_SUB64,
317 Xsse_QSUB8U, Xsse_QSUB16U,
318 Xsse_QSUB8S, Xsse_QSUB16S,
319 Xsse_MUL16,
320 Xsse_MULHI16U,
321 Xsse_MULHI16S,
322 Xsse_AVG8U, Xsse_AVG16U,
323 Xsse_MAX16S,
324 Xsse_MAX8U,
325 Xsse_MIN16S,
326 Xsse_MIN8U,
327 Xsse_CMPEQ8, Xsse_CMPEQ16, Xsse_CMPEQ32,
328 Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
329 Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
330 Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
331 Xsse_SAR16, Xsse_SAR32,
332 Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
333 Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
334 Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
336 X86SseOp;
338 extern const HChar* showX86SseOp ( X86SseOp );
/* --------- */
typedef
   enum {
      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
                        flags, discard result) */
      Xin_Unary32,   /* 32-bit not and neg */
      Xin_Lea32,     /* 32-bit compute EA into a reg */
      Xin_MulL,      /* 32 x 32 -> 64 multiply */
      Xin_Div,       /* 64/32 -> (32,32) div and mod */
      Xin_Sh3232,    /* shldl or shrdl */
      Xin_Push,      /* push (32-bit?) value on stack */
      Xin_Call,      /* call to address in register */
      Xin_XDirect,   /* direct transfer to GA */
      Xin_XIndir,    /* indirect transfer to GA */
      Xin_XAssisted, /* assisted transfer to GA */
      Xin_CMov32,    /* conditional move */
      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
      Xin_Store,     /* store 16/8 bit value in memory */
      Xin_Set32,     /* convert condition code to 32-bit value */
      Xin_Bsfr32,    /* 32-bit bsf/bsr */
      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1/mmxext too) */
      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */

      Xin_FpUnary,   /* FP fake unary op */
      Xin_FpBinary,  /* FP fake binary op */
      Xin_FpLdSt,    /* FP fake load/store */
      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
      Xin_FpCMov,    /* FP fake floating point conditional move */
      Xin_FpLdCW,    /* fldcw */
      Xin_FpStSW_AX, /* fstsw %ax */
      Xin_FpCmp,     /* FP compare, generating a C320 value into int reg */

      Xin_SseConst,  /* Generate restricted SSE literal */
      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
      Xin_SseCMov,   /* SSE conditional move */
      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
      Xin_EvCheck,   /* Event check */
      Xin_ProfInc    /* 64-bit profile counter increment */
   }
   X86InstrTag;
393 /* Destinations are on the RIGHT (second operand) */
395 typedef
396 struct {
397 X86InstrTag tag;
398 union {
399 struct {
400 X86AluOp op;
401 X86RMI* src;
402 HReg dst;
403 } Alu32R;
404 struct {
405 X86AluOp op;
406 X86RI* src;
407 X86AMode* dst;
408 } Alu32M;
409 struct {
410 X86ShiftOp op;
411 UInt src; /* shift amount, or 0 means %cl */
412 HReg dst;
413 } Sh32;
414 struct {
415 UInt imm32;
416 X86RM* dst; /* not written, only read */
417 } Test32;
418 /* Not and Neg */
419 struct {
420 X86UnaryOp op;
421 HReg dst;
422 } Unary32;
423 /* 32-bit compute EA into a reg */
424 struct {
425 X86AMode* am;
426 HReg dst;
427 } Lea32;
428 /* EDX:EAX = EAX *s/u r/m32 */
429 struct {
430 Bool syned;
431 X86RM* src;
432 } MulL;
433 /* x86 div/idiv instruction. Modifies EDX and EAX and reads src. */
434 struct {
435 Bool syned;
436 X86RM* src;
437 } Div;
438 /* shld/shrd. op may only be Xsh_SHL or Xsh_SHR */
439 struct {
440 X86ShiftOp op;
441 UInt amt; /* shift amount, or 0 means %cl */
442 HReg src;
443 HReg dst;
444 } Sh3232;
445 struct {
446 X86RMI* src;
447 } Push;
448 /* Pseudo-insn. Call target (an absolute address), on given
449 condition (which could be Xcc_ALWAYS). */
450 struct {
451 X86CondCode cond;
452 Addr32 target;
453 Int regparms; /* 0 .. 3 */
454 RetLoc rloc; /* where the return value will be */
455 } Call;
456 /* Update the guest EIP value, then exit requesting to chain
457 to it. May be conditional. Urr, use of Addr32 implicitly
458 assumes that wordsize(guest) == wordsize(host). */
459 struct {
460 Addr32 dstGA; /* next guest address */
461 X86AMode* amEIP; /* amode in guest state for EIP */
462 X86CondCode cond; /* can be Xcc_ALWAYS */
463 Bool toFastEP; /* chain to the slow or fast point? */
464 } XDirect;
465 /* Boring transfer to a guest address not known at JIT time.
466 Not chainable. May be conditional. */
467 struct {
468 HReg dstGA;
469 X86AMode* amEIP;
470 X86CondCode cond; /* can be Xcc_ALWAYS */
471 } XIndir;
472 /* Assisted transfer to a guest address, most general case.
473 Not chainable. May be conditional. */
474 struct {
475 HReg dstGA;
476 X86AMode* amEIP;
477 X86CondCode cond; /* can be Xcc_ALWAYS */
478 IRJumpKind jk;
479 } XAssisted;
480 /* Mov src to dst on the given condition, which may not
481 be the bogus Xcc_ALWAYS. */
482 struct {
483 X86CondCode cond;
484 X86RM* src;
485 HReg dst;
486 } CMov32;
487 /* Sign/Zero extending loads. Dst size is always 32 bits. */
488 struct {
489 UChar szSmall;
490 Bool syned;
491 X86AMode* src;
492 HReg dst;
493 } LoadEX;
494 /* 16/8 bit stores, which are troublesome (particularly
495 8-bit) */
496 struct {
497 UChar sz; /* only 1 or 2 */
498 HReg src;
499 X86AMode* dst;
500 } Store;
501 /* Convert a x86 condition code to a 32-bit value (0 or 1). */
502 struct {
503 X86CondCode cond;
504 HReg dst;
505 } Set32;
506 /* 32-bit bsf or bsr. */
507 struct {
508 Bool isFwds;
509 HReg src;
510 HReg dst;
511 } Bsfr32;
512 /* Mem fence (not just sse2, but sse0 and sse1/mmxext too).
513 In short, an insn which flushes all preceding loads and
514 stores as much as possible before continuing. On SSE2
515 we emit a real "mfence", on SSE1 or the MMXEXT subset
516 "sfence ; lock addl $0,0(%esp)" and on SSE0
517 "lock addl $0,0(%esp)". This insn therefore carries the
518 host's hwcaps so the assembler knows what to emit. */
519 struct {
520 UInt hwcaps;
521 } MFence;
522 /* "lock;cmpxchg": mem address in .addr,
523 expected value in %eax, new value in %ebx */
524 struct {
525 X86AMode* addr;
526 UChar sz; /* 1, 2 or 4 */
527 } ACAS;
528 /* "lock;cmpxchg8b": mem address in .addr, expected value in
529 %edx:%eax, new value in %ecx:%ebx */
530 struct {
531 X86AMode* addr;
532 } DACAS;
534 /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
535 struct {
536 X86FpOp op;
537 HReg src;
538 HReg dst;
539 } FpUnary;
540 struct {
541 X86FpOp op;
542 HReg srcL;
543 HReg srcR;
544 HReg dst;
545 } FpBinary;
546 struct {
547 Bool isLoad;
548 UChar sz; /* only 4 (IEEE single) or 8 (IEEE double) */
549 HReg reg;
550 X86AMode* addr;
551 } FpLdSt;
552 /* Move 64-bit float to/from memory, converting to/from
553 signed int on the way. Note the conversions will observe
554 the host FPU rounding mode currently in force. */
555 struct {
556 Bool isLoad;
557 UChar sz; /* only 2, 4 or 8 */
558 HReg reg;
559 X86AMode* addr;
560 } FpLdStI;
561 /* By observing the current FPU rounding mode, round (etc)
562 src into dst given that dst should be interpreted as an
563 IEEE754 32-bit (float) type. */
564 struct {
565 HReg src;
566 HReg dst;
567 } Fp64to32;
568 /* Mov src to dst on the given condition, which may not
569 be the bogus Xcc_ALWAYS. */
570 struct {
571 X86CondCode cond;
572 HReg src;
573 HReg dst;
574 } FpCMov;
575 /* Load the FPU's 16-bit control word (fldcw) */
576 struct {
577 X86AMode* addr;
579 FpLdCW;
580 /* fstsw %ax */
581 struct {
582 /* no fields */
584 FpStSW_AX;
585 /* Do a compare, generating the C320 bits into the dst. */
586 struct {
587 HReg srcL;
588 HReg srcR;
589 HReg dst;
590 } FpCmp;
592 /* Simplistic SSE[123] */
593 struct {
594 UShort con;
595 HReg dst;
596 } SseConst;
597 struct {
598 Bool isLoad;
599 HReg reg;
600 X86AMode* addr;
601 } SseLdSt;
602 struct {
603 UChar sz; /* 4 or 8 only */
604 HReg reg;
605 X86AMode* addr;
606 } SseLdzLO;
607 struct {
608 X86SseOp op;
609 HReg src;
610 HReg dst;
611 } Sse32Fx4;
612 struct {
613 X86SseOp op;
614 HReg src;
615 HReg dst;
616 } Sse32FLo;
617 struct {
618 X86SseOp op;
619 HReg src;
620 HReg dst;
621 } Sse64Fx2;
622 struct {
623 X86SseOp op;
624 HReg src;
625 HReg dst;
626 } Sse64FLo;
627 struct {
628 X86SseOp op;
629 HReg src;
630 HReg dst;
631 } SseReRg;
632 /* Mov src to dst on the given condition, which may not
633 be the bogus Xcc_ALWAYS. */
634 struct {
635 X86CondCode cond;
636 HReg src;
637 HReg dst;
638 } SseCMov;
639 struct {
640 Int order; /* 0 <= order <= 0xFF */
641 HReg src;
642 HReg dst;
643 } SseShuf;
644 struct {
645 X86AMode* amCounter;
646 X86AMode* amFailAddr;
647 } EvCheck;
648 struct {
649 /* No fields. The address of the counter to inc is
650 installed later, post-translation, by patching it in,
651 as it is not known at translation time. */
652 } ProfInc;
654 } Xin;
656 X86Instr;
658 extern X86Instr* X86Instr_Alu32R ( X86AluOp, X86RMI*, HReg );
659 extern X86Instr* X86Instr_Alu32M ( X86AluOp, X86RI*, X86AMode* );
660 extern X86Instr* X86Instr_Unary32 ( X86UnaryOp op, HReg dst );
661 extern X86Instr* X86Instr_Lea32 ( X86AMode* am, HReg dst );
663 extern X86Instr* X86Instr_Sh32 ( X86ShiftOp, UInt, HReg );
664 extern X86Instr* X86Instr_Test32 ( UInt imm32, X86RM* dst );
665 extern X86Instr* X86Instr_MulL ( Bool syned, X86RM* );
666 extern X86Instr* X86Instr_Div ( Bool syned, X86RM* );
667 extern X86Instr* X86Instr_Sh3232 ( X86ShiftOp, UInt amt, HReg src, HReg dst );
668 extern X86Instr* X86Instr_Push ( X86RMI* );
669 extern X86Instr* X86Instr_Call ( X86CondCode, Addr32, Int, RetLoc );
670 extern X86Instr* X86Instr_XDirect ( Addr32 dstGA, X86AMode* amEIP,
671 X86CondCode cond, Bool toFastEP );
672 extern X86Instr* X86Instr_XIndir ( HReg dstGA, X86AMode* amEIP,
673 X86CondCode cond );
674 extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
675 X86CondCode cond, IRJumpKind jk );
676 extern X86Instr* X86Instr_CMov32 ( X86CondCode, X86RM* src, HReg dst );
677 extern X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
678 X86AMode* src, HReg dst );
679 extern X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst );
680 extern X86Instr* X86Instr_Set32 ( X86CondCode cond, HReg dst );
681 extern X86Instr* X86Instr_Bsfr32 ( Bool isFwds, HReg src, HReg dst );
682 extern X86Instr* X86Instr_MFence ( UInt hwcaps );
683 extern X86Instr* X86Instr_ACAS ( X86AMode* addr, UChar sz );
684 extern X86Instr* X86Instr_DACAS ( X86AMode* addr );
686 extern X86Instr* X86Instr_FpUnary ( X86FpOp op, HReg src, HReg dst );
687 extern X86Instr* X86Instr_FpBinary ( X86FpOp op, HReg srcL, HReg srcR, HReg dst );
688 extern X86Instr* X86Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
689 extern X86Instr* X86Instr_FpLdStI ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
690 extern X86Instr* X86Instr_Fp64to32 ( HReg src, HReg dst );
691 extern X86Instr* X86Instr_FpCMov ( X86CondCode, HReg src, HReg dst );
692 extern X86Instr* X86Instr_FpLdCW ( X86AMode* );
693 extern X86Instr* X86Instr_FpStSW_AX ( void );
694 extern X86Instr* X86Instr_FpCmp ( HReg srcL, HReg srcR, HReg dst );
696 extern X86Instr* X86Instr_SseConst ( UShort con, HReg dst );
697 extern X86Instr* X86Instr_SseLdSt ( Bool isLoad, HReg, X86AMode* );
698 extern X86Instr* X86Instr_SseLdzLO ( Int sz, HReg, X86AMode* );
699 extern X86Instr* X86Instr_Sse32Fx4 ( X86SseOp, HReg, HReg );
700 extern X86Instr* X86Instr_Sse32FLo ( X86SseOp, HReg, HReg );
701 extern X86Instr* X86Instr_Sse64Fx2 ( X86SseOp, HReg, HReg );
702 extern X86Instr* X86Instr_Sse64FLo ( X86SseOp, HReg, HReg );
703 extern X86Instr* X86Instr_SseReRg ( X86SseOp, HReg, HReg );
704 extern X86Instr* X86Instr_SseCMov ( X86CondCode, HReg src, HReg dst );
705 extern X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst );
706 extern X86Instr* X86Instr_EvCheck ( X86AMode* amCounter,
707 X86AMode* amFailAddr );
708 extern X86Instr* X86Instr_ProfInc ( void );
711 extern void ppX86Instr ( const X86Instr*, Bool );
713 /* Some functions that insulate the register allocator from details
714 of the underlying instruction set. */
715 extern void getRegUsage_X86Instr ( HRegUsage*, const X86Instr*, Bool );
716 extern void mapRegs_X86Instr ( HRegRemap*, X86Instr*, Bool );
717 extern Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
718 UChar* buf, Int nbuf, const X86Instr* i,
719 Bool mode64,
720 VexEndness endness_host,
721 const void* disp_cp_chain_me_to_slowEP,
722 const void* disp_cp_chain_me_to_fastEP,
723 const void* disp_cp_xindir,
724 const void* disp_cp_xassisted );
726 extern void genSpill_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
727 HReg rreg, Int offset, Bool );
728 extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
729 HReg rreg, Int offset, Bool );
730 extern X86Instr* genMove_X86(HReg from, HReg to, Bool);
731 extern X86Instr* directReload_X86 ( X86Instr* i, HReg vreg, Short spill_off );
733 extern const RRegUniverse* getRRegUniverse_X86 ( void );
735 extern HInstrArray* iselSB_X86 ( const IRSB*,
736 VexArch,
737 const VexArchInfo*,
738 const VexAbiInfo*,
739 Int offs_Host_EvC_Counter,
740 Int offs_Host_EvC_FailAddr,
741 Bool chainingAllowed,
742 Bool addProfInc,
743 Addr max_ga );
745 /* How big is an event check? This is kind of a kludge because it
746 depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
747 and so assumes that they are both <= 128, and so can use the short
748 offset encoding. This is all checked with assertions, so in the
749 worst case we will merely assert at startup. */
750 extern Int evCheckSzB_X86 (void);
752 /* Perform a chaining and unchaining of an XDirect jump. */
753 extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
754 void* place_to_chain,
755 const void* disp_cp_chain_me_EXPECTED,
756 const void* place_to_jump_to );
758 extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
759 void* place_to_unchain,
760 const void* place_to_jump_to_EXPECTED,
761 const void* disp_cp_chain_me );
763 /* Patch the counter location into an existing ProfInc point. */
764 extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
765 void* place_to_patch,
766 const ULong* location_of_counter );
769 #endif /* ndef __VEX_HOST_X86_DEFS_H */
771 /*---------------------------------------------------------------*/
772 /*--- end host_x86_defs.h ---*/
773 /*---------------------------------------------------------------*/