/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.
   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                      // VexArch
#include "host_generic_regs.h"           // HReg

/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

#define ST_IN static inline
ST_IN HReg hregAMD64_R12   ( void ) { return mkHReg(False, HRcInt64,   12,  0); }
ST_IN HReg hregAMD64_R13   ( void ) { return mkHReg(False, HRcInt64,   13,  1); }
ST_IN HReg hregAMD64_R14   ( void ) { return mkHReg(False, HRcInt64,   14,  2); }
ST_IN HReg hregAMD64_R15   ( void ) { return mkHReg(False, HRcInt64,   15,  3); }
ST_IN HReg hregAMD64_RBX   ( void ) { return mkHReg(False, HRcInt64,    3,  4); }
ST_IN HReg hregAMD64_RSI   ( void ) { return mkHReg(False, HRcInt64,    6,  5); }
ST_IN HReg hregAMD64_RDI   ( void ) { return mkHReg(False, HRcInt64,    7,  6); }
ST_IN HReg hregAMD64_R8    ( void ) { return mkHReg(False, HRcInt64,    8,  7); }
ST_IN HReg hregAMD64_R9    ( void ) { return mkHReg(False, HRcInt64,    9,  8); }
ST_IN HReg hregAMD64_R10   ( void ) { return mkHReg(False, HRcInt64,   10,  9); }

ST_IN HReg hregAMD64_XMM3  ( void ) { return mkHReg(False, HRcVec128,   3, 10); }
ST_IN HReg hregAMD64_XMM4  ( void ) { return mkHReg(False, HRcVec128,   4, 11); }
ST_IN HReg hregAMD64_XMM5  ( void ) { return mkHReg(False, HRcVec128,   5, 12); }
ST_IN HReg hregAMD64_XMM6  ( void ) { return mkHReg(False, HRcVec128,   6, 13); }
ST_IN HReg hregAMD64_XMM7  ( void ) { return mkHReg(False, HRcVec128,   7, 14); }
ST_IN HReg hregAMD64_XMM8  ( void ) { return mkHReg(False, HRcVec128,   8, 15); }
ST_IN HReg hregAMD64_XMM9  ( void ) { return mkHReg(False, HRcVec128,   9, 16); }
ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128,  10, 17); }
ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128,  11, 18); }
ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128,  12, 19); }

ST_IN HReg hregAMD64_RAX   ( void ) { return mkHReg(False, HRcInt64,    0, 20); }
ST_IN HReg hregAMD64_RCX   ( void ) { return mkHReg(False, HRcInt64,    1, 21); }
ST_IN HReg hregAMD64_RDX   ( void ) { return mkHReg(False, HRcInt64,    2, 22); }
ST_IN HReg hregAMD64_RSP   ( void ) { return mkHReg(False, HRcInt64,    4, 23); }
ST_IN HReg hregAMD64_RBP   ( void ) { return mkHReg(False, HRcInt64,    5, 24); }
ST_IN HReg hregAMD64_R11   ( void ) { return mkHReg(False, HRcInt64,   11, 25); }

ST_IN HReg hregAMD64_XMM0  ( void ) { return mkHReg(False, HRcVec128,   0, 26); }
ST_IN HReg hregAMD64_XMM1  ( void ) { return mkHReg(False, HRcVec128,   1, 27); }
#undef ST_IN

extern UInt ppHRegAMD64 ( HReg );
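
/* Usage sketch (added for illustration; not part of the original
   interface).  Each real register above is built by
   mkHReg(virtual?, class, hardware-encoding, universe-index), e.g.

      HReg r12  = hregAMD64_R12();    // int reg, encoding 12, index 0
      HReg xmm3 = hregAMD64_XMM3();   // vector reg, encoding 3, index 10
      ppHRegAMD64(r12);               // prints the register name, "%r12"

   The False argument marks them as real (not virtual) registers. */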

/* --------- Condition codes, AMD encoding. --------- */

typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* jump less          */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;

extern const HChar* showAMD64CondCode ( AMD64CondCode );
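
/* Illustrative note (an assumption, not stated elsewhere in this file):
   the Acc_ values follow the AMD/Intel condition-code numbering, so an
   emitter can splice a code directly into a SETcc or Jcc opcode, e.g.

      // setcc r/m8 is encoded as 0F 90+cc; cc must not be Acc_ALWAYS
      static UChar setcc_second_opcode_byte ( AMD64CondCode cc )
      {
         return 0x90 + (cc & 0x0F);
      }
*/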

/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Aam_IR,        /* Immediate + Reg */
     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift),
                       Shift = 0, 1, 2 or 3 only */
   }
   AMD64AModeTag;

extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );
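
/* Usage sketch (illustrative; the offsets are arbitrary examples).  The
   IR form addresses a fixed offset from one register, as used for
   guest-state slots, while the IRRS form adds a scaled index, which
   suits array-style accesses:

      AMD64AMode* am1 = AMD64AMode_IR  ( 16, hregAMD64_RBP() );
      // refers to memory at %rbp + 16

      AMD64AMode* am2 = AMD64AMode_IRRS( 0, hregAMD64_RSI(),
                                            hregAMD64_RDI(), 3 );
      // refers to memory at %rsi + (%rdi << 3)
*/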

/* --------- Operand, which can be reg, immediate or memory. --------- */

extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );
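
/* Sketch (illustrative): an RMI defers the reg/immediate/memory choice
   to the emitter, so the instruction selector can build, for example,

      AMD64RMI* opImm = AMD64RMI_Imm( 0x1234 );
      AMD64RMI* opReg = AMD64RMI_Reg( hregAMD64_RCX() );
      AMD64RMI* opMem = AMD64RMI_Mem( AMD64AMode_IR( 8, hregAMD64_RSP() ) );

   and hand any of them to a constructor such as AMD64Instr_Alu64R below.
   ppAMD64RMI_lo32 is the variant used when the operand is shown in its
   32-bit form (as for Alu32R). */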

/* --------- Operand, which can be reg or immediate only. --------- */

extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );

/* --------- Operand, which can be reg or memory only. --------- */

extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );
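
/* Sketch (illustrative): AMD64RI and AMD64RM are the narrower operand
   kinds, for instructions whose encodings cannot take all three forms:

      AMD64RI* src = AMD64RI_Imm( 1 );   // or AMD64RI_Reg(...)
      AMD64RM* rm  = AMD64RM_Mem( AMD64AMode_IR( 0, hregAMD64_RSI() ) );

   An AMD64RI pairs with a memory destination (see AMD64Instr_Alu64M
   below); an AMD64RM supplies the r/m operand for MulL and Div. */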

/* --------- Instructions. --------- */

extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );

typedef
   enum {
      Aalu_INVALID,
      Aalu_MOV,
      Aalu_CMP,
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
      Aalu_MUL
   }
   AMD64AluOp;

extern const HChar* showAMD64AluOp ( AMD64AluOp );

typedef
   enum {
      Ash_INVALID,
      Ash_SHL, Ash_SHR, Ash_SAR
   }
   AMD64ShiftOp;

extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );

      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      Afp_SIN, Afp_COS, Afp_TAN,

extern const HChar* showA87FpOp ( A87FpOp );

      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Floating point conversion */
      Asse_I2F,  // i32-signed to float conversion, aka cvtdq2ps in vec form
      Asse_F2I,  // float to i32-signed conversion, aka cvtps2dq in vec form
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64, Asse_SHL128,
      Asse_SHR16, Asse_SHR32, Asse_SHR64, Asse_SHR128,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ,
      Asse_PSHUFB  // Only for SSSE3 capable hosts

extern const HChar* showAMD64SseOp ( AMD64SseOp );

typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseCStore,   /* SSE conditional store, 128 bit only, any alignment */
      Ain_SseCLoad,    /* SSE conditional load, 128 bit only, any alignment */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      Ain_SseShiftN,   /* SSE2 shift by immediate */
      Ain_SseMOVQ,     /* SSE2 moves of xmm[63:0] to/from GPR */
      //uu Ain_AvxLdSt, /* AVX load/store 256 bits,
      //uu                 no alignment constraints */
      //uu Ain_AvxReRg, /* AVX binary general reg-reg, Re, Rg */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;
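
/* Sketch (illustrative; the union/field names are assumptions inferred
   from the constructor arguments below): consumers of AMD64Instr
   dispatch on the tag, along the lines of

      switch (i->tag) {
         case Ain_Imm64:
            // use i->Ain.Imm64.imm64 and i->Ain.Imm64.dst
            break;
         case Ain_Alu64R:
            // op, src (an AMD64RMI*) and dst
            break;
         default:
            break;
      }
*/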

      /* Destinations are on the RIGHT (second operand) */

         UInt src;  /* shift amount, or 0 means %cl */

         /* 64-bit compute EA into a reg */

         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */

         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */

         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         Int sz;  /* 4 or 8 only */

         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Acc_ALWAYS). */
         Int    regparms;  /* 0 .. 6 */
         RetLoc rloc;      /* where the return value will be */

         /* Update the guest RIP value, then exit requesting to chain
            to it.  May be conditional. */
         Addr64        dstGA;     /* next guest address */
         AMD64AMode*   amRIP;     /* amode in guest state for RIP */
         AMD64CondCode cond;      /* can be Acc_ALWAYS */
         Bool          toFastEP;  /* chain to the slow or fast point? */

         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         AMD64CondCode cond;  /* can be Acc_ALWAYS */

         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         AMD64CondCode cond;  /* can be Acc_ALWAYS */

         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */

         /* conditional load to int reg, 32 bit ZX or 64 bit only.
            cond may not be Acc_ALWAYS. */
         UChar szB;  /* 4 or 8 only */

         /* cond. store from int reg, 32 or 64 bit only.
            cond may not be Acc_ALWAYS. */
         UChar szB;  /* 4 or 8 only */

         /* reg-reg move, sx-ing/zx-ing top half */

         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         UChar szSmall;  /* only 1, 2 or 4 */

         /* 32/16/8 bit stores. */
         UChar sz;  /* only 1, 2 or 4 */

         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */

         /* 64-bit bsf or bsr. */

         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */

         UChar sz;  /* 1, 2, 4 or 8 */

         UChar sz;  /* 4 or 8 only */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         Int nregs;  /* 1 <= nregs <= 7 */

         /* Push a 32- or 64-bit FP value from memory onto the stack,
            or move a value from the stack to memory and remove it
            from the stack. */
         UChar szB;  /* 4 or 8 */

         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */

         /* Load the FPU control word. */

         /* Store the FPU status word (fstsw m16) */

         /* Load 32 bits into %mxcsr. */

         /* ucomisd/ucomiss, then get %rflags into int register */
         UChar sz;  /* 4 or 8 only */

         /* scalar 32/64 int to 32/64 float conversion */
         UChar szS;  /* 4 or 8 */
         UChar szD;  /* 4 or 8 */
         HReg  src;  /* i class */
         HReg  dst;  /* v class */

         /* scalar 32/64 float to 32/64 int conversion */
         UChar szS;  /* 4 or 8 */
         UChar szD;  /* 4 or 8 */
         HReg  src;  /* v class */
         HReg  dst;  /* i class */

         /* scalar float32 to/from float64 */
         Bool from64;  /* True: 64->32; False: 32->64 */

         UChar sz;  /* 4, 8 or 16 only */

         AMD64CondCode cond;  /* may not be Acc_ALWAYS */

         AMD64CondCode cond;  /* may not be Acc_ALWAYS */

         Int sz;  /* 4 or 8 only */

         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */

         Int order;  /* 0 <= order <= 0xFF */

         Bool toXMM;  // when moving to xmm, xmm[127:64] is zeroed out

         //uu AMD64AMode* addr;

         AMD64AMode* amCounter;
         AMD64AMode* amFailAddr;

         /* No fields.  The address of the counter to inc is
            installed later, post-translation, by patching it in,
            as it is not known at translation time. */

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                           AMD64AMode* addr, HReg dst );
extern AMD64Instr* AMD64Instr_CStore     ( AMD64CondCode cond, UChar szB,
                                           HReg src, AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCStore  ( AMD64CondCode, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCLoad   ( AMD64CondCode, AMD64AMode*, HReg );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShiftN  ( AMD64SseOp,
                                           UInt shiftBits, HReg dst );
extern AMD64Instr* AMD64Instr_SseMOVQ    ( HReg gpr, HReg xmm, Bool toXMM );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );
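
/* Usage sketch (illustrative; addInstr() stands for whatever helper the
   instruction selector uses to append to its HInstrArray, and the
   destination would normally be a virtual register): "load a constant,
   then add a register to it" could be expressed as

      HReg dst = hregAMD64_RAX();
      addInstr( env, AMD64Instr_Imm64( 0x1000ULL, dst ) );
      addInstr( env, AMD64Instr_Alu64R( Aalu_ADD,
                                        AMD64RMI_Reg( hregAMD64_RBX() ),
                                        dst ) );
*/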

extern void ppAMD64Instr ( const AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
                             UChar* buf, Int nbuf,
                             const AMD64Instr* i,
                             Bool mode64,
                             VexEndness endness_host,
                             const void* disp_cp_chain_me_to_slowEP,
                             const void* disp_cp_chain_me_to_fastEP,
                             const void* disp_cp_xindir,
                             const void* disp_cp_xassisted );

extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern AMD64Instr* genMove_AMD64 ( HReg from, HReg to, Bool );
extern AMD64Instr* directReload_AMD64 ( AMD64Instr* i,
                                        HReg vreg, Short spill_off );

extern const RRegUniverse* getRRegUniverse_AMD64 ( void );

extern HInstrArray* iselSB_AMD64 ( const IRSB*,
                                   VexArch,
                                   const VexArchInfo*,
                                   const VexAbiInfo*,
                                   Int offs_Host_EvC_Counter,
                                   Int offs_Host_EvC_FailAddr,
                                   Bool chainingAllowed,
                                   Bool addProfInc,
                                   Addr max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 ( void );

/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
                                          void* place_to_chain,
                                          const void* disp_cp_chain_me_EXPECTED,
                                          const void* place_to_jump_to );
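
/* Sketch (illustrative): the returned VexInvalRange describes the code
   bytes that were rewritten, so the caller can invalidate any cached
   copies of them, e.g.

      VexInvalRange vir
         = chainXDirect_AMD64( VexEndnessLE, place_to_chain,
                               disp_cp_chain_me_EXPECTED, place_to_jump_to );
      // now flush/invalidate [vir.start, vir.start + vir.len)
*/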

extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
                                            void* place_to_unchain,
                                            const void* place_to_jump_to_EXPECTED,
                                            const void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
                                          void* place_to_patch,
                                          const ULong* location_of_counter );

#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/