/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"             // VexArch
#include "host_generic_regs.h"  // HReg
/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

#define ST_IN static inline
ST_IN HReg hregAMD64_R12   ( void ) { return mkHReg(False, HRcInt64,  12,  0); }
ST_IN HReg hregAMD64_R13   ( void ) { return mkHReg(False, HRcInt64,  13,  1); }
ST_IN HReg hregAMD64_R14   ( void ) { return mkHReg(False, HRcInt64,  14,  2); }
ST_IN HReg hregAMD64_R15   ( void ) { return mkHReg(False, HRcInt64,  15,  3); }
ST_IN HReg hregAMD64_RBX   ( void ) { return mkHReg(False, HRcInt64,   3,  4); }
ST_IN HReg hregAMD64_RSI   ( void ) { return mkHReg(False, HRcInt64,   6,  5); }
ST_IN HReg hregAMD64_RDI   ( void ) { return mkHReg(False, HRcInt64,   7,  6); }
ST_IN HReg hregAMD64_R8    ( void ) { return mkHReg(False, HRcInt64,   8,  7); }
ST_IN HReg hregAMD64_R9    ( void ) { return mkHReg(False, HRcInt64,   9,  8); }
ST_IN HReg hregAMD64_R10   ( void ) { return mkHReg(False, HRcInt64,  10,  9); }

ST_IN HReg hregAMD64_XMM3  ( void ) { return mkHReg(False, HRcVec128,  3, 10); }
ST_IN HReg hregAMD64_XMM4  ( void ) { return mkHReg(False, HRcVec128,  4, 11); }
ST_IN HReg hregAMD64_XMM5  ( void ) { return mkHReg(False, HRcVec128,  5, 12); }
ST_IN HReg hregAMD64_XMM6  ( void ) { return mkHReg(False, HRcVec128,  6, 13); }
ST_IN HReg hregAMD64_XMM7  ( void ) { return mkHReg(False, HRcVec128,  7, 14); }
ST_IN HReg hregAMD64_XMM8  ( void ) { return mkHReg(False, HRcVec128,  8, 15); }
ST_IN HReg hregAMD64_XMM9  ( void ) { return mkHReg(False, HRcVec128,  9, 16); }
ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128, 10, 17); }
ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128, 11, 18); }
ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128, 12, 19); }

ST_IN HReg hregAMD64_RAX   ( void ) { return mkHReg(False, HRcInt64,   0, 20); }
ST_IN HReg hregAMD64_RCX   ( void ) { return mkHReg(False, HRcInt64,   1, 21); }
ST_IN HReg hregAMD64_RDX   ( void ) { return mkHReg(False, HRcInt64,   2, 22); }
ST_IN HReg hregAMD64_RSP   ( void ) { return mkHReg(False, HRcInt64,   4, 23); }
ST_IN HReg hregAMD64_RBP   ( void ) { return mkHReg(False, HRcInt64,   5, 24); }
ST_IN HReg hregAMD64_R11   ( void ) { return mkHReg(False, HRcInt64,  11, 25); }

ST_IN HReg hregAMD64_XMM0  ( void ) { return mkHReg(False, HRcVec128,  0, 26); }
ST_IN HReg hregAMD64_XMM1  ( void ) { return mkHReg(False, HRcVec128,  1, 27); }
#undef ST_IN
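
/* A reading aid (a sketch, assuming the mkHReg(isVirtual, class, encoding,
   index) convention from host_generic_regs.h): each helper above returns a
   real (non-virtual) register of the stated class, whose third argument is
   the hardware encoding and whose fourth is a unique index used by the
   register allocator.  For instance, hregAMD64_R12() denotes %r12: class
   HRcInt64, hardware encoding 12, allocator index 0. */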
extern UInt ppHRegAMD64 ( HReg );


/* --------- Condition codes, AMD encoding. --------- */

typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* less               */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;

extern const HChar* showAMD64CondCode ( AMD64CondCode );
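
/* These are the standard amd64 condition-code encodings, i.e. the "cc"
   nibble used by Jcc/SETcc/CMOVcc, plus Acc_ALWAYS as an out-of-band
   "unconditional" value.  Illustrative uses (a sketch only, with
   constructors declared further down this file):
   AMD64Instr_Set64(Acc_Z, r) materialises the Z flag as 0 or 1 in r, and
   AMD64Instr_CMov64(Acc_NLE, src, dst) moves src to dst only when
   "not less or equal" holds. */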

/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
      Aam_IR,   /* Immediate + Reg */
      Aam_IRRS  /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   AMD64AModeTag;

typedef
   struct {
      AMD64AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Aam;
   }
   AMD64AMode;

extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );
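
/* For example (a sketch only; offsets and register choices are
   illustrative): a field 0x48 bytes past the address in %rbp could be
   described as
      AMD64AMode_IR(0x48, hregAMD64_RBP())
   and a scaled-index access equivalent to 16(%rsi,%rdi,8) as
      AMD64AMode_IRRS(16, hregAMD64_RSI(), hregAMD64_RDI(), 3)
   since the final argument is the shift amount, so 3 means "index * 8". */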

/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Armi_Imm,
      Armi_Reg,
      Armi_Mem
   }
   AMD64RMITag;

typedef
   struct {
      AMD64RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Armi;
   }
   AMD64RMI;

extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );
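
/* Taken together these give an instruction a source operand in any of the
   three x86-style forms.  A sketch: as the source of an Aalu_ADD Alu64R,
   AMD64RMI_Imm(1), AMD64RMI_Reg(hregAMD64_RCX()) and
   AMD64RMI_Mem(AMD64AMode_IR(0, hregAMD64_RSP())) correspond roughly to
   "addq $1, dst", "addq %rcx, dst" and "addq 0(%rsp), dst" respectively. */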

/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Ari_Imm,
      Ari_Reg
   }
   AMD64RITag;

typedef
   struct {
      AMD64RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Ari;
   }
   AMD64RI;

extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );

/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Arm_Reg,
      Arm_Mem
   }
   AMD64RMTag;

typedef
   struct {
      AMD64RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Arm;
   }
   AMD64RM;

extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );

/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Aun_NEG,
      Aun_NOT
   }
   AMD64UnaryOp;

extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );


/* --------- */
typedef
   enum {
      Aalu_INVALID,
      Aalu_MOV,
      Aalu_CMP,
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
      Aalu_MUL
   }
   AMD64AluOp;

extern const HChar* showAMD64AluOp ( AMD64AluOp );


/* --------- */
typedef
   enum {
      Ash_INVALID,
      Ash_SHL, Ash_SHR, Ash_SAR
   }
   AMD64ShiftOp;

extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );


/* --------- */
typedef
   enum {
      Afp_INVALID,
      /* Binary */
      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      /* Unary */
      Afp_SQRT,
      Afp_SIN, Afp_COS, Afp_TAN,
      Afp_ROUND, Afp_2XM1
   }
   A87FpOp;

extern const HChar* showA87FpOp ( A87FpOp );

/* --------- */
typedef
   enum {
      Asse_INVALID,
      /* mov */
      Asse_MOV,
      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Floating point conversion */
      Asse_I2F,  // i32-signed to float conversion, aka cvtdq2ps in vec form
      Asse_F2I,  // float to i32-signed conversion, aka cvtps2dq in vec form
      /* Bitwise */
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_MUL16,
      Asse_MULHI16U,
      Asse_MULHI16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_MAX16S,
      Asse_MAX8U,
      Asse_MIN16S,
      Asse_MIN8U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64, Asse_SHL128,
      Asse_SHR16, Asse_SHR32, Asse_SHR64, Asse_SHR128,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ,
      // Only for SSSE3 capable hosts:
      Asse_PSHUFB,
      Asse_PMADDUBSW,
      // Only for F16C capable hosts:
      Asse_F32toF16, // F32 to F16 conversion, aka vcvtps2ph
      Asse_F16toF32  // F16 to F32 conversion, aka vcvtph2ps
   }
   AMD64SseOp;

extern const HChar* showAMD64SseOp ( AMD64SseOp );

/* --------- */
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseCStore,   /* SSE conditional store, 128 bit only, any alignment */
      Ain_SseCLoad,    /* SSE conditional load, 128 bit only, any alignment */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      Ain_SseShiftN,   /* SSE2 shift by immediate */
      Ain_SseMOVQ,     /* SSE2 moves of xmm[63:0] to/from GPR */
      //uu Ain_AvxLdSt, /* AVX load/store 256 bits,
      //uu                 no alignment constraints */
      //uu Ain_AvxReRg, /* AVX binary general reg-reg, Re, Rg */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;

/* Destinations are on the RIGHT (second operand) */

typedef
   struct {
      AMD64InstrTag tag;
      union {
         struct {
            ULong imm64;
            HReg  dst;
         } Imm64;
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu64R;
         struct {
            AMD64AluOp  op;
            AMD64RI*    src;
            AMD64AMode* dst;
         } Alu64M;
         struct {
            AMD64ShiftOp op;
            UInt         src;  /* shift amount, or 0 means %cl */
            HReg         dst;
         } Sh64;
         struct {
            UInt imm32;
            HReg dst;
         } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
            HReg         dst;
         } Unary64;
         /* 64-bit compute EA into a reg */
         struct {
            AMD64AMode* am;
            HReg        dst;
         } Lea64;
         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu32R;
         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */
         struct {
            Bool     syned;
            AMD64RM* src;
         } MulL;
         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         struct {
            Bool     syned;
            Int      sz; /* 4 or 8 only */
            AMD64RM* src;
         } Div;
         struct {
            AMD64RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Acc_ALWAYS). */
         struct {
            AMD64CondCode cond;
            Addr64        target;
            Int           regparms; /* 0 .. 6 */
            RetLoc        rloc;     /* where the return value will be */
         } Call;
         /* Update the guest RIP value, then exit requesting to chain
            to it.  May be conditional. */
         struct {
            Addr64        dstGA;    /* next guest address */
            AMD64AMode*   amRIP;    /* amode in guest state for RIP */
            AMD64CondCode cond;     /* can be Acc_ALWAYS */
            Bool          toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
            IRJumpKind    jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } CMov64;
         /* conditional load to int reg, 32 bit ZX or 64 bit only.
            cond may not be Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            UChar         szB; /* 4 or 8 only */
            AMD64AMode*   addr;
            HReg          dst;
         } CLoad;
         /* cond. store from int reg, 32 or 64 bit only.
            cond may not be Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            UChar         szB; /* 4 or 8 only */
            HReg          src;
            AMD64AMode*   addr;
         } CStore;
         /* reg-reg move, sx-ing/zx-ing top half */
         struct {
            Bool syned;
            HReg src;
            HReg dst;
         } MovxLQ;
         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         struct {
            UChar       szSmall; /* only 1, 2 or 4 */
            Bool        syned;
            AMD64AMode* src;
            HReg        dst;
         } LoadEX;
         /* 32/16/8 bit stores. */
         struct {
            UChar       sz; /* only 1, 2 or 4 */
            HReg        src;
            AMD64AMode* dst;
         } Store;
         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
         struct {
            AMD64CondCode cond;
            HReg          dst;
         } Set64;
         /* 64-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr64;
         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */
         struct {
         } MFence;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 1, 2, 4 or 8 */
         } ACAS;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 4 or 8 only */
         } DACAS;

         /* --- X87 --- */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         struct {
            Int nregs; /* 1 <= nregs <= 7 */
         } A87Free;

         /* Push a 32- or 64-bit FP value from memory onto the stack,
            or move a value from the stack to memory and remove it
            from the stack. */
         struct {
            AMD64AMode* addr;
            Bool        isPush;
            UChar       szB; /* 4 or 8 */
         } A87PushPop;

         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */
         struct {
            A87FpOp op;
         } A87FpOp;

         /* Load the FPU control word. */
         struct {
            AMD64AMode* addr;
         } A87LdCW;

         /* Store the FPU status word (fstsw m16) */
         struct {
            AMD64AMode* addr;
         } A87StSW;

         /* --- SSE --- */

         /* Load 32 bits into %mxcsr. */
         struct {
            AMD64AMode* addr;
         } LdMXCSR;
         /* ucomisd/ucomiss, then get %rflags into int register */
         struct {
            UChar sz;   /* 4 or 8 only */
            HReg  srcL; /* xmm */
            HReg  srcR; /* xmm */
            HReg  dst;  /* int */
         } SseUComIS;
         /* scalar 32/64 int to 32/64 float conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* i class */
            HReg  dst; /* v class */
         } SseSI2SF;
         /* scalar 32/64 float to 32/64 int conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* v class */
            HReg  dst; /* i class */
         } SseSF2SI;
         /* scalar float32 to/from float64 */
         struct {
            Bool from64; /* True: 64->32; False: 32->64 */
            HReg src;
            HReg dst;
         } SseSDSS;
         struct {
            Bool        isLoad;
            UChar       sz; /* 4, 8 or 16 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdSt;
         struct {
            AMD64CondCode cond; /* may not be Acc_ALWAYS */
            HReg          src;
            AMD64AMode*   addr;
         } SseCStore;
         struct {
            AMD64CondCode cond; /* may not be Acc_ALWAYS */
            AMD64AMode*   addr;
            HReg          dst;
         } SseCLoad;
         struct {
            Int         sz; /* 4 or 8 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdzLO;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32Fx4;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64Fx2;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } SseCMov;
         struct {
            Int  order; /* 0 <= order <= 0xFF */
            HReg src;
            HReg dst;
         } SseShuf;
         struct {
            AMD64SseOp op;
            UInt       shiftBits;
            HReg       dst;
         } SseShiftN;
         struct {
            HReg gpr;
            HReg xmm;
            Bool toXMM; // when moving to xmm, xmm[127:64] is zeroed out
         } SseMOVQ;
         //uu struct {
         //uu    Bool        isLoad;
         //uu    HReg        reg;
         //uu    AMD64AMode* addr;
         //uu } AvxLdSt;
         //uu struct {
         //uu    AMD64SseOp op;
         //uu    HReg       src;
         //uu    HReg       dst;
         //uu } AvxReRg;
         struct {
            AMD64AMode* amCounter;
            AMD64AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Ain;
   }
   AMD64Instr;

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                           AMD64AMode* addr, HReg dst );
extern AMD64Instr* AMD64Instr_CStore     ( AMD64CondCode cond, UChar szB,
                                           HReg src, AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCStore  ( AMD64CondCode, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCLoad   ( AMD64CondCode, AMD64AMode*, HReg );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShiftN  ( AMD64SseOp, UInt shiftBits, HReg dst );
extern AMD64Instr* AMD64Instr_SseMOVQ    ( HReg gpr, HReg xmm, Bool toXMM );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );
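
/* Usage sketch (illustrative only -- in practice the instruction selector
   in host_amd64_isel.c builds these, using its own addInstr/ISelEnv
   helpers, which are not part of this header).  Loading a 64-bit constant
   into a vreg and then adding %rbx to it could be expressed as:

      HReg tmp = ...a virtual register of class HRcInt64...;
      addInstr(env, AMD64Instr_Imm64(0x1122334455667788ULL, tmp));
      addInstr(env, AMD64Instr_Alu64R(Aalu_ADD,
                                      AMD64RMI_Reg(hregAMD64_RBX()), tmp));

   Note that, per the comment above the AMD64Instr definition, the
   destination is the second (right-hand) operand. */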

extern void ppAMD64Instr ( const AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Int  emit_AMD64Instr        ( /*MB_MOD*/Bool* is_profInc,
                                     UChar* buf, Int nbuf,
                                     const AMD64Instr* i,
                                     Bool mode64,
                                     VexEndness endness_host,
                                     const void* disp_cp_chain_me_to_slowEP,
                                     const void* disp_cp_chain_me_to_fastEP,
                                     const void* disp_cp_xindir,
                                     const void* disp_cp_xassisted );

extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern AMD64Instr* genMove_AMD64 ( HReg from, HReg to, Bool );
extern AMD64Instr* directReload_AMD64 ( AMD64Instr* i,
                                        HReg vreg, Short spill_off );

extern const RRegUniverse* getRRegUniverse_AMD64 ( void );

extern HInstrArray* iselSB_AMD64 ( const IRSB*,
                                   VexArch,
                                   const VexArchInfo*,
                                   const VexAbiInfo*,
                                   Int offs_Host_EvC_Counter,
                                   Int offs_Host_EvC_FailAddr,
                                   Bool chainingAllowed,
                                   Bool addProfInc,
                                   Addr max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 ( void );

/* Perform chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
                                          void* place_to_chain,
                                          const void* disp_cp_chain_me_EXPECTED,
                                          const void* place_to_jump_to );

extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
                                            void* place_to_unchain,
                                            const void* place_to_jump_to_EXPECTED,
                                            const void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
                                          void* place_to_patch,
                                          const ULong* location_of_counter );


#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/