/*---------------------------------------------------------------*/
/*--- begin                                 host_arm64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2013-2017 OpenWorks

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
29 #ifndef __VEX_HOST_ARM64_DEFS_H
30 #define __VEX_HOST_ARM64_DEFS_H
32 #include "libvex_basictypes.h"
33 #include "libvex.h" // VexArch
34 #include "host_generic_regs.h" // HReg
37 /* --------- Registers. --------- */
39 #define ST_IN static inline
40 ST_IN HReg
hregARM64_X22 ( void ) { return mkHReg(False
, HRcInt64
, 22, 0); }
41 ST_IN HReg
hregARM64_X23 ( void ) { return mkHReg(False
, HRcInt64
, 23, 1); }
42 ST_IN HReg
hregARM64_X24 ( void ) { return mkHReg(False
, HRcInt64
, 24, 2); }
43 ST_IN HReg
hregARM64_X25 ( void ) { return mkHReg(False
, HRcInt64
, 25, 3); }
44 ST_IN HReg
hregARM64_X26 ( void ) { return mkHReg(False
, HRcInt64
, 26, 4); }
45 ST_IN HReg
hregARM64_X27 ( void ) { return mkHReg(False
, HRcInt64
, 27, 5); }
46 ST_IN HReg
hregARM64_X28 ( void ) { return mkHReg(False
, HRcInt64
, 28, 6); }
48 ST_IN HReg
hregARM64_X0 ( void ) { return mkHReg(False
, HRcInt64
, 0, 7); }
49 ST_IN HReg
hregARM64_X1 ( void ) { return mkHReg(False
, HRcInt64
, 1, 8); }
50 ST_IN HReg
hregARM64_X2 ( void ) { return mkHReg(False
, HRcInt64
, 2, 9); }
51 ST_IN HReg
hregARM64_X3 ( void ) { return mkHReg(False
, HRcInt64
, 3, 10); }
52 ST_IN HReg
hregARM64_X4 ( void ) { return mkHReg(False
, HRcInt64
, 4, 11); }
53 ST_IN HReg
hregARM64_X5 ( void ) { return mkHReg(False
, HRcInt64
, 5, 12); }
54 ST_IN HReg
hregARM64_X6 ( void ) { return mkHReg(False
, HRcInt64
, 6, 13); }
55 ST_IN HReg
hregARM64_X7 ( void ) { return mkHReg(False
, HRcInt64
, 7, 14); }
57 ST_IN HReg
hregARM64_Q16 ( void ) { return mkHReg(False
, HRcVec128
, 16, 15); }
58 ST_IN HReg
hregARM64_Q17 ( void ) { return mkHReg(False
, HRcVec128
, 17, 16); }
59 ST_IN HReg
hregARM64_Q18 ( void ) { return mkHReg(False
, HRcVec128
, 18, 17); }
60 ST_IN HReg
hregARM64_Q19 ( void ) { return mkHReg(False
, HRcVec128
, 19, 18); }
61 ST_IN HReg
hregARM64_Q20 ( void ) { return mkHReg(False
, HRcVec128
, 20, 19); }
63 ST_IN HReg
hregARM64_D8 ( void ) { return mkHReg(False
, HRcFlt64
, 8, 20); }
64 ST_IN HReg
hregARM64_D9 ( void ) { return mkHReg(False
, HRcFlt64
, 9, 21); }
65 ST_IN HReg
hregARM64_D10 ( void ) { return mkHReg(False
, HRcFlt64
, 10, 22); }
66 ST_IN HReg
hregARM64_D11 ( void ) { return mkHReg(False
, HRcFlt64
, 11, 23); }
67 ST_IN HReg
hregARM64_D12 ( void ) { return mkHReg(False
, HRcFlt64
, 12, 24); }
68 ST_IN HReg
hregARM64_D13 ( void ) { return mkHReg(False
, HRcFlt64
, 13, 25); }
70 ST_IN HReg
hregARM64_X8 ( void ) { return mkHReg(False
, HRcInt64
, 8, 26); }
71 ST_IN HReg
hregARM64_X9 ( void ) { return mkHReg(False
, HRcInt64
, 9, 27); }
72 ST_IN HReg
hregARM64_X21 ( void ) { return mkHReg(False
, HRcInt64
, 21, 28); }
75 extern UInt
ppHRegARM64 ( HReg
);
77 /* Number of registers used arg passing in function calls */
78 #define ARM64_N_ARGREGS 8 /* x0 .. x7 */
81 /* --------- Condition codes. --------- */
85 ARM64cc_EQ
= 0, /* equal : Z=1 */
86 ARM64cc_NE
= 1, /* not equal : Z=0 */
88 ARM64cc_CS
= 2, /* >=u (higher or same) : C=1 */
89 ARM64cc_CC
= 3, /* <u (lower) : C=0 */
91 ARM64cc_MI
= 4, /* minus (negative) : N=1 */
92 ARM64cc_PL
= 5, /* plus (zero or +ve) : N=0 */
94 ARM64cc_VS
= 6, /* overflow : V=1 */
95 ARM64cc_VC
= 7, /* no overflow : V=0 */
97 ARM64cc_HI
= 8, /* >u (higher) : C=1 && Z=0 */
98 ARM64cc_LS
= 9, /* <=u (lower or same) : !(C=1 && Z=0) */
100 ARM64cc_GE
= 10, /* >=s (signed greater or equal) : N=V */
101 ARM64cc_LT
= 11, /* <s (signed less than) : !(N=V) */
103 ARM64cc_GT
= 12, /* >s (signed greater) : Z=0 && N=V */
104 ARM64cc_LE
= 13, /* <=s (signed less or equal) : !(Z=0 && N=V) */
106 ARM64cc_AL
= 14, /* always (unconditional) */
107 ARM64cc_NV
= 15 /* in 64-bit mode also means "always" */
112 /* --------- Memory address expressions (amodes). --------- */
116 ARM64am_RI9
=10, /* reg + simm9 */
117 ARM64am_RI12
, /* reg + uimm12 * szB (iow, scaled by access size) */
118 ARM64am_RR
/* reg1 + reg2 */
128 Int simm9
; /* -256 .. +255 */
132 UInt uimm12
; /* 0 .. 4095 */
133 UChar szB
; /* 1, 2, 4, 8 (16 ?) */
143 extern ARM64AMode
* ARM64AMode_RI9 ( HReg reg
, Int simm9
);
144 extern ARM64AMode
* ARM64AMode_RI12 ( HReg reg
, Int uimm12
, UChar szB
);
145 extern ARM64AMode
* ARM64AMode_RR ( HReg base
, HReg index
);
148 /* --------- Reg or uimm12 or (uimm12 << 12) operands --------- */
152 ARM64riA_I12
=20, /* uimm12 << 0 or 12 only */
162 UShort imm12
; /* 0 .. 4095 */
163 UChar shift
; /* 0 or 12 only */
172 extern ARM64RIA
* ARM64RIA_I12 ( UShort imm12
, UChar shift
);
173 extern ARM64RIA
* ARM64RIA_R ( HReg
);
176 /* --------- Reg or "bitfield" (logic immediate) operands --------- */
180 ARM64riL_I13
=6, /* wierd-o bitfield immediate, 13 bits in total */
190 UChar bitN
; /* 0 .. 1 */
191 UChar immR
; /* 0 .. 63 */
192 UChar immS
; /* 0 .. 63 */
201 extern ARM64RIL
* ARM64RIL_I13 ( UChar bitN
, UChar immR
, UChar immS
);
202 extern ARM64RIL
* ARM64RIL_R ( HReg
);
205 /* --------------- Reg or uimm6 operands --------------- */
209 ARM64ri6_I6
=30, /* uimm6, 1 .. 63 only */
219 UInt imm6
; /* 1 .. 63 */
228 extern ARM64RI6
* ARM64RI6_I6 ( UInt imm6
);
229 extern ARM64RI6
* ARM64RI6_R ( HReg
);
232 /* --------------------- Instructions --------------------- */
260 ARM64mul_PLAIN
=70, /* lo64(64 * 64) */
261 ARM64mul_ZX
, /* hi64(64 *u 64) */
262 ARM64mul_SX
/* hi64(64 *s 64) */
267 /* These characterise an integer-FP conversion, but don't imply any
268 particular direction. */
270 ARM64cvt_F32_I32S
=80,
305 ARM64vecb_ADD64x2
=120, ARM64vecb_ADD32x4
,
306 ARM64vecb_ADD16x8
, ARM64vecb_ADD8x16
,
307 ARM64vecb_SUB64x2
, ARM64vecb_SUB32x4
,
308 ARM64vecb_SUB16x8
, ARM64vecb_SUB8x16
,
310 ARM64vecb_MUL16x8
, ARM64vecb_MUL8x16
,
311 ARM64vecb_FADD64x2
, ARM64vecb_FADD32x4
,
312 ARM64vecb_FSUB64x2
, ARM64vecb_FSUB32x4
,
313 ARM64vecb_FMUL64x2
, ARM64vecb_FMUL32x4
,
314 ARM64vecb_FDIV64x2
, ARM64vecb_FDIV32x4
,
315 ARM64vecb_FMAX64x2
, ARM64vecb_FMAX32x4
,
316 ARM64vecb_FMIN64x2
, ARM64vecb_FMIN32x4
,
318 ARM64vecb_UMAX16x8
, ARM64vecb_UMAX8x16
,
320 ARM64vecb_UMIN16x8
, ARM64vecb_UMIN8x16
,
322 ARM64vecb_SMAX16x8
, ARM64vecb_SMAX8x16
,
324 ARM64vecb_SMIN16x8
, ARM64vecb_SMIN8x16
,
328 ARM64vecb_CMEQ64x2
, ARM64vecb_CMEQ32x4
,
329 ARM64vecb_CMEQ16x8
, ARM64vecb_CMEQ8x16
,
330 ARM64vecb_CMHI64x2
, ARM64vecb_CMHI32x4
, /* >u */
331 ARM64vecb_CMHI16x8
, ARM64vecb_CMHI8x16
,
332 ARM64vecb_CMGT64x2
, ARM64vecb_CMGT32x4
, /* >s */
333 ARM64vecb_CMGT16x8
, ARM64vecb_CMGT8x16
,
334 ARM64vecb_FCMEQ64x2
, ARM64vecb_FCMEQ32x4
,
335 ARM64vecb_FCMGE64x2
, ARM64vecb_FCMGE32x4
,
336 ARM64vecb_FCMGT64x2
, ARM64vecb_FCMGT32x4
,
338 ARM64vecb_UZP164x2
, ARM64vecb_UZP132x4
,
339 ARM64vecb_UZP116x8
, ARM64vecb_UZP18x16
,
340 ARM64vecb_UZP264x2
, ARM64vecb_UZP232x4
,
341 ARM64vecb_UZP216x8
, ARM64vecb_UZP28x16
,
342 ARM64vecb_ZIP132x4
, ARM64vecb_ZIP116x8
,
343 ARM64vecb_ZIP18x16
, ARM64vecb_ZIP232x4
,
344 ARM64vecb_ZIP216x8
, ARM64vecb_ZIP28x16
,
348 ARM64vecb_UMULL4SHH
, ARM64vecb_UMULL8HBB
,
350 ARM64vecb_SMULL4SHH
, ARM64vecb_SMULL8HBB
,
351 ARM64vecb_SQADD64x2
, ARM64vecb_SQADD32x4
,
352 ARM64vecb_SQADD16x8
, ARM64vecb_SQADD8x16
,
353 ARM64vecb_UQADD64x2
, ARM64vecb_UQADD32x4
,
354 ARM64vecb_UQADD16x8
, ARM64vecb_UQADD8x16
,
355 ARM64vecb_SQSUB64x2
, ARM64vecb_SQSUB32x4
,
356 ARM64vecb_SQSUB16x8
, ARM64vecb_SQSUB8x16
,
357 ARM64vecb_UQSUB64x2
, ARM64vecb_UQSUB32x4
,
358 ARM64vecb_UQSUB16x8
, ARM64vecb_UQSUB8x16
,
359 ARM64vecb_SQDMULL2DSS
,
360 ARM64vecb_SQDMULL4SHH
,
361 ARM64vecb_SQDMULH32x4
,
362 ARM64vecb_SQDMULH16x8
,
363 ARM64vecb_SQRDMULH32x4
,
364 ARM64vecb_SQRDMULH16x8
,
365 ARM64vecb_SQSHL64x2
, ARM64vecb_SQSHL32x4
,
366 ARM64vecb_SQSHL16x8
, ARM64vecb_SQSHL8x16
,
367 ARM64vecb_UQSHL64x2
, ARM64vecb_UQSHL32x4
,
368 ARM64vecb_UQSHL16x8
, ARM64vecb_UQSHL8x16
,
369 ARM64vecb_SQRSHL64x2
, ARM64vecb_SQRSHL32x4
,
370 ARM64vecb_SQRSHL16x8
, ARM64vecb_SQRSHL8x16
,
371 ARM64vecb_UQRSHL64x2
, ARM64vecb_UQRSHL32x4
,
372 ARM64vecb_UQRSHL16x8
, ARM64vecb_UQRSHL8x16
,
373 ARM64vecb_SSHL64x2
, ARM64vecb_SSHL32x4
,
374 ARM64vecb_SSHL16x8
, ARM64vecb_SSHL8x16
,
375 ARM64vecb_USHL64x2
, ARM64vecb_USHL32x4
,
376 ARM64vecb_USHL16x8
, ARM64vecb_USHL8x16
,
377 ARM64vecb_SRSHL64x2
, ARM64vecb_SRSHL32x4
,
378 ARM64vecb_SRSHL16x8
, ARM64vecb_SRSHL8x16
,
379 ARM64vecb_URSHL64x2
, ARM64vecb_URSHL32x4
,
380 ARM64vecb_URSHL16x8
, ARM64vecb_URSHL8x16
,
381 ARM64vecb_FRECPS64x2
, ARM64vecb_FRECPS32x4
,
382 ARM64vecb_FRSQRTS64x2
, ARM64vecb_FRSQRTS32x4
,
389 ARM64vecmo_SUQADD64x2
=300, ARM64vecmo_SUQADD32x4
,
390 ARM64vecmo_SUQADD16x8
, ARM64vecmo_SUQADD8x16
,
391 ARM64vecmo_USQADD64x2
, ARM64vecmo_USQADD32x4
,
392 ARM64vecmo_USQADD16x8
, ARM64vecmo_USQADD8x16
,
399 ARM64vecu_FNEG64x2
=350, ARM64vecu_FNEG32x4
,
400 ARM64vecu_FABS64x2
, ARM64vecu_FABS32x4
,
402 ARM64vecu_ABS64x2
, ARM64vecu_ABS32x4
,
403 ARM64vecu_ABS16x8
, ARM64vecu_ABS8x16
,
404 ARM64vecu_CLS32x4
, ARM64vecu_CLS16x8
, ARM64vecu_CLS8x16
,
405 ARM64vecu_CLZ32x4
, ARM64vecu_CLZ16x8
, ARM64vecu_CLZ8x16
,
409 ARM64vecu_REV3216B
, ARM64vecu_REV328H
,
410 ARM64vecu_REV6416B
, ARM64vecu_REV648H
, ARM64vecu_REV644S
,
411 ARM64vecu_URECPE32x4
,
412 ARM64vecu_URSQRTE32x4
,
413 ARM64vecu_FRECPE64x2
, ARM64vecu_FRECPE32x4
,
414 ARM64vecu_FRSQRTE64x2
, ARM64vecu_FRSQRTE32x4
,
415 ARM64vecu_FSQRT64x2
, ARM64vecu_FSQRT32x4
,
422 ARM64vecshi_USHR64x2
=400, ARM64vecshi_USHR32x4
,
423 ARM64vecshi_USHR16x8
, ARM64vecshi_USHR8x16
,
424 ARM64vecshi_SSHR64x2
, ARM64vecshi_SSHR32x4
,
425 ARM64vecshi_SSHR16x8
, ARM64vecshi_SSHR8x16
,
426 ARM64vecshi_SHL64x2
, ARM64vecshi_SHL32x4
,
427 ARM64vecshi_SHL16x8
, ARM64vecshi_SHL8x16
,
428 /* These narrowing shifts zero out the top half of the destination
430 ARM64vecshi_SQSHRN2SD
, ARM64vecshi_SQSHRN4HS
, ARM64vecshi_SQSHRN8BH
,
431 ARM64vecshi_UQSHRN2SD
, ARM64vecshi_UQSHRN4HS
, ARM64vecshi_UQSHRN8BH
,
432 ARM64vecshi_SQSHRUN2SD
, ARM64vecshi_SQSHRUN4HS
, ARM64vecshi_SQSHRUN8BH
,
433 ARM64vecshi_SQRSHRN2SD
, ARM64vecshi_SQRSHRN4HS
, ARM64vecshi_SQRSHRN8BH
,
434 ARM64vecshi_UQRSHRN2SD
, ARM64vecshi_UQRSHRN4HS
, ARM64vecshi_UQRSHRN8BH
,
435 ARM64vecshi_SQRSHRUN2SD
, ARM64vecshi_SQRSHRUN4HS
, ARM64vecshi_SQRSHRUN8BH
,
436 /* Saturating left shifts, of various flavours. */
437 ARM64vecshi_UQSHL64x2
, ARM64vecshi_UQSHL32x4
,
438 ARM64vecshi_UQSHL16x8
, ARM64vecshi_UQSHL8x16
,
439 ARM64vecshi_SQSHL64x2
, ARM64vecshi_SQSHL32x4
,
440 ARM64vecshi_SQSHL16x8
, ARM64vecshi_SQSHL8x16
,
441 ARM64vecshi_SQSHLU64x2
, ARM64vecshi_SQSHLU32x4
,
442 ARM64vecshi_SQSHLU16x8
, ARM64vecshi_SQSHLU8x16
,
467 ARM64in_MovI
, /* int reg-reg move */
470 ARM64in_LdSt32
, /* w/ ZX loads */
471 ARM64in_LdSt16
, /* w/ ZX loads */
472 ARM64in_LdSt8
, /* w/ ZX loads */
473 ARM64in_XDirect
, /* direct transfer to GA */
474 ARM64in_XIndir
, /* indirect transfer to GA */
475 ARM64in_XAssisted
, /* assisted transfer to GA */
478 ARM64in_AddToSP
, /* move SP by small, signed constant */
479 ARM64in_FromSP
, /* move SP to integer register */
487 /* ARM64in_V*: scalar ops involving vector registers */
488 ARM64in_VLdStH
, /* ld/st to/from low 16 bits of vec reg, imm offset */
489 ARM64in_VLdStS
, /* ld/st to/from low 32 bits of vec reg, imm offset */
490 ARM64in_VLdStD
, /* ld/st to/from low 64 bits of vec reg, imm offset */
491 ARM64in_VLdStQ
, /* ld/st to/from all 128 bits of vec reg, no offset */
494 ARM64in_VCvtSD
, /* scalar 32 bit FP <--> 64 bit FP */
495 ARM64in_VCvtHS
, /* scalar 16 bit FP <--> 32 bit FP */
496 ARM64in_VCvtHD
, /* scalar 16 bit FP <--> 64 bit FP */
506 /* ARM64in_V*V: vector ops on vector registers */
514 ARM64in_VDfromX
, /* Move an Xreg to a Dreg */
515 ARM64in_VQfromX
, /* Move an Xreg to a Qreg lo64, and zero hi64 */
516 ARM64in_VQfromXX
, /* Move 2 Xregs to a Qreg */
517 ARM64in_VXfromQ
, /* Move half a Qreg to an Xreg */
518 ARM64in_VXfromDorS
, /* Move Dreg or Sreg(ZX) to an Xreg */
519 ARM64in_VMov
, /* vector reg-reg move, 16, 8 or 4 bytes */
521 ARM64in_EvCheck
, /* Event check */
522 ARM64in_ProfInc
/* 64-bit profile counter increment */
526 /* Destinations are on the LEFT (first operand) */
532 /* --- INTEGER INSTRUCTIONS --- */
533 /* 64 bit ADD/SUB reg, reg or uimm12<<{0,12} */
540 /* 64 or 32 bit CMP reg, reg or aimm (SUB and set flags) */
546 /* 64 bit AND/OR/XOR reg, reg or bitfield-immediate */
553 /* 64 bit TST reg, reg or bimm (AND and set flags) */
558 /* 64 bit SHL/SHR/SAR, 2nd arg is reg or imm */
565 /* NOT/NEG/CLZ, 64 bit only */
571 /* CSET -- Convert a condition code to a 64-bit value (0 or 1). */
576 /* MOV dst, src -- reg-reg move for integer registers */
581 /* Pseudo-insn; make a 64-bit immediate */
586 /* 64-bit load or store */
592 /* zx-32-to-64-bit load, or 32-bit store */
598 /* zx-16-to-64-bit load, or 16-bit store */
604 /* zx-8-to-64-bit load, or 8-bit store */
610 /* Update the guest PC value, then exit requesting to chain
611 to it. May be conditional. Urr, use of Addr64 implicitly
612 assumes that wordsize(guest) == wordsize(host). */
614 Addr64 dstGA
; /* next guest address */
615 ARM64AMode
* amPC
; /* amode in guest state for PC */
616 ARM64CondCode cond
; /* can be ARM64cc_AL */
617 Bool toFastEP
; /* chain to the slow or fast point? */
619 /* Boring transfer to a guest address not known at JIT time.
620 Not chainable. May be conditional. */
624 ARM64CondCode cond
; /* can be ARM64cc_AL */
626 /* Assisted transfer to a guest address, most general case.
627 Not chainable. May be conditional. */
631 ARM64CondCode cond
; /* can be ARM64cc_AL */
634 /* CSEL: dst = if cond then argL else argR. cond may be anything. */
641 /* Pseudo-insn. Call target (an absolute address), on given
642 condition (which could be ARM64cc_AL). */
644 RetLoc rloc
; /* where the return value will be */
647 Int nArgRegs
; /* # regs carrying args: 0 .. 8 */
649 /* move SP by small, signed constant */
651 Int simm
; /* needs to be 0 % 16 and in the range -4095
654 /* move SP to integer register */
658 /* Integer multiply, with 3 variants:
659 (PLAIN) lo64(64 * 64)
669 /* LDXR{,H,B} x2, [x4] */
671 Int szB
; /* 1, 2, 4 or 8 */
673 /* STXR{,H,B} w0, x2, [x4] */
675 Int szB
; /* 1, 2, 4 or 8 */
677 /* x1 = CAS(x3(addr), x5(expected) -> x7(new)),
678 where x1[8*szB-1 : 0] == x5[8*szB-1 : 0] indicates success,
679 x1[8*szB-1 : 0] != x5[8*szB-1 : 0] indicates failure.
680 Uses x8 as scratch (but that's not allocatable).
681 Hence: RD x3, x5, x7; WR x1
685 (szB=4) and x8, x5, #0xFFFFFFFF
686 (szB=2) and x8, x5, #0xFFFF
687 (szB=1) and x8, x5, #0xFF
688 -- x8 is correctly zero-extended expected value
690 -- x1 is correctly zero-extended actual value
693 -- if branch taken, failure; x1[[8*szB-1 : 0] holds old value
696 -- if store successful, x1==0, so the eor is "x1 := x5"
697 -- if store failed, branch back and try again.
702 Int szB
; /* 1, 2, 4 or 8 */
705 Int szB
; /* 4 or 8 */
707 /* Mem fence. An insn which fences all loads and stores as
708 much as possible before continuing. On ARM64 we emit the
709 sequence "dsb sy ; dmb sy ; isb sy", which is probably
710 total nuclear overkill, but better safe than sorry. */
713 /* A CLREX instruction. */
716 /* --- INSTRUCTIONS INVOLVING VECTOR REGISTERS --- */
717 /* ld/st to/from low 16 bits of vec reg, imm offset */
722 UInt uimm12
; /* 0 .. 8190 inclusive, 0 % 2 */
724 /* ld/st to/from low 32 bits of vec reg, imm offset */
729 UInt uimm12
; /* 0 .. 16380 inclusive, 0 % 4 */
731 /* ld/st to/from low 64 bits of vec reg, imm offset */
736 UInt uimm12
; /* 0 .. 32760 inclusive, 0 % 8 */
738 /* ld/st to/from all 128 bits of vec reg, no offset */
744 /* Scalar conversion of int to float. */
747 HReg rD
; // dst, a D or S register
748 HReg rS
; // src, a W or X register
750 /* Scalar conversion of float to int, w/ specified RM. */
753 HReg rD
; // dst, a W or X register
754 HReg rS
; // src, a D or S register
755 UChar armRM
; // ARM encoded RM:
756 // 00=nearest, 01=+inf, 10=-inf, 11=zero
758 /* Convert between 32-bit and 64-bit FP values (both ways). (FCVT) */
760 Bool sToD
; /* True: F32->F64. False: F64->F32 */
764 /* Convert between 16-bit and 32-bit FP values (both ways). (FCVT) */
766 Bool hToS
; /* True: F16->F32. False: F32->F16 */
770 /* Convert between 16-bit and 64-bit FP values (both ways). (FCVT) */
772 Bool hToD
; /* True: F16->F64. False: F64->F16 */
776 /* 64-bit FP unary */
782 /* 32-bit FP unary */
788 /* 64-bit FP binary arithmetic */
795 /* 32-bit FP binary arithmetic */
802 /* 64-bit FP compare */
807 /* 32-bit FP compare */
812 /* 32- or 64-bit FP conditional select */
821 /* Move a 32-bit value to/from the FPCR */
826 /* Move a 32-bit value to/from the FPSR */
831 /* binary vector operation on vector registers */
838 /* binary vector operation on vector registers.
839 Dst reg is also a src. */
845 /* unary vector operation on vector registers */
851 /* vector narrowing, Q -> Q. Result goes in the bottom half
852 of dst and the top half is zeroed out. Iow one of the
856 UInt dszBlg2
; // 0: 16to8_x8 1: 32to16_x4 2: 64to32_x2
860 /* Vector shift by immediate. For left shifts, |amt| must be
861 >= 0 and < implied lane size of |op|. For right shifts,
862 |amt| must be > 0 and <= implied lane size of |op|. Shifts
863 beyond these ranges are not allowed. */
865 ARM64VecShiftImmOp op
;
878 UShort imm
; /* Same 1-bit-per-byte encoding as IR */
896 UInt laneNo
; /* either 0 or 1 */
903 /* MOV dst, src -- reg-reg move for vector registers */
905 UInt szB
; // 16=mov qD,qS; 8=mov dD,dS; 4=mov sD,sS
910 ARM64AMode
* amCounter
;
911 ARM64AMode
* amFailAddr
;
914 /* No fields. The address of the counter to inc is
915 installed later, post-translation, by patching it in,
916 as it is not known at translation time. */
923 extern ARM64Instr
* ARM64Instr_Arith ( HReg
, HReg
, ARM64RIA
*, Bool isAdd
);
924 extern ARM64Instr
* ARM64Instr_Cmp ( HReg
, ARM64RIA
*, Bool is64
);
925 extern ARM64Instr
* ARM64Instr_Logic ( HReg
, HReg
, ARM64RIL
*, ARM64LogicOp
);
926 extern ARM64Instr
* ARM64Instr_Test ( HReg
, ARM64RIL
* );
927 extern ARM64Instr
* ARM64Instr_Shift ( HReg
, HReg
, ARM64RI6
*, ARM64ShiftOp
);
928 extern ARM64Instr
* ARM64Instr_Unary ( HReg
, HReg
, ARM64UnaryOp
);
929 extern ARM64Instr
* ARM64Instr_Set64 ( HReg
, ARM64CondCode
);
930 extern ARM64Instr
* ARM64Instr_MovI ( HReg
, HReg
);
931 extern ARM64Instr
* ARM64Instr_Imm64 ( HReg
, ULong
);
932 extern ARM64Instr
* ARM64Instr_LdSt64 ( Bool isLoad
, HReg
, ARM64AMode
* );
933 extern ARM64Instr
* ARM64Instr_LdSt32 ( Bool isLoad
, HReg
, ARM64AMode
* );
934 extern ARM64Instr
* ARM64Instr_LdSt16 ( Bool isLoad
, HReg
, ARM64AMode
* );
935 extern ARM64Instr
* ARM64Instr_LdSt8 ( Bool isLoad
, HReg
, ARM64AMode
* );
936 extern ARM64Instr
* ARM64Instr_XDirect ( Addr64 dstGA
, ARM64AMode
* amPC
,
937 ARM64CondCode cond
, Bool toFastEP
);
938 extern ARM64Instr
* ARM64Instr_XIndir ( HReg dstGA
, ARM64AMode
* amPC
,
939 ARM64CondCode cond
);
940 extern ARM64Instr
* ARM64Instr_XAssisted ( HReg dstGA
, ARM64AMode
* amPC
,
941 ARM64CondCode cond
, IRJumpKind jk
);
942 extern ARM64Instr
* ARM64Instr_CSel ( HReg dst
, HReg argL
, HReg argR
,
943 ARM64CondCode cond
);
944 extern ARM64Instr
* ARM64Instr_Call ( ARM64CondCode
, Addr64
, Int nArgRegs
,
946 extern ARM64Instr
* ARM64Instr_AddToSP ( Int simm
);
947 extern ARM64Instr
* ARM64Instr_FromSP ( HReg dst
);
948 extern ARM64Instr
* ARM64Instr_Mul ( HReg dst
, HReg argL
, HReg argR
,
950 extern ARM64Instr
* ARM64Instr_LdrEX ( Int szB
);
951 extern ARM64Instr
* ARM64Instr_StrEX ( Int szB
);
952 extern ARM64Instr
* ARM64Instr_CAS ( Int szB
);
953 extern ARM64Instr
* ARM64Instr_CASP ( Int szB
);
954 extern ARM64Instr
* ARM64Instr_MFence ( void );
955 extern ARM64Instr
* ARM64Instr_ClrEX ( void );
956 extern ARM64Instr
* ARM64Instr_VLdStH ( Bool isLoad
, HReg sD
, HReg rN
,
957 UInt uimm12
/* 0 .. 8190, 0 % 2 */ );
958 extern ARM64Instr
* ARM64Instr_VLdStS ( Bool isLoad
, HReg sD
, HReg rN
,
959 UInt uimm12
/* 0 .. 16380, 0 % 4 */ );
960 extern ARM64Instr
* ARM64Instr_VLdStD ( Bool isLoad
, HReg dD
, HReg rN
,
961 UInt uimm12
/* 0 .. 32760, 0 % 8 */ );
962 extern ARM64Instr
* ARM64Instr_VLdStQ ( Bool isLoad
, HReg rQ
, HReg rN
);
963 extern ARM64Instr
* ARM64Instr_VCvtI2F ( ARM64CvtOp how
, HReg rD
, HReg rS
);
964 extern ARM64Instr
* ARM64Instr_VCvtF2I ( ARM64CvtOp how
, HReg rD
, HReg rS
,
966 extern ARM64Instr
* ARM64Instr_VCvtSD ( Bool sToD
, HReg dst
, HReg src
);
967 extern ARM64Instr
* ARM64Instr_VCvtHS ( Bool hToS
, HReg dst
, HReg src
);
968 extern ARM64Instr
* ARM64Instr_VCvtHD ( Bool hToD
, HReg dst
, HReg src
);
969 extern ARM64Instr
* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op
, HReg dst
, HReg src
);
970 extern ARM64Instr
* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op
, HReg dst
, HReg src
);
971 extern ARM64Instr
* ARM64Instr_VBinD ( ARM64FpBinOp op
, HReg
, HReg
, HReg
);
972 extern ARM64Instr
* ARM64Instr_VBinS ( ARM64FpBinOp op
, HReg
, HReg
, HReg
);
973 extern ARM64Instr
* ARM64Instr_VCmpD ( HReg argL
, HReg argR
);
974 extern ARM64Instr
* ARM64Instr_VCmpS ( HReg argL
, HReg argR
);
975 extern ARM64Instr
* ARM64Instr_VFCSel ( HReg dst
, HReg argL
, HReg argR
,
976 ARM64CondCode cond
, Bool isD
);
977 extern ARM64Instr
* ARM64Instr_FPCR ( Bool toFPCR
, HReg iReg
);
978 extern ARM64Instr
* ARM64Instr_FPSR ( Bool toFPSR
, HReg iReg
);
979 extern ARM64Instr
* ARM64Instr_VBinV ( ARM64VecBinOp op
, HReg
, HReg
, HReg
);
980 extern ARM64Instr
* ARM64Instr_VModifyV ( ARM64VecModifyOp
, HReg
, HReg
);
981 extern ARM64Instr
* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op
, HReg
, HReg
);
982 extern ARM64Instr
* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op
, UInt dszBlg2
,
983 HReg dst
, HReg src
);
984 extern ARM64Instr
* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op
,
985 HReg dst
, HReg src
, UInt amt
);
986 extern ARM64Instr
* ARM64Instr_VExtV ( HReg dst
,
987 HReg srcLo
, HReg srcHi
, UInt amtB
);
988 extern ARM64Instr
* ARM64Instr_VImmQ ( HReg
, UShort
);
989 extern ARM64Instr
* ARM64Instr_VDfromX ( HReg rD
, HReg rX
);
990 extern ARM64Instr
* ARM64Instr_VQfromX ( HReg rQ
, HReg rXlo
);
991 extern ARM64Instr
* ARM64Instr_VQfromXX( HReg rQ
, HReg rXhi
, HReg rXlo
);
992 extern ARM64Instr
* ARM64Instr_VXfromQ ( HReg rX
, HReg rQ
, UInt laneNo
);
993 extern ARM64Instr
* ARM64Instr_VXfromDorS ( HReg rX
, HReg rDorS
, Bool fromD
);
994 extern ARM64Instr
* ARM64Instr_VMov ( UInt szB
, HReg dst
, HReg src
);
996 extern ARM64Instr
* ARM64Instr_EvCheck ( ARM64AMode
* amCounter
,
997 ARM64AMode
* amFailAddr
);
998 extern ARM64Instr
* ARM64Instr_ProfInc ( void );
1000 extern void ppARM64Instr ( const ARM64Instr
* );
1003 /* Some functions that insulate the register allocator from details
1004 of the underlying instruction set. */
1005 extern void getRegUsage_ARM64Instr ( HRegUsage
*, const ARM64Instr
*, Bool
);
1006 extern void mapRegs_ARM64Instr ( HRegRemap
*, ARM64Instr
*, Bool
);
1007 extern Int
emit_ARM64Instr ( /*MB_MOD*/Bool
* is_profInc
,
1008 UChar
* buf
, Int nbuf
, const ARM64Instr
* i
,
1010 VexEndness endness_host
,
1011 const void* disp_cp_chain_me_to_slowEP
,
1012 const void* disp_cp_chain_me_to_fastEP
,
1013 const void* disp_cp_xindir
,
1014 const void* disp_cp_xassisted
);
1016 extern void genSpill_ARM64 ( /*OUT*/HInstr
** i1
, /*OUT*/HInstr
** i2
,
1017 HReg rreg
, Int offset
, Bool
);
1018 extern void genReload_ARM64 ( /*OUT*/HInstr
** i1
, /*OUT*/HInstr
** i2
,
1019 HReg rreg
, Int offset
, Bool
);
1020 extern ARM64Instr
* genMove_ARM64(HReg from
, HReg to
, Bool
);
1022 extern const RRegUniverse
* getRRegUniverse_ARM64 ( void );
1024 extern HInstrArray
* iselSB_ARM64 ( const IRSB
*,
1028 Int offs_Host_EvC_Counter
,
1029 Int offs_Host_EvC_FailAddr
,
1030 Bool chainingAllowed
,
1034 /* How big is an event check? This is kind of a kludge because it
1035 depends on the offsets of host_EvC_FAILADDR and
1036 host_EvC_COUNTER. */
1037 extern Int
evCheckSzB_ARM64 (void);
1039 /* Perform a chaining and unchaining of an XDirect jump. */
1040 extern VexInvalRange
chainXDirect_ARM64 ( VexEndness endness_host
,
1041 void* place_to_chain
,
1042 const void* disp_cp_chain_me_EXPECTED
,
1043 const void* place_to_jump_to
);
1045 extern VexInvalRange
unchainXDirect_ARM64 ( VexEndness endness_host
,
1046 void* place_to_unchain
,
1047 const void* place_to_jump_to_EXPECTED
,
1048 const void* disp_cp_chain_me
);
1050 /* Patch the counter location into an existing ProfInc point. */
1051 extern VexInvalRange
patchProfInc_ARM64 ( VexEndness endness_host
,
1052 void* place_to_patch
,
1053 const ULong
* location_of_counter
);
1056 #endif /* ndef __VEX_HOST_ARM64_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end                                   host_arm64_defs.h ---*/
/*---------------------------------------------------------------*/