/*
 * Initial TCG Implementation for aarch64
 *
 * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
 * Written by Claudio Fontana
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.
 *
 * See the COPYING file in the top-level directory for details.
 */
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "qemu/bitops.h"
17 /* We're going to re-use TCGType in setting of the SF bit, which controls
18 the size of the operation performed. If we know the values match, it
19 makes things much cleaner. */
20 QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);
#ifdef CONFIG_DEBUG_TCG
/* Human-readable register names for debug dumps.  Indices match TCGReg:
   general registers x0-x30 (x29 printed as "fp", reg 31 as "sp"),
   followed by vector registers v0-v31.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "fp", "x30", "sp",

    /* v29 must be named "v29" here, not "fp" -- only x29 is the frame
       pointer alias.  */
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif /* CONFIG_DEBUG_TCG */
36 static const int tcg_target_reg_alloc_order[] = {
37 TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
38 TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
39 TCG_REG_X28, /* we will reserve this for guest_base if configured */
41 TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
42 TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
43 TCG_REG_X16, TCG_REG_X17,
45 TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
46 TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,
48 /* X18 reserved by system */
49 /* X19 reserved for AREG0 */
50 /* X29 reserved as fp */
51 /* X30 reserved as temporary */
53 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
54 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
55 /* V8 - V15 are call-saved, and skipped. */
56 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
57 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
58 TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
59 TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
62 static const int tcg_target_call_iarg_regs[8] = {
63 TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
64 TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7
66 static const int tcg_target_call_oarg_regs[1] = {
/* Scratch registers reserved for the backend itself.  */
#define TCG_REG_TMP TCG_REG_X30
#define TCG_VEC_TMP TCG_REG_V31

#ifndef CONFIG_SOFTMMU
/* Note that XZR cannot be encoded in the address base register slot,
   as that actually encodes SP.  So if we need to zero-extend the guest
   address, via the address index register slot, we need to load even
   a zero guest base into a register.  */
#define USE_GUEST_BASE     (guest_base != 0 || TARGET_LONG_BITS == 32)
#define TCG_REG_GUEST_BASE TCG_REG_X28
#endif
82 static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
84 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
85 ptrdiff_t offset = target - src_rx;
87 if (offset == sextract64(offset, 0, 26)) {
88 /* read instruction, mask away previous PC_REL26 parameter contents,
89 set the proper offset, then write back the instruction. */
90 *src_rw = deposit32(*src_rw, 0, 26, offset);
96 static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
98 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
99 ptrdiff_t offset = target - src_rx;
101 if (offset == sextract64(offset, 0, 19)) {
102 *src_rw = deposit32(*src_rw, 5, 19, offset);
108 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
109 intptr_t value, intptr_t addend)
111 tcg_debug_assert(addend == 0);
113 case R_AARCH64_JUMP26:
114 case R_AARCH64_CALL26:
115 return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
116 case R_AARCH64_CONDBR19:
117 return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
119 g_assert_not_reached();
/* Constant constraint bits beyond the generic TCG_CT_CONST.  */
#define TCG_CT_CONST_AIMM 0x100   /* add/sub immediate */
#define TCG_CT_CONST_LIMM 0x200   /* logical immediate */
#define TCG_CT_CONST_ZERO 0x400   /* zero (use XZR) */
#define TCG_CT_CONST_MONE 0x800   /* minus one */
#define TCG_CT_CONST_ORRI 0x1000  /* vector ORR immediate */
#define TCG_CT_CONST_ANDI 0x2000  /* vector AND immediate */

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull

#ifdef CONFIG_SOFTMMU
/* x0-x3 are clobbered by the softmmu slow path helpers.  */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \
                          (1 << TCG_REG_X2) | (1 << TCG_REG_X3)))
#else
#define ALL_QLDST_REGS   ALL_GENERAL_REGS
#endif
/* Match a constant valid for addition (12-bit, optionally shifted by 12).  */
static inline bool is_aimm(uint64_t val)
{
    return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
}
/* Match a constant valid for logical operations.  */
static inline bool is_limm(uint64_t val)
{
    /* Taking a simplified view of the logical immediates for now, ignoring
       the replication that can happen across the field.  Match bit patterns
       of the forms
           0....01....1
           0..01..10..0
       and their inverses.  */

    /* Make things easier below, by testing the form with msb clear. */
    if ((int64_t)val < 0) {
        val = ~val;
    }
    if (val == 0) {
        return false;
    }
    /* Adding the lowest set bit turns a contiguous run of ones into a
       single bit; a power of two then means the pattern matched.  */
    val += val & -val;
    return (val & (val - 1)) == 0;
}
/* Return true if v16 is a valid 16-bit shifted immediate,
   filling in the AdvSIMD modified-immediate cmode/imm8 encoding.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}
/* Return true if v32 is a valid 32-bit shifted immediate,
   filling in the AdvSIMD modified-immediate cmode/imm8 encoding.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}
/* Return true if v32 is a valid 32-bit shifting-ones immediate
   (a byte shifted left with ones shifted in), filling in cmode/imm8.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}
/* Return true if v32 is a valid float32 immediate (FMOV-encodable:
   sign, inverted-bit-30 exponent, 4-bit mantissa), filling in cmode/imm8.  */
static bool is_fimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (extract32(v32, 0, 19) == 0
        && (extract32(v32, 25, 6) == 0x20
            || extract32(v32, 25, 6) == 0x1f)) {
        *cmode = 0xf;
        *imm8 = (extract32(v32, 31, 1) << 7)
              | (extract32(v32, 25, 1) << 6)
              | extract32(v32, 19, 6);
        return true;
    }
    return false;
}
/* Return true if v64 is a valid float64 immediate (FMOV-encodable),
   filling in the AdvSIMD modified-immediate cmode/imm8 encoding.  */
static bool is_fimm64(uint64_t v64, int *cmode, int *imm8)
{
    if (extract64(v64, 0, 48) == 0
        && (extract64(v64, 54, 9) == 0x100
            || extract64(v64, 54, 9) == 0x0ff)) {
        *cmode = 0xf;
        *imm8 = (extract64(v64, 63, 1) << 7)
              | (extract64(v64, 54, 1) << 6)
              | extract64(v64, 48, 6);
        return true;
    }
    return false;
}
/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    /* i == 0 means no match was found.  */
    return i;
}
/* Return true if V is a valid 16-bit or 32-bit shifted immediate.
   A value whose two 16-bit halves are equal is treated as 16-bit.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}
281 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
283 if (ct & TCG_CT_CONST) {
286 if (type == TCG_TYPE_I32) {
289 if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
292 if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) {
295 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
298 if ((ct & TCG_CT_CONST_MONE) && val == -1) {
302 switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
305 case TCG_CT_CONST_ANDI:
308 case TCG_CT_CONST_ORRI:
309 if (val == deposit64(val, 32, 32, val)) {
311 return is_shimm1632(val, &cmode, &imm8);
315 /* Both bits should not be set for the same insn. */
316 g_assert_not_reached();
/* AArch64 condition codes (the cond field of conditional instructions).  */
enum aarch64_cond_code {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,     /* Unsigned greater or equal */
    COND_HS = COND_CS, /* ALIAS greater or equal */
    COND_CC = 0x3,     /* Unsigned less than */
    COND_LO = COND_CC, /* ALIAS Lower */
    COND_MI = 0x4,     /* Negative */
    COND_PL = 0x5,     /* Zero or greater */
    COND_VS = 0x6,     /* Overflow */
    COND_VC = 0x7,     /* No overflow */
    COND_HI = 0x8,     /* Unsigned greater than */
    COND_LS = 0x9,     /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
    COND_NV = 0xf, /* behaves like COND_AL here */
};
343 static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
344 [TCG_COND_EQ] = COND_EQ,
345 [TCG_COND_NE] = COND_NE,
346 [TCG_COND_LT] = COND_LT,
347 [TCG_COND_GE] = COND_GE,
348 [TCG_COND_LE] = COND_LE,
349 [TCG_COND_GT] = COND_GT,
351 [TCG_COND_LTU] = COND_LO,
352 [TCG_COND_GTU] = COND_HI,
353 [TCG_COND_GEU] = COND_HS,
354 [TCG_COND_LEU] = COND_LS,
/* Load/store operation kind, as encoded in bits [23:22] of the
   load/store register instruction formats.  */
typedef enum {
    LDST_ST = 0,     /* store */
    LDST_LD = 1,     /* load */
    LDST_LD_S_X = 2, /* load and sign-extend into Xt */
    LDST_LD_S_W = 3, /* load and sign-extend into Wt */
} AArch64LdstType;
364 /* We encode the format of the insn into the beginning of the name, so that
365 we can have the preprocessor help "typecheck" the insn vs the output
366 function. Arm didn't provide us with nice names for the formats, so we
367 use the section number of the architecture reference manual in which the
368 instruction group is described. */
370 /* Compare and branch (immediate). */
371 I3201_CBZ = 0x34000000,
372 I3201_CBNZ = 0x35000000,
374 /* Conditional branch (immediate). */
375 I3202_B_C = 0x54000000,
377 /* Unconditional branch (immediate). */
378 I3206_B = 0x14000000,
379 I3206_BL = 0x94000000,
381 /* Unconditional branch (register). */
382 I3207_BR = 0xd61f0000,
383 I3207_BLR = 0xd63f0000,
384 I3207_RET = 0xd65f0000,
386 /* AdvSIMD load/store single structure. */
387 I3303_LD1R = 0x0d40c000,
389 /* Load literal for loading the address at pc-relative offset */
390 I3305_LDR = 0x58000000,
391 I3305_LDR_v64 = 0x5c000000,
392 I3305_LDR_v128 = 0x9c000000,
394 /* Load/store register. Described here as 3.3.12, but the helper
395 that emits them can transform to 3.3.10 or 3.3.13. */
396 I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
397 I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
398 I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
399 I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30,
401 I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
402 I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
403 I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
404 I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30,
406 I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
407 I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,
409 I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
410 I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
411 I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,
413 I3312_LDRVS = 0x3c000000 | LDST_LD << 22 | MO_32 << 30,
414 I3312_STRVS = 0x3c000000 | LDST_ST << 22 | MO_32 << 30,
416 I3312_LDRVD = 0x3c000000 | LDST_LD << 22 | MO_64 << 30,
417 I3312_STRVD = 0x3c000000 | LDST_ST << 22 | MO_64 << 30,
419 I3312_LDRVQ = 0x3c000000 | 3 << 22 | 0 << 30,
420 I3312_STRVQ = 0x3c000000 | 2 << 22 | 0 << 30,
422 I3312_TO_I3310 = 0x00200800,
423 I3312_TO_I3313 = 0x01000000,
425 /* Load/store register pair instructions. */
426 I3314_LDP = 0x28400000,
427 I3314_STP = 0x28000000,
429 /* Add/subtract immediate instructions. */
430 I3401_ADDI = 0x11000000,
431 I3401_ADDSI = 0x31000000,
432 I3401_SUBI = 0x51000000,
433 I3401_SUBSI = 0x71000000,
435 /* Bitfield instructions. */
436 I3402_BFM = 0x33000000,
437 I3402_SBFM = 0x13000000,
438 I3402_UBFM = 0x53000000,
440 /* Extract instruction. */
441 I3403_EXTR = 0x13800000,
443 /* Logical immediate instructions. */
444 I3404_ANDI = 0x12000000,
445 I3404_ORRI = 0x32000000,
446 I3404_EORI = 0x52000000,
447 I3404_ANDSI = 0x72000000,
449 /* Move wide immediate instructions. */
450 I3405_MOVN = 0x12800000,
451 I3405_MOVZ = 0x52800000,
452 I3405_MOVK = 0x72800000,
454 /* PC relative addressing instructions. */
455 I3406_ADR = 0x10000000,
456 I3406_ADRP = 0x90000000,
458 /* Add/subtract shifted register instructions (without a shift). */
459 I3502_ADD = 0x0b000000,
460 I3502_ADDS = 0x2b000000,
461 I3502_SUB = 0x4b000000,
462 I3502_SUBS = 0x6b000000,
464 /* Add/subtract shifted register instructions (with a shift). */
465 I3502S_ADD_LSL = I3502_ADD,
467 /* Add/subtract with carry instructions. */
468 I3503_ADC = 0x1a000000,
469 I3503_SBC = 0x5a000000,
471 /* Conditional select instructions. */
472 I3506_CSEL = 0x1a800000,
473 I3506_CSINC = 0x1a800400,
474 I3506_CSINV = 0x5a800000,
475 I3506_CSNEG = 0x5a800400,
477 /* Data-processing (1 source) instructions. */
478 I3507_CLZ = 0x5ac01000,
479 I3507_RBIT = 0x5ac00000,
480 I3507_REV = 0x5ac00000, /* + size << 10 */
482 /* Data-processing (2 source) instructions. */
483 I3508_LSLV = 0x1ac02000,
484 I3508_LSRV = 0x1ac02400,
485 I3508_ASRV = 0x1ac02800,
486 I3508_RORV = 0x1ac02c00,
487 I3508_SMULH = 0x9b407c00,
488 I3508_UMULH = 0x9bc07c00,
489 I3508_UDIV = 0x1ac00800,
490 I3508_SDIV = 0x1ac00c00,
492 /* Data-processing (3 source) instructions. */
493 I3509_MADD = 0x1b000000,
494 I3509_MSUB = 0x1b008000,
496 /* Logical shifted register instructions (without a shift). */
497 I3510_AND = 0x0a000000,
498 I3510_BIC = 0x0a200000,
499 I3510_ORR = 0x2a000000,
500 I3510_ORN = 0x2a200000,
501 I3510_EOR = 0x4a000000,
502 I3510_EON = 0x4a200000,
503 I3510_ANDS = 0x6a000000,
505 /* Logical shifted register instructions (with a shift). */
506 I3502S_AND_LSR = I3510_AND | (1 << 22),
509 I3605_DUP = 0x0e000400,
510 I3605_INS = 0x4e001c00,
511 I3605_UMOV = 0x0e003c00,
513 /* AdvSIMD modified immediate */
514 I3606_MOVI = 0x0f000400,
515 I3606_MVNI = 0x2f000400,
516 I3606_BIC = 0x2f001400,
517 I3606_ORR = 0x0f001400,
519 /* AdvSIMD scalar shift by immediate */
520 I3609_SSHR = 0x5f000400,
521 I3609_SSRA = 0x5f001400,
522 I3609_SHL = 0x5f005400,
523 I3609_USHR = 0x7f000400,
524 I3609_USRA = 0x7f001400,
525 I3609_SLI = 0x7f005400,
527 /* AdvSIMD scalar three same */
528 I3611_SQADD = 0x5e200c00,
529 I3611_SQSUB = 0x5e202c00,
530 I3611_CMGT = 0x5e203400,
531 I3611_CMGE = 0x5e203c00,
532 I3611_SSHL = 0x5e204400,
533 I3611_ADD = 0x5e208400,
534 I3611_CMTST = 0x5e208c00,
535 I3611_UQADD = 0x7e200c00,
536 I3611_UQSUB = 0x7e202c00,
537 I3611_CMHI = 0x7e203400,
538 I3611_CMHS = 0x7e203c00,
539 I3611_USHL = 0x7e204400,
540 I3611_SUB = 0x7e208400,
541 I3611_CMEQ = 0x7e208c00,
543 /* AdvSIMD scalar two-reg misc */
544 I3612_CMGT0 = 0x5e208800,
545 I3612_CMEQ0 = 0x5e209800,
546 I3612_CMLT0 = 0x5e20a800,
547 I3612_ABS = 0x5e20b800,
548 I3612_CMGE0 = 0x7e208800,
549 I3612_CMLE0 = 0x7e209800,
550 I3612_NEG = 0x7e20b800,
552 /* AdvSIMD shift by immediate */
553 I3614_SSHR = 0x0f000400,
554 I3614_SSRA = 0x0f001400,
555 I3614_SHL = 0x0f005400,
556 I3614_SLI = 0x2f005400,
557 I3614_USHR = 0x2f000400,
558 I3614_USRA = 0x2f001400,
560 /* AdvSIMD three same. */
561 I3616_ADD = 0x0e208400,
562 I3616_AND = 0x0e201c00,
563 I3616_BIC = 0x0e601c00,
564 I3616_BIF = 0x2ee01c00,
565 I3616_BIT = 0x2ea01c00,
566 I3616_BSL = 0x2e601c00,
567 I3616_EOR = 0x2e201c00,
568 I3616_MUL = 0x0e209c00,
569 I3616_ORR = 0x0ea01c00,
570 I3616_ORN = 0x0ee01c00,
571 I3616_SUB = 0x2e208400,
572 I3616_CMGT = 0x0e203400,
573 I3616_CMGE = 0x0e203c00,
574 I3616_CMTST = 0x0e208c00,
575 I3616_CMHI = 0x2e203400,
576 I3616_CMHS = 0x2e203c00,
577 I3616_CMEQ = 0x2e208c00,
578 I3616_SMAX = 0x0e206400,
579 I3616_SMIN = 0x0e206c00,
580 I3616_SSHL = 0x0e204400,
581 I3616_SQADD = 0x0e200c00,
582 I3616_SQSUB = 0x0e202c00,
583 I3616_UMAX = 0x2e206400,
584 I3616_UMIN = 0x2e206c00,
585 I3616_UQADD = 0x2e200c00,
586 I3616_UQSUB = 0x2e202c00,
587 I3616_USHL = 0x2e204400,
589 /* AdvSIMD two-reg misc. */
590 I3617_CMGT0 = 0x0e208800,
591 I3617_CMEQ0 = 0x0e209800,
592 I3617_CMLT0 = 0x0e20a800,
593 I3617_CMGE0 = 0x2e208800,
594 I3617_CMLE0 = 0x2e209800,
595 I3617_NOT = 0x2e205800,
596 I3617_ABS = 0x0e20b800,
597 I3617_NEG = 0x2e20b800,
599 /* System instructions. */
601 DMB_ISH = 0xd50338bf,
606 static inline uint32_t tcg_in32(TCGContext *s)
608 uint32_t v = *(uint32_t *)s->code_ptr;
/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)
616 static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q,
617 TCGReg rt, TCGReg rn, unsigned size)
619 tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30));
622 static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn,
623 int imm19, TCGReg rt)
625 tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt);
628 static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
629 TCGReg rt, int imm19)
631 tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt);
634 static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
635 TCGCond c, int imm19)
637 tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
640 static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
642 tcg_out32(s, insn | (imm26 & 0x03ffffff));
645 static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn)
647 tcg_out32(s, insn | rn << 5);
650 static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
651 TCGReg r1, TCGReg r2, TCGReg rn,
652 tcg_target_long ofs, bool pre, bool w)
654 insn |= 1u << 31; /* ext */
658 tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
659 insn |= (ofs & (0x7f << 3)) << (15 - 3);
661 tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
664 static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
665 TCGReg rd, TCGReg rn, uint64_t aimm)
668 tcg_debug_assert((aimm & 0xfff) == 0);
670 tcg_debug_assert(aimm <= 0xfff);
671 aimm |= 1 << 12; /* apply LSL 12 */
673 tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
676 /* This function can be used for both 3.4.2 (Bitfield) and 3.4.4
677 (Logical immediate). Both insn groups have N, IMMR and IMMS fields
678 that feed the DecodeBitMasks pseudo function. */
679 static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext,
680 TCGReg rd, TCGReg rn, int n, int immr, int imms)
682 tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10
686 #define tcg_out_insn_3404 tcg_out_insn_3402
688 static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
689 TCGReg rd, TCGReg rn, TCGReg rm, int imms)
691 tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10
695 /* This function is used for the Move (wide immediate) instruction group.
696 Note that SHIFT is a full shift count, not the 2 bit HW field. */
697 static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
698 TCGReg rd, uint16_t half, unsigned shift)
700 tcg_debug_assert((shift & ~0x30) == 0);
701 tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
704 static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
705 TCGReg rd, int64_t disp)
707 tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
710 /* This function is for both 3.5.2 (Add/Subtract shifted register), for
711 the rare occasion when we actually want to supply a shift amount. */
712 static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
713 TCGType ext, TCGReg rd, TCGReg rn,
716 tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd);
719 /* This function is for 3.5.2 (Add/subtract shifted register),
720 and 3.5.10 (Logical shifted register), for the vast majorty of cases
721 when we don't want to apply a shift. Thus it can also be used for
722 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). */
723 static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext,
724 TCGReg rd, TCGReg rn, TCGReg rm)
726 tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
729 #define tcg_out_insn_3503 tcg_out_insn_3502
730 #define tcg_out_insn_3508 tcg_out_insn_3502
731 #define tcg_out_insn_3510 tcg_out_insn_3502
733 static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext,
734 TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c)
736 tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd
737 | tcg_cond_to_aarch64[c] << 12);
740 static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext,
741 TCGReg rd, TCGReg rn)
743 tcg_out32(s, insn | ext << 31 | rn << 5 | rd);
746 static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
747 TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra)
749 tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
752 static void tcg_out_insn_3605(TCGContext *s, AArch64Insn insn, bool q,
753 TCGReg rd, TCGReg rn, int dst_idx, int src_idx)
755 /* Note that bit 11 set means general register input. Therefore
756 we can handle both register sets with one function. */
757 tcg_out32(s, insn | q << 30 | (dst_idx << 16) | (src_idx << 11)
758 | (rd & 0x1f) | (~rn & 0x20) << 6 | (rn & 0x1f) << 5);
761 static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
762 TCGReg rd, bool op, int cmode, uint8_t imm8)
764 tcg_out32(s, insn | q << 30 | op << 29 | cmode << 12 | (rd & 0x1f)
765 | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
768 static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
769 TCGReg rd, TCGReg rn, unsigned immhb)
771 tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
774 static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
775 unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
777 tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
778 | (rn & 0x1f) << 5 | (rd & 0x1f));
781 static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
782 unsigned size, TCGReg rd, TCGReg rn)
784 tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
787 static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
788 TCGReg rd, TCGReg rn, unsigned immhb)
790 tcg_out32(s, insn | q << 30 | immhb << 16
791 | (rn & 0x1f) << 5 | (rd & 0x1f));
794 static void tcg_out_insn_3616(TCGContext *s, AArch64Insn insn, bool q,
795 unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
797 tcg_out32(s, insn | q << 30 | (size << 22) | (rm & 0x1f) << 16
798 | (rn & 0x1f) << 5 | (rd & 0x1f));
801 static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
802 unsigned size, TCGReg rd, TCGReg rn)
804 tcg_out32(s, insn | q << 30 | (size << 22)
805 | (rn & 0x1f) << 5 | (rd & 0x1f));
808 static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
809 TCGReg rd, TCGReg base, TCGType ext,
812 /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */
813 tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
814 0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
817 static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
818 TCGReg rd, TCGReg rn, intptr_t offset)
820 tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | (rd & 0x1f));
823 static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
824 TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
826 /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */
827 tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10
828 | rn << 5 | (rd & 0x1f));
831 /* Register to register move using ORR (shifted register with no shift). */
832 static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
834 tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm);
837 /* Register to register move using ADDI (move to/from SP). */
838 static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
840 tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
843 /* This function is used for the Logical (immediate) instruction group.
844 The value of LIMM must satisfy IS_LIMM. See the comment above about
845 only supporting simplified logical immediates. */
846 static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
847 TCGReg rd, TCGReg rn, uint64_t limm)
851 tcg_debug_assert(is_limm(limm));
856 r = 0; /* form 0....01....1 */
857 c = ctz64(~limm) - 1;
859 r = clz64(~limm); /* form 1..10..01..1 */
863 r = 64 - l; /* form 1....10....0 or 0..01..10..0 */
866 if (ext == TCG_TYPE_I32) {
871 tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
874 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
875 TCGReg rd, int64_t v64)
877 bool q = type == TCG_TYPE_V128;
880 /* Test all bytes equal first. */
883 tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8);
888 * Test all bytes 0x00 or 0xff second. This can match cases that
889 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
891 for (i = imm8 = 0; i < 8; i++) {
892 uint8_t byte = v64 >> (i * 8);
895 } else if (byte != 0) {
899 tcg_out_insn(s, 3606, MOVI, q, rd, 1, 0xe, imm8);
904 * Tests for various replications. For each element width, if we
905 * cannot find an expansion there's no point checking a larger
906 * width because we already know by replication it cannot match.
911 if (is_shimm16(v16, &cmode, &imm8)) {
912 tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
915 if (is_shimm16(~v16, &cmode, &imm8)) {
916 tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
921 * Otherwise, all remaining constants can be loaded in two insns:
922 * rd = v16 & 0xff, rd |= v16 & 0xff00.
924 tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff);
925 tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8);
927 } else if (vece == MO_32) {
931 if (is_shimm32(v32, &cmode, &imm8) ||
932 is_soimm32(v32, &cmode, &imm8) ||
933 is_fimm32(v32, &cmode, &imm8)) {
934 tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
937 if (is_shimm32(n32, &cmode, &imm8) ||
938 is_soimm32(n32, &cmode, &imm8)) {
939 tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
944 * Restrict the set of constants to those we can load with
945 * two instructions. Others we load from the pool.
947 i = is_shimm32_pair(v32, &cmode, &imm8);
949 tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
950 tcg_out_insn(s, 3606, ORR, q, rd, 0, i, extract32(v32, i * 4, 8));
953 i = is_shimm32_pair(n32, &cmode, &imm8);
955 tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
956 tcg_out_insn(s, 3606, BIC, q, rd, 0, i, extract32(n32, i * 4, 8));
959 } else if (is_fimm64(v64, &cmode, &imm8)) {
960 tcg_out_insn(s, 3606, MOVI, q, rd, 1, cmode, imm8);
965 * As a last resort, load from the constant pool. Sadly there
966 * is no LD1R (literal), so store the full 16-byte vector.
968 if (type == TCG_TYPE_V128) {
969 new_pool_l2(s, R_AARCH64_CONDBR19, s->code_ptr, 0, v64, v64);
970 tcg_out_insn(s, 3305, LDR_v128, 0, rd);
972 new_pool_label(s, v64, R_AARCH64_CONDBR19, s->code_ptr, 0);
973 tcg_out_insn(s, 3305, LDR_v64, 0, rd);
977 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
978 TCGReg rd, TCGReg rs)
980 int is_q = type - TCG_TYPE_V64;
981 tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0);
985 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
986 TCGReg r, TCGReg base, intptr_t offset)
988 TCGReg temp = TCG_REG_TMP;
990 if (offset < -0xffffff || offset > 0xffffff) {
991 tcg_out_movi(s, TCG_TYPE_PTR, temp, offset);
992 tcg_out_insn(s, 3502, ADD, 1, temp, temp, base);
995 AArch64Insn add_insn = I3401_ADDI;
998 add_insn = I3401_SUBI;
1001 if (offset & 0xfff000) {
1002 tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000);
1005 if (offset & 0xfff) {
1006 tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff);
1010 tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece);
1014 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
1015 tcg_target_long value)
1017 tcg_target_long svalue = value;
1018 tcg_target_long ivalue = ~value;
1019 tcg_target_long t0, t1, t2;
1026 tcg_debug_assert(rd < 32);
1029 g_assert_not_reached();
1032 /* For 32-bit values, discard potential garbage in value. For 64-bit
1033 values within [2**31, 2**32-1], we can create smaller sequences by
1034 interpreting this as a negative 32-bit number, while ensuring that
1035 the high 32 bits are cleared by setting SF=0. */
1036 if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
1037 svalue = (int32_t)value;
1038 value = (uint32_t)value;
1039 ivalue = (uint32_t)ivalue;
1040 type = TCG_TYPE_I32;
1043 /* Speed things up by handling the common case of small positive
1044 and negative values specially. */
1045 if ((value & ~0xffffull) == 0) {
1046 tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0);
1048 } else if ((ivalue & ~0xffffull) == 0) {
1049 tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0);
1053 /* Check for bitfield immediates. For the benefit of 32-bit quantities,
1054 use the sign-extended value. That lets us match rotated values such
1055 as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */
1056 if (is_limm(svalue)) {
1057 tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue);
1061 /* Look for host pointer values within 4G of the PC. This happens
1062 often when loading pointers to QEMU's own data structures. */
1063 if (type == TCG_TYPE_I64) {
1064 intptr_t src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
1065 tcg_target_long disp = value - src_rx;
1066 if (disp == sextract64(disp, 0, 21)) {
1067 tcg_out_insn(s, 3406, ADR, rd, disp);
1070 disp = (value >> 12) - (src_rx >> 12);
1071 if (disp == sextract64(disp, 0, 21)) {
1072 tcg_out_insn(s, 3406, ADRP, rd, disp);
1073 if (value & 0xfff) {
1074 tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff);
1080 /* Would it take fewer insns to begin with MOVN? */
1081 if (ctpop64(value) >= 32) {
1088 s0 = ctz64(t0) & (63 & -16);
1089 t1 = t0 & ~(0xffffull << s0);
1090 s1 = ctz64(t1) & (63 & -16);
1091 t2 = t1 & ~(0xffffull << s1);
1093 tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0);
1095 tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1);
1100 /* For more than 2 insns, dump it into the constant pool. */
1101 new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0);
1102 tcg_out_insn(s, 3305, LDR, 0, rd);
1105 /* Define something more legible for general use. */
1106 #define tcg_out_ldst_r tcg_out_insn_3310
1108 static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
1109 TCGReg rn, intptr_t offset, int lgsize)
1111 /* If the offset is naturally aligned and in range, then we can
1112 use the scaled uimm12 encoding */
1113 if (offset >= 0 && !(offset & ((1 << lgsize) - 1))) {
1114 uintptr_t scaled_uimm = offset >> lgsize;
1115 if (scaled_uimm <= 0xfff) {
1116 tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
1121 /* Small signed offsets can use the unscaled encoding. */
1122 if (offset >= -256 && offset < 256) {
1123 tcg_out_insn_3312(s, insn, rd, rn, offset);
1127 /* Worst-case scenario, move offset to temp register, use reg offset. */
1128 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
1129 tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
1132 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
1140 if (ret < 32 && arg < 32) {
1141 tcg_out_movr(s, type, ret, arg);
1143 } else if (ret < 32) {
1144 tcg_out_insn(s, 3605, UMOV, type, ret, arg, 0, 0);
1146 } else if (arg < 32) {
1147 tcg_out_insn(s, 3605, INS, 0, ret, arg, 4 << type, 0);
1153 tcg_debug_assert(ret >= 32 && arg >= 32);
1154 tcg_out_insn(s, 3616, ORR, 0, 0, ret, arg, arg);
1157 tcg_debug_assert(ret >= 32 && arg >= 32);
1158 tcg_out_insn(s, 3616, ORR, 1, 0, ret, arg, arg);
1162 g_assert_not_reached();
1167 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1168 TCGReg base, intptr_t ofs)
1175 insn = (ret < 32 ? I3312_LDRW : I3312_LDRVS);
1179 insn = (ret < 32 ? I3312_LDRX : I3312_LDRVD);
1191 g_assert_not_reached();
1193 tcg_out_ldst(s, insn, ret, base, ofs, lgsz);
1196 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
1197 TCGReg base, intptr_t ofs)
1204 insn = (src < 32 ? I3312_STRW : I3312_STRVS);
1208 insn = (src < 32 ? I3312_STRX : I3312_STRVD);
1220 g_assert_not_reached();
1222 tcg_out_ldst(s, insn, src, base, ofs, lgsz);
/*
 * Try to store constant VAL to [BASE + OFS] without a scratch register.
 * Only a zero of integer type can be stored directly (via XZR);
 * the failure return path is elided in this extract.
 */
1225 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1226 TCGReg base, intptr_t ofs)
1228 if (type <= TCG_TYPE_I64 && val == 0) {
1229 tcg_out_st(s, type, TCG_REG_XZR, base, ofs);
/* Bitfield move: BFM Rd, Rn, #a, #b. */
1235 static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
1236 TCGReg rn, unsigned int a, unsigned int b)
1238 tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b);
/* Unsigned bitfield move: UBFM Rd, Rn, #a, #b. */
1241 static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
1242 TCGReg rn, unsigned int a, unsigned int b)
1244 tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b);
/* Signed bitfield move: SBFM Rd, Rn, #a, #b. */
1247 static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd,
1248 TCGReg rn, unsigned int a, unsigned int b)
1250 tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b);
/* Extract register: EXTR Rd, Rn, Rm, #a (double-register shift). */
1253 static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
1254 TCGReg rn, TCGReg rm, unsigned int a)
1256 tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
/*
 * Shift left by immediate M, using the LSL alias of UBFM.
 * NOTE(review): the declaration of `max` (presumably bits - 1) appears
 * elided in this extract — confirm against the full file.
 */
1259 static inline void tcg_out_shl(TCGContext *s, TCGType ext,
1260 TCGReg rd, TCGReg rn, unsigned int m)
1262 int bits = ext ? 64 : 32;
1264 tcg_out_ubfm(s, ext, rd, rn, (bits - m) & max, (max - m) & max);
/* Logical shift right by immediate M, via the LSR alias of UBFM. */
1267 static inline void tcg_out_shr(TCGContext *s, TCGType ext,
1268 TCGReg rd, TCGReg rn, unsigned int m)
1270 int max = ext ? 63 : 31;
1271 tcg_out_ubfm(s, ext, rd, rn, m & max, max);
/* Arithmetic shift right by immediate M, via the ASR alias of SBFM. */
1274 static inline void tcg_out_sar(TCGContext *s, TCGType ext,
1275 TCGReg rd, TCGReg rn, unsigned int m)
1277 int max = ext ? 63 : 31;
1278 tcg_out_sbfm(s, ext, rd, rn, m & max, max)
/* Rotate right by immediate M, via the ROR alias of EXTR Rd, Rn, Rn. */
1281 static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
1282 TCGReg rd, TCGReg rn, unsigned int m)
1284 int max = ext ? 63 : 31;
1285 tcg_out_extr(s, ext, rd, rn, rn, m & max);
/* Rotate left by M == rotate right by (width - M), masked to the width. */
1288 static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
1289 TCGReg rd, TCGReg rn, unsigned int m)
1291 int max = ext ? 63 : 31;
1292 tcg_out_extr(s, ext, rd, rn, rn, -m & max);
/*
 * Deposit RN into RD at bit position LSB for WIDTH bits, using the
 * BFI alias of BFM: immr = (size - lsb) mod size, imms = width - 1.
 */
1295 static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
1296 TCGReg rn, unsigned lsb, unsigned width)
1298 unsigned size = ext ? 64 : 32;
1299 unsigned a = (size - lsb) & (size - 1);
1300 unsigned b = width - 1;
1301 tcg_out_bfm(s, ext, rd, rn, a, b);
/*
 * Compare register A against B (immediate when CONST_B), setting flags.
 * Immediate forms use the CMP/CMN aliases of SUBS/ADDS with XZR as the
 * destination; the register form uses SUBS.  The branch selecting
 * between the two immediate forms appears elided in this extract.
 */
1304 static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
1305 tcg_target_long b, bool const_b)
1308 /* Using CMP or CMN aliases. */
1310 tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
1312 tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
1315 /* Using CMP alias SUBS wzr, Wn, Wm */
1316 tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
/*
 * Unconditional direct branch to TARGET; asserts the +/-128MB range of
 * the 26-bit B displacement (word-scaled) is sufficient.
 */
1320 static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
1322 ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
1323 tcg_debug_assert(offset == sextract64(offset, 0, 26));
1324 tcg_out_insn(s, 3206, B, offset);
/*
 * Unconditional branch to TARGET that also works beyond B-range:
 * fall back to loading the address into X9 and using BR.
 */
1327 static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
1329 ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
1330 if (offset == sextract64(offset, 0, 26)) {
1331 tcg_out_insn(s, 3206, B, offset);
1333 /* Choose X9 as a call-clobbered non-LR temporary. */
1334 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
1335 tcg_out_insn(s, 3207, BR, TCG_REG_X9);
/* Indirect call through REG (BLR). */
1339 static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
1341 tcg_out_insn(s, 3207, BLR, reg);
/*
 * Call TARGET: direct BL when in range, otherwise materialize the
 * address in TCG_REG_TMP and BLR through it.
 */
1344 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
1346 ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
1347 if (offset == sextract64(offset, 0, 26)) {
1348 tcg_out_insn(s, 3206, BL, offset);
1350 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
1351 tcg_out_callr(s, TCG_REG_TMP);
/*
 * Patch a direct-jump site in generated code to branch to ADDR.
 * Writes an 8-byte pair of instructions (B+NOP, or ADRP+ADD feeding a
 * BR through TCG_REG_TMP) with a single atomic 64-bit store, then
 * flushes the icache.  jmp_rw/jmp_rx are the writable/executable
 * aliases of the patch site (split-wx support).
 */
1355 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1356 uintptr_t jmp_rw, uintptr_t addr)
1358 tcg_insn_unit i1, i2;
1359 TCGType rt = TCG_TYPE_I64;
1360 TCGReg rd = TCG_REG_TMP;
1363 ptrdiff_t offset = addr - jmp_rx;
1365 if (offset == sextract64(offset, 0, 26)) {
1366 i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
1369 offset = (addr >> 12) - (jmp_rx >> 12);
1372 i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
1374 i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
1376 pair = (uint64_t)i2 << 32 | i1;
1377 qatomic_set((uint64_t *)jmp_rw, pair);
1378 flush_idcache_range(jmp_rx, jmp_rw, 8);
/*
 * Branch to TCG label L: emit a relocated placeholder B if the label
 * is not yet resolved, otherwise a direct branch to its address.
 */
1381 static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
1383 if (!l->has_value) {
1384 tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
1385 tcg_out_insn(s, 3206, B, 0);
1387 tcg_out_goto(s, l->u.value_ptr);
/*
 * Conditional branch to label L on (A cond B).  Comparisons against
 * constant zero for EQ/NE use CBZ/CBNZ and skip the CMP; otherwise
 * flags are set with tcg_out_cmp and B.cond is emitted.  Unresolved
 * labels get a CONDBR19 relocation with the existing insn bits as the
 * offset placeholder.
 */
1391 static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
1392 TCGArg b, bool b_const, TCGLabel *l)
1397 if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
1401 tcg_out_cmp(s, ext, a, b, b_const);
1404 if (!l->has_value) {
1405 tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
1406 offset = tcg_in32(s) >> 5;
1408 offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2;
1409 tcg_debug_assert(offset == sextract64(offset, 0, 19));
1413 tcg_out_insn(s, 3202, B_C, c, offset);
1414 } else if (c == TCG_COND_EQ) {
1415 tcg_out_insn(s, 3201, CBZ, ext, a, offset);
1417 tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
/* Byte-reverse RN into RD: REV/REV16/REV32 selected by S_BITS. */
1421 static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
1422 TCGReg rd, TCGReg rn)
1424 /* REV, REV16, REV32 */
1425 tcg_out_insn_3507(s, I3507_REV | (s_bits << 10), ext, rd, rn);
/* Sign-extend the low (8 << s_bits) bits of RN into RD. */
1428 static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
1429 TCGReg rd, TCGReg rn)
1431 /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
1432 int bits = (8 << s_bits) - 1;
1433 tcg_out_sbfm(s, ext, rd, rn, 0, bits);
/* Zero-extend the low (8 << s_bits) bits of RN into RD (32-bit form). */
1436 static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
1437 TCGReg rd, TCGReg rn)
1439 /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
1440 int bits = (8 << s_bits) - 1;
1441 tcg_out_ubfm(s, 0, rd, rn, 0, bits);
/*
 * RD = RN + AIMM, with a negative AIMM emitted as SUBI of -AIMM.
 * The sign test between the two forms appears elided in this extract.
 */
1444 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
1445 TCGReg rn, int64_t aimm)
1448 tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
1450 tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
/*
 * Double-word add/sub: {rh,rl} = {ah,al} +/- {bh,bl}, low part setting
 * carry (ADDS/SUBS[I]), high part consuming it (ADC/SBC family).
 * A scratch is substituted for RL when it aliases inputs needed by the
 * high half, then moved back at the end.  Several branch/else lines of
 * the immediate-selection logic appear elided in this extract.
 */
1454 static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
1455 TCGReg rh, TCGReg al, TCGReg ah,
1456 tcg_target_long bl, tcg_target_long bh,
1457 bool const_bl, bool const_bh, bool sub)
1459 TCGReg orig_rl = rl;
1462 if (rl == ah || (!const_bh && rl == bh)) {
1469 insn = sub ? I3401_ADDSI : I3401_SUBSI;
1471 insn = sub ? I3401_SUBSI : I3401_ADDSI;
1474 if (unlikely(al == TCG_REG_XZR)) {
1475 /* ??? We want to allow al to be zero for the benefit of
1476 negation via subtraction. However, that leaves open the
1477 possibility of adding 0+const in the low part, and the
1478 immediate add instructions encode XSP not XZR. Don't try
1479 anything more elaborate here than loading another zero. */
1481 tcg_out_movi(s, ext, al, 0);
1483 tcg_out_insn_3401(s, insn, ext, rl, al, bl);
1485 tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
1490 /* Note that the only two constants we support are 0 and -1, and
1491 that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */
1492 if ((bh != 0) ^ sub) {
1499 tcg_out_insn_3503(s, insn, ext, rh, ah, bh);
1501 tcg_out_mov(s, ext, orig_rl, rl);
/*
 * Emit a memory barrier for TCG ordering bits A0: DMB ISH, weakened to
 * load-only or store-only variants when the requested ordering allows.
 * The table defaults every combination to the full barrier, then the
 * designated initializers override the weaker cases.
 */
1504 static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
1506 static const uint32_t sync[] = {
1507 [0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST,
1508 [TCG_MO_ST_ST] = DMB_ISH | DMB_ST,
1509 [TCG_MO_LD_LD] = DMB_ISH | DMB_LD,
1510 [TCG_MO_LD_ST] = DMB_ISH | DMB_LD,
1511 [TCG_MO_LD_ST | TCG_MO_LD_LD] = DMB_ISH | DMB_LD,
1513 tcg_out32(s, sync[a0 & TCG_MO_ALL]);
/*
 * Count leading/trailing zeros of A0 into D.  CTZ is implemented as
 * RBIT + CLZ.  B is the value to produce when A0 == 0: when it equals
 * the operand width the plain CLZ result already matches; otherwise a
 * CMP + CSEL (or elided CSINV/CSINC variants) selects between the CLZ
 * result and B.  Several declaration and branch lines appear elided
 * in this extract — confirm against the full file.
 */
1516 static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
1517 TCGReg a0, TCGArg b, bool const_b, bool is_ctz)
1522 tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
1524 if (const_b && b == (ext ? 64 : 32)) {
1525 tcg_out_insn(s, 3507, CLZ, ext, d, a1);
1527 AArch64Insn sel = I3506_CSEL;
1529 tcg_out_cmp(s, ext, a0, 0, 1);
1530 tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1);
1536 } else if (b == 0) {
1539 tcg_out_movi(s, ext, d, b);
1543 tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE);
/* PC-relative address of TARGET into RD via ADR (+/-1MB range asserted). */
1547 static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
1549 ptrdiff_t offset = tcg_pcrel_diff(s, target);
1550 tcg_debug_assert(offset == sextract64(offset, 0, 21));
1551 tcg_out_insn(s, 3406, ADR, rd, offset);
1554 #ifdef CONFIG_SOFTMMU
1555 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
1556 * MemOpIdx oi, uintptr_t ra)
/*
 * Slow-path load helpers, indexed by MO_SIZE.  The big-endian and
 * little-endian variants are both listed; selection is presumably via a
 * host-endianness preprocessor conditional elided in this extract —
 * TODO confirm.
 */
1558 static void * const qemu_ld_helpers[MO_SIZE + 1] = {
1559 [MO_8] = helper_ret_ldub_mmu,
1561 [MO_16] = helper_be_lduw_mmu,
1562 [MO_32] = helper_be_ldul_mmu,
1563 [MO_64] = helper_be_ldq_mmu,
1565 [MO_16] = helper_le_lduw_mmu,
1566 [MO_32] = helper_le_ldul_mmu,
1567 [MO_64] = helper_le_ldq_mmu,
1571 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
1572 * uintxx_t val, MemOpIdx oi,
/*
 * Slow-path store helpers, indexed by MO_SIZE; BE/LE variants as for
 * qemu_ld_helpers above (endianness selection elided in this extract).
 */
1575 static void * const qemu_st_helpers[MO_SIZE + 1] = {
1576 [MO_8] = helper_ret_stb_mmu,
1578 [MO_16] = helper_be_stw_mmu,
1579 [MO_32] = helper_be_stl_mmu,
1580 [MO_64] = helper_be_stq_mmu,
1582 [MO_16] = helper_le_stw_mmu,
1583 [MO_32] = helper_le_stl_mmu,
1584 [MO_64] = helper_le_stq_mmu,
/*
 * Softmmu slow path for a guest load: patch the forward branch from the
 * TLB miss, marshal (env, addr, oi, return-addr) into X0-X3 per the
 * helper ABI, call the size-selected helper, sign-extend or move the
 * result into the data register, and jump back to the fast path.
 * Returns true on success (false path is the failed relocation).
 */
1588 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1590 MemOpIdx oi = lb->oi;
1591 MemOp opc = get_memop(oi);
1592 MemOp size = opc & MO_SIZE;
1594 if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1598 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
1599 tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
1600 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
1601 tcg_out_adr(s, TCG_REG_X3, lb->raddr);
1602 tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]);
1603 if (opc & MO_SIGN) {
1604 tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
1606 tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
1609 tcg_out_goto(s, lb->raddr);
/*
 * Softmmu slow path for a guest store: patch the TLB-miss branch,
 * marshal (env, addr, val, oi, return-addr) into X0-X4, call the
 * size-selected store helper, and jump back to the fast path.
 */
1613 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1615 MemOpIdx oi = lb->oi;
1616 MemOp opc = get_memop(oi);
1617 MemOp size = opc & MO_SIZE;
1619 if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1623 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
1624 tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
1625 tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
1626 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi);
1627 tcg_out_adr(s, TCG_REG_X4, lb->raddr);
1628 tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);
1629 tcg_out_goto(s, lb->raddr);
/*
 * Record a pending load/store slow path: allocate a TCGLabelQemuLdst
 * and remember everything the slow-path emitters above need (direction,
 * registers, resume address, and the branch insn to patch).
 */
1633 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1634 TCGType ext, TCGReg data_reg, TCGReg addr_reg,
1635 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1637 TCGLabelQemuLdst *label = new_ldst_label(s);
1639 label->is_ld = is_ld;
1642 label->datalo_reg = data_reg;
1643 label->addrlo_reg = addr_reg;
1644 label->raddr = tcg_splitwx_to_rx(raddr);
1645 label->label_ptr[0] = label_ptr;
/* Compile-time checks that the TLB fast-path layout matches what the
   hand-emitted LDP/ADDI sequence in tcg_out_tlb_read assumes. */
1648 /* We expect to use a 7-bit scaled negative offset from ENV. */
1649 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1650 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512);
1652 /* These offsets are built into the LDP below. */
1653 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
1654 QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
1656 /* Load and compare a TLB entry, emitting the conditional jump to the
1657 slow path for the failure case, which will be patched later when finalizing
1658 the slow path. Generated code returns the host addend in X1,
1659 clobbers X0,X2,X3,TMP. */
1660 static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
1661 tcg_insn_unit **label_ptr, int mem_index,
1664 unsigned a_bits = get_alignment_bits(opc);
1665 unsigned s_bits = opc & MO_SIZE;
1666 unsigned a_mask = (1u << a_bits) - 1;
1667 unsigned s_mask = (1u << s_bits) - 1;
1670 uint64_t compare_mask;
/* Use a 64-bit index extraction only when mask bits can exceed 32. */
1672 mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
1673 ? TCG_TYPE_I64 : TCG_TYPE_I32);
1675 /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */
1676 tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0,
1677 TLB_MASK_TABLE_OFS(mem_index), 1, 0)
1679 /* Extract the TLB index from the address into X0. */
1680 tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
1681 TCG_REG_X0, TCG_REG_X0, addr_reg,
1682 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1684 /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */
1685 tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0);
1687 /* Load the tlb comparator into X0, and the fast path addend into X1. */
1688 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read
1689 ? offsetof(CPUTLBEntry, addr_read)
1690 : offsetof(CPUTLBEntry, addr_write));
1691 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1,
1692 offsetof(CPUTLBEntry, addend));
1694 /* For aligned accesses, we check the first byte and include the alignment
1695 bits within the address. For unaligned access, we check that we don't
1696 cross pages using the address of the last byte of the access. */
1697 if (a_bits >= s_bits) {
1700 tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
1701 TCG_REG_X3, addr_reg, s_mask - a_mask);
1704 compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
1706 /* Store the page mask part of the address into X3. */
1707 tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64,
1708 TCG_REG_X3, x3, compare_mask);
1710 /* Perform the address comparison. */
1711 tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0);
1713 /* If not equal, we jump to the slow path. */
1714 *label_ptr = s->code_ptr;
1715 tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
/*
 * User-mode alignment check: TST addr against the alignment mask and
 * branch (B.NE, patched later) to a recorded slow-path label that will
 * raise the unaligned-access fault.
 */
1719 static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
1722 unsigned a_mask = (1 << a_bits) - 1;
1723 TCGLabelQemuLdst *label = new_ldst_label(s);
1725 label->is_ld = is_ld;
1726 label->addrlo_reg = addr_reg;
1728 /* tst addr, #mask */
1729 tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
1731 label->label_ptr[0] = s->code_ptr;
1733 /* b.ne slow_path */
1734 tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
1736 label->raddr = tcg_splitwx_to_rx(s->code_ptr);
/*
 * Emit the user-mode unaligned-access slow path: patch the B.NE from
 * tcg_out_test_alignment, load (env, addr) into X0/X1, set LR to the
 * resume point, and tail-call the unaligned-ld/st helper.
 */
1739 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1741 if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1745 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg);
1746 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
1748 /* "Tail call" to the helper, with the return address back inline. */
1749 tcg_out_adr(s, TCG_REG_LR, l->raddr);
1750 tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld
1751 : helper_unaligned_st));
/* User-mode load slow path: only the alignment fault is possible. */
1755 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1757 return tcg_out_fail_alignment(s, l);
/* User-mode store slow path: only the alignment fault is possible. */
1760 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1762 return tcg_out_fail_alignment(s, l);
1764 #endif /* CONFIG_SOFTMMU */
/*
 * Emit the actual guest load: a register-offset load of the size and
 * signedness encoded in MEMOP, from [addr_r + off_r].  EXT selects
 * 32- vs 64-bit destination for the sign-extending variants.
 */
1766 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
1767 TCGReg data_r, TCGReg addr_r,
1768 TCGType otype, TCGReg off_r)
1770 switch (memop & MO_SSIZE) {
1772 tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
1775 tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
1776 data_r, addr_r, otype, off_r);
1779 tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
1782 tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
1783 data_r, addr_r, otype, off_r);
1786 tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
1789 tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
1792 tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
/*
 * Emit the actual guest store: a register-offset store of the size
 * encoded in MEMOP, to [addr_r + off_r].
 */
1799 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
1800 TCGReg data_r, TCGReg addr_r,
1801 TCGType otype, TCGReg off_r)
1803 switch (memop & MO_SIZE) {
1805 tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
1808 tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
1811 tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
1814 tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
/*
 * Top-level guest load.  Softmmu: TLB lookup (addend returned in X1),
 * direct load from addend+addr, then register the slow-path stub.
 * User-mode: optional alignment test, then load relative to the
 * guest_base register or the bare address.
 */
1821 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1822 MemOpIdx oi, TCGType ext)
1824 MemOp memop = get_memop(oi);
1825 const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1827 /* Byte swapping is left to middle-end expansion. */
1828 tcg_debug_assert((memop & MO_BSWAP) == 0);
1830 #ifdef CONFIG_SOFTMMU
1831 unsigned mem_index = get_mmuidx(oi);
1832 tcg_insn_unit *label_ptr;
1834 tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
1835 tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
1836 TCG_REG_X1, otype, addr_reg);
1837 add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
1838 s->code_ptr, label_ptr);
1839 #else /* !CONFIG_SOFTMMU */
1840 unsigned a_bits = get_alignment_bits(memop);
1842 tcg_out_test_alignment(s, true, addr_reg, a_bits);
1844 if (USE_GUEST_BASE) {
1845 tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
1846 TCG_REG_GUEST_BASE, otype, addr_reg);
1848 tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
1849 addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
1851 #endif /* CONFIG_SOFTMMU */
/*
 * Top-level guest store; mirrors tcg_out_qemu_ld for the store
 * direction (TLB lookup with is_read=0, store helpers on the slow path).
 */
1854 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1857 MemOp memop = get_memop(oi);
1858 const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1860 /* Byte swapping is left to middle-end expansion. */
1861 tcg_debug_assert((memop & MO_BSWAP) == 0);
1863 #ifdef CONFIG_SOFTMMU
1864 unsigned mem_index = get_mmuidx(oi);
1865 tcg_insn_unit *label_ptr;
1867 tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
1868 tcg_out_qemu_st_direct(s, memop, data_reg,
1869 TCG_REG_X1, otype, addr_reg);
1870 add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
1871 data_reg, addr_reg, s->code_ptr, label_ptr);
1872 #else /* !CONFIG_SOFTMMU */
1873 unsigned a_bits = get_alignment_bits(memop);
1875 tcg_out_test_alignment(s, false, addr_reg, a_bits);
1877 if (USE_GUEST_BASE) {
1878 tcg_out_qemu_st_direct(s, memop, data_reg,
1879 TCG_REG_GUEST_BASE, otype, addr_reg);
1881 tcg_out_qemu_st_direct(s, memop, data_reg,
1882 addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
1884 #endif /* CONFIG_SOFTMMU */
/* Epilogue address that exit_tb branches to; set up by the prologue. */
1887 static const tcg_insn_unit *tb_ret_addr;
/*
 * Central per-opcode code generator: translate one TCG op into one or
 * a few AArch64 instructions.  EXT (derived from TCG_OPF_64BIT) is the
 * SF bit for most integer ops; REG0(I) maps "rZ"-constrained constant-0
 * operands onto XZR.  Many `break;` and closing-brace lines of this
 * switch appear elided in this extract.
 */
1889 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1890 const TCGArg args[TCG_MAX_OP_ARGS],
1891 const int const_args[TCG_MAX_OP_ARGS])
1893 /* 99% of the time, we can signal the use of extension registers
1894 by looking to see if the opcode handles 64-bit data. */
1895 TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;
1897 /* Hoist the loads of the most common arguments. */
1898 TCGArg a0 = args[0];
1899 TCGArg a1 = args[1];
1900 TCGArg a2 = args[2];
1901 int c2 = const_args[2];
1903 /* Some operands are defined with "rZ" constraint, a register or
1904 the zero register. These need not actually test args[I] == 0. */
1905 #define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
1908 case INDEX_op_exit_tb:
1909 /* Reuse the zeroing that exists for goto_ptr. */
1911 tcg_out_goto_long(s, tcg_code_gen_epilogue);
1913 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
1914 tcg_out_goto_long(s, tb_ret_addr);
1918 case INDEX_op_goto_tb:
1919 if (s->tb_jmp_insn_offset != NULL) {
1920 /* TCG_TARGET_HAS_direct_jump */
1921 /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
1922 write can be used to patch the target address. */
1923 if ((uintptr_t)s->code_ptr & 7) {
1926 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1927 /* actual branch destination will be patched by
1928 tb_target_set_jmp_target later. */
1929 tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
1930 tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
1932 /* !TCG_TARGET_HAS_direct_jump */
1933 tcg_debug_assert(s->tb_jmp_target_addr != NULL);
1934 intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2;
1935 tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
1937 tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
1938 set_jmp_reset_offset(s, a0);
1941 case INDEX_op_goto_ptr:
1942 tcg_out_insn(s, 3207, BR, a0);
1946 tcg_out_goto_label(s, arg_label(a0));
/* Host loads/stores with immediate offsets (last arg is log2 size). */
1949 case INDEX_op_ld8u_i32:
1950 case INDEX_op_ld8u_i64:
1951 tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
1953 case INDEX_op_ld8s_i32:
1954 tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0);
1956 case INDEX_op_ld8s_i64:
1957 tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0);
1959 case INDEX_op_ld16u_i32:
1960 case INDEX_op_ld16u_i64:
1961 tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1);
1963 case INDEX_op_ld16s_i32:
1964 tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1);
1966 case INDEX_op_ld16s_i64:
1967 tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1);
1969 case INDEX_op_ld_i32:
1970 case INDEX_op_ld32u_i64:
1971 tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2);
1973 case INDEX_op_ld32s_i64:
1974 tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2);
1976 case INDEX_op_ld_i64:
1977 tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3);
1980 case INDEX_op_st8_i32:
1981 case INDEX_op_st8_i64:
1982 tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0);
1984 case INDEX_op_st16_i32:
1985 case INDEX_op_st16_i64:
1986 tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1);
1988 case INDEX_op_st_i32:
1989 case INDEX_op_st32_i64:
1990 tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2);
1992 case INDEX_op_st_i64:
1993 tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
/* Arithmetic: immediate forms used when the constant operand fits. */
1996 case INDEX_op_add_i32:
1999 case INDEX_op_add_i64:
2001 tcg_out_addsubi(s, ext, a0, a1, a2);
2003 tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
2007 case INDEX_op_sub_i32:
2010 case INDEX_op_sub_i64:
2012 tcg_out_addsubi(s, ext, a0, a1, -a2);
2014 tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
2018 case INDEX_op_neg_i64:
2019 case INDEX_op_neg_i32:
2020 tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
/* Logicals: constants go through the bitmask-immediate encoder. */
2023 case INDEX_op_and_i32:
2026 case INDEX_op_and_i64:
2028 tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
2030 tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
2034 case INDEX_op_andc_i32:
2037 case INDEX_op_andc_i64:
2039 tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
2041 tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
2045 case INDEX_op_or_i32:
2048 case INDEX_op_or_i64:
2050 tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
2052 tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
2056 case INDEX_op_orc_i32:
2059 case INDEX_op_orc_i64:
2061 tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
2063 tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
2067 case INDEX_op_xor_i32:
2070 case INDEX_op_xor_i64:
2072 tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
2074 tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
2078 case INDEX_op_eqv_i32:
2081 case INDEX_op_eqv_i64:
2083 tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
2085 tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
2089 case INDEX_op_not_i64:
2090 case INDEX_op_not_i32:
2091 tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
2094 case INDEX_op_mul_i64:
2095 case INDEX_op_mul_i32:
2096 tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
2099 case INDEX_op_div_i64:
2100 case INDEX_op_div_i32:
2101 tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
2103 case INDEX_op_divu_i64:
2104 case INDEX_op_divu_i32:
2105 tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
/* Remainder: a1 - (a1 / a2) * a2, via DIV then MSUB. */
2108 case INDEX_op_rem_i64:
2109 case INDEX_op_rem_i32:
2110 tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2);
2111 tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
2113 case INDEX_op_remu_i64:
2114 case INDEX_op_remu_i32:
2115 tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2);
2116 tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
/* Shifts/rotates: immediate helpers vs variable-shift register forms. */
2119 case INDEX_op_shl_i64:
2120 case INDEX_op_shl_i32:
2122 tcg_out_shl(s, ext, a0, a1, a2);
2124 tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
2128 case INDEX_op_shr_i64:
2129 case INDEX_op_shr_i32:
2131 tcg_out_shr(s, ext, a0, a1, a2);
2133 tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
2137 case INDEX_op_sar_i64:
2138 case INDEX_op_sar_i32:
2140 tcg_out_sar(s, ext, a0, a1, a2);
2142 tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
2146 case INDEX_op_rotr_i64:
2147 case INDEX_op_rotr_i32:
2149 tcg_out_rotr(s, ext, a0, a1, a2);
2151 tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
2155 case INDEX_op_rotl_i64:
2156 case INDEX_op_rotl_i32:
2158 tcg_out_rotl(s, ext, a0, a1, a2);
/* No ROL instruction: rotate left = rotate right by negated amount. */
2160 tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
2161 tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP);
2165 case INDEX_op_clz_i64:
2166 case INDEX_op_clz_i32:
2167 tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
2169 case INDEX_op_ctz_i64:
2170 case INDEX_op_ctz_i32:
2171 tcg_out_cltz(s, ext, a0, a1, a2, c2, true);
2174 case INDEX_op_brcond_i32:
2177 case INDEX_op_brcond_i64:
2178 tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
2181 case INDEX_op_setcond_i32:
2184 case INDEX_op_setcond_i64:
2185 tcg_out_cmp(s, ext, a1, a2, c2);
2186 /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
2187 tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
2188 TCG_REG_XZR, tcg_invert_cond(args[3]));
2191 case INDEX_op_movcond_i32:
2194 case INDEX_op_movcond_i64:
2195 tcg_out_cmp(s, ext, a1, a2, c2);
2196 tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
2199 case INDEX_op_qemu_ld_i32:
2200 case INDEX_op_qemu_ld_i64:
2201 tcg_out_qemu_ld(s, a0, a1, a2, ext);
2203 case INDEX_op_qemu_st_i32:
2204 case INDEX_op_qemu_st_i64:
2205 tcg_out_qemu_st(s, REG0(0), a1, a2);
/* Byte swaps, with optional sign/zero extension per TCG_BSWAP flags. */
2208 case INDEX_op_bswap64_i64:
2209 tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
2211 case INDEX_op_bswap32_i64:
2212 tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
2213 if (a2 & TCG_BSWAP_OS) {
2214 tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a0);
2217 case INDEX_op_bswap32_i32:
2218 tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
2220 case INDEX_op_bswap16_i64:
2221 case INDEX_op_bswap16_i32:
2222 tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
2223 if (a2 & TCG_BSWAP_OS) {
2224 /* Output must be sign-extended. */
2225 tcg_out_sxt(s, ext, MO_16, a0, a0);
2226 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2227 /* Output must be zero-extended, but input isn't. */
2228 tcg_out_uxt(s, MO_16, a0, a0);
2232 case INDEX_op_ext8s_i64:
2233 case INDEX_op_ext8s_i32:
2234 tcg_out_sxt(s, ext, MO_8, a0, a1);
2236 case INDEX_op_ext16s_i64:
2237 case INDEX_op_ext16s_i32:
2238 tcg_out_sxt(s, ext, MO_16, a0, a1);
2240 case INDEX_op_ext_i32_i64:
2241 case INDEX_op_ext32s_i64:
2242 tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
2244 case INDEX_op_ext8u_i64:
2245 case INDEX_op_ext8u_i32:
2246 tcg_out_uxt(s, MO_8, a0, a1);
2248 case INDEX_op_ext16u_i64:
2249 case INDEX_op_ext16u_i32:
2250 tcg_out_uxt(s, MO_16, a0, a1);
2252 case INDEX_op_extu_i32_i64:
2253 case INDEX_op_ext32u_i64:
/* A 32-bit MOV zero-extends to 64 bits implicitly. */
2254 tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
2257 case INDEX_op_deposit_i64:
2258 case INDEX_op_deposit_i32:
2259 tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
2262 case INDEX_op_extract_i64:
2263 case INDEX_op_extract_i32:
2264 tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
2267 case INDEX_op_sextract_i64:
2268 case INDEX_op_sextract_i32:
2269 tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
2272 case INDEX_op_extract2_i64:
2273 case INDEX_op_extract2_i32:
2274 tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
2277 case INDEX_op_add2_i32:
2278 tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
2279 (int32_t)args[4], args[5], const_args[4],
2280 const_args[5], false);
2282 case INDEX_op_add2_i64:
2283 tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
2284 args[5], const_args[4], const_args[5], false);
2286 case INDEX_op_sub2_i32:
2287 tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
2288 (int32_t)args[4], args[5], const_args[4],
2289 const_args[5], true);
2291 case INDEX_op_sub2_i64:
2292 tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
2293 args[5], const_args[4], const_args[5], true);
2296 case INDEX_op_muluh_i64:
2297 tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
2299 case INDEX_op_mulsh_i64:
2300 tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
2307 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2308 case INDEX_op_mov_i64:
2309 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2311 g_assert_not_reached();
2317 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2318 unsigned vecl, unsigned vece,
2319 const TCGArg args[TCG_MAX_OP_ARGS],
2320 const int const_args[TCG_MAX_OP_ARGS])
2322 static const AArch64Insn cmp_vec_insn[16] = {
2323 [TCG_COND_EQ] = I3616_CMEQ,
2324 [TCG_COND_GT] = I3616_CMGT,
2325 [TCG_COND_GE] = I3616_CMGE,
2326 [TCG_COND_GTU] = I3616_CMHI,
2327 [TCG_COND_GEU] = I3616_CMHS,
2329 static const AArch64Insn cmp_scalar_insn[16] = {
2330 [TCG_COND_EQ] = I3611_CMEQ,
2331 [TCG_COND_GT] = I3611_CMGT,
2332 [TCG_COND_GE] = I3611_CMGE,
2333 [TCG_COND_GTU] = I3611_CMHI,
2334 [TCG_COND_GEU] = I3611_CMHS,
2336 static const AArch64Insn cmp0_vec_insn[16] = {
2337 [TCG_COND_EQ] = I3617_CMEQ0,
2338 [TCG_COND_GT] = I3617_CMGT0,
2339 [TCG_COND_GE] = I3617_CMGE0,
2340 [TCG_COND_LT] = I3617_CMLT0,
2341 [TCG_COND_LE] = I3617_CMLE0,
2343 static const AArch64Insn cmp0_scalar_insn[16] = {
2344 [TCG_COND_EQ] = I3612_CMEQ0,
2345 [TCG_COND_GT] = I3612_CMGT0,
2346 [TCG_COND_GE] = I3612_CMGE0,
2347 [TCG_COND_LT] = I3612_CMLT0,
2348 [TCG_COND_LE] = I3612_CMLE0,
2351 TCGType type = vecl + TCG_TYPE_V64;
2352 unsigned is_q = vecl;
2353 bool is_scalar = !is_q && vece == MO_64;
2354 TCGArg a0, a1, a2, a3;
2362 case INDEX_op_ld_vec:
2363 tcg_out_ld(s, type, a0, a1, a2);
2365 case INDEX_op_st_vec:
2366 tcg_out_st(s, type, a0, a1, a2);
2368 case INDEX_op_dupm_vec:
2369 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2371 case INDEX_op_add_vec:
2373 tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
2375 tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
2378 case INDEX_op_sub_vec:
2380 tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
2382 tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
2385 case INDEX_op_mul_vec:
2386 tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
2388 case INDEX_op_neg_vec:
2390 tcg_out_insn(s, 3612, NEG, vece, a0, a1);
2392 tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
2395 case INDEX_op_abs_vec:
2397 tcg_out_insn(s, 3612, ABS, vece, a0, a1);
2399 tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
2402 case INDEX_op_and_vec:
2403 if (const_args[2]) {
2404 is_shimm1632(~a2, &cmode, &imm8);
2406 tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
2409 tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
2412 tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2);
2414 case INDEX_op_or_vec:
2415 if (const_args[2]) {
2416 is_shimm1632(a2, &cmode, &imm8);
2418 tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
2421 tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
2424 tcg_out_insn(s, 3616, ORR, is_q, 0, a0, a1, a2);
2426 case INDEX_op_andc_vec:
2427 if (const_args[2]) {
2428 is_shimm1632(a2, &cmode, &imm8);
2430 tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
2433 tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
2436 tcg_out_insn(s, 3616, BIC, is_q, 0, a0, a1, a2);
2438 case INDEX_op_orc_vec:
2439 if (const_args[2]) {
2440 is_shimm1632(~a2, &cmode, &imm8);
2442 tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
2445 tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
2448 tcg_out_insn(s, 3616, ORN, is_q, 0, a0, a1, a2);
2450 case INDEX_op_xor_vec:
2451 tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
2453 case INDEX_op_ssadd_vec:
2455 tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
2457 tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
2460 case INDEX_op_sssub_vec:
2462 tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
2464 tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
2467 case INDEX_op_usadd_vec:
2469 tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
2471 tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
2474 case INDEX_op_ussub_vec:
2476 tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
2478 tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
2481 case INDEX_op_smax_vec:
2482 tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
2484 case INDEX_op_smin_vec:
2485 tcg_out_insn(s, 3616, SMIN, is_q, vece, a0, a1, a2);
2487 case INDEX_op_umax_vec:
2488 tcg_out_insn(s, 3616, UMAX, is_q, vece, a0, a1, a2);
2490 case INDEX_op_umin_vec:
2491 tcg_out_insn(s, 3616, UMIN, is_q, vece, a0, a1, a2);
2493 case INDEX_op_not_vec:
2494 tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
2496 case INDEX_op_shli_vec:
2498 tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
2500 tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
2503 case INDEX_op_shri_vec:
2505 tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
2507 tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
2510 case INDEX_op_sari_vec:
2512 tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
2514 tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
2517 case INDEX_op_aa64_sli_vec:
2519 tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
2521 tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
2524 case INDEX_op_shlv_vec:
2526 tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
2528 tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
2531 case INDEX_op_aa64_sshl_vec:
2533 tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
2535 tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
2538 case INDEX_op_cmp_vec:
2540 TCGCond cond = args[3];
2543 if (cond == TCG_COND_NE) {
2544 if (const_args[2]) {
2546 tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
2548 tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
2552 tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
2554 tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
2556 tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
2559 if (const_args[2]) {
2561 insn = cmp0_scalar_insn[cond];
2563 tcg_out_insn_3612(s, insn, vece, a0, a1);
2567 insn = cmp0_vec_insn[cond];
2569 tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
2573 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2577 insn = cmp_scalar_insn[cond];
2580 t = a1, a1 = a2, a2 = t;
2581 cond = tcg_swap_cond(cond);
2582 insn = cmp_scalar_insn[cond];
2583 tcg_debug_assert(insn != 0);
2585 tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
2587 insn = cmp_vec_insn[cond];
2590 t = a1, a1 = a2, a2 = t;
2591 cond = tcg_swap_cond(cond);
2592 insn = cmp_vec_insn[cond];
2593 tcg_debug_assert(insn != 0);
2595 tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
2601 case INDEX_op_bitsel_vec:
2604 tcg_out_insn(s, 3616, BIT, is_q, 0, a0, a2, a1);
2605 } else if (a0 == a2) {
2606 tcg_out_insn(s, 3616, BIF, is_q, 0, a0, a3, a1);
2609 tcg_out_mov(s, type, a0, a1);
2611 tcg_out_insn(s, 3616, BSL, is_q, 0, a0, a2, a3);
2615 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
2616 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
2618 g_assert_not_reached();
/*
 * Report whether this backend can emit vector opcode OPC for vector
 * type TYPE with element size VECE (MO_8..MO_64).
 * NOTE(review): the enclosing switch scaffolding and the return
 * statements for the first case groups are elided in this extract;
 * confirm the exact return values against the complete file.
 */
2622 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
/* Directly supported by AdvSIMD encodings emitted in tcg_out_vec_op. */
2625 case INDEX_op_add_vec:
2626 case INDEX_op_sub_vec:
2627 case INDEX_op_and_vec:
2628 case INDEX_op_or_vec:
2629 case INDEX_op_xor_vec:
2630 case INDEX_op_andc_vec:
2631 case INDEX_op_orc_vec:
2632 case INDEX_op_neg_vec:
2633 case INDEX_op_abs_vec:
2634 case INDEX_op_not_vec:
2635 case INDEX_op_cmp_vec:
2636 case INDEX_op_shli_vec:
2637 case INDEX_op_shri_vec:
2638 case INDEX_op_sari_vec:
2639 case INDEX_op_ssadd_vec:
2640 case INDEX_op_sssub_vec:
2641 case INDEX_op_usadd_vec:
2642 case INDEX_op_ussub_vec:
2643 case INDEX_op_shlv_vec:
2644 case INDEX_op_bitsel_vec:
/* Implemented by expansion in tcg_expand_vec_op below. */
2646 case INDEX_op_rotli_vec:
2647 case INDEX_op_shrv_vec:
2648 case INDEX_op_sarv_vec:
2649 case INDEX_op_rotlv_vec:
2650 case INDEX_op_rotrv_vec:
/*
 * These have no 64-bit-element AdvSIMD form, so only MO_8..MO_32
 * element sizes are accepted.
 */
2652 case INDEX_op_mul_vec:
2653 case INDEX_op_smax_vec:
2654 case INDEX_op_smin_vec:
2655 case INDEX_op_umax_vec:
2656 case INDEX_op_umin_vec:
2657 return vece < MO_64;
/*
 * Expand vector opcodes that have no direct AArch64 encoding into
 * sequences of ops the backend does support.  Extra operands are
 * fetched from the va_list.
 * NOTE(review): the va_list setup, switch scaffolding and break
 * statements are elided in this extract.
 */
2664 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2668 TCGv_vec v0, v1, v2, t1, t2, c1;
2672 v0 = temp_tcgv_vec(arg_temp(a0))
2673 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2674 a2 = va_arg(va, TCGArg);
/*
 * rotli(v1, a2) = (v1 >> (width - a2)) merged with (v1 << a2):
 * compute the right-shifted part into t1, then use SLI
 * (shift-left-insert) to deposit the left-shifted bits of v1 on top.
 */
2678 case INDEX_op_rotli_vec:
2679 t1 = tcg_temp_new_vec(type);
2680 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2681 vec_gen_4(INDEX_op_aa64_sli_vec, type, vece,
2682 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2683 tcg_temp_free_vec(t1);
2686 case INDEX_op_shrv_vec:
2687 case INDEX_op_sarv_vec:
2688 /* Right shifts are negative left shifts for AArch64. */
2689 v2 = temp_tcgv_vec(arg_temp(a2));
2690 t1 = tcg_temp_new_vec(type);
2691 tcg_gen_neg_vec(vece, t1, v2);
/* USHL for logical, SSHL for arithmetic right shift. */
2692 opc = (opc == INDEX_op_shrv_vec
2693 ? INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec);
2694 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2695 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2696 tcg_temp_free_vec(t1);
/*
 * rotlv(v1, v2) = (v1 << v2) | (v1 >> (width - v2)):
 * t1 = v2 - width (a negative count, i.e. a right shift),
 * then OR the two variable-shift results together.
 */
2699 case INDEX_op_rotlv_vec:
2700 v2 = temp_tcgv_vec(arg_temp(a2));
2701 t1 = tcg_temp_new_vec(type);
2702 c1 = tcg_constant_vec(type, vece, 8 << vece);
2703 tcg_gen_sub_vec(vece, t1, v2, c1);
2704 /* Right shifts are negative left shifts for AArch64. */
2705 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
2706 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2707 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
2708 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2709 tcg_gen_or_vec(vece, v0, v0, t1);
2710 tcg_temp_free_vec(t1);
/*
 * rotrv(v1, v2) = (v1 >> v2) | (v1 << (width - v2)):
 * t1 = -v2 (right shift count), t2 = width - v2 (left shift count).
 */
2713 case INDEX_op_rotrv_vec:
2714 v2 = temp_tcgv_vec(arg_temp(a2));
2715 t1 = tcg_temp_new_vec(type);
2716 t2 = tcg_temp_new_vec(type);
2717 c1 = tcg_constant_vec(type, vece, 8 << vece);
2718 tcg_gen_neg_vec(vece, t1, v2);
2719 tcg_gen_sub_vec(vece, t2, c1, v2);
2720 /* Right shifts are negative left shifts for AArch64. */
2721 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
2722 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2723 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t2),
2724 tcgv_vec_arg(v1), tcgv_vec_arg(t2));
2725 tcg_gen_or_vec(vece, v0, t1, t2);
2726 tcg_temp_free_vec(t1);
2727 tcg_temp_free_vec(t2);
2731 g_assert_not_reached();
/*
 * Map each TCG opcode to its operand-constraint set.
 * Letters: r = general register, w = vector register, Z = also allow
 * the zero register, 0 = must match output; the remaining letters
 * (A, L, M, N, O, l, i) are backend immediate/address classes —
 * presumably defined with this backend's constraint strings; confirm
 * against the full file.
 * NOTE(review): the enclosing switch scaffolding is elided here.
 */
2735 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2738 case INDEX_op_goto_ptr:
/* Loads and unary ops: one register out, one register in. */
2741 case INDEX_op_ld8u_i32:
2742 case INDEX_op_ld8s_i32:
2743 case INDEX_op_ld16u_i32:
2744 case INDEX_op_ld16s_i32:
2745 case INDEX_op_ld_i32:
2746 case INDEX_op_ld8u_i64:
2747 case INDEX_op_ld8s_i64:
2748 case INDEX_op_ld16u_i64:
2749 case INDEX_op_ld16s_i64:
2750 case INDEX_op_ld32u_i64:
2751 case INDEX_op_ld32s_i64:
2752 case INDEX_op_ld_i64:
2753 case INDEX_op_neg_i32:
2754 case INDEX_op_neg_i64:
2755 case INDEX_op_not_i32:
2756 case INDEX_op_not_i64:
2757 case INDEX_op_bswap16_i32:
2758 case INDEX_op_bswap32_i32:
2759 case INDEX_op_bswap16_i64:
2760 case INDEX_op_bswap32_i64:
2761 case INDEX_op_bswap64_i64:
2762 case INDEX_op_ext8s_i32:
2763 case INDEX_op_ext16s_i32:
2764 case INDEX_op_ext8u_i32:
2765 case INDEX_op_ext16u_i32:
2766 case INDEX_op_ext8s_i64:
2767 case INDEX_op_ext16s_i64:
2768 case INDEX_op_ext32s_i64:
2769 case INDEX_op_ext8u_i64:
2770 case INDEX_op_ext16u_i64:
2771 case INDEX_op_ext32u_i64:
2772 case INDEX_op_ext_i32_i64:
2773 case INDEX_op_extu_i32_i64:
2774 case INDEX_op_extract_i32:
2775 case INDEX_op_extract_i64:
2776 case INDEX_op_sextract_i32:
2777 case INDEX_op_sextract_i64:
2778 return C_O1_I1(r, r);
/* Stores: value may be the zero register. */
2780 case INDEX_op_st8_i32:
2781 case INDEX_op_st16_i32:
2782 case INDEX_op_st_i32:
2783 case INDEX_op_st8_i64:
2784 case INDEX_op_st16_i64:
2785 case INDEX_op_st32_i64:
2786 case INDEX_op_st_i64:
2787 return C_O0_I2(rZ, r);
2789 case INDEX_op_add_i32:
2790 case INDEX_op_add_i64:
2791 case INDEX_op_sub_i32:
2792 case INDEX_op_sub_i64:
2793 case INDEX_op_setcond_i32:
2794 case INDEX_op_setcond_i64:
2795 return C_O1_I2(r, r, rA);
/* No immediate forms: both inputs must be registers. */
2797 case INDEX_op_mul_i32:
2798 case INDEX_op_mul_i64:
2799 case INDEX_op_div_i32:
2800 case INDEX_op_div_i64:
2801 case INDEX_op_divu_i32:
2802 case INDEX_op_divu_i64:
2803 case INDEX_op_rem_i32:
2804 case INDEX_op_rem_i64:
2805 case INDEX_op_remu_i32:
2806 case INDEX_op_remu_i64:
2807 case INDEX_op_muluh_i64:
2808 case INDEX_op_mulsh_i64:
2809 return C_O1_I2(r, r, r);
2811 case INDEX_op_and_i32:
2812 case INDEX_op_and_i64:
2813 case INDEX_op_or_i32:
2814 case INDEX_op_or_i64:
2815 case INDEX_op_xor_i32:
2816 case INDEX_op_xor_i64:
2817 case INDEX_op_andc_i32:
2818 case INDEX_op_andc_i64:
2819 case INDEX_op_orc_i32:
2820 case INDEX_op_orc_i64:
2821 case INDEX_op_eqv_i32:
2822 case INDEX_op_eqv_i64:
2823 return C_O1_I2(r, r, rL);
/* Shifts/rotates accept a register or immediate count. */
2825 case INDEX_op_shl_i32:
2826 case INDEX_op_shr_i32:
2827 case INDEX_op_sar_i32:
2828 case INDEX_op_rotl_i32:
2829 case INDEX_op_rotr_i32:
2830 case INDEX_op_shl_i64:
2831 case INDEX_op_shr_i64:
2832 case INDEX_op_sar_i64:
2833 case INDEX_op_rotl_i64:
2834 case INDEX_op_rotr_i64:
2835 return C_O1_I2(r, r, ri);
2837 case INDEX_op_clz_i32:
2838 case INDEX_op_ctz_i32:
2839 case INDEX_op_clz_i64:
2840 case INDEX_op_ctz_i64:
2841 return C_O1_I2(r, r, rAL);
2843 case INDEX_op_brcond_i32:
2844 case INDEX_op_brcond_i64:
2845 return C_O0_I2(r, rA);
2847 case INDEX_op_movcond_i32:
2848 case INDEX_op_movcond_i64:
2849 return C_O1_I4(r, r, rA, rZ, rZ);
/* Guest memory accesses use the 'l' address-register class. */
2851 case INDEX_op_qemu_ld_i32:
2852 case INDEX_op_qemu_ld_i64:
2853 return C_O1_I1(r, l);
2854 case INDEX_op_qemu_st_i32:
2855 case INDEX_op_qemu_st_i64:
2856 return C_O0_I2(lZ, l);
/* Deposit writes into its first input: output must alias it. */
2858 case INDEX_op_deposit_i32:
2859 case INDEX_op_deposit_i64:
2860 return C_O1_I2(r, 0, rZ);
2862 case INDEX_op_extract2_i32:
2863 case INDEX_op_extract2_i64:
2864 return C_O1_I2(r, rZ, rZ);
2866 case INDEX_op_add2_i32:
2867 case INDEX_op_add2_i64:
2868 case INDEX_op_sub2_i32:
2869 case INDEX_op_sub2_i64:
2870 return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
/* Vector ops: all operands in vector registers. */
2872 case INDEX_op_add_vec:
2873 case INDEX_op_sub_vec:
2874 case INDEX_op_mul_vec:
2875 case INDEX_op_xor_vec:
2876 case INDEX_op_ssadd_vec:
2877 case INDEX_op_sssub_vec:
2878 case INDEX_op_usadd_vec:
2879 case INDEX_op_ussub_vec:
2880 case INDEX_op_smax_vec:
2881 case INDEX_op_smin_vec:
2882 case INDEX_op_umax_vec:
2883 case INDEX_op_umin_vec:
2884 case INDEX_op_shlv_vec:
2885 case INDEX_op_shrv_vec:
2886 case INDEX_op_sarv_vec:
2887 case INDEX_op_aa64_sshl_vec:
2888 return C_O1_I2(w, w, w);
2889 case INDEX_op_not_vec:
2890 case INDEX_op_neg_vec:
2891 case INDEX_op_abs_vec:
2892 case INDEX_op_shli_vec:
2893 case INDEX_op_shri_vec:
2894 case INDEX_op_sari_vec:
2895 return C_O1_I1(w, w);
2896 case INDEX_op_ld_vec:
2897 case INDEX_op_dupm_vec:
2898 return C_O1_I1(w, r);
2899 case INDEX_op_st_vec:
2900 return C_O0_I2(w, r);
2901 case INDEX_op_dup_vec:
2902 return C_O1_I1(w, wr);
2903 case INDEX_op_or_vec:
2904 case INDEX_op_andc_vec:
2905 return C_O1_I2(w, w, wO);
2906 case INDEX_op_and_vec:
2907 case INDEX_op_orc_vec:
2908 return C_O1_I2(w, w, wN);
2909 case INDEX_op_cmp_vec:
2910 return C_O1_I2(w, w, wZ);
2911 case INDEX_op_bitsel_vec:
2912 return C_O1_I3(w, w, w, w);
/* SLI inserts into its destination: output must alias input 1. */
2913 case INDEX_op_aa64_sli_vec:
2914 return C_O1_I2(w, 0, w);
2917 g_assert_not_reached();
/*
 * One-time backend initialization: declare which host registers are
 * available per TCG type, which are call-clobbered, and which are
 * permanently reserved.
 */
2921 static void tcg_target_init(TCGContext *s)
/* Integer types use regset bits 0..31 (x0..sp). */
2923 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
2924 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
/* Vector types use regset bits 32..63 (v0..v31). */
2925 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
2926 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
/* Start with everything clobbered, then clear the callee-saved set. */
2928 tcg_target_call_clobber_regs = -1ull;
2929 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X19);
2930 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X20);
2931 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X21);
2932 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X22);
2933 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X23);
2934 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X24);
2935 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X25);
2936 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X26);
2937 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X27);
2938 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X28);
2939 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X29);
/* v8..v15: only the low 64 bits are callee-saved per the AAPCS64. */
2940 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
2941 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
2942 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
2943 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
2944 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
2945 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
2946 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
2947 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
/* Registers the allocator must never hand out. */
2949 s->reserved_regs = 0;
2950 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2951 tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
2952 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2953 tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
2954 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2957 /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */
2958 #define PUSH_SIZE ((30 - 19 + 1) * 8)
/*
 * Total prologue frame: saved-register area plus outgoing call args
 * and the TCG temp buffer, rounded up to the stack alignment.
 * NOTE(review): the opening line of this macro (the "(PUSH_SIZE"
 * part) is elided in this extract — verify against the full file.
 */
2960 #define FRAME_SIZE \
2962 + TCG_STATIC_CALL_ARGS_SIZE \
2963 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2964 + TCG_TARGET_STACK_ALIGN - 1) \
2965 & ~(TCG_TARGET_STACK_ALIGN - 1))
2967 /* We're expecting a 2 byte uleb128 encoded value. */
2968 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2970 /* We're expecting to use a single ADDI insn. */
2971 QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);
/*
 * Emit the prologue/epilogue pair that brackets all generated code:
 * save callee-preserved registers, load env (AREG0) and jump to the
 * translated code; on return, restore and ret to the caller.
 * NOTE(review): some intervening lines (braces, epilogue comments)
 * are elided in this extract.
 */
2973 static void tcg_target_qemu_prologue(TCGContext *s)
2977 /* Push (FP, LR) and allocate space for all saved registers. */
2978 tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
2979 TCG_REG_SP, -PUSH_SIZE, 1, 1);
2981 /* Set up frame pointer for canonical unwinding. */
2982 tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
2984 /* Store callee-preserved regs x19..x28. */
2985 for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
2986 int ofs = (r - TCG_REG_X19 + 2) * 8;
2987 tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
2990 /* Make stack space for TCG locals. */
2991 tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
2992 FRAME_SIZE - PUSH_SIZE);
2994 /* Inform TCG about how to find TCG locals with register, offset, size. */
2995 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
2996 CPU_TEMP_BUF_NLONGS * sizeof(long));
2998 #if !defined(CONFIG_SOFTMMU)
/* User-mode: pin guest_base in a reserved register for qemu_ld/st. */
2999 if (USE_GUEST_BASE) {
3000 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
3001 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
/* arg0 = env pointer, arg1 = entry point of translated code. */
3005 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3006 tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
3009 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3010 * and fall through to the rest of the epilogue.
3012 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3013 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
/* Common epilogue, also the target of exit_tb. */
3016 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
3018 /* Remove TCG locals stack space. */
3019 tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
3020 FRAME_SIZE - PUSH_SIZE);
3022 /* Restore registers x19..x28. */
3023 for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
3024 int ofs = (r - TCG_REG_X19 + 2) * 8;
3025 tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
3028 /* Pop (FP, LR), restore SP to previous frame. */
3029 tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR,
3030 TCG_REG_SP, PUSH_SIZE, 0, 1);
3031 tcg_out_insn(s, 3207, RET, TCG_REG_LR);
/*
 * Fill COUNT insn slots at P — presumably with NOP encodings; the
 * loop body is elided in this extract, so confirm against the full
 * file.
 */
3034 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3037 for (i = 0; i < count; ++i) {
/*
 * NOTE(review): the opening of the DebugFrame typedef is elided in
 * this extract; these fields hold the DWARF CFA instruction bytes
 * initialized in debug_frame below.
 */
3044 uint8_t fde_def_cfa[4];
3045 uint8_t fde_reg_ofs[24];
/* ELF machine tag reported to the JIT debug interface. */
3048 #define ELF_HOST_MACHINE EM_AARCH64
/*
 * Static DWARF .debug_frame data describing the prologue's stack
 * layout, so external debuggers can unwind through generated code.
 * Offsets are in data_align units of -8 bytes from the CFA.
 */
3050 static const DebugFrame debug_frame = {
3051 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3054 .h.cie.code_align = 1,
3055 .h.cie.data_align = 0x78, /* sleb128 -8 */
3056 .h.cie.return_column = TCG_REG_LR,
3058 /* Total FDE size does not include the "len" member. */
3059 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3062 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
3063 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3067 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */
3068 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */
3069 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */
3070 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */
3071 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */
3072 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */
3073 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */
3074 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */
3075 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */
3076 0x80 + 19, 10, /* DW_CFA_offset, x19, -80 */
3077 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */
3078 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */
/*
 * Register the generated-code buffer and its unwind info with the
 * GDB JIT interface via the common helper.
 */
3082 void tcg_register_jit(const void *buf, size_t buf_size)
3084 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));