/*
 * Initial TCG Implementation for aarch64
 *
 * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
 * Written by Claudio Fontana
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.
 *
 * See the COPYING file in the top-level directory for details.
 */

#include "../tcg-pool.c.inc"
#include "qemu/bitops.h"

/* We're going to re-use TCGType in setting of the SF bit, which controls
   the size of the operation performed.  If we know the values match, it
   makes things much cleaner.  */
QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "fp", "x30", "sp",

    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif /* CONFIG_DEBUG_TCG */

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
    TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
    TCG_REG_X28, /* we will reserve this for guest_base if configured */

    TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
    TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
    TCG_REG_X16, TCG_REG_X17,

    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,

    /* X18 reserved by system */
    /* X19 reserved for AREG0 */
    /* X29 reserved as fp */
    /* X30 reserved as temporary */

    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    /* V8 - V15 are call-saved, and skipped.  */
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
    TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[8] = {
    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7
};
static const int tcg_target_call_oarg_regs[1] = {
    TCG_REG_X0
};

#define TCG_REG_TMP TCG_REG_X30
#define TCG_VEC_TMP TCG_REG_V31

#ifndef CONFIG_SOFTMMU
/* Note that XZR cannot be encoded in the address base register slot,
   as that actually encodes SP.  So if we need to zero-extend the guest
   address, via the address index register slot, we need to load even
   a zero guest base into a register.  */
#define USE_GUEST_BASE     (guest_base != 0 || TARGET_LONG_BITS == 32)
#define TCG_REG_GUEST_BASE TCG_REG_X28
#endif

static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = target - src_rx;

    if (offset == sextract64(offset, 0, 26)) {
        /* read instruction, mask away previous PC_REL26 parameter contents,
           set the proper offset, then write back the instruction. */
        *src_rw = deposit32(*src_rw, 0, 26, offset);
        return true;
    }
    return false;
}

static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = target - src_rx;

    if (offset == sextract64(offset, 0, 19)) {
        *src_rw = deposit32(*src_rw, 5, 19, offset);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
        return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
    case R_AARCH64_CONDBR19:
        return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_AIMM 0x100
#define TCG_CT_CONST_LIMM 0x200
#define TCG_CT_CONST_ZERO 0x400
#define TCG_CT_CONST_MONE 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull

#ifdef CONFIG_SOFTMMU
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \
                          (1 << TCG_REG_X2) | (1 << TCG_REG_X3)))
#else
#define ALL_QLDST_REGS   ALL_GENERAL_REGS
#endif

/* Match a constant valid for addition (12-bit, optionally shifted).  */
static inline bool is_aimm(uint64_t val)
{
    return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
}
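
/*
 * For illustration: 0xfff and 0x123000 are both valid arithmetic
 * immediates (the plain or the LSL-12 form of the 12-bit field),
 * while 0x1001 is not, since its set bits span both fields.
 */
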
/* Match a constant valid for logical operations.  */
static inline bool is_limm(uint64_t val)
{
    /* Taking a simplified view of the logical immediates for now, ignoring
       the replication that can happen across the field.  Match bit patterns
       of the forms
           0....01....1
           0..01..10..0
       and their inverses.  */

    /* Make things easier below, by testing the form with msb clear. */
    if ((int64_t)val < 0) {
        val = ~val;
    }
    if (val == 0) {
        return false;
    }
    val += val & -val;
    return (val & (val - 1)) == 0;
}
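
/*
 * For illustration: 0x00fff000 passes the test above -- adding its
 * lowest set bit (0x1000) collapses the run of ones to 0x01000000,
 * a power of two.  A split pattern such as 0x0ff00ff0 fails, as
 * intended by the simplified view.
 */
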
/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid float32 immediate.  */
static bool is_fimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (extract32(v32, 0, 19) == 0
        && (extract32(v32, 25, 6) == 0x20
            || extract32(v32, 25, 6) == 0x1f)) {
        *cmode = 0xf;
        *imm8 = (extract32(v32, 31, 1) << 7)
              | (extract32(v32, 25, 1) << 6)
              | extract32(v32, 19, 6);
        return true;
    }
    return false;
}

/* Return true if v64 is a valid float64 immediate.  */
static bool is_fimm64(uint64_t v64, int *cmode, int *imm8)
{
    if (extract64(v64, 0, 48) == 0
        && (extract64(v64, 54, 9) == 0x100
            || extract64(v64, 54, 9) == 0x0ff)) {
        *cmode = 0xf;
        *imm8 = (extract64(v64, 63, 1) << 7)
              | (extract64(v64, 54, 1) << 6)
              | extract64(v64, 48, 6);
        return true;
    }
    return false;
}
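
/*
 * For illustration: float64 1.0 is 0x3ff0000000000000; its low 48 bits
 * are zero and bits [62:54] are 0x0ff, so it matches above and yields
 * imm8 = 0x70, which is the FMOV encoding of #1.0.
 */
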
/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }
    if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

enum aarch64_cond_code {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,     /* Unsigned greater or equal */
    COND_HS = COND_CS, /* ALIAS greater or equal */
    COND_CC = 0x3,     /* Unsigned less than */
    COND_LO = COND_CC, /* ALIAS Lower */
    COND_MI = 0x4,     /* Negative */
    COND_PL = 0x5,     /* Zero or greater */
    COND_VS = 0x6,     /* Overflow */
    COND_VC = 0x7,     /* No overflow */
    COND_HI = 0x8,     /* Unsigned greater than */
    COND_LS = 0x9,     /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
    COND_NV = 0xf, /* behaves like COND_AL here */
};

static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_LO,
    [TCG_COND_GTU] = COND_HI,
    [TCG_COND_GEU] = COND_HS,
    [TCG_COND_LEU] = COND_LS,
};

typedef enum {
    LDST_ST = 0,    /* store */
    LDST_LD = 1,    /* load */
    LDST_LD_S_X = 2,  /* load and sign-extend into Xt */
    LDST_LD_S_W = 3,  /* load and sign-extend into Wt */
} AArch64LdstType;

/* We encode the format of the insn into the beginning of the name, so that
   we can have the preprocessor help "typecheck" the insn vs the output
   function.  Arm didn't provide us with nice names for the formats, so we
   use the section number of the architecture reference manual in which the
   instruction group is described.  */
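/*
 * For illustration: with the tcg_out_insn macro defined further below,
 * tcg_out_insn(s, 3401, ADDI, ...) expands to
 * tcg_out_insn_3401(s, I3401_ADDI, ...), so pairing a constant with an
 * emitter of a different format fails to compile.
 */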
typedef enum {
    /* Compare and branch (immediate).  */
    I3201_CBZ       = 0x34000000,
    I3201_CBNZ      = 0x35000000,

    /* Conditional branch (immediate).  */
    I3202_B_C       = 0x54000000,

    /* Unconditional branch (immediate).  */
    I3206_B         = 0x14000000,
    I3206_BL        = 0x94000000,

    /* Unconditional branch (register).  */
    I3207_BR        = 0xd61f0000,
    I3207_BLR       = 0xd63f0000,
    I3207_RET       = 0xd65f0000,

    /* AdvSIMD load/store single structure.  */
    I3303_LD1R      = 0x0d40c000,

    /* Load literal for loading the address at pc-relative offset */
    I3305_LDR       = 0x58000000,
    I3305_LDR_v64   = 0x5c000000,
    I3305_LDR_v128  = 0x9c000000,

    /* Load/store register.  Described here as 3.3.12, but the helper
       that emits them can transform to 3.3.10 or 3.3.13.  */
    I3312_STRB      = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
    I3312_STRH      = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
    I3312_STRW      = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
    I3312_STRX      = 0x38000000 | LDST_ST << 22 | MO_64 << 30,

    I3312_LDRB      = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
    I3312_LDRH      = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
    I3312_LDRW      = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
    I3312_LDRX      = 0x38000000 | LDST_LD << 22 | MO_64 << 30,

    I3312_LDRSBW    = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
    I3312_LDRSHW    = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,

    I3312_LDRSBX    = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
    I3312_LDRSHX    = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
    I3312_LDRSWX    = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,

    I3312_LDRVS     = 0x3c000000 | LDST_LD << 22 | MO_32 << 30,
    I3312_STRVS     = 0x3c000000 | LDST_ST << 22 | MO_32 << 30,

    I3312_LDRVD     = 0x3c000000 | LDST_LD << 22 | MO_64 << 30,
    I3312_STRVD     = 0x3c000000 | LDST_ST << 22 | MO_64 << 30,

    I3312_LDRVQ     = 0x3c000000 | 3 << 22 | 0 << 30,
    I3312_STRVQ     = 0x3c000000 | 2 << 22 | 0 << 30,

    I3312_TO_I3310  = 0x00200800,
    I3312_TO_I3313  = 0x01000000,

    /* Load/store register pair instructions.  */
    I3314_LDP       = 0x28400000,
    I3314_STP       = 0x28000000,

    /* Add/subtract immediate instructions.  */
    I3401_ADDI      = 0x11000000,
    I3401_ADDSI     = 0x31000000,
    I3401_SUBI      = 0x51000000,
    I3401_SUBSI     = 0x71000000,

    /* Bitfield instructions.  */
    I3402_BFM       = 0x33000000,
    I3402_SBFM      = 0x13000000,
    I3402_UBFM      = 0x53000000,

    /* Extract instruction.  */
    I3403_EXTR      = 0x13800000,

    /* Logical immediate instructions.  */
    I3404_ANDI      = 0x12000000,
    I3404_ORRI      = 0x32000000,
    I3404_EORI      = 0x52000000,

    /* Move wide immediate instructions.  */
    I3405_MOVN      = 0x12800000,
    I3405_MOVZ      = 0x52800000,
    I3405_MOVK      = 0x72800000,

    /* PC relative addressing instructions.  */
    I3406_ADR       = 0x10000000,
    I3406_ADRP      = 0x90000000,

    /* Add/subtract shifted register instructions (without a shift).  */
    I3502_ADD       = 0x0b000000,
    I3502_ADDS      = 0x2b000000,
    I3502_SUB       = 0x4b000000,
    I3502_SUBS      = 0x6b000000,

    /* Add/subtract shifted register instructions (with a shift).  */
    I3502S_ADD_LSL  = I3502_ADD,

    /* Add/subtract with carry instructions.  */
    I3503_ADC       = 0x1a000000,
    I3503_SBC       = 0x5a000000,

    /* Conditional select instructions.  */
    I3506_CSEL      = 0x1a800000,
    I3506_CSINC     = 0x1a800400,
    I3506_CSINV     = 0x5a800000,
    I3506_CSNEG     = 0x5a800400,

    /* Data-processing (1 source) instructions.  */
    I3507_CLZ       = 0x5ac01000,
    I3507_RBIT      = 0x5ac00000,
    I3507_REV       = 0x5ac00000, /* + size << 10 */

    /* Data-processing (2 source) instructions.  */
    I3508_LSLV      = 0x1ac02000,
    I3508_LSRV      = 0x1ac02400,
    I3508_ASRV      = 0x1ac02800,
    I3508_RORV      = 0x1ac02c00,
    I3508_SMULH     = 0x9b407c00,
    I3508_UMULH     = 0x9bc07c00,
    I3508_UDIV      = 0x1ac00800,
    I3508_SDIV      = 0x1ac00c00,

    /* Data-processing (3 source) instructions.  */
    I3509_MADD      = 0x1b000000,
    I3509_MSUB      = 0x1b008000,

    /* Logical shifted register instructions (without a shift).  */
    I3510_AND       = 0x0a000000,
    I3510_BIC       = 0x0a200000,
    I3510_ORR       = 0x2a000000,
    I3510_ORN       = 0x2a200000,
    I3510_EOR       = 0x4a000000,
    I3510_EON       = 0x4a200000,
    I3510_ANDS      = 0x6a000000,

    /* Logical shifted register instructions (with a shift).  */
    I3502S_AND_LSR  = I3510_AND | (1 << 22),

    /* AdvSIMD copy */
    I3605_DUP       = 0x0e000400,
    I3605_INS       = 0x4e001c00,
    I3605_UMOV      = 0x0e003c00,

    /* AdvSIMD modified immediate */
    I3606_MOVI      = 0x0f000400,
    I3606_MVNI      = 0x2f000400,
    I3606_BIC       = 0x2f001400,
    I3606_ORR       = 0x0f001400,

    /* AdvSIMD scalar shift by immediate */
    I3609_SSHR      = 0x5f000400,
    I3609_SSRA      = 0x5f001400,
    I3609_SHL       = 0x5f005400,
    I3609_USHR      = 0x7f000400,
    I3609_USRA      = 0x7f001400,
    I3609_SLI       = 0x7f005400,

    /* AdvSIMD scalar three same */
    I3611_SQADD     = 0x5e200c00,
    I3611_SQSUB     = 0x5e202c00,
    I3611_CMGT      = 0x5e203400,
    I3611_CMGE      = 0x5e203c00,
    I3611_SSHL      = 0x5e204400,
    I3611_ADD       = 0x5e208400,
    I3611_CMTST     = 0x5e208c00,
    I3611_UQADD     = 0x7e200c00,
    I3611_UQSUB     = 0x7e202c00,
    I3611_CMHI      = 0x7e203400,
    I3611_CMHS      = 0x7e203c00,
    I3611_USHL      = 0x7e204400,
    I3611_SUB       = 0x7e208400,
    I3611_CMEQ      = 0x7e208c00,

    /* AdvSIMD scalar two-reg misc */
    I3612_CMGT0     = 0x5e208800,
    I3612_CMEQ0     = 0x5e209800,
    I3612_CMLT0     = 0x5e20a800,
    I3612_ABS       = 0x5e20b800,
    I3612_CMGE0     = 0x7e208800,
    I3612_CMLE0     = 0x7e209800,
    I3612_NEG       = 0x7e20b800,

    /* AdvSIMD shift by immediate */
    I3614_SSHR      = 0x0f000400,
    I3614_SSRA      = 0x0f001400,
    I3614_SHL       = 0x0f005400,
    I3614_SLI       = 0x2f005400,
    I3614_USHR      = 0x2f000400,
    I3614_USRA      = 0x2f001400,

    /* AdvSIMD three same.  */
    I3616_ADD       = 0x0e208400,
    I3616_AND       = 0x0e201c00,
    I3616_BIC       = 0x0e601c00,
    I3616_BIF       = 0x2ee01c00,
    I3616_BIT       = 0x2ea01c00,
    I3616_BSL       = 0x2e601c00,
    I3616_EOR       = 0x2e201c00,
    I3616_MUL       = 0x0e209c00,
    I3616_ORR       = 0x0ea01c00,
    I3616_ORN       = 0x0ee01c00,
    I3616_SUB       = 0x2e208400,
    I3616_CMGT      = 0x0e203400,
    I3616_CMGE      = 0x0e203c00,
    I3616_CMTST     = 0x0e208c00,
    I3616_CMHI      = 0x2e203400,
    I3616_CMHS      = 0x2e203c00,
    I3616_CMEQ      = 0x2e208c00,
    I3616_SMAX      = 0x0e206400,
    I3616_SMIN      = 0x0e206c00,
    I3616_SSHL      = 0x0e204400,
    I3616_SQADD     = 0x0e200c00,
    I3616_SQSUB     = 0x0e202c00,
    I3616_UMAX      = 0x2e206400,
    I3616_UMIN      = 0x2e206c00,
    I3616_UQADD     = 0x2e200c00,
    I3616_UQSUB     = 0x2e202c00,
    I3616_USHL      = 0x2e204400,

    /* AdvSIMD two-reg misc.  */
    I3617_CMGT0     = 0x0e208800,
    I3617_CMEQ0     = 0x0e209800,
    I3617_CMLT0     = 0x0e20a800,
    I3617_CMGE0     = 0x2e208800,
    I3617_CMLE0     = 0x2e209800,
    I3617_NOT       = 0x2e205800,
    I3617_ABS       = 0x0e20b800,
    I3617_NEG       = 0x2e20b800,

    /* System instructions.  */
    NOP             = 0xd503201f,
    DMB_ISH         = 0xd50338bf,
    DMB_LD          = 0x00000100,
    DMB_ST          = 0x00000080,
} AArch64Insn;

static inline uint32_t tcg_in32(TCGContext *s)
{
    uint32_t v = *(uint32_t *)s->code_ptr;
    return v;
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)

static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rt, TCGReg rn, unsigned size)
{
    tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30));
}

static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn,
                              int imm19, TCGReg rt)
{
    tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt);
}

static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rt, int imm19)
{
    tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt);
}

static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
                              TCGCond c, int imm19)
{
    tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
}

static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
{
    tcg_out32(s, insn | (imm26 & 0x03ffffff));
}

static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn)
{
    tcg_out32(s, insn | rn << 5);
}

static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
                              TCGReg r1, TCGReg r2, TCGReg rn,
                              tcg_target_long ofs, bool pre, bool w)
{
    insn |= 1u << 31; /* ext */
    insn |= pre << 24;
    insn |= w << 23;

    tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
    insn |= (ofs & (0x7f << 3)) << (15 - 3);

    tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
}

static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, uint64_t aimm)
{
    if (aimm > 0xfff) {
        tcg_debug_assert((aimm & 0xfff) == 0);
        aimm >>= 12;
        tcg_debug_assert(aimm <= 0xfff);
        aimm |= 1 << 12;  /* apply LSL 12 */
    }
    tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
}

/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4
   (Logical immediate).  Both insn groups have N, IMMR and IMMS fields
   that feed the DecodeBitMasks pseudo function.  */
static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, int n, int immr, int imms)
{
    tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10
              | rn << 5 | rd);
}

#define tcg_out_insn_3404  tcg_out_insn_3402

static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, int imms)
{
    tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10
              | rn << 5 | rd);
}

/* This function is used for the Move (wide immediate) instruction group.
   Note that SHIFT is a full shift count, not the 2 bit HW field.  */
static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, uint16_t half, unsigned shift)
{
    tcg_debug_assert((shift & ~0x30) == 0);
    tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
}
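
/*
 * For illustration: a full shift count of 48 (0x30) shifted left by
 * 21 - 4 = 17 lands in bits [22:21], i.e. hw = 3, as the encoding
 * requires; the assert rejects any count other than 0/16/32/48.
 */
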
static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, int64_t disp)
{
    tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register), for
   the rare occasion when we actually want to supply a shift amount.  */
static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
                                      TCGType ext, TCGReg rd, TCGReg rn,
                                      TCGReg rm, int imm6)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register),
   and 3.5.10 (Logical shifted register), for the vast majority of cases
   when we don't want to apply a shift.  Thus it can also be used for
   3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source).  */
static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
}

#define tcg_out_insn_3503  tcg_out_insn_3502
#define tcg_out_insn_3508  tcg_out_insn_3502
#define tcg_out_insn_3510  tcg_out_insn_3502

static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd
              | tcg_cond_to_aarch64[c] << 12);
}

static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | ext << 31 | rn << 5 | rd);
}

static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
}

static void tcg_out_insn_3605(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, TCGReg rn, int dst_idx, int src_idx)
{
    /* Note that bit 11 set means general register input.  Therefore
       we can handle both register sets with one function.  */
    tcg_out32(s, insn | q << 30 | (dst_idx << 16) | (src_idx << 11)
              | (rd & 0x1f) | (~rn & 0x20) << 6 | (rn & 0x1f) << 5);
}

static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, bool op, int cmode, uint8_t imm8)
{
    tcg_out32(s, insn | q << 30 | op << 29 | cmode << 12 | (rd & 0x1f)
              | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
}

static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, unsigned immhb)
{
    tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
                              unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
                              unsigned size, TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, TCGReg rn, unsigned immhb)
{
    tcg_out32(s, insn | q << 30 | immhb << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3616(TCGContext *s, AArch64Insn insn, bool q,
                              unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | q << 30 | (size << 22) | (rm & 0x1f) << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
                              unsigned size, TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | q << 30 | (size << 22)
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg base, TCGType ext,
                              TCGReg regoff)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
              0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, intptr_t offset)
{
    tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10
              | rn << 5 | (rd & 0x1f));
}

/* Register to register move using ORR (shifted register with no shift). */
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
{
    tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm);
}

/* Register to register move using ADDI (move to/from SP).  */
static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
}

/* This function is used for the Logical (immediate) instruction group.
   The value of LIMM must satisfy IS_LIMM.  See the comment above about
   only supporting simplified logical immediates.  */
static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
                             TCGReg rd, TCGReg rn, uint64_t limm)
{
    unsigned h, l, r, c;

    tcg_debug_assert(is_limm(limm));

    h = clz64(limm);
    l = ctz64(limm);
    if (l == 0) {
        r = 0;                  /* form 0....01....1 */
        c = ctz64(~limm) - 1;
        if (h == 0) {
            r = clz64(~limm);   /* form 1..10..01..1 */
            c = r + ctz64(limm) - 1;
        }
    } else {
        r = 64 - l;             /* form 1....10....0 or 0..01..10..0 */
        c = r - h - 1;
    }
    if (ext == TCG_TYPE_I32) {
        r &= 31;
        c &= 31;
    }

    tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    bool q = type == TCG_TYPE_V128;
    int cmode, imm8, i;

    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        imm8 = (uint8_t)v64;
        tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_insn(s, 3606, MOVI, q, rd, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff);
        tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8);
        return;
    } else if (vece == MO_32) {
        uint32_t v32 = v64;
        uint32_t n32 = ~v32;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8) ||
            is_fimm32(v32, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(n32, &cmode, &imm8) ||
            is_soimm32(n32, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            tcg_out_insn(s, 3606, ORR, q, rd, 0, i, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(n32, &cmode, &imm8);
        if (i) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            tcg_out_insn(s, 3606, BIC, q, rd, 0, i, extract32(n32, i * 4, 8));
            return;
        }
    } else if (is_fimm64(v64, &cmode, &imm8)) {
        tcg_out_insn(s, 3606, MOVI, q, rd, 1, cmode, imm8);
        return;
    }

    /*
     * As a last resort, load from the constant pool.  Sadly there
     * is no LD1R (literal), so store the full 16-byte vector.
     */
    if (type == TCG_TYPE_V128) {
        new_pool_l2(s, R_AARCH64_CONDBR19, s->code_ptr, 0, v64, v64);
        tcg_out_insn(s, 3305, LDR_v128, 0, rd);
    } else {
        new_pool_label(s, v64, R_AARCH64_CONDBR19, s->code_ptr, 0);
        tcg_out_insn(s, 3305, LDR_v64, 0, rd);
    }
}
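
/*
 * For illustration: v64 = 0x00ff00ff00ff00ff has bytes 0, 2, 4 and 6
 * all-ones, so the byte-mask loop above builds imm8 = 0x55 and a
 * single MOVI (op=1, cmode=0xe) materializes the whole vector.
 */
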
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int is_q = type - TCG_TYPE_V64;
    tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0);
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    TCGReg temp = TCG_REG_TMP;

    if (offset < -0xffffff || offset > 0xffffff) {
        tcg_out_movi(s, TCG_TYPE_PTR, temp, offset);
        tcg_out_insn(s, 3502, ADD, 1, temp, temp, base);
        base = temp;
    } else {
        AArch64Insn add_insn = I3401_ADDI;

        if (offset < 0) {
            add_insn = I3401_SUBI;
            offset = -offset;
        }
        if (offset & 0xfff000) {
            tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000);
            base = temp;
        }
        if (offset & 0xfff) {
            tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff);
            base = temp;
        }
    }

    tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long value)
{
    tcg_target_long svalue = value;
    tcg_target_long ivalue = ~value;
    tcg_target_long t0, t1, t2;
    AArch64Insn opc;
    int s0, s1;

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(rd < 32);
        break;
    default:
        g_assert_not_reached();
    }

    /* For 32-bit values, discard potential garbage in value.  For 64-bit
       values within [2**31, 2**32-1], we can create smaller sequences by
       interpreting this as a negative 32-bit number, while ensuring that
       the high 32 bits are cleared by setting SF=0.  */
    if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
        svalue = (int32_t)value;
        value = (uint32_t)value;
        ivalue = (uint32_t)ivalue;
        type = TCG_TYPE_I32;
    }

    /* Speed things up by handling the common case of small positive
       and negative values specially.  */
    if ((value & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0);
        return;
    } else if ((ivalue & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0);
        return;
    }

    /* Check for bitfield immediates.  For the benefit of 32-bit quantities,
       use the sign-extended value.  That lets us match rotated values such
       as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */
    if (is_limm(svalue)) {
        tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue);
        return;
    }

    /* Look for host pointer values within 4G of the PC.  This happens
       often when loading pointers to QEMU's own data structures.  */
    if (type == TCG_TYPE_I64) {
        intptr_t src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
        tcg_target_long disp = value - src_rx;
        if (disp == sextract64(disp, 0, 21)) {
            tcg_out_insn(s, 3406, ADR, rd, disp);
            return;
        }
        disp = (value >> 12) - (src_rx >> 12);
        if (disp == sextract64(disp, 0, 21)) {
            tcg_out_insn(s, 3406, ADRP, rd, disp);
            if (value & 0xfff) {
                tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff);
            }
            return;
        }
    }

    /* Would it take fewer insns to begin with MOVN?  */
    if (ctpop64(value) >= 32) {
        t0 = ivalue;
        opc = I3405_MOVN;
    } else {
        t0 = value;
        opc = I3405_MOVZ;
    }
    s0 = ctz64(t0) & (63 & -16);
    t1 = t0 & ~(0xffffUL << s0);
    s1 = ctz64(t1) & (63 & -16);
    t2 = t1 & ~(0xffffUL << s1);
    if (t2 == 0) {
        tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0);
        if (t1 != 0) {
            tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1);
        }
        return;
    }

    /* For more than 2 insns, dump it into the constant pool.  */
    new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0);
    tcg_out_insn(s, 3305, LDR, 0, rd);
}
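
/*
 * For illustration: value 0x123400005678 has fewer than 32 set bits,
 * so t0 = value, s0 = 0, t1 = 0x123400000000, s1 = 32 and t2 = 0;
 * two insns suffice: MOVZ rd, #0x5678 then MOVK rd, #0x1234, lsl #32.
 */
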
/* Define something more legible for general use.  */
#define tcg_out_ldst_r  tcg_out_insn_3310

static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
                         TCGReg rn, intptr_t offset, int lgsize)
{
    /* If the offset is naturally aligned and in range, then we can
       use the scaled uimm12 encoding */
    if (offset >= 0 && !(offset & ((1 << lgsize) - 1))) {
        uintptr_t scaled_uimm = offset >> lgsize;
        if (scaled_uimm <= 0xfff) {
            tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
            return;
        }
    }

    /* Small signed offsets can use the unscaled encoding.  */
    if (offset >= -256 && offset < 256) {
        tcg_out_insn_3312(s, insn, rd, rn, offset);
        return;
    }

    /* Worst-case scenario, move offset to temp register, use reg offset.  */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
}
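
/*
 * For illustration: an 8-byte load at offset 32760 (0x7ff8, lgsize 3)
 * is aligned and scales to 0xfff, the largest uimm12; offset 32761 is
 * unaligned and outside the unscaled range, so it falls through to the
 * movi + register-offset path.
 */
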
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < 32 && arg < 32) {
            tcg_out_movr(s, type, ret, arg);
            break;
        } else if (ret < 32) {
            tcg_out_insn(s, 3605, UMOV, type, ret, arg, 0, 0);
            break;
        } else if (arg < 32) {
            tcg_out_insn(s, 3605, INS, 0, ret, arg, 4 << type, 0);
            break;
        }
        /* FALLTHRU */

    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= 32 && arg >= 32);
        tcg_out_insn(s, 3616, ORR, 0, 0, ret, arg, arg);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= 32 && arg >= 32);
        tcg_out_insn(s, 3616, ORR, 1, 0, ret, arg, arg);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t ofs)
{
    AArch64Insn insn;
    int lgsz;

    switch (type) {
    case TCG_TYPE_I32:
        insn = (ret < 32 ? I3312_LDRW : I3312_LDRVS);
        lgsz = 2;
        break;
    case TCG_TYPE_I64:
        insn = (ret < 32 ? I3312_LDRX : I3312_LDRVD);
        lgsz = 3;
        break;
    case TCG_TYPE_V64:
        insn = I3312_LDRVD;
        lgsz = 3;
        break;
    case TCG_TYPE_V128:
        insn = I3312_LDRVQ;
        lgsz = 4;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_ldst(s, insn, ret, base, ofs, lgsz);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t ofs)
{
    AArch64Insn insn;
    int lgsz;

    switch (type) {
    case TCG_TYPE_I32:
        insn = (src < 32 ? I3312_STRW : I3312_STRVS);
        lgsz = 2;
        break;
    case TCG_TYPE_I64:
        insn = (src < 32 ? I3312_STRX : I3312_STRVD);
        lgsz = 3;
        break;
    case TCG_TYPE_V64:
        insn = I3312_STRVD;
        lgsz = 3;
        break;
    case TCG_TYPE_V128:
        insn = I3312_STRVQ;
        lgsz = 4;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_ldst(s, insn, src, base, ofs, lgsz);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (type <= TCG_TYPE_I64 && val == 0) {
        tcg_out_st(s, type, TCG_REG_XZR, base, ofs);
        return true;
    }
    return false;
}

static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
                               TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, TCGReg rm, unsigned int a)
{
    tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}

static inline void tcg_out_shl(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int bits = ext ? 64 : 32;
    int max = bits - 1;
    tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max));
}

static inline void tcg_out_shr(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_ubfm(s, ext, rd, rn, m & max, max);
}

static inline void tcg_out_sar(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_sbfm(s, ext, rd, rn, m & max, max);
}

static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
                                TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_extr(s, ext, rd, rn, rn, m & max);
}

static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
                                TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_extr(s, ext, rd, rn, rn, -m & max);
}
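
/*
 * For illustration: AArch64 only has rotate-right, so a left rotate
 * by m becomes EXTR with amount (-m & max); e.g. a 64-bit rotl by 8
 * is emitted as a rotr by 56.
 */
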
static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
                               TCGReg rn, unsigned lsb, unsigned width)
{
    unsigned size = ext ? 64 : 32;
    unsigned a = (size - lsb) & (size - 1);
    unsigned b = width - 1;
    tcg_out_bfm(s, ext, rd, rn, a, b);
}
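
/*
 * For illustration: depositing an 8-bit field at lsb 8 of a 32-bit
 * value computes a = (32 - 8) & 31 = 24 and b = 7, i.e.
 * BFM wd, wn, #24, #7, which is the BFI wd, wn, #8, #8 alias.
 */
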
static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
                        tcg_target_long b, bool const_b)
{
    if (const_b) {
        /* Using CMP or CMN aliases.  */
        if (b >= 0) {
            tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
        } else {
            tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
        }
    } else {
        /* Using CMP alias SUBS wzr, Wn, Wm */
        tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
    }
}

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
    tcg_debug_assert(offset == sextract64(offset, 0, 26));
    tcg_out_insn(s, 3206, B, offset);
}

static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
    if (offset == sextract64(offset, 0, 26)) {
        tcg_out_insn(s, 3206, B, offset);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
        tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
    }
}

static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
{
    tcg_out_insn(s, 3207, BLR, reg);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
    if (offset == sextract64(offset, 0, 26)) {
        tcg_out_insn(s, 3206, BL, offset);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
        tcg_out_callr(s, TCG_REG_TMP);
    }
}

void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    tcg_insn_unit i1, i2;
    TCGType rt = TCG_TYPE_I64;
    TCGReg  rd = TCG_REG_TMP;
    uint64_t pair;

    ptrdiff_t offset = addr - jmp_rx;

    if (offset == sextract64(offset, 0, 26)) {
        i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
        i2 = NOP;
    } else {
        offset = (addr >> 12) - (jmp_rx >> 12);

        /* patch ADRP */
        i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
        /* patch ADDI */
        i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
    }
    pair = (uint64_t)i2 << 32 | i1;
    qatomic_set((uint64_t *)jmp_rw, pair);
    flush_idcache_range(jmp_rx, jmp_rw, 8);
}

static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
{
    if (!l->has_value) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
        tcg_out_insn(s, 3206, B, 0);
    } else {
        tcg_out_goto(s, l->u.value_ptr);
    }
}

static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
                           TCGArg b, bool b_const, TCGLabel *l)
{
    intptr_t offset;
    bool need_cmp;

    if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
        need_cmp = false;
    } else {
        need_cmp = true;
        tcg_out_cmp(s, ext, a, b, b_const);
    }

    if (!l->has_value) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
        offset = tcg_in32(s) >> 5;
    } else {
        offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2;
        tcg_debug_assert(offset == sextract64(offset, 0, 19));
    }

    if (need_cmp) {
        tcg_out_insn(s, 3202, B_C, c, offset);
    } else if (c == TCG_COND_EQ) {
        tcg_out_insn(s, 3201, CBZ, ext, a, offset);
    } else {
        tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
    }
}

static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* REV, REV16, REV32 */
    tcg_out_insn_3507(s, I3507_REV | (s_bits << 10), ext, rd, rn);
}

static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
    int bits = (8 << s_bits) - 1;
    tcg_out_sbfm(s, ext, rd, rn, 0, bits);
}

static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
    int bits = (8 << s_bits) - 1;
    tcg_out_ubfm(s, 0, rd, rn, 0, bits);
}

static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                            TCGReg rn, int64_t aimm)
{
    if (aimm >= 0) {
        tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
    } else {
        tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
    }
}

static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
                            TCGReg rh, TCGReg al, TCGReg ah,
                            tcg_target_long bl, tcg_target_long bh,
                            bool const_bl, bool const_bh, bool sub)
{
    TCGReg orig_rl = rl;
    AArch64Insn insn;

    if (rl == ah || (!const_bh && rl == bh)) {
        rl = TCG_REG_TMP;
    }

    if (const_bl) {
        if (bl < 0) {
            bl = -bl;
            insn = sub ? I3401_ADDSI : I3401_SUBSI;
        } else {
            insn = sub ? I3401_SUBSI : I3401_ADDSI;
        }

        if (unlikely(al == TCG_REG_XZR)) {
            /* ??? We want to allow al to be zero for the benefit of
               negation via subtraction.  However, that leaves open the
               possibility of adding 0+const in the low part, and the
               immediate add instructions encode XSP not XZR.  Don't try
               anything more elaborate here than loading another zero.  */
            al = TCG_REG_TMP;
            tcg_out_movi(s, ext, al, 0);
        }
        tcg_out_insn_3401(s, insn, ext, rl, al, bl);
    } else {
        tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
    }

    insn = I3503_ADC;
    if (const_bh) {
        /* Note that the only two constants we support are 0 and -1, and
           that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa.  */
        if ((bh != 0) ^ sub) {
            insn = I3503_SBC;
        }
        bh = TCG_REG_XZR;
    } else if (sub) {
        insn = I3503_SBC;
    }
    tcg_out_insn_3503(s, insn, ext, rh, ah, bh);

    tcg_out_mov(s, ext, orig_rl, rl);
}
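
/*
 * For illustration of the identity used above: SBC rd, rn, xzr computes
 * rn + ~0 + C = rn - 1 + C, which is exactly ADC rd, rn, #-1; so a
 * constant high part of -1 in an addition is emitted as SBC against XZR.
 */
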
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    static const uint32_t sync[] = {
        [0 ... TCG_MO_ALL]            = DMB_ISH | DMB_LD | DMB_ST,
        [TCG_MO_ST_ST]                = DMB_ISH | DMB_ST,
        [TCG_MO_LD_LD]                = DMB_ISH | DMB_LD,
        [TCG_MO_LD_ST]                = DMB_ISH | DMB_LD,
        [TCG_MO_LD_ST | TCG_MO_LD_LD] = DMB_ISH | DMB_LD,
    };
    tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}

static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
                         TCGReg a0, TCGArg b, bool const_b, bool is_ctz)
{
    TCGReg a1 = a0;
    if (is_ctz) {
        a1 = TCG_REG_TMP;
        tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
    }
    if (const_b && b == (ext ? 64 : 32)) {
        tcg_out_insn(s, 3507, CLZ, ext, d, a1);
    } else {
        AArch64Insn sel = I3506_CSEL;

        tcg_out_cmp(s, ext, a0, 0, 1);
        tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1);

        if (const_b) {
            if (b == -1) {
                b = TCG_REG_XZR;
                sel = I3506_CSINV;
            } else if (b == 0) {
                b = TCG_REG_XZR;
            } else {
                tcg_out_movi(s, ext, d, b);
                b = d;
            }
        }
        tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE);
    }
}

#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_ldub_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_16] = helper_be_lduw_mmu,
    [MO_32] = helper_be_ldul_mmu,
    [MO_64] = helper_be_ldq_mmu,
#else
    [MO_16] = helper_le_lduw_mmu,
    [MO_32] = helper_le_ldul_mmu,
    [MO_64] = helper_le_ldq_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target);
    tcg_debug_assert(offset == sextract64(offset, 0, 21));
    tcg_out_insn(s, 3406, ADR, rd, offset);
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
    tcg_out_adr(s, TCG_REG_X3, lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]);
    if (opc & MO_SIGN) {
        tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
    } else {
        tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
    }

    tcg_out_goto(s, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
    tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi);
    tcg_out_adr(s, TCG_REG_X4, lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);
    tcg_out_goto(s, lb->raddr);
    return true;
}

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGType ext, TCGReg data_reg, TCGReg addr_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
}

/* We expect to use a 7-bit scaled negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512);

/* These offsets are built into the LDP below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);

/* Load and compare a TLB entry, emitting the conditional jump to the
   slow path for the failure case, which will be patched later when finalizing
   the slow path.  Generated code returns the host addend in X1,
   clobbers X0,X2,X3,TMP. */
static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
                             tcg_insn_unit **label_ptr, int mem_index,
                             bool is_read)
{
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_mask = (1u << a_bits) - 1;
    unsigned s_mask = (1u << s_bits) - 1;
    TCGReg x3;
    TCGType mask_type;
    uint64_t compare_mask;

    mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
                 ? TCG_TYPE_I64 : TCG_TYPE_I32);

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}.  */
    tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0,
                 TLB_MASK_TABLE_OFS(mem_index), 1, 0);

    /* Extract the TLB index from the address into X0.  */
    tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
                 TCG_REG_X0, TCG_REG_X0, addr_reg,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1.  */
    tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0);

    /* Load the tlb comparator into X0, and the fast path addend into X1.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read
               ? offsetof(CPUTLBEntry, addr_read)
               : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1,
               offsetof(CPUTLBEntry, addend));

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if (a_bits >= s_bits) {
        x3 = addr_reg;
    } else {
        tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
                     TCG_REG_X3, addr_reg, s_mask - a_mask);
        x3 = TCG_REG_X3;
    }
    compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;

    /* Store the page mask part of the address into X3.  */
    tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64,
                     TCG_REG_X3, x3, compare_mask);

    /* Perform the address comparison. */
    tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0);

    /* If not equal, we jump to the slow path. */
    *label_ptr = s->code_ptr;
    tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
}
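
/*
 * For illustration, the fast path emitted above is, schematically:
 *     ldp  x0, x1, [env + TLB_MASK_TABLE_OFS]    // mask, table
 *     and  x0, x0, addr, lsr #(PAGE_BITS - ENTRY_BITS)
 *     add  x1, x1, x0                            // CPUTLBEntry address
 *     ldr  x0, [x1, #addr_read/addr_write]       // comparator
 *     ldr  x1, [x1, #addend]
 *     and  x3, addr, #compare_mask
 *     cmp  x0, x3
 *     b.ne slow_path
 */
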
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
                                   TCGReg data_r, TCGReg addr_r,
                                   TCGType otype, TCGReg off_r)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((memop & MO_BSWAP) == 0);

    switch (memop & MO_SSIZE) {
    case MO_UB:
        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
        break;
    case MO_SB:
        tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
                       data_r, addr_r, otype, off_r);
        break;
    case MO_UW:
        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
        break;
    case MO_SW:
        tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
                       data_r, addr_r, otype, off_r);
        break;
    case MO_UL:
        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
        break;
    case MO_SL:
        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
        break;
    case MO_Q:
        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
                                   TCGReg data_r, TCGReg addr_r,
                                   TCGType otype, TCGReg off_r)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((memop & MO_BSWAP) == 0);

    switch (memop & MO_SIZE) {
    case MO_8:
        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
        break;
    case MO_16:
        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
        break;
    case MO_32:
        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
        break;
    case MO_64:
        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType ext)
{
    MemOp memop = get_memop(oi);
    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;

    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
    tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                           TCG_REG_X1, otype, addr_reg);
    add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (USE_GUEST_BASE) {
        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                               TCG_REG_GUEST_BASE, otype, addr_reg);
    } else {
        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
    }
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;

    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
    tcg_out_qemu_st_direct(s, memop, data_reg,
                           TCG_REG_X1, otype, addr_reg);
    add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
                        data_reg, addr_reg, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (USE_GUEST_BASE) {
        tcg_out_qemu_st_direct(s, memop, data_reg,
                               TCG_REG_GUEST_BASE, otype, addr_reg);
    } else {
        tcg_out_qemu_st_direct(s, memop, data_reg,
                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
    }
#endif /* CONFIG_SOFTMMU */
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    /* 99% of the time, we can signal the use of extension registers
       by looking to see if the opcode handles 64-bit data.  */
    TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;

    /* Hoist the loads of the most common arguments.  */
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    /* Some operands are defined with "rZ" constraint, a register or
       the zero register.  These need not actually test args[I] == 0.  */
#define REG0(I)  (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        if (a0 == 0) {
            tcg_out_goto_long(s, tcg_code_gen_epilogue);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
            tcg_out_goto_long(s, tb_ret_addr);
        }
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset != NULL) {
            /* TCG_TARGET_HAS_direct_jump */
            /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
               write can be used to patch the target address. */
            if ((uintptr_t)s->code_ptr & 7) {
                tcg_out32(s, NOP);
            }
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            /* actual branch destination will be patched by
               tb_target_set_jmp_target later. */
            tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
            tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
        } else {
            /* !TCG_TARGET_HAS_direct_jump */
            tcg_debug_assert(s->tb_jmp_target_addr != NULL);
            intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2;
            tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
        }
        tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_insn(s, 3207, BR, a0);
        break;

    case INDEX_op_br:
        tcg_out_goto_label(s, arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0);
        break;
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1);
        break;
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
        break;

    case INDEX_op_add_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addsubi(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addsubi(s, ext, a0, a1, -a2);
        } else {
            tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i64:
    case INDEX_op_neg_i32:
        tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
        break;

    case INDEX_op_and_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_andc_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_eqv_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i64:
    case INDEX_op_not_i32:
        tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
        break;

    case INDEX_op_mul_i64:
    case INDEX_op_mul_i32:
        tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
        break;

    case INDEX_op_div_i64:
    case INDEX_op_div_i32:
        tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
    case INDEX_op_divu_i32:
        tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
        break;

    case INDEX_op_rem_i64:
    case INDEX_op_rem_i32:
        tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2);
        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
        break;
    case INDEX_op_remu_i64:
    case INDEX_op_remu_i32:
        tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2);
        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
        break;

    case INDEX_op_shl_i64:
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_shl(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i64:
    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_shr(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i64:
    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_sar(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_rotr_i64:
    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_rotr(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i64:
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_rotl(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
            tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP);
        }
        break;

    case INDEX_op_clz_i64:
    case INDEX_op_clz_i32:
        tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
        break;
    case INDEX_op_ctz_i64:
    case INDEX_op_ctz_i32:
        tcg_out_cltz(s, ext, a0, a1, a2, c2, true);
        break;

    case INDEX_op_brcond_i32:
        a1 = (int32_t)a1;
        /* FALLTHRU */
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_setcond_i64:
        tcg_out_cmp(s, ext, a1, a2, c2);
        /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond).  */
        tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
                     TCG_REG_XZR, tcg_invert_cond(args[3]));
        break;

    case INDEX_op_movcond_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_movcond_i64:
        tcg_out_cmp(s, ext, a1, a2, c2);
        tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
        break;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, ext);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, REG0(0), a1, a2);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a0);
        }
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
        break;
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap16_i32:
        tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            /* Output must be sign-extended. */
            tcg_out_sxt(s, ext, MO_16, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            /* Output must be zero-extended, but input isn't. */
            tcg_out_uxt(s, MO_16, a0, a0);
        }
        break;

2175 case INDEX_op_ext8s_i64:
2176 case INDEX_op_ext8s_i32:
2177 tcg_out_sxt(s, ext, MO_8, a0, a1);
2179 case INDEX_op_ext16s_i64:
2180 case INDEX_op_ext16s_i32:
2181 tcg_out_sxt(s, ext, MO_16, a0, a1);
2183 case INDEX_op_ext_i32_i64:
2184 case INDEX_op_ext32s_i64:
2185 tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
2187 case INDEX_op_ext8u_i64:
2188 case INDEX_op_ext8u_i32:
2189 tcg_out_uxt(s, MO_8, a0, a1);
2191 case INDEX_op_ext16u_i64:
2192 case INDEX_op_ext16u_i32:
2193 tcg_out_uxt(s, MO_16, a0, a1);
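/* A 32-bit register move implicitly zeroes bits 63:32 of the
 * destination, which is exactly the zero-extension wanted here. */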
2195 case INDEX_op_extu_i32_i64:
2196 case INDEX_op_ext32u_i64:
2197 tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
2200 case INDEX_op_deposit_i64:
2201 case INDEX_op_deposit_i32:
2202 tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
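/* extract/sextract map directly onto the UBFM/SBFM bitfield insns,
 * with immr = pos and imms = pos + len - 1. */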
2205 case INDEX_op_extract_i64:
2206 case INDEX_op_extract_i32:
2207 tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
2210 case INDEX_op_sextract_i64:
2211 case INDEX_op_sextract_i32:
2212 tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
2215 case INDEX_op_extract2_i64:
2216 case INDEX_op_extract2_i32:
2217 tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
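/* Double-word add/sub: tcg_out_addsub2 emits a flag-setting op for
 * the low half (ADDS/SUBS) followed by a carry-consuming op for the
 * high half (ADC/SBC), using immediate forms where possible. */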
2220 case INDEX_op_add2_i32:
2221 tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
2222 (int32_t)args[4], args[5], const_args[4],
2223 const_args[5], false);
2225 case INDEX_op_add2_i64:
2226 tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
2227 args[5], const_args[4], const_args[5], false);
2229 case INDEX_op_sub2_i32:
2230 tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
2231 (int32_t)args[4], args[5], const_args[4],
2232 const_args[5], true);
2234 case INDEX_op_sub2_i64:
2235 tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
2236 args[5], const_args[4], const_args[5], true);
2239 case INDEX_op_muluh_i64:
2240 tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
2242 case INDEX_op_mulsh_i64:
2243 tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
2250 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2251 case INDEX_op_mov_i64:
2252 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2253 default:
2254 g_assert_not_reached();
2260 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2261 unsigned vecl, unsigned vece,
2262 const TCGArg args[TCG_MAX_OP_ARGS],
2263 const int const_args[TCG_MAX_OP_ARGS])
2265 static const AArch64Insn cmp_vec_insn[16] = {
2266 [TCG_COND_EQ] = I3616_CMEQ,
2267 [TCG_COND_GT] = I3616_CMGT,
2268 [TCG_COND_GE] = I3616_CMGE,
2269 [TCG_COND_GTU] = I3616_CMHI,
2270 [TCG_COND_GEU] = I3616_CMHS,
2272 static const AArch64Insn cmp_scalar_insn[16] = {
2273 [TCG_COND_EQ] = I3611_CMEQ,
2274 [TCG_COND_GT] = I3611_CMGT,
2275 [TCG_COND_GE] = I3611_CMGE,
2276 [TCG_COND_GTU] = I3611_CMHI,
2277 [TCG_COND_GEU] = I3611_CMHS,
2279 static const AArch64Insn cmp0_vec_insn[16] = {
2280 [TCG_COND_EQ] = I3617_CMEQ0,
2281 [TCG_COND_GT] = I3617_CMGT0,
2282 [TCG_COND_GE] = I3617_CMGE0,
2283 [TCG_COND_LT] = I3617_CMLT0,
2284 [TCG_COND_LE] = I3617_CMLE0,
2286 static const AArch64Insn cmp0_scalar_insn[16] = {
2287 [TCG_COND_EQ] = I3612_CMEQ0,
2288 [TCG_COND_GT] = I3612_CMGT0,
2289 [TCG_COND_GE] = I3612_CMGE0,
2290 [TCG_COND_LT] = I3612_CMLT0,
2291 [TCG_COND_LE] = I3612_CMLE0,
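/* Entries missing from the tables above are zero; TCG_COND_NE is
 * handled specially below, and the other absent conditions are
 * reached by swapping the operands and the condition. */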
2294 TCGType type = vecl + TCG_TYPE_V64;
2295 unsigned is_q = vecl;
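/* A 64-bit "vector" with a single 64-bit lane lives in a D register;
 * use the AdvSIMD scalar insn forms instead of the vector forms in
 * that case. */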
2296 bool is_scalar = !is_q && vece == MO_64;
2297 TCGArg a0, a1, a2, a3;
2298 int cmode, imm8;
2300 a0 = args[0];
2301 a1 = args[1];
2302 a2 = args[2];
2305 case INDEX_op_ld_vec:
2306 tcg_out_ld(s, type, a0, a1, a2);
2308 case INDEX_op_st_vec:
2309 tcg_out_st(s, type, a0, a1, a2);
2311 case INDEX_op_dupm_vec:
2312 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2314 case INDEX_op_add_vec:
2316 tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
2318 tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
2321 case INDEX_op_sub_vec:
2323 tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
2325 tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
2328 case INDEX_op_mul_vec:
2329 tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
2331 case INDEX_op_neg_vec:
2333 tcg_out_insn(s, 3612, NEG, vece, a0, a1);
2335 tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
2338 case INDEX_op_abs_vec:
2340 tcg_out_insn(s, 3612, ABS, vece, a0, a1);
2342 tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
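/* For the logical ops below, a constant operand is a 16/32-bit
 * shifted immediate (pre-validated by the wN/wO constraints, decoded
 * by is_shimm1632). The immediate BIC/ORR forms are destructive, so
 * they are used only when a0 == a1; otherwise the constant is
 * materialized into a0 with MOVI/MVNI and the register form is used. */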
2345 case INDEX_op_and_vec:
2346 if (const_args[2]) {
2347 is_shimm1632(~a2, &cmode, &imm8);
2349 tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
2352 tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
2355 tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2);
2357 case INDEX_op_or_vec:
2358 if (const_args[2]) {
2359 is_shimm1632(a2, &cmode, &imm8);
2361 tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
2364 tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
2367 tcg_out_insn(s, 3616, ORR, is_q, 0, a0, a1, a2);
2369 case INDEX_op_andc_vec:
2370 if (const_args[2]) {
2371 is_shimm1632(a2, &cmode, &imm8);
2373 tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
2376 tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
2379 tcg_out_insn(s, 3616, BIC, is_q, 0, a0, a1, a2);
2381 case INDEX_op_orc_vec:
2382 if (const_args[2]) {
2383 is_shimm1632(~a2, &cmode, &imm8);
2385 tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
2388 tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
2391 tcg_out_insn(s, 3616, ORN, is_q, 0, a0, a1, a2);
2393 case INDEX_op_xor_vec:
2394 tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
2396 case INDEX_op_ssadd_vec:
2398 tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
2400 tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
2403 case INDEX_op_sssub_vec:
2405 tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
2407 tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
2410 case INDEX_op_usadd_vec:
2412 tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
2414 tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
2417 case INDEX_op_ussub_vec:
2419 tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
2421 tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
2424 case INDEX_op_smax_vec:
2425 tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
2427 case INDEX_op_smin_vec:
2428 tcg_out_insn(s, 3616, SMIN, is_q, vece, a0, a1, a2);
2430 case INDEX_op_umax_vec:
2431 tcg_out_insn(s, 3616, UMAX, is_q, vece, a0, a1, a2);
2433 case INDEX_op_umin_vec:
2434 tcg_out_insn(s, 3616, UMIN, is_q, vece, a0, a1, a2);
2436 case INDEX_op_not_vec:
2437 tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
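/* Vector shift-immediate encoding: SHL/SLI take esize + shift in
 * immh:immb (hence a2 + (8 << vece)), while USHR/SSHR take
 * 2 * esize - shift (hence (16 << vece) - a2). */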
2439 case INDEX_op_shli_vec:
2441 tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
2443 tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
2446 case INDEX_op_shri_vec:
2448 tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
2450 tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
2453 case INDEX_op_sari_vec:
2455 tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
2457 tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
2460 case INDEX_op_aa64_sli_vec:
2462 tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
2464 tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
2467 case INDEX_op_shlv_vec:
2469 tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
2471 tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
2474 case INDEX_op_aa64_sshl_vec:
2476 tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
2478 tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
2481 case INDEX_op_cmp_vec:
2483 TCGCond cond = args[3];
2484 AArch64Insn insn;
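/* TCG_COND_NE has no direct AdvSIMD encoding. Against zero, CMTST
 * a1, a1 sets a lane to -1 iff the lane is non-zero; against a
 * register, emit CMEQ and invert the result with NOT. */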
2486 if (cond == TCG_COND_NE) {
2487 if (const_args[2]) {
2489 tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
2491 tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
2495 tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
2497 tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
2499 tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
2502 if (const_args[2]) {
2504 insn = cmp0_scalar_insn[cond];
2506 tcg_out_insn_3612(s, insn, vece, a0, a1);
2510 insn = cmp0_vec_insn[cond];
2512 tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
2516 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2517 a2 = TCG_VEC_TMP;
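/* With zero materialized in TCG_VEC_TMP above, fall through to the
 * register-register compare below. Conditions without a direct
 * compare insn (LT/LE and the unsigned variants) are obtained by
 * swapping the operands and the condition. */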
2520 insn = cmp_scalar_insn[cond];
2523 t = a1, a1 = a2, a2 = t;
2524 cond = tcg_swap_cond(cond);
2525 insn = cmp_scalar_insn[cond];
2526 tcg_debug_assert(insn != 0);
2528 tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
2530 insn = cmp_vec_insn[cond];
2533 t = a1, a1 = a2, a2 = t;
2534 cond = tcg_swap_cond(cond);
2535 insn = cmp_vec_insn[cond];
2536 tcg_debug_assert(insn != 0);
2538 tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
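/* BSL, BIT and BIF all compute the same bitwise select and differ
 * only in which operand is overwritten; pick the variant that matches
 * how a0 aliases the inputs, else move a1 into place and use BSL. */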
2544 case INDEX_op_bitsel_vec:
2545 a3 = args[3];
2546 if (a0 == a3) {
2547 tcg_out_insn(s, 3616, BIT, is_q, 0, a0, a2, a1);
2548 } else if (a0 == a2) {
2549 tcg_out_insn(s, 3616, BIF, is_q, 0, a0, a3, a1);
2552 tcg_out_mov(s, type, a0, a1);
2554 tcg_out_insn(s, 3616, BSL, is_q, 0, a0, a2, a3);
2558 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
2559 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
2560 default:
2561 g_assert_not_reached();
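/* Return values: 1 = supported directly, -1 = supported via the
 * expansion in tcg_expand_vec_op below, 0 = unsupported. */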
2565 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2568 case INDEX_op_add_vec:
2569 case INDEX_op_sub_vec:
2570 case INDEX_op_and_vec:
2571 case INDEX_op_or_vec:
2572 case INDEX_op_xor_vec:
2573 case INDEX_op_andc_vec:
2574 case INDEX_op_orc_vec:
2575 case INDEX_op_neg_vec:
2576 case INDEX_op_abs_vec:
2577 case INDEX_op_not_vec:
2578 case INDEX_op_cmp_vec:
2579 case INDEX_op_shli_vec:
2580 case INDEX_op_shri_vec:
2581 case INDEX_op_sari_vec:
2582 case INDEX_op_ssadd_vec:
2583 case INDEX_op_sssub_vec:
2584 case INDEX_op_usadd_vec:
2585 case INDEX_op_ussub_vec:
2586 case INDEX_op_shlv_vec:
2587 case INDEX_op_bitsel_vec:
2588 return 1;
2589 case INDEX_op_rotli_vec:
2590 case INDEX_op_shrv_vec:
2591 case INDEX_op_sarv_vec:
2592 case INDEX_op_rotlv_vec:
2593 case INDEX_op_rotrv_vec:
2594 return -1;
2595 case INDEX_op_mul_vec:
2596 case INDEX_op_smax_vec:
2597 case INDEX_op_smin_vec:
2598 case INDEX_op_umax_vec:
2599 case INDEX_op_umin_vec:
2600 return vece < MO_64;
2607 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2608 TCGArg a0, ...)
2610 va_list va;
2611 TCGv_vec v0, v1, v2, t1, t2, c1;
2612 TCGArg a2;
2614 va_start(va, vece);
2615 v0 = temp_tcgv_vec(arg_temp(a0));
2616 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2617 a2 = va_arg(va, TCGArg);
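/* Rotate left by immediate: shift v1 right by esize - imm into t1,
 * then SLI shifts v1 left by imm and inserts it over t1, giving
 * (v1 << imm) | (v1 >> (esize - imm)). */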
2621 case INDEX_op_rotli_vec:
2622 t1 = tcg_temp_new_vec(type);
2623 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2624 vec_gen_4(INDEX_op_aa64_sli_vec, type, vece,
2625 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2626 tcg_temp_free_vec(t1);
2629 case INDEX_op_shrv_vec:
2630 case INDEX_op_sarv_vec:
2631 /* Right shifts are negative left shifts for AArch64. */
2632 v2 = temp_tcgv_vec(arg_temp(a2));
2633 t1 = tcg_temp_new_vec(type);
2634 tcg_gen_neg_vec(vece, t1, v2);
2635 opc = (opc == INDEX_op_shrv_vec
2636 ? INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec);
2637 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2638 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2639 tcg_temp_free_vec(t1);
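/* Rotate left by vector: v0 = (v1 << v2) | (v1 >> (esize - v2)),
 * where the right shift is emitted as a left shift by the negative
 * amount v2 - esize. */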
2642 case INDEX_op_rotlv_vec:
2643 v2 = temp_tcgv_vec(arg_temp(a2));
2644 t1 = tcg_temp_new_vec(type);
2645 c1 = tcg_constant_vec(type, vece, 8 << vece);
2646 tcg_gen_sub_vec(vece, t1, v2, c1);
2647 /* Right shifts are negative left shifts for AArch64. */
2648 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
2649 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2650 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
2651 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2652 tcg_gen_or_vec(vece, v0, v0, t1);
2653 tcg_temp_free_vec(t1);
2656 case INDEX_op_rotrv_vec:
2657 v2 = temp_tcgv_vec(arg_temp(a2));
2658 t1 = tcg_temp_new_vec(type);
2659 t2 = tcg_temp_new_vec(type);
2660 c1 = tcg_constant_vec(type, vece, 8 << vece);
2661 tcg_gen_neg_vec(vece, t1, v2);
2662 tcg_gen_sub_vec(vece, t2, c1, v2);
2663 /* Right shifts are negative left shifts for AArch64. */
2664 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
2665 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2666 vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t2),
2667 tcgv_vec_arg(v1), tcgv_vec_arg(t2));
2668 tcg_gen_or_vec(vece, v0, t1, t2);
2669 tcg_temp_free_vec(t1);
2670 tcg_temp_free_vec(t2);
2673 default:
2674 g_assert_not_reached();
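/* Constraint letters, roughly, per tcg-target-con-str.h: r = general
 * register, w = vector register, l = register valid for qemu_ld/st
 * addressing. Capitalized suffixes also admit constants, e.g.
 * Z = zero (XZR/WZR), A = arithmetic immediate, L = logical
 * immediate. */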
2678 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2681 case INDEX_op_goto_ptr:
2684 case INDEX_op_ld8u_i32:
2685 case INDEX_op_ld8s_i32:
2686 case INDEX_op_ld16u_i32:
2687 case INDEX_op_ld16s_i32:
2688 case INDEX_op_ld_i32:
2689 case INDEX_op_ld8u_i64:
2690 case INDEX_op_ld8s_i64:
2691 case INDEX_op_ld16u_i64:
2692 case INDEX_op_ld16s_i64:
2693 case INDEX_op_ld32u_i64:
2694 case INDEX_op_ld32s_i64:
2695 case INDEX_op_ld_i64:
2696 case INDEX_op_neg_i32:
2697 case INDEX_op_neg_i64:
2698 case INDEX_op_not_i32:
2699 case INDEX_op_not_i64:
2700 case INDEX_op_bswap16_i32:
2701 case INDEX_op_bswap32_i32:
2702 case INDEX_op_bswap16_i64:
2703 case INDEX_op_bswap32_i64:
2704 case INDEX_op_bswap64_i64:
2705 case INDEX_op_ext8s_i32:
2706 case INDEX_op_ext16s_i32:
2707 case INDEX_op_ext8u_i32:
2708 case INDEX_op_ext16u_i32:
2709 case INDEX_op_ext8s_i64:
2710 case INDEX_op_ext16s_i64:
2711 case INDEX_op_ext32s_i64:
2712 case INDEX_op_ext8u_i64:
2713 case INDEX_op_ext16u_i64:
2714 case INDEX_op_ext32u_i64:
2715 case INDEX_op_ext_i32_i64:
2716 case INDEX_op_extu_i32_i64:
2717 case INDEX_op_extract_i32:
2718 case INDEX_op_extract_i64:
2719 case INDEX_op_sextract_i32:
2720 case INDEX_op_sextract_i64:
2721 return C_O1_I1(r, r);
2723 case INDEX_op_st8_i32:
2724 case INDEX_op_st16_i32:
2725 case INDEX_op_st_i32:
2726 case INDEX_op_st8_i64:
2727 case INDEX_op_st16_i64:
2728 case INDEX_op_st32_i64:
2729 case INDEX_op_st_i64:
2730 return C_O0_I2(rZ, r);
2732 case INDEX_op_add_i32:
2733 case INDEX_op_add_i64:
2734 case INDEX_op_sub_i32:
2735 case INDEX_op_sub_i64:
2736 case INDEX_op_setcond_i32:
2737 case INDEX_op_setcond_i64:
2738 return C_O1_I2(r, r, rA);
2740 case INDEX_op_mul_i32:
2741 case INDEX_op_mul_i64:
2742 case INDEX_op_div_i32:
2743 case INDEX_op_div_i64:
2744 case INDEX_op_divu_i32:
2745 case INDEX_op_divu_i64:
2746 case INDEX_op_rem_i32:
2747 case INDEX_op_rem_i64:
2748 case INDEX_op_remu_i32:
2749 case INDEX_op_remu_i64:
2750 case INDEX_op_muluh_i64:
2751 case INDEX_op_mulsh_i64:
2752 return C_O1_I2(r, r, r);
2754 case INDEX_op_and_i32:
2755 case INDEX_op_and_i64:
2756 case INDEX_op_or_i32:
2757 case INDEX_op_or_i64:
2758 case INDEX_op_xor_i32:
2759 case INDEX_op_xor_i64:
2760 case INDEX_op_andc_i32:
2761 case INDEX_op_andc_i64:
2762 case INDEX_op_orc_i32:
2763 case INDEX_op_orc_i64:
2764 case INDEX_op_eqv_i32:
2765 case INDEX_op_eqv_i64:
2766 return C_O1_I2(r, r, rL);
2768 case INDEX_op_shl_i32:
2769 case INDEX_op_shr_i32:
2770 case INDEX_op_sar_i32:
2771 case INDEX_op_rotl_i32:
2772 case INDEX_op_rotr_i32:
2773 case INDEX_op_shl_i64:
2774 case INDEX_op_shr_i64:
2775 case INDEX_op_sar_i64:
2776 case INDEX_op_rotl_i64:
2777 case INDEX_op_rotr_i64:
2778 return C_O1_I2(r, r, ri);
2780 case INDEX_op_clz_i32:
2781 case INDEX_op_ctz_i32:
2782 case INDEX_op_clz_i64:
2783 case INDEX_op_ctz_i64:
2784 return C_O1_I2(r, r, rAL);
2786 case INDEX_op_brcond_i32:
2787 case INDEX_op_brcond_i64:
2788 return C_O0_I2(r, rA);
2790 case INDEX_op_movcond_i32:
2791 case INDEX_op_movcond_i64:
2792 return C_O1_I4(r, r, rA, rZ, rZ);
2794 case INDEX_op_qemu_ld_i32:
2795 case INDEX_op_qemu_ld_i64:
2796 return C_O1_I1(r, l);
2797 case INDEX_op_qemu_st_i32:
2798 case INDEX_op_qemu_st_i64:
2799 return C_O0_I2(lZ, l);
2801 case INDEX_op_deposit_i32:
2802 case INDEX_op_deposit_i64:
2803 return C_O1_I2(r, 0, rZ);
2805 case INDEX_op_extract2_i32:
2806 case INDEX_op_extract2_i64:
2807 return C_O1_I2(r, rZ, rZ);
2809 case INDEX_op_add2_i32:
2810 case INDEX_op_add2_i64:
2811 case INDEX_op_sub2_i32:
2812 case INDEX_op_sub2_i64:
2813 return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
2815 case INDEX_op_add_vec:
2816 case INDEX_op_sub_vec:
2817 case INDEX_op_mul_vec:
2818 case INDEX_op_xor_vec:
2819 case INDEX_op_ssadd_vec:
2820 case INDEX_op_sssub_vec:
2821 case INDEX_op_usadd_vec:
2822 case INDEX_op_ussub_vec:
2823 case INDEX_op_smax_vec:
2824 case INDEX_op_smin_vec:
2825 case INDEX_op_umax_vec:
2826 case INDEX_op_umin_vec:
2827 case INDEX_op_shlv_vec:
2828 case INDEX_op_shrv_vec:
2829 case INDEX_op_sarv_vec:
2830 case INDEX_op_aa64_sshl_vec:
2831 return C_O1_I2(w, w, w);
2832 case INDEX_op_not_vec:
2833 case INDEX_op_neg_vec:
2834 case INDEX_op_abs_vec:
2835 case INDEX_op_shli_vec:
2836 case INDEX_op_shri_vec:
2837 case INDEX_op_sari_vec:
2838 return C_O1_I1(w, w);
2839 case INDEX_op_ld_vec:
2840 case INDEX_op_dupm_vec:
2841 return C_O1_I1(w, r);
2842 case INDEX_op_st_vec:
2843 return C_O0_I2(w, r);
2844 case INDEX_op_dup_vec:
2845 return C_O1_I1(w, wr);
2846 case INDEX_op_or_vec:
2847 case INDEX_op_andc_vec:
2848 return C_O1_I2(w, w, wO);
2849 case INDEX_op_and_vec:
2850 case INDEX_op_orc_vec:
2851 return C_O1_I2(w, w, wN);
2852 case INDEX_op_cmp_vec:
2853 return C_O1_I2(w, w, wZ);
2854 case INDEX_op_bitsel_vec:
2855 return C_O1_I3(w, w, w, w);
2856 case INDEX_op_aa64_sli_vec:
2857 return C_O1_I2(w, 0, w);
2859 default:
2860 g_assert_not_reached();
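/* Regset layout: the 32 general registers occupy bits 0..31 and the
 * 32 vector registers bits 32..63, hence the masks below. The clobber
 * set starts as "everything" and then clears the AAPCS64 callee-saved
 * registers (x19..x29, v8..v15). */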
2864 static void tcg_target_init(TCGContext *s)
2866 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
2867 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
2868 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
2869 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
2871 tcg_target_call_clobber_regs = -1ull;
2872 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X19);
2873 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X20);
2874 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X21);
2875 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X22);
2876 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X23);
2877 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X24);
2878 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X25);
2879 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X26);
2880 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X27);
2881 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X28);
2882 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X29);
2883 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
2884 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
2885 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
2886 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
2887 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
2888 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
2889 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
2890 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
2892 s->reserved_regs = 0;
2893 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2894 tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
2895 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2896 tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
2897 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2900 /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */
2901 #define PUSH_SIZE ((30 - 19 + 1) * 8)
2903 #define FRAME_SIZE \
2904 ((PUSH_SIZE \
2905 + TCG_STATIC_CALL_ARGS_SIZE \
2906 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2907 + TCG_TARGET_STACK_ALIGN - 1) \
2908 & ~(TCG_TARGET_STACK_ALIGN - 1))
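/* For example: PUSH_SIZE is 12 registers * 8 = 96 bytes, and
 * FRAME_SIZE rounds PUSH_SIZE plus the outgoing-argument and
 * temp-buffer areas up to a TCG_TARGET_STACK_ALIGN boundary. */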
2910 /* We're expecting a 2-byte uleb128 encoded value. */
2911 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2913 /* We're expecting to use a single ADDI insn. */
2914 QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);
2916 static void tcg_target_qemu_prologue(TCGContext *s)
2918 TCGReg r;
2920 /* Push (FP, LR) and allocate space for all saved registers. */
2921 tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
2922 TCG_REG_SP, -PUSH_SIZE, 1, 1);
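/* The trailing 1, 1 arguments select pre-index with writeback, so
 * this single STP both decrements SP by PUSH_SIZE and stores the
 * pair. */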
2924 /* Set up frame pointer for canonical unwinding. */
2925 tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
2927 /* Store callee-preserved regs x19..x28. */
2928 for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
2929 int ofs = (r - TCG_REG_X19 + 2) * 8;
2930 tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
2933 /* Allocate stack space for TCG locals. */
2934 tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
2935 FRAME_SIZE - PUSH_SIZE);
2937 /* Tell TCG how to find its locals: base register, offset, and size. */
2938 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
2939 CPU_TEMP_BUF_NLONGS * sizeof(long));
2941 #if !defined(CONFIG_SOFTMMU)
2942 if (USE_GUEST_BASE) {
2943 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
2944 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
2946 #endif
2948 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2949 tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
2951 /*
2952 * Return path for goto_ptr. Set the return value to 0, à la exit_tb,
2953 * and fall through to the rest of the epilogue.
2954 */
2955 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2956 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
2958 /* TB epilogue */
2959 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2961 /* Release the stack space used for TCG locals. */
2962 tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
2963 FRAME_SIZE - PUSH_SIZE);
2965 /* Restore registers x19..x28. */
2966 for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
2967 int ofs = (r - TCG_REG_X19 + 2) * 8;
2968 tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
2971 /* Pop (FP, LR), restore SP to previous frame. */
2972 tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR,
2973 TCG_REG_SP, PUSH_SIZE, 0, 1);
2974 tcg_out_insn(s, 3207, RET, TCG_REG_LR);
2977 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2979 int i;
2980 for (i = 0; i < count; ++i) {
2981 p[i] = NOP;
2985 typedef struct {
2986 DebugFrameHeader h;
2987 uint8_t fde_def_cfa[4];
2988 uint8_t fde_reg_ofs[24];
2989 } DebugFrame;
2991 #define ELF_HOST_MACHINE EM_AARCH64
2993 static const DebugFrame debug_frame = {
2994 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2995 .h.cie.id = -1,
2996 .h.cie.version = 1,
2997 .h.cie.code_align = 1,
2998 .h.cie.data_align = 0x78, /* sleb128 -8 */
2999 .h.cie.return_column = TCG_REG_LR,
3001 /* Total FDE size does not include the "len" member. */
3002 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
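/* Each pair in fde_reg_ofs below is DW_CFA_offset | regno (0x80 + n)
 * followed by the save slot as a uleb128 offset in data_align
 * (i.e. -8 byte) units. */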
3004 .fde_def_cfa = {
3005 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
3006 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3007 (FRAME_SIZE >> 7)
3008 },
3009 .fde_reg_ofs = {
3010 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */
3011 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */
3012 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */
3013 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */
3014 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */
3015 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */
3016 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */
3017 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */
3018 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */
3019 0x80 + 19, 10, /* DW_CFA_offset, x19, -80 */
3020 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */
3021 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */
3025 void tcg_register_jit(const void *buf, size_t buf_size)
3027 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));