tcg/aarch64: Make direct jump patching thread-safe
[qemu/ar7.git] / tcg / aarch64 / tcg-target.inc.c
blob 88183c830fb0c26ae6dd726036254cf10d257ff5
/*
 * Initial TCG Implementation for aarch64
 *
 * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
 * Written by Claudio Fontana
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.
 *
 * See the COPYING file in the top-level directory for details.
 */

#include "tcg-be-ldst.h"
#include "qemu/bitops.h"

/* We're going to re-use TCGType in setting of the SF bit, which controls
   the size of the operation performed.  If we know the values match, it
   makes things much cleaner.  */
QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);
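
/* Illustrative note (added, not part of the original file): thanks to the
   assertion above, a TCGType can be dropped straight into bit 31 of an
   instruction word, e.g. "insn | ext << 31" selects the 32-bit form for
   TCG_TYPE_I32 (sf = 0) and the 64-bit form for TCG_TYPE_I64 (sf = 1).  */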

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7",
    "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15",
    "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23",
    "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp",
};
#endif /* CONFIG_DEBUG_TCG */

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
    TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
    TCG_REG_X28, /* we will reserve this for guest_base if configured */

    TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
    TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
    TCG_REG_X16, TCG_REG_X17,

    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,

    /* X18 reserved by system */
    /* X19 reserved for AREG0 */
    /* X29 reserved as fp */
    /* X30 reserved as temporary */
};

static const int tcg_target_call_iarg_regs[8] = {
    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7
};
static const int tcg_target_call_oarg_regs[1] = {
    TCG_REG_X0
};

#define TCG_REG_TMP TCG_REG_X30

#ifndef CONFIG_SOFTMMU
/* Note that XZR cannot be encoded in the address base register slot,
   as that actually encodes SP.  So if we need to zero-extend the guest
   address, via the address index register slot, we need to load even
   a zero guest base into a register.  */
#define USE_GUEST_BASE     (guest_base != 0 || TARGET_LONG_BITS == 32)
#define TCG_REG_GUEST_BASE TCG_REG_X28
#endif

static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = target - code_ptr;
    tcg_debug_assert(offset == sextract64(offset, 0, 26));
    /* read instruction, mask away previous PC_REL26 parameter contents,
       set the proper offset, then write back the instruction. */
    *code_ptr = deposit32(*code_ptr, 0, 26, offset);
}
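
/* Worked example (added comment): an unpatched B insn is 0x14000000.  If
   its target lies two insn units (8 bytes) ahead, offset = 2 and
   deposit32() yields 0x14000002, i.e. "b . + 8".  Offsets are counted in
   32-bit tcg_insn_units, giving a +/-128MB branch range.  */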

static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
                                     tcg_insn_unit *target)
{
    ptrdiff_t offset = target - code_ptr;
    tcg_insn_unit insn;
    tcg_debug_assert(offset == sextract64(offset, 0, 26));
    /* read instruction, mask away previous PC_REL26 parameter contents,
       set the proper offset, then write back the instruction. */
    insn = atomic_read(code_ptr);
    atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
}

static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = target - code_ptr;
    tcg_debug_assert(offset == sextract64(offset, 0, 19));
    *code_ptr = deposit32(*code_ptr, 5, 19, offset);
}

static inline void patch_reloc(tcg_insn_unit *code_ptr, int type,
                               intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
        reloc_pc26(code_ptr, (tcg_insn_unit *)value);
        break;
    case R_AARCH64_CONDBR19:
        reloc_pc19(code_ptr, (tcg_insn_unit *)value);
        break;
    default:
        tcg_abort();
    }
}

#define TCG_CT_CONST_AIMM 0x100
#define TCG_CT_CONST_LIMM 0x200
#define TCG_CT_CONST_ZERO 0x400
#define TCG_CT_CONST_MONE 0x800

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct,
                                   const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1);
        break;
    case 'l': /* qemu_ld / qemu_st address, data_reg */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* x0 and x1 will be overwritten when reading the tlb entry,
           and x2 and x3 for helper args; better to avoid using them. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3);
#endif
        break;
    case 'A': /* Valid for arithmetic immediate (positive or negative).  */
        ct->ct |= TCG_CT_CONST_AIMM;
        break;
    case 'L': /* Valid for logical immediate.  */
        ct->ct |= TCG_CT_CONST_LIMM;
        break;
    case 'M': /* minus one */
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'Z': /* zero */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }

    ct_str++;
    *pct_str = ct_str;
    return 0;
}

static inline bool is_aimm(uint64_t val)
{
    return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
}
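
/* For illustration (added comment): is_aimm() accepts a 12-bit immediate,
   optionally shifted left by 12 bits -- 0x123 and 0x123000 both qualify,
   while 0x123456 does not and must be materialized in a register.  */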

static inline bool is_limm(uint64_t val)
{
    /* Taking a simplified view of the logical immediates for now, ignoring
       the replication that can happen across the field.  Match bit patterns
       of the forms
           0....01....1
           0..01..10..0
       and their inverses.  */

    /* Make things easier below, by testing the form with msb clear. */
    if ((int64_t)val < 0) {
        val = ~val;
    }
    if (val == 0) {
        return false;
    }
    val += val & -val;
    return (val & (val - 1)) == 0;
}
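
/* Worked example (added comment): val = 0x0ff0.  val & -val isolates the
   lowest set bit, 0x0010; adding it collapses the run of ones, giving
   0x1000.  That is a power of two, so (val & (val - 1)) == 0 and the
   pattern matches.  A non-contiguous value such as 0x0f0f instead gives
   0x0f10, which is not a power of two, so it is rejected.  */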

static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }
    if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }

    return 0;
}

enum aarch64_cond_code {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,     /* Unsigned greater or equal */
    COND_HS = COND_CS, /* ALIAS greater or equal */
    COND_CC = 0x3,     /* Unsigned less than */
    COND_LO = COND_CC, /* ALIAS Lower */
    COND_MI = 0x4,     /* Negative */
    COND_PL = 0x5,     /* Zero or greater */
    COND_VS = 0x6,     /* Overflow */
    COND_VC = 0x7,     /* No overflow */
    COND_HI = 0x8,     /* Unsigned greater than */
    COND_LS = 0x9,     /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
    COND_NV = 0xf, /* behaves like COND_AL here */
};

static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_LO,
    [TCG_COND_GTU] = COND_HI,
    [TCG_COND_GEU] = COND_HS,
    [TCG_COND_LEU] = COND_LS,
};

typedef enum {
    LDST_ST = 0,    /* store */
    LDST_LD = 1,    /* load */
    LDST_LD_S_X = 2,  /* load and sign-extend into Xt */
    LDST_LD_S_W = 3,  /* load and sign-extend into Wt */
} AArch64LdstType;

/* We encode the format of the insn into the beginning of the name, so that
   we can have the preprocessor help "typecheck" the insn vs the output
   function.  Arm didn't provide us with nice names for the formats, so we
   use the section number of the architecture reference manual in which the
   instruction group is described.  */
typedef enum {
    /* Compare and branch (immediate).  */
    I3201_CBZ       = 0x34000000,
    I3201_CBNZ      = 0x35000000,

    /* Conditional branch (immediate).  */
    I3202_B_C       = 0x54000000,

    /* Unconditional branch (immediate).  */
    I3206_B         = 0x14000000,
    I3206_BL        = 0x94000000,

    /* Unconditional branch (register).  */
    I3207_BR        = 0xd61f0000,
    I3207_BLR       = 0xd63f0000,
    I3207_RET       = 0xd65f0000,

    /* Load/store register.  Described here as 3.3.12, but the helper
       that emits them can transform to 3.3.10 or 3.3.13.  */
    I3312_STRB      = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
    I3312_STRH      = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
    I3312_STRW      = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
    I3312_STRX      = 0x38000000 | LDST_ST << 22 | MO_64 << 30,

    I3312_LDRB      = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
    I3312_LDRH      = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
    I3312_LDRW      = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
    I3312_LDRX      = 0x38000000 | LDST_LD << 22 | MO_64 << 30,

    I3312_LDRSBW    = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
    I3312_LDRSHW    = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,

    I3312_LDRSBX    = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
    I3312_LDRSHX    = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
    I3312_LDRSWX    = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,

    I3312_TO_I3310  = 0x00200800,
    I3312_TO_I3313  = 0x01000000,

    /* Load/store register pair instructions.  */
    I3314_LDP       = 0x28400000,
    I3314_STP       = 0x28000000,

    /* Add/subtract immediate instructions.  */
    I3401_ADDI      = 0x11000000,
    I3401_ADDSI     = 0x31000000,
    I3401_SUBI      = 0x51000000,
    I3401_SUBSI     = 0x71000000,

    /* Bitfield instructions.  */
    I3402_BFM       = 0x33000000,
    I3402_SBFM      = 0x13000000,
    I3402_UBFM      = 0x53000000,

    /* Extract instruction.  */
    I3403_EXTR      = 0x13800000,

    /* Logical immediate instructions.  */
    I3404_ANDI      = 0x12000000,
    I3404_ORRI      = 0x32000000,
    I3404_EORI      = 0x52000000,

    /* Move wide immediate instructions.  */
    I3405_MOVN      = 0x12800000,
    I3405_MOVZ      = 0x52800000,
    I3405_MOVK      = 0x72800000,

    /* PC relative addressing instructions.  */
    I3406_ADR       = 0x10000000,
    I3406_ADRP      = 0x90000000,

    /* Add/subtract shifted register instructions (without a shift).  */
    I3502_ADD       = 0x0b000000,
    I3502_ADDS      = 0x2b000000,
    I3502_SUB       = 0x4b000000,
    I3502_SUBS      = 0x6b000000,

    /* Add/subtract shifted register instructions (with a shift).  */
    I3502S_ADD_LSL  = I3502_ADD,

    /* Add/subtract with carry instructions.  */
    I3503_ADC       = 0x1a000000,
    I3503_SBC       = 0x5a000000,

    /* Conditional select instructions.  */
    I3506_CSEL      = 0x1a800000,
    I3506_CSINC     = 0x1a800400,

    /* Data-processing (1 source) instructions.  */
    I3507_REV16     = 0x5ac00400,
    I3507_REV32     = 0x5ac00800,
    I3507_REV64     = 0x5ac00c00,

    /* Data-processing (2 source) instructions.  */
    I3508_LSLV      = 0x1ac02000,
    I3508_LSRV      = 0x1ac02400,
    I3508_ASRV      = 0x1ac02800,
    I3508_RORV      = 0x1ac02c00,
    I3508_SMULH     = 0x9b407c00,
    I3508_UMULH     = 0x9bc07c00,
    I3508_UDIV      = 0x1ac00800,
    I3508_SDIV      = 0x1ac00c00,

    /* Data-processing (3 source) instructions.  */
    I3509_MADD      = 0x1b000000,
    I3509_MSUB      = 0x1b008000,

    /* Logical shifted register instructions (without a shift).  */
    I3510_AND       = 0x0a000000,
    I3510_BIC       = 0x0a200000,
    I3510_ORR       = 0x2a000000,
    I3510_ORN       = 0x2a200000,
    I3510_EOR       = 0x4a000000,
    I3510_EON       = 0x4a200000,
    I3510_ANDS      = 0x6a000000,
} AArch64Insn;

static inline uint32_t tcg_in32(TCGContext *s)
{
    uint32_t v = *(uint32_t *)s->code_ptr;
    return v;
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)

static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rt, int imm19)
{
    tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt);
}

static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
                              TCGCond c, int imm19)
{
    tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
}

static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
{
    tcg_out32(s, insn | (imm26 & 0x03ffffff));
}

static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn)
{
    tcg_out32(s, insn | rn << 5);
}

static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
                              TCGReg r1, TCGReg r2, TCGReg rn,
                              tcg_target_long ofs, bool pre, bool w)
{
    insn |= 1u << 31; /* ext */
    insn |= pre << 24;
    insn |= w << 23;

    tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
    insn |= (ofs & (0x7f << 3)) << (15 - 3);

    tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
}

static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, uint64_t aimm)
{
    if (aimm > 0xfff) {
        tcg_debug_assert((aimm & 0xfff) == 0);
        aimm >>= 12;
        tcg_debug_assert(aimm <= 0xfff);
        aimm |= 1 << 12;  /* apply LSL 12 */
    }
    tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
}

/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4
   (Logical immediate).  Both insn groups have N, IMMR and IMMS fields
   that feed the DecodeBitMasks pseudo function.  */
static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, int n, int immr, int imms)
{
    tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10
              | rn << 5 | rd);
}

#define tcg_out_insn_3404  tcg_out_insn_3402

static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, int imms)
{
    tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10
              | rn << 5 | rd);
}

/* This function is used for the Move (wide immediate) instruction group.
   Note that SHIFT is a full shift count, not the 2 bit HW field.  */
static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, uint16_t half, unsigned shift)
{
    tcg_debug_assert((shift & ~0x30) == 0);
    tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
}

static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, int64_t disp)
{
    tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register), for the
   rare occasion when we actually want to supply a shift amount.  */
static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
                                      TCGType ext, TCGReg rd, TCGReg rn,
                                      TCGReg rm, int imm6)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register),
   and 3.5.10 (Logical shifted register), for the vast majority of cases
   when we don't want to apply a shift.  Thus it can also be used for
   3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source).  */
static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
}

#define tcg_out_insn_3503  tcg_out_insn_3502
#define tcg_out_insn_3508  tcg_out_insn_3502
#define tcg_out_insn_3510  tcg_out_insn_3502

static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd
              | tcg_cond_to_aarch64[c] << 12);
}

static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | ext << 31 | rn << 5 | rd);
}

static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
}

static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg base, TCGType ext,
                              TCGReg regoff)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
              0x4000 | ext << 13 | base << 5 | rd);
}

static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, intptr_t offset)
{
    tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd);
}

static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd);
}

/* Register to register move using ORR (shifted register with no shift). */
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
{
    tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm);
}

/* Register to register move using ADDI (move to/from SP).  */
static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
}

/* This function is used for the Logical (immediate) instruction group.
   The value of LIMM must satisfy IS_LIMM.  See the comment above about
   only supporting simplified logical immediates.  */
static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
                             TCGReg rd, TCGReg rn, uint64_t limm)
{
    unsigned h, l, r, c;

    tcg_debug_assert(is_limm(limm));

    h = clz64(limm);
    l = ctz64(limm);
    if (l == 0) {
        r = 0;                  /* form 0....01....1 */
        c = ctz64(~limm) - 1;
        if (h == 0) {
            r = clz64(~limm);   /* form 1..10..01..1 */
            c += r;
        }
    } else {
        r = 64 - l;             /* form 1....10....0 or 0..01..10..0 */
        c = r - h - 1;
    }
    if (ext == TCG_TYPE_I32) {
        r &= 31;
        c &= 31;
    }

    tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
}
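
/* Worked example (added comment): limm = 0xff00 with ext == TCG_TYPE_I32.
   h = clz64 = 48 and l = ctz64 = 8, so r = 64 - 8 = 56 and
   c = 56 - 48 - 1 = 7; masked to 32 bits, immr = 24 and imms = 7, i.e.
   an 8-bit run of ones rotated right by 24 -- exactly 0x0000ff00.  */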

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long value)
{
    AArch64Insn insn;
    int i, wantinv, shift;
    tcg_target_long svalue = value;
    tcg_target_long ivalue = ~value;
    tcg_target_long imask;

    /* For 32-bit values, discard potential garbage in value.  For 64-bit
       values within [2**31, 2**32-1], we can create smaller sequences by
       interpreting this as a negative 32-bit number, while ensuring that
       the high 32 bits are cleared by setting SF=0.  */
    if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
        svalue = (int32_t)value;
        value = (uint32_t)value;
        ivalue = (uint32_t)ivalue;
        type = TCG_TYPE_I32;
    }

    /* Speed things up by handling the common case of small positive
       and negative values specially.  */
    if ((value & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0);
        return;
    } else if ((ivalue & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0);
        return;
    }

    /* Check for bitfield immediates.  For the benefit of 32-bit quantities,
       use the sign-extended value.  That lets us match rotated values such
       as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff.  */
    if (is_limm(svalue)) {
        tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue);
        return;
    }

    /* Look for host pointer values within 4G of the PC.  This happens
       often when loading pointers to QEMU's own data structures.  */
    if (type == TCG_TYPE_I64) {
        tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12);
        if (disp == sextract64(disp, 0, 21)) {
            tcg_out_insn(s, 3406, ADRP, rd, disp);
            if (value & 0xfff) {
                tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff);
            }
            return;
        }
    }

    /* Would it take fewer insns to begin with MOVN?  For the value and its
       inverse, count the number of 16-bit lanes that are 0.  */
    for (i = wantinv = imask = 0; i < 64; i += 16) {
        tcg_target_long mask = 0xffffull << i;
        if ((value & mask) == 0) {
            wantinv -= 1;
        }
        if ((ivalue & mask) == 0) {
            wantinv += 1;
            imask |= mask;
        }
    }

    /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN.  */
    insn = I3405_MOVZ;
    if (wantinv > 0) {
        value = ivalue;
        insn = I3405_MOVN;
    }

    /* Find the lowest lane that is not 0x0000.  */
    shift = ctz64(value) & (63 & -16);
    tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift);

    if (wantinv > 0) {
        /* Re-invert the value, so MOVK sees non-inverted bits.  */
        value = ~value;
        /* Clear out all the 0xffff lanes.  */
        value ^= imask;
    }
    /* Clear out the lane that we just set.  */
    value &= ~(0xffffUL << shift);

    /* Iterate until all lanes have been set, and thus cleared from VALUE.  */
    while (value) {
        shift = ctz64(value) & (63 & -16);
        tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
        value &= ~(0xffffUL << shift);
    }
}
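
/* Worked examples (added comment): 0x12345 emits MOVZ rd, #0x2345 followed
   by MOVK rd, #0x1, lsl #16.  0xffffffffffff0123 has three 0xffff lanes and
   none that are zero, so wantinv > 0 and a single MOVN rd, #0xfedc
   suffices.  */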

/* Define something more legible for general use.  */
#define tcg_out_ldst_r  tcg_out_insn_3310

static void tcg_out_ldst(TCGContext *s, AArch64Insn insn,
                         TCGReg rd, TCGReg rn, intptr_t offset)
{
    TCGMemOp size = (uint32_t)insn >> 30;

    /* If the offset is naturally aligned and in range, then we can
       use the scaled uimm12 encoding */
    if (offset >= 0 && !(offset & ((1 << size) - 1))) {
        uintptr_t scaled_uimm = offset >> size;
        if (scaled_uimm <= 0xfff) {
            tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
            return;
        }
    }

    /* Small signed offsets can use the unscaled encoding.  */
    if (offset >= -256 && offset < 256) {
        tcg_out_insn_3312(s, insn, rd, rn, offset);
        return;
    }

    /* Worst-case scenario, move offset to temp register, use reg offset.  */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
}
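
/* For illustration (added comment): an 8-byte load at offset 8 uses the
   scaled form (scaled_uimm = 1); offset -8 falls back to the unscaled
   signed 9-bit form; an offset like 0x123456 needs the register-offset
   form via TCG_REG_TMP.  */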

static inline void tcg_out_mov(TCGContext *s,
                               TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_movr(s, type, ret, arg);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX,
                 arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_STRW : I3312_STRX,
                 arg, arg1, arg2);
}

static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
                               TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, TCGReg rm, unsigned int a)
{
    tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}

static inline void tcg_out_shl(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int bits = ext ? 64 : 32;
    int max = bits - 1;
    tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max));
}

static inline void tcg_out_shr(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_ubfm(s, ext, rd, rn, m & max, max);
}

static inline void tcg_out_sar(TCGContext *s, TCGType ext,
                               TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_sbfm(s, ext, rd, rn, m & max, max);
}

static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
                                TCGReg rd, TCGReg rn, unsigned int m)
{
    int max = ext ? 63 : 31;
    tcg_out_extr(s, ext, rd, rn, rn, m & max);
}

static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
                                TCGReg rd, TCGReg rn, unsigned int m)
{
    int bits = ext ? 64 : 32;
    int max = bits - 1;
    tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
}

static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
                               TCGReg rn, unsigned lsb, unsigned width)
{
    unsigned size = ext ? 64 : 32;
    unsigned a = (size - lsb) & (size - 1);
    unsigned b = width - 1;
    tcg_out_bfm(s, ext, rd, rn, a, b);
}
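
/* Worked example (added comment): a 32-bit deposit at lsb = 8, width = 8
   computes a = (32 - 8) & 31 = 24 and b = 7, i.e. BFM rd, rn, #24, #7,
   which is the BFI rd, rn, #8, #8 alias.  */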

static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
                        tcg_target_long b, bool const_b)
{
    if (const_b) {
        /* Using CMP or CMN aliases.  */
        if (b >= 0) {
            tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
        } else {
            tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
        }
    } else {
        /* Using CMP alias SUBS wzr, Wn, Wm */
        tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
    }
}

static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
{
    ptrdiff_t offset = target - s->code_ptr;
    tcg_debug_assert(offset == sextract64(offset, 0, 26));
    tcg_out_insn(s, 3206, B, offset);
}

static inline void tcg_out_goto_noaddr(TCGContext *s)
{
    /* We pay attention here to not modify the branch target by reading from
       the buffer.  This ensures that caches and memory are kept coherent
       during retranslation.  Mask away possible garbage in the high bits for
       the first translation, while keeping the offset bits for
       retranslation.  */
    uint32_t old = tcg_in32(s);
    tcg_out_insn(s, 3206, B, old);
}

static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c)
{
    /* See comments in tcg_out_goto_noaddr.  */
    uint32_t old = tcg_in32(s) >> 5;
    tcg_out_insn(s, 3202, B_C, c, old);
}

static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
{
    tcg_out_insn(s, 3207, BLR, reg);
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
    ptrdiff_t offset = target - s->code_ptr;
    if (offset == sextract64(offset, 0, 26)) {
        tcg_out_insn(s, 3206, BL, offset);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
        tcg_out_callr(s, TCG_REG_TMP);
    }
}

void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
    tcg_insn_unit *target = (tcg_insn_unit *)addr;

    reloc_pc26_atomic(code_ptr, target);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}
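
/* Added note (not in the original): this is the heart of making direct
   jump patching thread-safe.  The B instruction is a naturally aligned
   32-bit word, so the atomic_set() in reloc_pc26_atomic() rewrites it in
   one single-copy-atomic store; a cpu concurrently executing this TB
   observes either the old branch or the new one, never a torn mix of
   the two.  */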

static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
{
    if (!l->has_value) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
        tcg_out_goto_noaddr(s);
    } else {
        tcg_out_goto(s, l->u.value_ptr);
    }
}

static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a,
                           TCGArg b, bool b_const, TCGLabel *l)
{
    intptr_t offset;
    bool need_cmp;

    if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
        need_cmp = false;
    } else {
        need_cmp = true;
        tcg_out_cmp(s, ext, a, b, b_const);
    }

    if (!l->has_value) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
        offset = tcg_in32(s) >> 5;
    } else {
        offset = l->u.value_ptr - s->code_ptr;
        tcg_debug_assert(offset == sextract64(offset, 0, 19));
    }

    if (need_cmp) {
        tcg_out_insn(s, 3202, B_C, c, offset);
    } else if (c == TCG_COND_EQ) {
        tcg_out_insn(s, 3201, CBZ, ext, a, offset);
    } else {
        tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
    }
}

static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn);
}

static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn);
}

static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn);
}

static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
    int bits = (8 << s_bits) - 1;
    tcg_out_sbfm(s, ext, rd, rn, 0, bits);
}

static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
    int bits = (8 << s_bits) - 1;
    tcg_out_ubfm(s, 0, rd, rn, 0, bits);
}

static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                            TCGReg rn, int64_t aimm)
{
    if (aimm >= 0) {
        tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
    } else {
        tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
    }
}

static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
                                   TCGReg rh, TCGReg al, TCGReg ah,
                                   tcg_target_long bl, tcg_target_long bh,
                                   bool const_bl, bool const_bh, bool sub)
{
    TCGReg orig_rl = rl;
    AArch64Insn insn;

    if (rl == ah || (!const_bh && rl == bh)) {
        rl = TCG_REG_TMP;
    }

    if (const_bl) {
        insn = I3401_ADDSI;
        if ((bl < 0) ^ sub) {
            insn = I3401_SUBSI;
            bl = -bl;
        }
        tcg_out_insn_3401(s, insn, ext, rl, al, bl);
    } else {
        tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
    }

    insn = I3503_ADC;
    if (const_bh) {
        /* Note that the only two constants we support are 0 and -1, and
           that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa.  */
        if ((bh != 0) ^ sub) {
            insn = I3503_SBC;
        }
        bh = TCG_REG_XZR;
    } else if (sub) {
        insn = I3503_SBC;
    }
    tcg_out_insn_3503(s, insn, ext, rh, ah, bh);

    tcg_out_mov(s, ext, orig_rl, rl);
}
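
/* For illustration (added comment): add2_i64 pairs ADDS for the low word
   with ADC for the high word so the carry propagates through NZCV, e.g.
   "adds x0, x2, x4; adc x1, x3, x5" (register numbers arbitrary).  */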

#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target);
    tcg_debug_assert(offset == sextract64(offset, 0, 21));
    tcg_out_insn(s, 3406, ADR, rd, offset);
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp size = opc & MO_SIZE;

    reloc_pc19(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
    tcg_out_adr(s, TCG_REG_X3, lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    if (opc & MO_SIGN) {
        tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
    } else {
        tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
    }

    tcg_out_goto(s, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp size = opc & MO_SIZE;

    reloc_pc19(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
    tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi);
    tcg_out_adr(s, TCG_REG_X4, lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    tcg_out_goto(s, lb->raddr);
}

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGType ext, TCGReg data_reg, TCGReg addr_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

/* Load and compare a TLB entry, emitting the conditional jump to the
   slow path for the failure case, which will be patched later when finalizing
   the slow path.  Generated code returns the host addend in X1,
   clobbers X0,X2,X3,TMP.  */
static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
                             tcg_insn_unit **label_ptr, int mem_index,
                             bool is_read)
{
    int tlb_offset = is_read ?
        offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
        : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    int s_mask = (1 << (opc & MO_SIZE)) - 1;
    TCGReg base = TCG_AREG0, x3;
    uint64_t tlb_mask;

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
        tlb_mask = TARGET_PAGE_MASK | s_mask;
        x3 = addr_reg;
    } else {
        tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
                     TCG_REG_X3, addr_reg, s_mask);
        tlb_mask = TARGET_PAGE_MASK;
        x3 = TCG_REG_X3;
    }

    /* Extract the TLB index from the address into X0.
       X0<CPU_TLB_BITS:0> =
       addr_reg<TARGET_PAGE_BITS+CPU_TLB_BITS:TARGET_PAGE_BITS> */
    tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg,
                 TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS);

    /* Store the page mask part of the address into X3.  */
    tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64,
                     TCG_REG_X3, x3, tlb_mask);

    /* Add any "high bits" from the tlb offset to the env address into X2,
       to take advantage of the LSL12 form of the ADDI instruction.
       X2 = env + (tlb_offset & 0xfff000) */
    if (tlb_offset & 0xfff000) {
        tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base,
                     tlb_offset & 0xfff000);
        base = TCG_REG_X2;
    }

    /* Merge the tlb index contribution into X2.
       X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
    tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base,
                 TCG_REG_X0, CPU_TLB_ENTRY_BITS);

    /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
       X0 = load [X2 + (tlb_offset & 0x000fff)] */
    tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX,
                 TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff);

    /* Load the tlb addend.  Do that early to avoid stalling.
       X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */
    tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2,
                 (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) -
                 (is_read ? offsetof(CPUTLBEntry, addr_read)
                  : offsetof(CPUTLBEntry, addr_write)));

    /* Perform the address comparison.  */
    tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0);

    /* If not equal, we jump to the slow path.  */
    *label_ptr = s->code_ptr;
    tcg_out_goto_cond_noaddr(s, TCG_COND_NE);
}
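
/* Sketch of the emitted fast path (added comment; register assignments as
   documented above, field names symbolic):
       ubfm x0, xaddr, #PAGE_BITS, #PAGE_BITS+TLB_BITS    ; tlb index
       and  x3, xaddr, #page_mask                         ; compare tag
       add  x2, env, x0, lsl #TLB_ENTRY_BITS              ; tlb entry
       ldr  x0, [x2, #comparator]                         ; stored tag
       ldr  x1, [x2, #addend]                             ; host addend
       cmp  x0, x3
       b.ne slow_path
   On a hit, X1 plus the guest address yields the host address.  */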

#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext,
                                   TCGReg data_r, TCGReg addr_r,
                                   TCGType otype, TCGReg off_r)
{
    const TCGMemOp bswap = memop & MO_BSWAP;

    switch (memop & MO_SSIZE) {
    case MO_UB:
        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
        break;
    case MO_SB:
        tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
                       data_r, addr_r, otype, off_r);
        break;
    case MO_UW:
        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
        if (bswap) {
            tcg_out_rev16(s, data_r, data_r);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
            tcg_out_rev16(s, data_r, data_r);
            tcg_out_sxt(s, ext, MO_16, data_r, data_r);
        } else {
            tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
                           data_r, addr_r, otype, off_r);
        }
        break;
    case MO_UL:
        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
        if (bswap) {
            tcg_out_rev32(s, data_r, data_r);
        }
        break;
    case MO_SL:
        if (bswap) {
            tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
            tcg_out_rev32(s, data_r, data_r);
            tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r);
        } else {
            tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
        }
        break;
    case MO_Q:
        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
        if (bswap) {
            tcg_out_rev64(s, data_r, data_r);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
                                   TCGReg data_r, TCGReg addr_r,
                                   TCGType otype, TCGReg off_r)
{
    const TCGMemOp bswap = memop & MO_BSWAP;

    switch (memop & MO_SIZE) {
    case MO_8:
        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
        break;
    case MO_16:
        if (bswap && data_r != TCG_REG_XZR) {
            tcg_out_rev16(s, TCG_REG_TMP, data_r);
            data_r = TCG_REG_TMP;
        }
        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
        break;
    case MO_32:
        if (bswap && data_r != TCG_REG_XZR) {
            tcg_out_rev32(s, TCG_REG_TMP, data_r);
            data_r = TCG_REG_TMP;
        }
        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
        break;
    case MO_64:
        if (bswap && data_r != TCG_REG_XZR) {
            tcg_out_rev64(s, TCG_REG_TMP, data_r);
            data_r = TCG_REG_TMP;
        }
        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi, TCGType ext)
{
    TCGMemOp memop = get_memop(oi);
    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;

    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
    tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                           TCG_REG_X1, otype, addr_reg);
    add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (USE_GUEST_BASE) {
        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                               TCG_REG_GUEST_BASE, otype, addr_reg);
    } else {
        tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
    }
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOpIdx oi)
{
    TCGMemOp memop = get_memop(oi);
    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;

    tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
    tcg_out_qemu_st_direct(s, memop, data_reg,
                           TCG_REG_X1, otype, addr_reg);
    add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
                        data_reg, addr_reg, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (USE_GUEST_BASE) {
        tcg_out_qemu_st_direct(s, memop, data_reg,
                               TCG_REG_GUEST_BASE, otype, addr_reg);
    } else {
        tcg_out_qemu_st_direct(s, memop, data_reg,
                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
    }
#endif /* CONFIG_SOFTMMU */
}

static tcg_insn_unit *tb_ret_addr;

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    /* 99% of the time, we can signal the use of extension registers
       by looking to see if the opcode handles 64-bit data.  */
    TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;

    /* Hoist the loads of the most common arguments.  */
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    /* Some operands are defined with "rZ" constraint, a register or
       the zero register.  These need not actually test args[I] == 0.  */
#define REG0(I)  (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
        tcg_out_goto(s, tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
#ifndef USE_DIRECT_JUMP
#error "USE_DIRECT_JUMP required for aarch64"
#endif
        tcg_debug_assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
        s->tb_jmp_offset[a0] = tcg_current_code_size(s);
        /* actual branch destination will be patched by
           aarch64_tb_set_jmp_target later, beware retranslation.  */
        tcg_out_goto_noaddr(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;

    case INDEX_op_br:
        tcg_out_goto_label(s, arg_label(a0));
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, I3312_LDRB, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, I3312_LDRH, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, I3312_LDRW, a0, a1, a2);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, I3312_LDRX, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2);
        break;

    case INDEX_op_add_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addsubi(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addsubi(s, ext, a0, a1, -a2);
        } else {
            tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i64:
    case INDEX_op_neg_i32:
        tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
        break;

    case INDEX_op_and_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_andc_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_eqv_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        if (c2) {
            tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
        } else {
            tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i64:
    case INDEX_op_not_i32:
        tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
        break;

    case INDEX_op_mul_i64:
    case INDEX_op_mul_i32:
        tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
        break;

    case INDEX_op_div_i64:
    case INDEX_op_div_i32:
        tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
    case INDEX_op_divu_i32:
        tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
        break;

    case INDEX_op_rem_i64:
    case INDEX_op_rem_i32:
        tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2);
        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
        break;
    case INDEX_op_remu_i64:
    case INDEX_op_remu_i32:
        tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2);
        tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
        break;

    case INDEX_op_shl_i64:
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_shl(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i64:
    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_shr(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i64:
    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_sar(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_rotr_i64:
    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_rotr(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i64:
    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_rotl(s, ext, a0, a1, a2);
        } else {
            tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
            tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP);
        }
        break;

    case INDEX_op_brcond_i32:
        a1 = (int32_t)a1;
        /* FALLTHRU */
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_setcond_i64:
        tcg_out_cmp(s, ext, a1, a2, c2);
        /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond).  */
        tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
                     TCG_REG_XZR, tcg_invert_cond(args[3]));
        break;

    case INDEX_op_movcond_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
    case INDEX_op_movcond_i64:
        tcg_out_cmp(s, ext, a1, a2, c2);
        tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
        break;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, ext);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, REG0(0), a1, a2);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_rev64(s, a0, a1);
        break;
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap32_i32:
        tcg_out_rev32(s, a0, a1);
        break;
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap16_i32:
        tcg_out_rev16(s, a0, a1);
        break;

    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8s_i32:
        tcg_out_sxt(s, ext, MO_8, a0, a1);
        break;
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16s_i32:
        tcg_out_sxt(s, ext, MO_16, a0, a1);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
        break;
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext8u_i32:
        tcg_out_uxt(s, MO_8, a0, a1);
        break;
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext16u_i32:
        tcg_out_uxt(s, MO_16, a0, a1);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
        break;

    case INDEX_op_deposit_i64:
    case INDEX_op_deposit_i32:
        tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
                        (int32_t)args[4], args[5], const_args[4],
                        const_args[5], false);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
                        args[5], const_args[4], const_args[5], false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
                        (int32_t)args[4], args[5], const_args[4],
                        const_args[5], true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
                        args[5], const_args[4], const_args[5], true);
        break;

    case INDEX_op_muluh_i64:
        tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }

#undef REG0
}

static const TCGTargetOpDef aarch64_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rA" } },
    { INDEX_op_add_i64, { "r", "r", "rA" } },
    { INDEX_op_sub_i32, { "r", "r", "rA" } },
    { INDEX_op_sub_i64, { "r", "r", "rA" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mul_i64, { "r", "r", "r" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },
    { INDEX_op_rem_i32, { "r", "r", "r" } },
    { INDEX_op_rem_i64, { "r", "r", "r" } },
    { INDEX_op_remu_i32, { "r", "r", "r" } },
    { INDEX_op_remu_i64, { "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rL" } },
    { INDEX_op_and_i64, { "r", "r", "rL" } },
    { INDEX_op_or_i32, { "r", "r", "rL" } },
    { INDEX_op_or_i64, { "r", "r", "rL" } },
    { INDEX_op_xor_i32, { "r", "r", "rL" } },
    { INDEX_op_xor_i64, { "r", "r", "rL" } },
    { INDEX_op_andc_i32, { "r", "r", "rL" } },
    { INDEX_op_andc_i64, { "r", "r", "rL" } },
    { INDEX_op_orc_i32, { "r", "r", "rL" } },
    { INDEX_op_orc_i64, { "r", "r", "rL" } },
    { INDEX_op_eqv_i32, { "r", "r", "rL" } },
    { INDEX_op_eqv_i64, { "r", "r", "rL" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },
    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rA" } },
    { INDEX_op_brcond_i64, { "r", "rA" } },
    { INDEX_op_setcond_i32, { "r", "r", "rA" } },
    { INDEX_op_setcond_i64, { "r", "r", "rA" } },
    { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } },
    { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } },

    { INDEX_op_qemu_ld_i32, { "r", "l" } },
    { INDEX_op_qemu_ld_i64, { "r", "l" } },
    { INDEX_op_qemu_st_i32, { "lZ", "l" } },
    { INDEX_op_qemu_st_i64, { "lZ", "l" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },
    { INDEX_op_ext_i32_i64, { "r", "r" } },
    { INDEX_op_extu_i32_i64, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
    { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
    { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },

    { INDEX_op_muluh_i64, { "r", "r", "r" } },
    { INDEX_op_mulsh_i64, { "r", "r", "r" } },

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);

    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_X0) | (1 << TCG_REG_X1) |
                     (1 << TCG_REG_X2) | (1 << TCG_REG_X3) |
                     (1 << TCG_REG_X4) | (1 << TCG_REG_X5) |
                     (1 << TCG_REG_X6) | (1 << TCG_REG_X7) |
                     (1 << TCG_REG_X8) | (1 << TCG_REG_X9) |
                     (1 << TCG_REG_X10) | (1 << TCG_REG_X11) |
                     (1 << TCG_REG_X12) | (1 << TCG_REG_X13) |
                     (1 << TCG_REG_X14) | (1 << TCG_REG_X15) |
                     (1 << TCG_REG_X16) | (1 << TCG_REG_X17) |
                     (1 << TCG_REG_X18) | (1 << TCG_REG_X30));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */

    tcg_add_target_add_op_defs(aarch64_op_defs);
}

/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)).  */
#define PUSH_SIZE  ((30 - 19 + 1) * 8)

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

/* We're expecting to use a single ADDI insn.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);
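
/* Illustrative arithmetic (added; the exact constants depend on the build):
   PUSH_SIZE is 12 registers * 8 bytes = 96.  Assuming, for example,
   TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128, FRAME_SIZE
   is 96 + 128 + 1024 = 1248 after 16-byte alignment, satisfying both
   build-time assertions above (1248 < 1 << 14, 1248 - 96 <= 0xfff).  */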

static void tcg_target_qemu_prologue(TCGContext *s)
{
    TCGReg r;

    /* Push (FP, LR) and allocate space for all saved registers.  */
    tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
                 TCG_REG_SP, -PUSH_SIZE, 1, 1);

    /* Set up frame pointer for canonical unwinding.  */
    tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);

    /* Store callee-preserved regs x19..x28.  */
    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
        int ofs = (r - TCG_REG_X19 + 2) * 8;
        tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
    }

    /* Make stack space for TCG locals.  */
    tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
                 FRAME_SIZE - PUSH_SIZE);

    /* Inform TCG about how to find TCG locals with register, offset, size.  */
    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    /* Remove TCG locals stack space.  */
    tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
                 FRAME_SIZE - PUSH_SIZE);

    /* Restore registers x19..x28.  */
    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
        int ofs = (r - TCG_REG_X19 + 2) * 8;
        tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
    }

    /* Pop (FP, LR), restore SP to previous frame.  */
    tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR,
                 TCG_REG_SP, PUSH_SIZE, 0, 1);
    tcg_out_insn(s, 3207, RET, TCG_REG_LR);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[24];
} DebugFrame;

#define ELF_HOST_MACHINE EM_AARCH64

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x78,             /* sleb128 -8 */
    .h.cie.return_column = TCG_REG_LR,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 28, 1,                   /* DW_CFA_offset, x28,  -8 */
        0x80 + 27, 2,                   /* DW_CFA_offset, x27, -16 */
        0x80 + 26, 3,                   /* DW_CFA_offset, x26, -24 */
        0x80 + 25, 4,                   /* DW_CFA_offset, x25, -32 */
        0x80 + 24, 5,                   /* DW_CFA_offset, x24, -40 */
        0x80 + 23, 6,                   /* DW_CFA_offset, x23, -48 */
        0x80 + 22, 7,                   /* DW_CFA_offset, x22, -56 */
        0x80 + 21, 8,                   /* DW_CFA_offset, x21, -64 */
        0x80 + 20, 9,                   /* DW_CFA_offset, x20, -72 */
        0x80 + 19, 10,                  /* DW_CFA_offset, x19, -80 */
        0x80 + 30, 11,                  /* DW_CFA_offset,  lr, -88 */
        0x80 + 29, 12,                  /* DW_CFA_offset,  fp, -96 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}