/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#ifndef use_neon_instructions
bool use_neon_instructions;
/* ??? Ought to think about changing CONFIG_SOFTMMU to always being defined. */
# define USING_SOFTMMU 1
# define USING_SOFTMMU 0

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",

static const int tcg_target_reg_alloc_order[] = {
    /* Q4 - Q7 are call-saved, and skipped.  */

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15

enum arm_cond_code_e {
    COND_CS = 0x2, /* Unsigned greater or equal */
    COND_CC = 0x3, /* Unsigned less than */
    COND_MI = 0x4, /* Negative */
    COND_PL = 0x5, /* Zero or greater */
    COND_VS = 0x6, /* Overflow */
    COND_VC = 0x7, /* No overflow */
    COND_HI = 0x8, /* Unsigned greater than */
    COND_LS = 0x9, /* Unsigned less or equal */

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,

    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12
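
/*
 * Background note: a read of the ARM PC during execution yields the
 * address of the current instruction plus 8 (a legacy of the classic
 * three-stage pipeline), which is why the relocation and branch helpers
 * below subtract 8 from every computed displacement.
 */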
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int rot = encode_imm(offset);
        *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
    tcg_debug_assert(addend == 0);
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
        g_assert_not_reached();
#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
 * and r0-r1 doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))

static inline uint32_t rotl(uint32_t val, int n)
    return (val << n) | (val >> (32 - n));
/* ARM immediates for ALU instructions are made of an unsigned 8-bit value
   right-rotated by an even amount between 0 and 30. */
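/*
 * Worked example (illustrative, not from the original source): 0x00ab0000
 * is encodable, since it is 0xab rotated right by 16; the insn holds
 * imm8 = 0xab with a 4-bit rotate field of 8 (the field stores half the
 * rotate amount).  0x00ab00cd is not encodable, since its set bits cannot
 * fit within 8 contiguous bits under any even rotation.
 */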
static int encode_imm(uint32_t imm)
    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
    if ((rotl(imm, 4) & ~0xff) == 0)
    if ((rotl(imm, 6) & ~0xff) == 0)
    /* imm can't be encoded */

static inline int check_fit_imm(uint32_t imm)
    return encode_imm(imm) >= 0;

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
    if (v16 == (v16 & 0xff)) {
    } else if (v16 == (v16 & 0xff00)) {

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
    if (v32 == (v32 & 0xff)) {
    } else if (v32 == (v32 & 0xff00)) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if (v32 == (v32 & 0xff0000)) {
        *imm8 = (v32 >> 16) & 0xff;
    } else if (v32 == (v32 & 0xff000000)) {

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
    if ((v32 & 0xffff00ff) == 0xff) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *imm8 = (v32 >> 16) & 0xff;
/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
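/*
 * Worked example (illustrative): v32 = 0x00ab00cd.  Masking out the byte
 * at bits [23:16] leaves 0x000000cd, a valid shifted immediate for MOVI;
 * the masked-out byte 0xab is then merged back with a single
 * ORR (immediate) at that position.
 */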
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
        return is_shimm32(v32, cmode, imm8);

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
    if (ct & TCG_CT_CONST) {
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case TCG_CT_CONST_ANDI:
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            return is_shimm1632(val, &cmode, &imm8);
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                 (rn << 16) | (rd << 12) | shift | rm);

static inline void tcg_out_nop(TCGContext *s)
    tcg_out32(s, INSN_NOP);

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
    /* Simple reg-reg move, optimising out the 'do nothing' case */
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register. */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                 (rn << 16) | (rd << 12) | im);

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
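
/*
 * Illustrative note (not in the original): the "_8" form above splits its
 * 8-bit offset across two 4-bit fields, as the ldrh/strh/ldrd encodings
 * require.  E.g. an offset of 0x34 places 0x3 in bits [11:8] and 0x4 in
 * bits [3:0] of the instruction word.
 */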
static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    rot = encode_imm(arg);
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
    rot = encode_imm(~arg);
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block. */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
        rot = encode_imm(diff);
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
        rot = encode_imm(-diff);
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));

    /* Use movw + movt. */
    if (use_armv7_instructions) {
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor. */
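    /*
     * Worked example (illustrative): arg = 0x00ab00cd (16 or fewer set
     * bits) is emitted as
     *     mov  rd, #0xcd
     *     eor  rd, rd, #0x00ab0000
     * while arg = 0xff00ffcd (more than 16 set bits) inverts to
     * 0x00ff0032 and is emitted as
     *     mvn  rd, #0x32
     *     eor  rd, rd, #0x00ff0000
     */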
    if (ctpop32(arg) > 16) {
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);

    /* Otherwise, drop it into the constant pool. */
    tcg_out_movi_pool(s, cond, rd, arg);
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
        int rot = encode_imm(rhs);
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
        int rot = encode_imm(rhs);
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
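
/*
 * Summary (derived from the three helpers above): "rI" accepts a register
 * or an encodable immediate; "rIK" additionally accepts an immediate whose
 * bitwise inverse is encodable, switching to the inverted opcode (e.g.
 * AND <-> BIC); "rIN" accepts an immediate whose negation is encodable,
 * switching to the negated opcode (e.g. ADD <-> SUB).
 */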
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
            /* rd == rn == rm; copy an input to tmp first. */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static inline void tcg_out_ext8s(TCGContext *s, int cond,
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));

static inline void tcg_out_ext8u(TCGContext *s, int cond,
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);

static inline void tcg_out_ext16s(TCGContext *s, int cond,
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));

static inline void tcg_out_ext16u(TCGContext *s, int cond,
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
/* Swap the two low bytes, assuming that the two high input bytes and the
   two high output bytes can hold any value. */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
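
/*
 * Derivation of the pre-v6 sequence above (illustrative): with input
 * bytes ABCD (A most significant),
 *     tmp = rn ^ ror(rn, 16)   = (A^C)(B^D)(A^C)(B^D)
 *     bic tmp, #0x00ff0000     -> (A^C) 0 (A^C)(B^D)
 *     rd  = ror(rn, 8)         = D A B C
 *     rd ^= tmp >> 8           = D C B A
 * i.e. a full byte swap in four data-processing insns, with no loads.
 * (The BIC immediate 0xff | 0x800 encodes 0xff rotated right 16,
 * i.e. 0x00ff0000.)
 */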
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
        /* bfi becomes bfc with rn == 15.  */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld32_12(s, cond, rd, rn, offset);

static inline void tcg_out_st32(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st32_12(s, cond, rd, rn, offset);

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16u_8(s, cond, rd, rn, offset);

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16s_8(s, cond, rd, rn, offset);

static inline void tcg_out_st16(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st16_8(s, cond, rd, rn, offset);

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8_12(s, cond, rd, rn, offset);

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8s_8(s, cond, rd, rn, offset);

static inline void tcg_out_st8(TCGContext *s, int cond,
                               int rd, int rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st8_12(s, cond, rd, rn, offset);
/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);

    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range. */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
            tcg_out_blx_imm(s, disp);
            tcg_out_bl(s, COND_AL, disp);
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
        /* ??? Know that movi_pool emits exactly 1 insn. */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
        tcg_out_goto(s, cond, l->u.value_ptr);
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);

        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);

        /* Similar, but with swapped arguments, via reversed subtract. */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

        g_assert_not_reached();
/*
 * Note that TCGReg references Q-registers.
 * Q-regno = 2 * D-regno, so shift left by 1 while inserting.
 */
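/*
 * Worked example (illustrative): Q9 occupies D18/D19.  rd = 9 = 0b1001,
 * so bit 3 (= 1) becomes the D field and bits 2:0 (= 0b001) land in the
 * upper bits of Vd, giving D:Vd = 1:0010, i.e. dregno 18 as required.
 */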
static uint32_t encode_vd(TCGReg rd)
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);

static uint32_t encode_vn(TCGReg rn)
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);

static uint32_t encode_vm(TCGReg rm)
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers; 4 and up go on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
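/*
 * Example (illustrative, per the AAPCS): a call to
 * helper_ret_ldub_mmu(env, addr, oi, ra) with a 32-bit guest address
 * marshals env->r0, addr->r1, oi->r2, ra->r3.  With a 64-bit guest
 * address, tcg_out_arg_reg64 rounds argreg up to an even number so the
 * address lands in the r2:r3 pair, and the remaining arguments spill
 * to the stack.
 */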
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
        int ofs = (argreg - 4) * 4;                                        \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    return argreg + 1;                                                     \

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP. */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    /*
     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
     */
    if (a_bits < s_bits) {
    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);

    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory, which means it
     * isn't worth checking for an immediate operand for BIC.
     */
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));

        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        addrlo, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
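
/*
 * Sketch of the fast path emitted above (one possible shape, v7 host
 * with a 32-bit guest; illustrative only):
 *   1. load f.mask/f.table into r0/r1 with a single ldrd from env,
 *   2. and r0 with the address shifted down, forming the tlb index,
 *   3. load the comparator into r2 (writeback leaves the entry in r1),
 *   4. load the addend field of the entry into r1,
 *   5. bic the in-page bits from the address and cmp against r2;
 * the caller then branches on NE to the slow path.
 */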
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code. */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend. */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);

    tcg_out_goto(s, COND_AL, lb->raddr);
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
#endif /* SOFTMMU */
static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned. */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned. */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;

    datahi = (is64 ? *args++ : 0);
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call. */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        /* Avoid strd for user-only emulation, to handle unaligned. */
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);

static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        /* Avoid strd for user-only emulation, to handle unaligned. */
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;

    datahi = (is64 ? *args++ : 0);
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here. */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);

static void tcg_out_epilogue(TCGContext *s);

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
    TCGArg a0, a1, a2, a3, a4, a5;
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
        tcg_out_epilogue(s);
    case INDEX_op_goto_tb:
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
            dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
            dil = sextract32(dif, 0, 12);
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   split it. */
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;
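    /*
     * Illustrative note, not from the original source: in the ARM
     * immediate shift encoding, LSR/ASR by 0 actually encodes a shift
     * by 32 (and ROR by 0 encodes RRX), which is why a constant shift
     * count of zero above is emitted as LSL #0, i.e. a plain MOV.
     */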

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;
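    /*
     * Worked example, added for illustration: ARM has no rotate-left,
     * so rotl(x, n) is emitted as ror(x, 32 - n).  A constant rotl by 8
     * becomes "mov rd, rn, ror #24"; for a register count, the RSB
     * above computes 32 - n into TMP first.
     */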

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;
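    /*
     * Illustrative note, not from the original source: ctz is derived
     * as clz(rbit(x)).  When the "value if zero" operand is the
     * constant 32, hardware CLZ already returns 32 for a zero input,
     * so the compare and conditional move are skipped entirely.
     */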

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic.  */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, r, rIN);

    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return C_O1_I2(r, r, rIK);

    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return C_O1_I2(r, r, r);

    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, rI);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, rIN);
    case INDEX_op_deposit_i32:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_extract2_i32:
        return C_O1_I2(r, rZ, rZ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, rIN, rIK, 0);
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rIN, rIK);
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rI, rIN, rIK);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, rI, rI);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, rI, rI);

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
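
    /*
     * Note added for illustration: C_Om_In(...) names a constraint set
     * with m outputs followed by n inputs.  Loosely, in this backend,
     * "r" is any general register, "w" a vector register, "l" and "s"
     * the restricted classes for the qemu_ld/qemu_st slow paths, and
     * "0" ties an input to output 0.  Capital letters admit constants,
     * e.g. "rIN" accepts a register, a valid ARM immediate, or an
     * immediate whose negation is valid; see the constraint definitions
     * for the authoritative meanings.
     */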

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
#ifndef use_idiv_instructions
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
#endif
#ifndef use_neon_instructions
        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
#endif
    }
#endif

    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }
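
    /*
     * Example, added for clarity: on Linux the AT_PLATFORM auxv string
     * is something like "v7l" on an ARMv7 host, so pl[1] - '0' yields 7.
     */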

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    if (use_neon_instructions) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;

        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}

/* Type is always V128, with I64 elements.  */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;

    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }
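
    /*
     * Worked example, added for illustration: for v16 == 0x1234 the
     * pair above emits VMOV.I16 rd, #0x34 followed by VORR.I16 with
     * imm8 << 8 (cmode 0xb), leaving 0x1234 in every 16-bit lane.
     */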

    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }

    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}

static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};

static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;

    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;

    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;

    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;

                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        return;

    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);

    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;

    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON.  */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;
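
    /*
     * Illustrative note, not from the original source: NEON VSHL shifts
     * each lane left by a per-lane signed count, so x >> n is expanded
     * as VSHL(x, -n); the unsigned vs signed opcode choice selects a
     * logical vs arithmetic right shift for those negative counts.
     */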

    case INDEX_op_rotli_vec:
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;
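
    /*
     * Worked example, added for illustration: for vece == MO_8 and
     * a2 == 3, t1 = v1 >> 5, and VSLI then inserts v1 << 3 over the
     * high bits, yielding (v1 << 3) | (v1 >> 5), a rotate left by 3.
     */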

    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotrv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
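
/*
 * Worked example, added for illustration: PUSH_SIZE covers r4-r11 plus
 * lr, i.e. (11 - 4 + 1 + 1) * 4 = 36 bytes.  The (x + align - 1) & -align
 * idiom rounds x up to a multiple of the alignment; e.g. x = 676 with
 * an 8-byte alignment gives (676 + 7) & ~7 = 680.
 */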

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
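
    /*
     * Decoding aid, added for illustration: with COND_AL (0xe) this
     * word is 0xe92d4ff0; the register-list mask 0x4ff0 sets bits 4-11
     * and 14, i.e. r4-r11 and lr.
     */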

    /* Reserve callee argument and tcg temp space.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};
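
/*
 * Encoding note, added for illustration: each pair above is
 * (0x80 | regno, factored offset), so "0x84, 9" records r4 saved at
 * 9 * data_align = 9 * -4 = -36 bytes from the CFA.
 */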

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}