 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN

#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#ifndef use_neon_instructions
bool use_neon_instructions;

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
# define USING_SOFTMMU 1
# define USING_SOFTMMU 0

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0", "%q1", "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",

static const int tcg_target_reg_alloc_order[] = {
    /* Q4 - Q7 are call-saved, and skipped.  */

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15

    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
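
/*
 * Illustrative example (not in the original): SHIFT_IMM_LSR(8) builds
 * operand2 == (8 << 7) | 0x20 == 0x420, i.e. "<Rm>, lsr #8": the 5-bit
 * immediate shift amount sits at bit 7 and the shift type at bits 6:5.
 * The SHIFT_REG_* forms instead place the shift-amount register at bit 8
 * and set bit 4 to select a register-specified shift.
 */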
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */

#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    *src_rw = deposit32(*src_rw, 0, 12, imm12);
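
/*
 * Note on the reloc_pc* helpers above (added commentary): an ARM
 * PC-relative reference sees the PC as the instruction address plus 8,
 * hence the "- 8" in each displacement.  Each helper scales the byte
 * displacement for the field being patched -- 24-bit word offset for B/BL,
 * 12-bit byte offset plus U (sign) bit for LDR, 8-bit word offset plus U
 * bit for VLDR, rotated imm8 for ADD -- range-checks it, and deposits it;
 * returning false means the target is out of range for that encoding.
 */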
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
    tcg_debug_assert(addend == 0);
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
        g_assert_not_reached();

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

 * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
 * and r0-r1 when doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))

 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
static int encode_imm(uint32_t imm)
    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    if ((imm8 & ~0xff) == 0) {

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
    /* Fail: imm cannot be encoded. */

    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
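
/*
 * Worked example (not in the original): imm == 0xff000000 has ctz32 == 24,
 * so imm8 == 0xff and the required right-rotation is 32 - 24 == 8.  The
 * returned imm12 is (8 << 7) | 0xff == 0x4ff: rotate field 4, which the
 * hardware doubles, and ror(0xff, 8) == 0xff000000 as required.
 */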
static int encode_imm_nofail(uint32_t imm)
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);

static bool check_fit_imm(uint32_t imm)
    return encode_imm(imm) >= 0;

/* Return true if v16 is a valid 16-bit shifted immediate. */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
    if (v16 == (v16 & 0xff)) {
    } else if (v16 == (v16 & 0xff00)) {

/* Return true if v32 is a valid 32-bit shifted immediate. */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
    if (v32 == (v32 & 0xff)) {
    } else if (v32 == (v32 & 0xff00)) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if (v32 == (v32 & 0xff0000)) {
        *imm8 = (v32 >> 16) & 0xff;
    } else if (v32 == (v32 & 0xff000000)) {

/* Return true if v32 is a valid 32-bit shifting ones immediate. */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
    if ((v32 & 0xffff00ff) == 0xff) {
        *imm8 = (v32 >> 8) & 0xff;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *imm8 = (v32 >> 16) & 0xff;

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {

/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    return is_shimm32(v32, cmode, imm8);
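
/*
 * Illustrative example (not in the original): v32 == 0x00010001 repeats
 * its low 16 bits, so is_shimm1632() reduces it to is_shimm16(0x0001),
 * which yields cmode 0x8 with imm8 == 1, i.e. a VMOV immediate of 0x0001
 * replicated into every 16-bit lane.
 */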
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
    if (ct & TCG_CT_CONST) {
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case TCG_CT_CONST_ANDI:
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            return is_shimm1632(val, &cmode, &imm8);
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0a000000 |
              (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
    tcg_out32(s, (cond << 28) | 0x0b000000 |
              (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
              (((offset - 8) >> 2) & 0x00ffffff));

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
              (rn << 16) | (rd << 12) | shift | rm);

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
    /* Simple reg-reg move, optimising out the 'do nothing' case */
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    if (use_armv5t_instructions) {
        tcg_out_bx_reg(s, cond, rn);
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
              (rn << 16) | (rd << 12) | im);

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);

static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
    imm12 = encode_imm(~arg);
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
        imm12 = encode_imm(diff);
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
        imm12 = encode_imm(-diff);
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    if (ctpop32(arg) > 16) {
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
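
/*
 * Worked example (not in the original): arg == 0x0000f0f0 has 8 bits set,
 * so tt0 == arg and opc == ARITH_MOV.  sh1 == 4 strips the low byte-group
 * (tt1 == 0xf000) and sh2 == 12 strips the next (tt2 == 0), so two insns
 * suffice:  mov rd, #0xf0 ; eor rd, rd, #0xf000.
 */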
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
        int imm12 = encode_imm(rhs);
            imm12 = encode_imm_nofail(~rhs);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
        int imm12 = encode_imm(rhs);
            imm12 = encode_imm_nofail(-rhs);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));

static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));

static void __attribute__((unused))
tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);

static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));

static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
    if (use_armv6_instructions) {
        if (flags & TCG_BSWAP_OS) {
            tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
        if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);

     * For stores, no input or output extension:
     *  lsr tmp, rn, #8          tmp = 0xxA
     *  and tmp, tmp, #0xff      tmp = 000A
     *  orr rd, tmp, rn, lsl #8  rd  = xABA
    tcg_out_dat_reg(s, cond, ARITH_MOV,
                    TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
    tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
    tcg_out_dat_reg(s, cond, ARITH_ORR,
                    rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));

    /*
     * Byte swap, leaving the result at the top of the register.
     * We will then shift down, zero or sign-extending.
     */
    if (flags & TCG_BSWAP_IZ) {
         * ror tmp, rn, #8             tmp = B00A
         * orr tmp, tmp, tmp, lsl #16  tmp = BA00
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP,
         * and tmp, rn, #0xff00       tmp = 00A0
         * lsl tmp, tmp, #8           tmp = 0A00
         * orr tmp, tmp, rn, lsl #24  tmp = BA00
        tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24));
    tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP,
                    (flags & TCG_BSWAP_OS
                     ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
    if (use_armv6_instructions) {
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
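
/*
 * Sketch of the pre-v6 sequence above, writing the bytes of rn msb-first
 * as b3 b2 b1 b0:
 *   eor tmp, rn, rn, ror #16    tmp = b3^b1, b2^b0, b1^b3, b0^b2
 *   bic tmp, tmp, #0x00ff0000   tmp = b3^b1, 0,     b1^b3, b0^b2
 *   mov rd, rn, ror #8          rd  = b0,    b3,    b2,    b1
 *   eor rd, rd, tmp, lsr #8     rd  = b0,    b1,    b2,    b3
 * (0xff | 0x800 is the rotated-immediate encoding of 0x00ff0000.)
 */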
static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
        /* bfi becomes bfc with rn == 15.  */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld32_12(s, cond, rd, rn, offset);

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st32_12(s, cond, rd, rn, offset);

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16u_8(s, cond, rd, rn, offset);

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld16s_8(s, cond, rd, rn, offset);

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st16_8(s, cond, rd, rn, offset);

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8_12(s, cond, rd, rn, offset);

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_ld8s_8(s, cond, rd, rn, offset);

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
        tcg_out_st8_12(s, cond, rd, rn, offset);

 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);

    /* LDR is interworking from v5t.  */
    if (arm_mode || use_armv5t_instructions) {
        tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);

 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
            tcg_out_bl_imm(s, COND_AL, disp);
        if (use_armv5t_instructions) {
            tcg_out_blx_imm(s, disp);

    if (use_armv5t_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
    } else if (arm_mode) {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
        tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
        tcg_out_goto(s, cond, l->u.value_ptr);
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);

static void tcg_out_mb(TCGContext *s, TCGArg a0)
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);

        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);

        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

        g_assert_not_reached();

 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
static uint32_t encode_vd(TCGReg rd)
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);

static uint32_t encode_vn(TCGReg rn)
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);

static uint32_t encode_vm(TCGReg rm)
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
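
/*
 * Illustrative example (not in the original): TCG_REG_Q5 corresponds to
 * D10.  encode_vd() places the low three bits of the Q-register number in
 * the Vd field shifted left once (13 == 12 + 1, doubling Q-regno into
 * D-regno), and bit 3 of the Q-register number in the D bit at bit 22.
 */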
static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);

#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.c.inc"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
    [MO_Q]  = helper_be_ldq_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_SL] = helper_be_ldul_mmu,
    [MO_UW] = helper_le_lduw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
    [MO_Q]  = helper_le_ldq_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_SL] = helper_le_ldul_mmu,

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#ifdef HOST_WORDS_BIGENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, 4 and up go on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
        int ofs = (argreg - 4) * 4;                                        \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    return argreg + 1;                                                     \

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
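
/*
 * Illustrative example (assumes the EABI layout described above):
 * marshalling (env, addr64, oi, ra) for a 64-bit guest address starts at
 * TCG_REG_R0; env lands in r0, the address pair is aligned up to the even
 * pair r2:r3, and the two remaining arguments spill to offsets 0 and 4
 * from TCG_REG_CALL_STACK -- the sequence used by the slow paths below.
 */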
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
    if (a_bits < s_bits) {

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);

    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);

    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory, which means it
     * isn't worth checking for an immediate operand for BIC.
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));

        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        addrlo, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);

        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
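
/*
 * Illustrative shape of the fast path emitted above, for a v7 host and a
 * 32-bit guest address (a sketch; some branches are elided):
 *     ldrd  r0, r1, [env + fast_off]    @ r0 = mask, r1 = table
 *     and   r0, r0, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *     add   r1, r1, r0                  @ r1 = &CPUTLBEntry
 *     ldr   r2, [r1 + cmp_off]          @ tlb comparator
 *     ldr   r1, [r1 + addend]           @ fast-path addend
 *     movw+movt tmp, #mask ; bic tmp, addrlo, tmp ; cmp r2, tmp
 * leaving EQ set exactly when the access hits the TLB correctly aligned.
 */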
1632 /* Record the context of a call to the out of line helper code for the slow
1633 path for a load or store, so that we can later generate the correct
1635 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1636 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
1637 TCGReg addrhi, tcg_insn_unit *raddr,
1638 tcg_insn_unit *label_ptr)
1640 TCGLabelQemuLdst *label = new_ldst_label(s);
1642 label->is_ld = is_ld;
1644 label->datalo_reg = datalo;
1645 label->datahi_reg = datahi;
1646 label->addrlo_reg = addrlo;
1647 label->addrhi_reg = addrhi;
1648 label->raddr = tcg_splitwx_to_rx(raddr);
1649 label->label_ptr[0] = label_ptr;
1652 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1654 TCGReg argreg, datalo, datahi;
1655 MemOpIdx oi = lb->oi;
1656 MemOp opc = get_memop(oi);
1659 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1663 argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
1664 if (TARGET_LONG_BITS == 64) {
1665 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1667 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1669 argreg = tcg_out_arg_imm32(s, argreg, oi);
1670 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1672 /* For armv6 we can use the canonical unsigned helpers and minimize
1673 icache usage. For pre-armv6, use the signed helpers since we do
1674 not have a single insn sign-extend. */
1675 if (use_armv6_instructions) {
1676 func = qemu_ld_helpers[opc & MO_SIZE];
1678 func = qemu_ld_helpers[opc & MO_SSIZE];
1679 if (opc & MO_SIGN) {
1683 tcg_out_call(s, func);
1685 datalo = lb->datalo_reg;
1686 datahi = lb->datahi_reg;
1687 switch (opc & MO_SSIZE) {
1689 tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
1692 tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
1695 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1698 if (datalo != TCG_REG_R1) {
1699 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1700 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1701 } else if (datahi != TCG_REG_R0) {
1702 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1703 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1705 tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
1706 tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1707 tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
1712 tcg_out_goto(s, COND_AL, lb->raddr);
1716 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1718 TCGReg argreg, datalo, datahi;
1719 MemOpIdx oi = lb->oi;
1720 MemOp opc = get_memop(oi);
1722 if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1726 argreg = TCG_REG_R0;
1727 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
1728 if (TARGET_LONG_BITS == 64) {
1729 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1731 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1734 datalo = lb->datalo_reg;
1735 datahi = lb->datahi_reg;
1736 switch (opc & MO_SIZE) {
1738 argreg = tcg_out_arg_reg8(s, argreg, datalo);
1741 argreg = tcg_out_arg_reg16(s, argreg, datalo);
1745 argreg = tcg_out_arg_reg32(s, argreg, datalo);
1748 argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
1752 argreg = tcg_out_arg_imm32(s, argreg, oi);
1753 argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1755 /* Tail-call to the helper, which will return to the fast path. */
1756 tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
1759 #endif /* SOFTMMU */
1761 static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
1762 TCGReg datalo, TCGReg datahi,
1763 TCGReg addrlo, TCGReg addend)
1765 /* Byte swapping is left to middle-end expansion. */
1766 tcg_debug_assert((opc & MO_BSWAP) == 0);
1768 switch (opc & MO_SSIZE) {
1770 tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
1773 tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
1776 tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
1779 tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
1782 tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
1785 /* Avoid ldrd for user-only emulation, to handle unaligned. */
1786 if (USING_SOFTMMU && use_armv6_instructions
1787 && (datalo & 1) == 0 && datahi == datalo + 1) {
1788 tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
1789 } else if (datalo != addend) {
1790 tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
1791 tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
1793 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
1794 addend, addrlo, SHIFT_IMM_LSL(0));
1795 tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
1796 tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
1800 g_assert_not_reached();
1804 #ifndef CONFIG_SOFTMMU
1805 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1806 TCGReg datahi, TCGReg addrlo)
1808 /* Byte swapping is left to middle-end expansion. */
1809 tcg_debug_assert((opc & MO_BSWAP) == 0);
1811 switch (opc & MO_SSIZE) {
1813 tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
1816 tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
1819 tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
1822 tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
1825 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1828 /* Avoid ldrd for user-only emulation, to handle unaligned. */
1829 if (USING_SOFTMMU && use_armv6_instructions
1830 && (datalo & 1) == 0 && datahi == datalo + 1) {
1831 tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
1832 } else if (datalo == addrlo) {
1833 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1834 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1836 tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1837 tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1841 g_assert_not_reached();
1846 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1848 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1851 #ifdef CONFIG_SOFTMMU
1854 tcg_insn_unit *label_ptr;
1858 datahi = (is64 ? *args++ : 0);
1860 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1862 opc = get_memop(oi);
1864 #ifdef CONFIG_SOFTMMU
1865 mem_index = get_mmuidx(oi);
1866 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
1868 /* This a conditional BL only to load a pointer within this opcode into LR
1869 for the slow path. We will not be using the value for a tail call. */
1870 label_ptr = s->code_ptr;
1871 tcg_out_bl_imm(s, COND_NE, 0);
1873 tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
1875 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
1876 s->code_ptr, label_ptr);
1877 #else /* !CONFIG_SOFTMMU */
1879 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
1880 tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
1882 tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
1887 static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
1888 TCGReg datalo, TCGReg datahi,
1889 TCGReg addrlo, TCGReg addend)
1891 /* Byte swapping is left to middle-end expansion. */
1892 tcg_debug_assert((opc & MO_BSWAP) == 0);
1894 switch (opc & MO_SIZE) {
1896 tcg_out_st8_r(s, cond, datalo, addrlo, addend);
1899 tcg_out_st16_r(s, cond, datalo, addrlo, addend);
1902 tcg_out_st32_r(s, cond, datalo, addrlo, addend);
1905 /* Avoid strd for user-only emulation, to handle unaligned. */
1906 if (USING_SOFTMMU && use_armv6_instructions
1907 && (datalo & 1) == 0 && datahi == datalo + 1) {
1908 tcg_out_strd_r(s, cond, datalo, addrlo, addend);
1910 tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
1911 tcg_out_st32_12(s, cond, datahi, addend, 4);
1915 g_assert_not_reached();
1919 #ifndef CONFIG_SOFTMMU
1920 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1921 TCGReg datahi, TCGReg addrlo)
1923 /* Byte swapping is left to middle-end expansion. */
1924 tcg_debug_assert((opc & MO_BSWAP) == 0);
1926 switch (opc & MO_SIZE) {
1928 tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
1931 tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
1934 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1937 /* Avoid strd for user-only emulation, to handle unaligned. */
1938 if (USING_SOFTMMU && use_armv6_instructions
1939 && (datalo & 1) == 0 && datahi == datalo + 1) {
1940 tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
1942 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1943 tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
1947 g_assert_not_reached();
1952 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1954 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1957 #ifdef CONFIG_SOFTMMU
1960 tcg_insn_unit *label_ptr;
1964 datahi = (is64 ? *args++ : 0);
1966 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1968 opc = get_memop(oi);
1970 #ifdef CONFIG_SOFTMMU
1971 mem_index = get_mmuidx(oi);
1972 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
1974 tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
1976 /* The conditional call must come last, as we're going to return here. */
1977 label_ptr = s->code_ptr;
1978 tcg_out_bl_imm(s, COND_NE, 0);
1980 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
1981 s->code_ptr, label_ptr);
1982 #else /* !CONFIG_SOFTMMU */
1984 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
1985 tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
1986 datahi, addrlo, TCG_REG_TMP);
1988 tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
1993 static void tcg_out_epilogue(TCGContext *s);
1995 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1996 const TCGArg args[TCG_MAX_OP_ARGS],
1997 const int const_args[TCG_MAX_OP_ARGS])
1999 TCGArg a0, a1, a2, a3, a4, a5;
2003 case INDEX_op_exit_tb:
2004 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
2005 tcg_out_epilogue(s);
2007 case INDEX_op_goto_tb:
2009 /* Indirect jump method */
2010 intptr_t ptr, dif, dil;
2011 TCGReg base = TCG_REG_PC;
2013 tcg_debug_assert(s->tb_jmp_insn_offset == 0);
2014 ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
2015 dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
2016 dil = sextract32(dif, 0, 12);
2018 /* The TB is close, but outside the 12 bits addressable by
2019 the load. We can extend this to 20 bits with a sub of a
2020 shifted immediate from pc. In the vastly unlikely event
2021 the code requires more than 1MB, we'll use 2 insns and
2024 tcg_out_movi32(s, COND_AL, base, ptr - dil);
2026 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
2027 set_jmp_reset_offset(s, args[0]);
2030 case INDEX_op_goto_ptr:
2031 tcg_out_b_reg(s, COND_AL, args[0]);
2034 tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
2037 case INDEX_op_ld8u_i32:
2038 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
2040 case INDEX_op_ld8s_i32:
2041 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
2043 case INDEX_op_ld16u_i32:
2044 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
2046 case INDEX_op_ld16s_i32:
2047 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2049 case INDEX_op_ld_i32:
2050 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2052 case INDEX_op_st8_i32:
2053 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2055 case INDEX_op_st16_i32:
2056 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2058 case INDEX_op_st_i32:
2059 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2062 case INDEX_op_movcond_i32:
2063 /* Constraints mean that v2 is always in the same register as dest,
2064 * so we only need to do "if condition passed, move v1 to dest".
2066 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2067 args[1], args[2], const_args[2]);
2068 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
2069 ARITH_MVN, args[0], 0, args[3], const_args[3]);
2071 case INDEX_op_add_i32:
2072 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
2073 args[0], args[1], args[2], const_args[2]);
2075 case INDEX_op_sub_i32:
2076 if (const_args[1]) {
2077 if (const_args[2]) {
2078 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
2080 tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
2081 args[0], args[2], args[1], 1);
2084 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
2085 args[0], args[1], args[2], const_args[2]);
2088 case INDEX_op_and_i32:
2089 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
2090 args[0], args[1], args[2], const_args[2]);
2092 case INDEX_op_andc_i32:
2093 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
2094 args[0], args[1], args[2], const_args[2]);
2096 case INDEX_op_or_i32:
2099 case INDEX_op_xor_i32:
2103 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
2105 case INDEX_op_add2_i32:
2106 a0 = args[0], a1 = args[1], a2 = args[2];
2107 a3 = args[3], a4 = args[4], a5 = args[5];
2108 if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2111 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2112 a0, a2, a4, const_args[4]);
2113 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2114 a1, a3, a5, const_args[5]);
2115 tcg_out_mov_reg(s, COND_AL, args[0], a0);
2117 case INDEX_op_sub2_i32:
2118 a0 = args[0], a1 = args[1], a2 = args[2];
2119 a3 = args[3], a4 = args[4], a5 = args[5];
2120 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2123 if (const_args[2]) {
2124 if (const_args[4]) {
2125 tcg_out_movi32(s, COND_AL, a0, a4);
2128 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2130 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2131 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2133 if (const_args[3]) {
2134 if (const_args[5]) {
2135 tcg_out_movi32(s, COND_AL, a1, a5);
2138 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2140 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2141 a1, a3, a5, const_args[5]);
2143 tcg_out_mov_reg(s, COND_AL, args[0], a0);
2145 case INDEX_op_neg_i32:
2146 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
2148 case INDEX_op_not_i32:
2149 tcg_out_dat_reg(s, COND_AL,
2150 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
2152 case INDEX_op_mul_i32:
2153 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
2155 case INDEX_op_mulu2_i32:
2156 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2158 case INDEX_op_muls2_i32:
2159 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2161 /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;
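    /* ctz is the clz of the bit-reversed value: RBIT, then CLZ. */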
    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;
    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;
2217 case INDEX_op_brcond_i32:
2218 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2219 args[0], args[1], const_args[1]);
2220 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
2221 arg_label(args[3]));
2223 case INDEX_op_setcond_i32:
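        /*
         * Compare once, then write 1 under the condition and 0 under its
         * inverse; both MOVs are conditionally executed, so no branch is
         * needed.
         */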
2224 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2225 args[1], args[2], const_args[2]);
2226 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
2227 ARITH_MOV, args[0], 0, 1);
2228 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
2229 ARITH_MOV, args[0], 0, 0);
2232 case INDEX_op_brcond2_i32:
2233 c = tcg_out_cmp2(s, args, const_args);
2234 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2236 case INDEX_op_setcond2_i32:
2237 c = tcg_out_cmp2(s, args + 1, const_args + 1);
2238 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2239 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2240 ARITH_MOV, args[0], 0, 0);
2243 case INDEX_op_qemu_ld_i32:
2244 tcg_out_qemu_ld(s, args, 0);
2246 case INDEX_op_qemu_ld_i64:
2247 tcg_out_qemu_ld(s, args, 1);
2249 case INDEX_op_qemu_st_i32:
2250 tcg_out_qemu_st(s, args, 0);
2252 case INDEX_op_qemu_st_i64:
2253 tcg_out_qemu_st(s, args, 1);
2256 case INDEX_op_bswap16_i32:
2257 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2259 case INDEX_op_bswap32_i32:
2260 tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2263 case INDEX_op_ext8s_i32:
2264 tcg_out_ext8s(s, COND_AL, args[0], args[1]);
2266 case INDEX_op_ext16s_i32:
2267 tcg_out_ext16s(s, COND_AL, args[0], args[1]);
2269 case INDEX_op_ext16u_i32:
2270 tcg_out_ext16u(s, COND_AL, args[0], args[1]);
2273 case INDEX_op_deposit_i32:
2274 tcg_out_deposit(s, COND_AL, args[0], args[2],
2275 args[3], args[4], const_args[2]);
2277 case INDEX_op_extract_i32:
2278 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2280 case INDEX_op_sextract_i32:
2281 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2283 case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic. */
2285 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;
2305 case INDEX_op_div_i32:
2306 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2308 case INDEX_op_divu_i32:
2309 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;
2316 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2317 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2323 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);
2329 case INDEX_op_ld8u_i32:
2330 case INDEX_op_ld8s_i32:
2331 case INDEX_op_ld16u_i32:
2332 case INDEX_op_ld16s_i32:
2333 case INDEX_op_ld_i32:
2334 case INDEX_op_neg_i32:
2335 case INDEX_op_not_i32:
2336 case INDEX_op_bswap16_i32:
2337 case INDEX_op_bswap32_i32:
2338 case INDEX_op_ext8s_i32:
2339 case INDEX_op_ext16s_i32:
2340 case INDEX_op_ext16u_i32:
2341 case INDEX_op_extract_i32:
2342 case INDEX_op_sextract_i32:
2343 return C_O1_I1(r, r);
2345 case INDEX_op_st8_i32:
2346 case INDEX_op_st16_i32:
2347 case INDEX_op_st_i32:
2348 return C_O0_I2(r, r);
2350 case INDEX_op_add_i32:
2351 case INDEX_op_sub_i32:
2352 case INDEX_op_setcond_i32:
2353 return C_O1_I2(r, r, rIN);
2355 case INDEX_op_and_i32:
2356 case INDEX_op_andc_i32:
2357 case INDEX_op_clz_i32:
2358 case INDEX_op_ctz_i32:
2359 return C_O1_I2(r, r, rIK);
2361 case INDEX_op_mul_i32:
2362 case INDEX_op_div_i32:
2363 case INDEX_op_divu_i32:
2364 return C_O1_I2(r, r, r);
2366 case INDEX_op_mulu2_i32:
2367 case INDEX_op_muls2_i32:
2368 return C_O2_I2(r, r, r, r);
2370 case INDEX_op_or_i32:
2371 case INDEX_op_xor_i32:
2372 return C_O1_I2(r, r, rI);
2374 case INDEX_op_shl_i32:
2375 case INDEX_op_shr_i32:
2376 case INDEX_op_sar_i32:
2377 case INDEX_op_rotl_i32:
2378 case INDEX_op_rotr_i32:
2379 return C_O1_I2(r, r, ri);
2381 case INDEX_op_brcond_i32:
2382 return C_O0_I2(r, rIN);
2383 case INDEX_op_deposit_i32:
2384 return C_O1_I2(r, 0, rZ);
2385 case INDEX_op_extract2_i32:
2386 return C_O1_I2(r, rZ, rZ);
2387 case INDEX_op_movcond_i32:
2388 return C_O1_I4(r, r, rIN, rIK, 0);
2389 case INDEX_op_add2_i32:
2390 return C_O2_I4(r, r, r, r, rIN, rIK);
2391 case INDEX_op_sub2_i32:
2392 return C_O2_I4(r, r, rI, rI, rIN, rIK);
2393 case INDEX_op_brcond2_i32:
2394 return C_O0_I4(r, r, rI, rI);
2395 case INDEX_op_setcond2_i32:
2396 return C_O1_I4(r, r, r, rI, rI);
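    /*
     * The 'l' and 's' classes are the qemu_ld/qemu_st argument registers,
     * chosen to avoid the registers clobbered on the softmmu slow path.
     */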
2398 case INDEX_op_qemu_ld_i32:
2399 return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
2400 case INDEX_op_qemu_ld_i64:
2401 return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
2402 case INDEX_op_qemu_st_i32:
2403 return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
2404 case INDEX_op_qemu_st_i64:
2405 return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
2407 case INDEX_op_st_vec:
2408 return C_O0_I2(w, r);
2409 case INDEX_op_ld_vec:
2410 case INDEX_op_dupm_vec:
2411 return C_O1_I1(w, r);
2412 case INDEX_op_dup_vec:
2413 return C_O1_I1(w, wr);
2414 case INDEX_op_abs_vec:
2415 case INDEX_op_neg_vec:
2416 case INDEX_op_not_vec:
2417 case INDEX_op_shli_vec:
2418 case INDEX_op_shri_vec:
2419 case INDEX_op_sari_vec:
2420 return C_O1_I1(w, w);
2421 case INDEX_op_dup2_vec:
2422 case INDEX_op_add_vec:
2423 case INDEX_op_mul_vec:
2424 case INDEX_op_smax_vec:
2425 case INDEX_op_smin_vec:
2426 case INDEX_op_ssadd_vec:
2427 case INDEX_op_sssub_vec:
2428 case INDEX_op_sub_vec:
2429 case INDEX_op_umax_vec:
2430 case INDEX_op_umin_vec:
2431 case INDEX_op_usadd_vec:
2432 case INDEX_op_ussub_vec:
2433 case INDEX_op_xor_vec:
2434 case INDEX_op_arm_sshl_vec:
2435 case INDEX_op_arm_ushl_vec:
2436 return C_O1_I2(w, w, w);
2437 case INDEX_op_arm_sli_vec:
2438 return C_O1_I2(w, 0, w);
2439 case INDEX_op_or_vec:
2440 case INDEX_op_andc_vec:
2441 return C_O1_I2(w, w, wO);
2442 case INDEX_op_and_vec:
2443 case INDEX_op_orc_vec:
2444 return C_O1_I2(w, w, wV);
2445 case INDEX_op_cmp_vec:
2446 return C_O1_I2(w, w, wZ);
2447 case INDEX_op_bitsel_vec:
2448 return C_O1_I3(w, w, w, w);
    default:
        g_assert_not_reached();
    }
}
2454 static void tcg_target_init(TCGContext *s)
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
2460 #if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
#ifndef use_idiv_instructions
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
#endif
#ifndef use_neon_instructions
        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
#endif
    }
#endif
2472 if (__ARM_ARCH < 7) {
2473 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2474 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }
2479 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2481 tcg_target_call_clobber_regs = 0;
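    /* AAPCS: r0-r3, ip (r12) and lr (r14) are call-clobbered. */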
2482 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2483 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2484 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2485 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2486 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2487 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2489 if (use_neon_instructions) {
2490 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2491 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2493 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2494 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2495 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2496 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2497 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2498 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2499 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2500 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2501 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2502 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2503 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2504 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2507 s->reserved_regs = 0;
2508 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2509 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2510 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2511 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2514 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2515 TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VLD1 | 0xae0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}
2534 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2535 TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        break;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /* regs 2; size 8; align 16 */
        tcg_out_vldst(s, INSN_VST1 | 0xae0, arg, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}
2554 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    return false;
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;

    default:
        g_assert_not_reached();
    }
}
2584 static void tcg_out_movi(TCGContext *s, TCGType type,
2585 TCGReg ret, tcg_target_long arg)
2587 tcg_debug_assert(type == TCG_TYPE_I32);
2588 tcg_debug_assert(ret < TCG_REG_Q0);
2589 tcg_out_movi32(s, COND_AL, ret, arg);
2592 /* Type is always V128, with I64 elements. */
2593 static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2595 /* Move high element into place first. */
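    /* The (1 << 12) addend bumps the Vd field to Dd+1, the high half. */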
2597 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2598 /* Move low element into place; tcg_out_mov will check for nop. */
2599 tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2602 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2603 TCGReg rd, TCGReg rs)
2605 int q = type - TCG_TYPE_V64;
    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
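        /*
         * VDUP (ARM core register): the b:e fields select the element
         * size; 1:0 is 8-bit, 0:1 is 16-bit, 0:0 is 32-bit.
         */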
2614 int b = (vece == MO_8);
2615 int e = (vece == MO_16);
2616 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2617 encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}
2626 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2627 TCGReg rd, TCGReg base, intptr_t offset)
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}
2642 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2643 TCGReg rd, int64_t v64)
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;

    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }
    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:
    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }
    if (vece == MO_32) {
        uint32_t v32 = v64;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }
        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }
    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
2739 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2740 /* add tmp, pc, offset */
2741 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}
2746 static const ARMInsn vec_cmp_insn[16] = {
2747 [TCG_COND_EQ] = INSN_VCEQ,
2748 [TCG_COND_GT] = INSN_VCGT,
2749 [TCG_COND_GE] = INSN_VCGE,
2750 [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};
2754 static const ARMInsn vec_cmp0_insn[16] = {
2755 [TCG_COND_EQ] = INSN_VCEQ0,
2756 [TCG_COND_GT] = INSN_VCGT0,
2757 [TCG_COND_GE] = INSN_VCGE0,
2758 [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};
2762 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2763 unsigned vecl, unsigned vece,
2764 const TCGArg args[TCG_MAX_OP_ARGS],
2765 const int const_args[TCG_MAX_OP_ARGS])
2767 TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
2777 case INDEX_op_ld_vec:
2778 tcg_out_ld(s, type, a0, a1, a2);
2780 case INDEX_op_st_vec:
2781 tcg_out_st(s, type, a0, a1, a2);
2783 case INDEX_op_dupm_vec:
2784 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2786 case INDEX_op_dup2_vec:
2787 tcg_out_dup2_vec(s, a0, a1, a2);
2789 case INDEX_op_abs_vec:
2790 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2792 case INDEX_op_neg_vec:
2793 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2795 case INDEX_op_not_vec:
2796 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2798 case INDEX_op_add_vec:
2799 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2801 case INDEX_op_mul_vec:
2802 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2804 case INDEX_op_smax_vec:
2805 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2807 case INDEX_op_smin_vec:
2808 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2810 case INDEX_op_sub_vec:
2811 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2813 case INDEX_op_ssadd_vec:
2814 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2816 case INDEX_op_sssub_vec:
2817 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2819 case INDEX_op_umax_vec:
2820 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2822 case INDEX_op_umin_vec:
2823 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2825 case INDEX_op_usadd_vec:
2826 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2828 case INDEX_op_ussub_vec:
2829 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2831 case INDEX_op_xor_vec:
2832 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2834 case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
2839 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2841 case INDEX_op_arm_ushl_vec:
2843 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2845 case INDEX_op_shli_vec:
2846 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2848 case INDEX_op_shri_vec:
2849 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2851 case INDEX_op_sari_vec:
2852 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2854 case INDEX_op_arm_sli_vec:
2855 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            break;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                break;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        break;
    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            break;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                break;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        break;
    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];
            if (cond == TCG_COND_NE) {
                if (const_args[2]) {
                    /* x != 0 is the same as (x & x) != 0: use VTST. */
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
            } else {
                ARMInsn insn;
                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        break;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
            }
        }
        break;
    case INDEX_op_bitsel_vec:
        a3 = args[3];
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        break;
2946 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
2947 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
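/*
 * Report vector op support: 1 if supported directly, -1 if supported
 * via expansion in tcg_expand_vec_op, 0 if unsupported.
 */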
2953 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
2957 case INDEX_op_sub_vec:
2958 case INDEX_op_and_vec:
2959 case INDEX_op_andc_vec:
2960 case INDEX_op_or_vec:
2961 case INDEX_op_orc_vec:
2962 case INDEX_op_xor_vec:
2963 case INDEX_op_not_vec:
2964 case INDEX_op_shli_vec:
2965 case INDEX_op_shri_vec:
2966 case INDEX_op_sari_vec:
2967 case INDEX_op_ssadd_vec:
2968 case INDEX_op_sssub_vec:
2969 case INDEX_op_usadd_vec:
2970 case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
2973 case INDEX_op_abs_vec:
2974 case INDEX_op_cmp_vec:
2975 case INDEX_op_mul_vec:
2976 case INDEX_op_neg_vec:
2977 case INDEX_op_smax_vec:
2978 case INDEX_op_smin_vec:
2979 case INDEX_op_umax_vec:
2980 case INDEX_op_umin_vec:
2981 return vece < MO_64;
2982 case INDEX_op_shlv_vec:
2983 case INDEX_op_shrv_vec:
2984 case INDEX_op_sarv_vec:
2985 case INDEX_op_rotli_vec:
2986 case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
3008 case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
3014 v2 = temp_tcgv_vec(arg_temp(a2));
3015 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3016 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3019 case INDEX_op_shrv_vec:
3020 case INDEX_op_sarv_vec:
3021 /* Right shifts are negative left shifts for NEON. */
3022 v2 = temp_tcgv_vec(arg_temp(a2));
3023 t1 = tcg_temp_new_vec(type);
3024 tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
3030 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3031 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3032 tcg_temp_free_vec(t1);
3035 case INDEX_op_rotli_vec:
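        /*
         * rotli(v1, imm) is v1 >> (width - imm), with v1 << imm then
         * inserted on top via SLI.
         */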
3036 t1 = tcg_temp_new_vec(type);
3037 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3038 vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
3039 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3040 tcg_temp_free_vec(t1);
3043 case INDEX_op_rotlv_vec:
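        /* rotlv: v0 = (v1 << v2) | (v1 >> (width - v2)). */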
3044 v2 = temp_tcgv_vec(arg_temp(a2));
3045 t1 = tcg_temp_new_vec(type);
3046 c1 = tcg_constant_vec(type, vece, 8 << vece);
3047 tcg_gen_sub_vec(vece, t1, v2, c1);
3048 /* Right shifts are negative left shifts for NEON. */
3049 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3050 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3051 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3052 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3053 tcg_gen_or_vec(vece, v0, v0, t1);
3054 tcg_temp_free_vec(t1);
3057 case INDEX_op_rotrv_vec:
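        /* rotrv: v0 = (v1 >> v2) | (v1 << (width - v2)). */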
3058 v2 = temp_tcgv_vec(arg_temp(a2));
3059 t1 = tcg_temp_new_vec(type);
3060 t2 = tcg_temp_new_vec(type);
3061 c1 = tcg_constant_vec(type, vece, 8 << vece);
3062 tcg_gen_neg_vec(vece, t1, v2);
3063 tcg_gen_sub_vec(vece, t2, c1, v2);
3064 /* Right shifts are negative left shifts for NEON. */
3065 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3066 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3067 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3068 tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3069 tcg_gen_or_vec(vece, v0, t1, t2);
3070 tcg_temp_free_vec(t1);
3071 tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}
3079 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}
3087 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3088 and tcg_register_jit. */
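/* r4-r11 plus lr: the nine registers saved by the prologue's stmdb. */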
3090 #define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
3095 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3096 + TCG_TARGET_STACK_ALIGN - 1) \
3097 & -TCG_TARGET_STACK_ALIGN)
3099 #define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)
3101 static void tcg_target_qemu_prologue(TCGContext *s)
3103 /* Calling convention requires us to save r4-r11 and lr. */
3104 /* stmdb sp!, { r4 - r11, lr } */
3105 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
3106 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3107 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3108 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
3110 /* Reserve callee argument and tcg temp space. */
3111 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3112 TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3113 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3114 CPU_TEMP_BUF_NLONGS * sizeof(long));
3116 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3118 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
3124 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3125 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3126 tcg_out_epilogue(s);
3129 static void tcg_out_epilogue(TCGContext *s)
3131 /* Release local stack frame. */
3132 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3133 TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3135 /* ldmia sp!, { r4 - r11, pc } */
3136 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3137 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3138 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3139 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;
3148 #define ELF_HOST_MACHINE EM_ARM
/* We're expecting a 2-byte uleb128 encoded value.  */
3151 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3153 static const DebugFrame debug_frame = {
3154 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3157 .h.cie.code_align = 1,
3158 .h.cie.data_align = 0x7c, /* sleb128 -4 */
3159 .h.cie.return_column = 14,
3161 /* Total FDE size does not include the "len" member. */
3162 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
3170 /* The following must match the stmdb in the prologue. */
3171 0x8e, 1, /* DW_CFA_offset, lr, -4 */
3172 0x8b, 2, /* DW_CFA_offset, r11, -8 */
3173 0x8a, 3, /* DW_CFA_offset, r10, -12 */
3174 0x89, 4, /* DW_CFA_offset, r9, -16 */
3175 0x88, 5, /* DW_CFA_offset, r8, -20 */
3176 0x87, 6, /* DW_CFA_offset, r7, -24 */
3177 0x86, 7, /* DW_CFA_offset, r6, -28 */
3178 0x85, 8, /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};
3183 void tcg_register_jit(const void *buf, size_t buf_size)
3185 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));