/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-pool.inc.c"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    if (offset == sextract32(offset, 0, 24)) {
        *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
        return true;
    }
    return false;
}

static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *code_ptr;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *code_ptr = insn;
        return true;
    }
    return false;
}

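/*
 * For instance (illustrative numbers only): patching a B insn to reach a
 * target 0x108 bytes past the insn stores (0x108 - 8) >> 2 = 0x40 in the
 * 24-bit field, since the CPU adds pc+8 and rescales by 4.  reloc_pc13
 * likewise splits a +/-0xfff byte offset for LDR into a U (add/subtract)
 * bit and a 12-bit magnitude.
 */
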
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    if (type == R_ARM_PC24) {
        return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
    } else if (type == R_ARM_PC13) {
        return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
    } else {
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return NULL;
    }
    return ct_str;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value, right-rotated by an even amount between 0 and 30.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

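/*
 * Worked example for the encoding above (illustrative only):
 * encode_imm(0x0000ff00) finds shift = 8 and returns a left-rotation of
 * 24.  Callers then emit rotl(0xff00, 24) | (24 << 7) = 0xcff, i.e.
 * imm8 = 0xff with a rotate-right field of 12 (= 24 / 2), and the CPU
 * reconstructs 0xff ror 24 = 0x0000ff00.  A value such as 0x101 is not
 * covered by any single rotated byte, so encode_imm returns -1 and
 * callers must fall back to MVN, multiple insns, or the constant pool.
 */
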
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = arg - ((intptr_t)s->code_ptr + 8);
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}

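/*
 * Illustrative traces of the strategies above (example constants only):
 * on ARMv7, 0x12345678 becomes "movw rd, #0x5678; movt rd, #0x1234".
 * Pre-v7, 0x00ff00ff has two encodable byte groups, so the two-insn path
 * emits "mov rd, #0xff" then "eor rd, rd, #0x00ff0000".  A constant like
 * 0x12345678 on pre-v7 fits none of the patterns and is instead loaded
 * from the constant pool via a pc-relative LDR.
 */
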
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

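/*
 * For instance (illustrative): and_i32 with constant 0xffffff00 cannot
 * be encoded directly, but ~0xffffff00 = 0xff can, so the "rIK" path
 * flips to the inverted opcode and emits "bic dst, lhs, #0xff".
 * Likewise, add_i32 with constant -1 goes through the "rIN" path as
 * "sub dst, lhs, #1".
 */
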
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* Swap the two low bytes, assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}

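/*
 * The pre-v6 sequence above is the classic 4-insn byte reversal; for
 * input bytes A:B:C:D (illustrative) it computes
 *   tmp = x ^ ror(x, 16)     ; bytes (A^C):(B^D):(A^C):(B^D)
 *   tmp &= ~0x00ff0000       ; -> (A^C):0:(A^C):(B^D)
 *   rd  = ror(x, 8)          ; D:A:B:C
 *   rd ^= tmp >> 8           ; D:C:B:A
 * so the final eor only corrects the bytes that actually need swapping.
 * (The BIC immediate 0xff | 0x800 encodes 0xff ror 16 = 0x00ff0000.)
 */
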
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}

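/*
 * The pre-v7 fallback above relies on ARM's pc reading as insn+8:
 * "add lr, pc, #0" therefore stores the address of the insn *after* the
 * following pool load, which is exactly the return address the callee
 * needs -- but only because tcg_out_movi_pool expands to a single ldr,
 * as the ??? note says.
 */
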
static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}

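/*
 * Concretely (illustrative): a 64-bit unsigned "less than" emits
 * "cmp ah, bh; cmpeq al, bl" -- if the high words differ, the first cmp
 * already decides CC/CS, and only when they are equal does the
 * conditional second cmp replace the flags.  For signed orderings, the
 * pair "cmp al, bl; sbcs tmp, ah, bh" computes the full 64-bit
 * subtraction so that the N and V flags are valid for GE/LT tests.
 */
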
#ifdef CONFIG_SOFTMMU
#include "../tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers; 4 and up go on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}

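/*
 * Example of the AAPCS marshalling this implements (illustrative): for a
 * load helper with a 64-bit guest address, env lands in r0, argreg is
 * then bumped from 1 to 2 so the address pair occupies r2:r3, and the
 * remaining TCGMemOpIdx and return-address arguments spill to stack
 * offsets 0 and 4 from TCG_REG_CALL_STACK.
 */
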
#define TLB_SHIFT  (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /*
     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
     */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
    } else {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
    }

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }
    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory, which means it
     * isn't worth checking for an immediate operand for BIC.
     */
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));

        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        addrlo, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_bits) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
                            (1 << a_bits) - 1);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}

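/*
 * To sketch the index math (assuming the current QEMU fast-TLB layout):
 * "mask" is (n_entries - 1) << CPU_TLB_ENTRY_BITS, so the single
 * AND-with-shifted-address above yields the byte offset
 *   (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & mask
 * directly, and adding "table" gives the CPUTLBEntry address with no
 * separate multiply by sizeof(CPUTLBEntry).
 */
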
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    void *func;

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#endif /* SOFTMMU */

static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

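/*
 * Net effect for a softmmu load (sketch): tcg_out_tlb_read leaves the
 * flags set and the addend register ready, the BLNE both records this
 * spot and jumps to the slow path on a TLB miss, and the fast-path load
 * issues immediately after.  The slow-path code emitted later from the
 * TCGLabelQemuLdst record calls the proper helper and branches back to
 * raddr, the insn following the fast path.
 */
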
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static void tcg_out_epilogue(TCGContext *s);

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
        tcg_out_epilogue(s);
        break;
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
            dif = ptr - ((intptr_t)s->code_ptr + 8);
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   be no worse off.  */
                base = TCG_REG_R0;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
1842 case INDEX_op_and_i32:
1843 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1844 args[0], args[1], args[2], const_args[2]);
1845 break;
1846 case INDEX_op_andc_i32:
1847 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1848 args[0], args[1], args[2], const_args[2]);
1849 break;
1850 case INDEX_op_or_i32:
1851 c = ARITH_ORR;
1852 goto gen_arith;
1853 case INDEX_op_xor_i32:
1854 c = ARITH_EOR;
1855 /* Fall through. */
1856 gen_arith:
1857 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
1858 break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;
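
    /*
     * In the ARM shifter encoding, an immediate amount of 0 means 32
     * for LSR/ASR (and RRX for ROR), so a constant shift of zero must
     * be emitted as LSL #0, i.e. a plain move; that is what the
     * SHIFT_IMM_LSL(0) fallbacks above are for.
     */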
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;
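
    /* ARM has no rotate-left: rotl(x, n) is emitted as rotr(x, 32 - n),
       with the RSB computing 32 - n in the register-operand case.  */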

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;
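
    /*
     * ctz above is reduced to clz of the bit-reversed input (RBIT).
     * CLZ of zero yields 32, so when the "value if input is zero"
     * operand is the constant 32 a single CLZ suffices; otherwise we
     * compare the input with zero first and conditionally substitute
     * args[2] for the result.
     */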
    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;
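
    /* setcond materializes the boolean with a pair of predicated
       MOV-immediates, one for each sense of the condition, while
       tcg_out_cmp2 compares a 64-bit register pair and returns the
       ARM condition code to test.  */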
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic. */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;
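
    /* extract2 computes bits [pos, pos+31] of the 64-bit concatenation
       args[2]:args[1] (high:low), pos = args[3]: the high part shifted
       left by 32-pos is ORed with the low part shifted right by pos.  */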
    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
    static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
    static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rIN
        = { .args_ct_str = { "r", "r", "rIN" } };
    static const TCGTargetOpDef r_r_rIK
        = { .args_ct_str = { "r", "r", "rIK" } };
    static const TCGTargetOpDef r_r_r_r
        = { .args_ct_str = { "r", "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l_l
        = { .args_ct_str = { "r", "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s_s
        = { .args_ct_str = { "s", "s", "s", "s" } };
    static const TCGTargetOpDef br
        = { .args_ct_str = { "r", "rIN" } };
    static const TCGTargetOpDef ext2
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "rI", "rI" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "rI", "rI" } };
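
    /*
     * Constraint letters, loosely, as parsed by this backend's
     * constraint parser earlier in the file: 'r' is any core register;
     * 'i' any constant; 'I' an ARM rotated immediate; 'K' an immediate
     * whose bitwise inverse encodes; 'N' one whose negation encodes;
     * 'l' and 's' are 'r' minus the registers needed by the softmmu
     * load/store helpers; 'Z' the constant zero; '0' aliases output
     * operand 0.
     */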

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return &r_r_rIN;
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return &r_r_rIK;
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return &r_r_r;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_r_r;
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return &r_r_rI;
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        return &br;
    case INDEX_op_deposit_i32:
        return &dep;
    case INDEX_op_extract2_i32:
        return &ext2;
    case INDEX_op_movcond_i32:
        return &movc;
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

    default:
        return NULL;
    }
}

static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time. */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
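
/* Returning false tells the generic register allocator that this backend
   cannot store a constant directly, so the constant is first materialized
   into a register via tcg_out_movi.  */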
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_mov_reg(s, COND_AL, ret, arg);
    return true;
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)
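
/* PUSH_SIZE is the nine words pushed by the prologue's stmdb (r4-r11 plus
   lr), i.e. 36 bytes; FRAME_SIZE rounds that plus the helper-call argument
   area and the TCG temp buffer up to TCG_TARGET_STACK_ALIGN, and
   STACK_ADDEND is the remainder the prologue must subtract explicitly.  */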

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}
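
/* The pop below restores r4-r11 and returns by loading the saved lr
   straight into pc, so no separate return instruction is needed.  */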
static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
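
/* Each uleb128 byte carries 7 value bits, so two bytes cover values up to
   (1 << 14) - 1; the assertion above keeps FRAME_SIZE within the two bytes
   reserved for it in fde_def_cfa.  */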

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}