/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-ldst.h"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3,
    TCG_REG_R12, TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
}

static inline void reloc_pc24_atomic(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    tcg_insn_unit insn = atomic_read(code_ptr);
    tcg_debug_assert(offset == sextract32(offset, 0, 24));
    atomic_set(code_ptr, deposit32(insn, 0, 24, offset));
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(type == R_ARM_PC24);
    tcg_debug_assert(addend == 0);
    reloc_pc24(code_ptr, (tcg_insn_unit *)value);
}
#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30.  */

/* Return the rotation to apply, or -1 if the immediate cannot be encoded.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0) {
        return 0;
    }
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0) {
        return 32 - shift;
    }
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0) {
        return 2;
    }
    if ((rotl(imm, 4) & ~0xff) == 0) {
        return 4;
    }
    if ((rotl(imm, 6) & ~0xff) == 0) {
        return 6;
    }
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
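/* Illustrative note, not part of the original source: a worked example of
 * the encoding above.  0x00ff0000 is accepted because rotl(0x00ff0000, 16)
 * == 0xff fits in 8 bits, so encode_imm() returns 16 and callers emit the
 * operand as rotl(arg, rot) | (rot << 7), i.e. "0xff ror #16".  A value
 * such as 0x101 has no even rotation that brings all set bits into one
 * 8-bit window, so encode_imm() returns -1 and callers fall back to the
 * multi-instruction paths (e.g. tcg_out_movi32 below).
 */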
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}
#define TO_CPSR (1 << 20)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0x5bf07ff5,
    INSN_DMB_MCR   = 0xba0f07ee,
} ARMInsn;

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
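/* Illustrative note, not part of the original source: these macros build the
 * shifter-operand field that the data-processing emitters below consume.
 * For example,
 *     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, rd, rn, rm, SHIFT_IMM_LSL(2));
 * assembles "add rd, rn, rm, lsl #2", while SHIFT_REG_LSL(rs) selects the
 * register-shift form "add rd, rn, rm, lsl rs" instead.
 */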
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by masking
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation.  */
    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a));
}

static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by masking
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation.  */
    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0b));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    if (use_armv7_instructions) {
        /* Architected nop introduced in v6k.  */
        /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
           also Just So Happened to do nothing on pre-v6k so that we
           don't need to conditionalize it?  */
        tcg_out32(s, 0xe320f000);
    } else {
        /* Prior to that the assembler uses mov r0, r0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, opc, rn;

    /* For armv7, make sure not to use movw+movt when mov/mvn would do.
       Speed things up by only checking when movt would be required.
       Prior to armv7, have one go at fully rotated immediates before
       doing the decomposition thing below.  */
    if (!use_armv7_instructions || (arg & 0xffff0000)) {
        rot = encode_imm(arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                            rotl(arg, rot) | (rot << 7));
            return;
        }
        rot = encode_imm(~arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                            rotl(~arg, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* TODO: This is very suboptimal, we can easily have a constant
       pool somewhere after all the instructions.  */
    opc = ARITH_MOV;
    rn = 0;
    /* If we have lots of leading 1's, we can shorten the sequence by
       beginning with mvn and then clearing higher bits with eor.  */
    if (clz32(~arg) > clz32(arg)) {
        opc = ARITH_MVN, arg = ~arg;
    }
    do {
        int i = ctz32(arg) & ~1;
        rot = ((32 - i) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
        arg &= ~(0xff << i);

        opc = ARITH_EOR;
        rn = rd;
    } while (arg);
}
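/* Illustrative note, not part of the original source: on a pre-v7 core a
 * constant such as 0x12345678 has no single rotated-immediate encoding, so
 * the do/while loop above peels it off 8 bits at a time (starting at an even
 * bit position): a MOV of the lowest chunk followed by one EOR per remaining
 * chunk.  Simpler values like 0xfffffffe never reach the loop, because the
 * earlier encode_imm(~arg) check already emits a single MVN for them.
 */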
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
/* swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
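/* Illustrative note, not part of the original source: the pre-v6 path above
 * is the classic four-instruction word byteswap.  "eor tmp, rn, rn, ror #16"
 * puts byte[i] ^ byte[i^2] in every lane; the BIC clears the 0x00ff0000 lane
 * (0xff | 0x800 is the rotated-immediate encoding of 0x00ff0000) so it does
 * not corrupt the middle bytes; and "mov rd, rn, ror #8" followed by
 * "eor rd, rd, tmp, lsr #8" reassembles the fully byte-reversed word.
 */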
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                                 int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                               int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}
/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }

    tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
    if (use_armv5t_instructions) {
        tcg_out_bx(s, cond, TCG_REG_TMP);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
    }
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* add lr, pc, #4; ldr pc, [pc, #-4]; .word addr */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addri);
    }
}
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
    tcg_insn_unit *target = (tcg_insn_unit *)addr;

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    reloc_pc24_atomic(code_ptr, target);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_noaddr(s, cond);
    }
}
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
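/* Illustrative example, not part of the original source: for a 64-bit guest
 * address, the load slow path below marshals (env, addr_lo:addr_hi, oi, ra)
 * roughly as
 *     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);   -> r0
 *     argreg = tcg_out_arg_reg64(s, argreg, addrlo, addrhi);  -> r2:r3
 *     argreg = tcg_out_arg_imm32(s, argreg, oi);              -> first stack slot
 * so a 64-bit argument that would start at an odd argreg is bumped to the
 * next even register, matching the EABI rules the comment above describes.
 */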
#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We're expecting to use an 8-bit immediate and to mask.  */
QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8);

/* We're expecting to use an 8-bit immediate add + 8-bit ldrd offset.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0xffff);
/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               TCGMemOp opc, int mem_index, bool is_load)
{
    TCGReg base = TCG_AREG0;
    int cmp_off =
        (is_load
         ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /* Should generate something like the following:
     *   shr    tmp, addrlo, #TARGET_PAGE_BITS                    (1)
     *   add    r2, env, #high
     *   and    r0, tmp, #(CPU_TLB_SIZE - 1)                      (2)
     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS               (3)
     *   ldr    r0, [r2, #cmp]                                    (4)
     *   tst    addrlo, #s_mask
     *   ldr    r2, [r2, #add]                                    (5)
     *   cmpeq  r0, tmp, lsl #TARGET_PAGE_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));

    /* We checked that the offset is contained within 16 bits above.  */
    if (add_off > 0xfff || (use_armv6_instructions && cmp_off > 0xff)) {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                        (24 << 7) | (cmp_off >> 8));
        base = TCG_REG_R2;
        add_off -= cmp_off & 0xff00;
        cmp_off &= 0xff;
    }

    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));

    /* Load the tlb comparator.  Use ldrd if needed and available,
       but due to how the pointer needs setting up, ldm isn't useful.
       Base arm5 doesn't have ldrd, but armv5te does.  */
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
    } else {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4);
        }
    }

    /* Check alignment.  We don't support inline unaligned accesses,
       but we can easily support overalignment checks.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    if (a_bits) {
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, (1 << a_bits) - 1);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off);

    tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
                    TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
    }

    return TCG_REG_R2;
}
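/* Illustrative note, not part of the original source: after this sequence
 * the condition flags encode the TLB lookup result.  The optional TST (only
 * emitted when an alignment check is required) and the conditional CMP chain
 * leave COND_EQ true only when the page-aligned address matches the TLB
 * comparator (and, for 64-bit guests, the high word matches too).  Callers
 * then emit the slow-path BL with COND_NE and the fast-path memory access
 * with COND_EQ or COND_AL as appropriate.
 */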
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);
    void *func;

    reloc_pc24(lb->label_ptr[0], s->code_ptr);

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
}
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    reloc_pc24(lb->label_ptr[0], s->code_ptr);

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
}
#endif /* SOFTMMU */
static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}
static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode into
       LR for the slow path.  We will not be using the value for a tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl_noaddr(s, COND_NE);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}
static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl_noaddr(s, COND_NE);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static tcg_insn_unit *tb_ret_addr;
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
        tcg_out_goto(s, COND_AL, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump method */
            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
            tcg_out_b_noaddr(s, COND_AL);
        } else {
            /* Indirect jump method */
            intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
        }
        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[2], const_args[2]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]],
                           arg_label(args[5]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[2], args[4], const_args[4]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rIN" } },
    { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rIK" } },
    { INDEX_op_andc_i32, { "r", "r", "rIK" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rIN" } },
    { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
    { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
    { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "r", "l" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "l" } },
    { INDEX_op_qemu_st_i32, { "s", "s" } },
    { INDEX_op_qemu_st_i64, { "s", "s", "s" } },
#else
    { INDEX_op_qemu_ld_i32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "l", "l" } },
    { INDEX_op_qemu_st_i32, { "s", "s", "s" } },
    { INDEX_op_qemu_st_i64, { "s", "s", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
    { INDEX_op_extract_i32, { "r", "r" } },
    { INDEX_op_sextract_i32, { "r", "r" } },

    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },

    { INDEX_op_mb, { } },
};
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    int i, n = ARRAY_SIZE(arm_op_defs);

    for (i = 0; i < n; ++i) {
        if (arm_op_defs[i].op == op) {
            return &arm_op_defs[i];
        }
    }
    return NULL;
}
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
    tb_ret_addr = s->code_ptr;

    /* Epilogue.  We branch here via tb_ret_addr.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    },
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}