/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%pc",
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_PC24:
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;

    default:
        tcg_abort();
    }
}
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
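
/* Worked example (illustrative, not part of the original file): for
 * imm = 0x3f0, ctz32 gives 4, so encode_imm() returns 32 - 4 = 28 and
 * a caller builds the instruction's immediate field as
 *   rotl(0x3f0, 28) | (28 << 7)  ==  0x3f | (14 << 8)
 * i.e. the 8-bit value 0x3f rotated right by 2 * 14 = 28 bits, which
 * reconstructs 0x3f0.  A value such as 0x101 has its set bits 9 apart,
 * wider than any 8-bit window under every even rotation, so
 * encode_imm() returns -1 and a register operand must be used instead. */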
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im)   (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)   (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)   (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)   (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)   (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)   (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)   (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)   (((rs) << 8) | 0x70)
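
/* Example of how these macros compose (illustrative only): emitting
 *   tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 3, SHIFT_IMM_LSL(3))
 * yields the word 0xe1a00183, i.e. "mov r0, r3, lsl #3" -- bits
 * [11:7] hold the shift amount and bits [6:5] select the shift type,
 * exactly as SHIFT_IMM_LSL() lays them out; the SHIFT_REG_* variants
 * instead set bit 4 and put the shift-count register in bits [11:8]. */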
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensures that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */
    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000)
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
    } else {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
        if (arg & 0x0000ff00)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 8) & 0xff) | 0xc00);
        if (arg & 0x00ff0000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 16) & 0xff) | 0x800);
        if (arg & 0xff000000)
            tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                            ((arg >> 24) & 0xff) | 0x400);
    }
}
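
/* Illustrative decomposition (an assumed example, not original code):
 * on a pre-v7 core, tcg_out_movi32(s, COND_AL, r0, 0x12345678) emits
 *   mov r0, #0x78
 *   orr r0, r0, #0x5600        @ 0x56 ror 24, rotate field 0xc00
 *   orr r0, r0, #0x340000      @ 0x34 ror 16, rotate field 0x800
 *   orr r0, r0, #0x12000000    @ 0x12 ror 8,  rotate field 0x400
 * one byte per instruction.  On v7 the movw/movt pair above loads the
 * same constant in at most two instructions. */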
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
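
/* The pre-v6 fallback above is the classic four-instruction byte swap
 * (an annotated sketch for clarity, not taken from the original):
 *   eor r8, rn, rn, ror #16    @ r8 = bytes that differ across halves
 *   bic r8, r8, #0x00ff0000    @ drop the bits that must not move
 *   mov rd, rn, ror #8         @ rough rotate of all four bytes
 *   eor rd, rd, r8, lsr #8     @ patch up the two middle bytes
 * For rn = 0x11223344 this produces rd = 0x44332211; the bic immediate
 * 0xff | 0x800 encodes 0xff rotated right by 16, i.e. 0x00ff0000. */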
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
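
/* Note on the halfword/signed-byte forms above (explanatory comment,
 * not in the original): ldrh/ldrsh/ldrsb/strh use the "extra
 * load/store" encoding, whose 8-bit offset is split into two nibbles
 * at bits [11:8] and [3:0] -- hence the ((im & 0xf0) << 4) | (im & 0xf)
 * packing -- while plain ldr/ldrb/str/strb take a contiguous 12-bit
 * offset.  Negative offsets pick the opcode variant with the U bit
 * clear and encode -im instead. */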
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}

static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT   (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
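
/* How the TLB-indexing immediate used below works (a worked example
 * under the assumption CPU_TLB_ENTRY_BITS = 4 and CPU_TLB_BITS = 8,
 * so TLB_SHIFT = 12): the slow paths add mem_index << TLB_SHIFT to the
 * TLB base.  An ARM immediate is an 8-bit value rotated right by an
 * even amount, so the even part of the shift goes into the rotate
 * field as 16 - (TLB_SHIFT >> 1), and a possible odd bit is folded
 * into the value as mem_index << (TLB_SHIFT & 1).  For mem_index = 1
 * that is the operand 0x1 | (10 << 8): 1 rotated right by 20, which
 * equals 1 << 12. */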
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap)
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap)
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        break;
    case 3:
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap)
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap)
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
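
/* Shape of the code emitted above for a 32-bit softmmu load (an
 * annotated sketch, with the alignment tst omitted; register roles
 * follow the comments inside tcg_out_qemu_ld):
 *   mov   r8, addr, lsr #TARGET_PAGE_BITS
 *   and   r0, r8, #CPU_TLB_SIZE - 1
 *   add   r0, env, r0, lsl #CPU_TLB_ENTRY_BITS
 *   ldr   r1, [r0, #offsetof(addr_read)]
 *   cmp   r1, r8, lsl #TARGET_PAGE_BITS
 *   ldreq r1, [r0, #offsetof(addend)]  @ TLB hit:
 *   ldreq data, [addr, r1]             @   one guest memory access
 *   beq   done                         @ target patched via *label_ptr
 *   ...call __ldl_mmu...               @ TLB miss slow path
 * done:
 */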
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            /* Store the byte-swapped copy from r0; storing data_reg here
               would discard the swap just performed. */
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
    if (opc == 3)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
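
/* Why the 64-bit store path pushes mem_index (explanatory note, not
 * from the original): the EABI places a 64-bit helper argument in an
 * even/odd register pair, so the value takes r2:r3 and the address
 * takes r0 (r0:r1 for 64-bit guest addresses), leaving no core
 * argument register free.  mem_index therefore travels on the stack:
 * "str r8, [sp, #-0x10]!" before the call, undone by the
 * "add r13, r13, #0x10" emitted just after tcg_out_bl above. */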
static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            /* The rotate count (args[2]), not the value to rotate
               (args[1]), must be subtracted from 32 here. */
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) || a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) || a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) || a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
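
/* The two magic words in the prologue below decode as follows (an
 * annotated aside based on the standard ARM STM/LDM encodings): the
 * register-list mask 0x4f70 selects r4-r6, r8-r11 and lr, so
 * 0x092d4f70 is "stmdb sp!, {r4-r6, r8-r11, lr}", while 0x08bd8f70,
 * whose mask 0x8f70 swaps lr for pc, is "ldmia sp!, {r4-r6, r8-r11,
 * pc}" -- restore and return in a single instruction. */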
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}