/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    /* ... */
};
static const int tcg_target_reg_alloc_order[] = {
    /* ... */
};
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3,
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1,
};
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_PC24:
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;
    }
}
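/* Worked example (illustrative): patching a branch at code_ptr = 0x1000 to
 * target value = 0x1010 under R_ARM_PC24.  ARM reads PC as the instruction
 * address + 8 and the 24-bit field holds a word offset, so the field
 * becomes (0x1010 - (0x1000 + 8)) >> 2 = 2, kept in the low 24 bits while
 * the condition/opcode byte in bits 31:24 is preserved.  */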
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
#ifndef CONFIG_SOFTMMU
    case 'd':
    case 'D':
    case 'x':
    case 'X':
#endif
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

#ifdef CONFIG_SOFTMMU
    /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
    case 'x':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld64 data_reg */
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r1 is still needed to load data_reg2, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld/st64 data_reg2 */
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0, r1 and optionally r2 will be overwritten by the address
         * and the low word of data, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
# if TARGET_LONG_BITS == 64
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
# endif
        break;

# if TARGET_LONG_BITS == 64
    /* qemu_ld/st addr_reg2 */
    case 'X':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 will be overwritten by the low word of base, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
# endif
#endif

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
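/* Note: the letters parsed above pair up with the constraint strings in
 * arm_op_defs at the bottom of this file: plain "r" for any register,
 * "I" for a rotated-immediate ALU constant, and, under CONFIG_SOFTMMU,
 * "x"/"X"/"d"/"D" for qemu_ld/st value and address registers that must
 * stay clear of r0-r2, which the slow path clobbers with call arguments. */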
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
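/* Worked example (illustrative): imm = 0x3fc does not fit in 8 bits, but
 * ctz32(0x3fc) & ~1 = 2 and 0x3fc >> 2 = 0xff, so encode_imm returns
 * 32 - 2 = 30.  The callers below then emit the operand as
 * rotl(0x3fc, 30) | (30 << 7), i.e. imm8 = 0xff with a rotate-right-by-30
 * field, which the CPU rotates back into 0x3fc.  */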
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;

    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};
#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
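/* These macros build the 12-bit operand-2 shifter field of an ARM
 * data-processing instruction: an immediate shift amount lives in bits
 * 11:7, a shift-by-register sets bit 4 and puts the register in bits 11:8,
 * and bits 6:5 select LSL/LSR/ASR/ROR.  TO_CPSR sets the S bit (bit 20)
 * for the comparison opcodes, which exist only to update the flags.  */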
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* Emit only the condition/opcode byte; the 24-bit offset field is
     * left for patch_reloc to fill in later.  */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
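/* Branch offset arithmetic (illustrative): ARM reads PC as the branch
 * address + 8, so tcg_out_b/tcg_out_bl bias the requested byte offset by
 * -8 before converting it to a signed 24-bit word count.  For example
 * tcg_out_b(s, COND_EQ, 8), as used for the TLB slow paths below, encodes
 * a field of 0 and initially just falls through to the next instruction. */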
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
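/* Worked encoding example (illustrative): tcg_out_dat_reg(s, COND_AL,
 * ARITH_ADD, 0, 1, 2, SHIFT_IMM_LSL(0)) assembles "add r0, r1, r2" as
 * (0xe << 28) | (0x4 << 21) | (1 << 16) | (0 << 12) | 2 = 0xe0810002.  */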
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        /* The low-half result would clobber an input of the second
         * instruction, so compute it into r8 and move it afterwards.  */
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);

    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */

    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (offset < 0x100 && offset > -0x100)
        return offset >= 0 ?
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);

#ifdef __ARM_ARCH_7A__
    /* use movw/movt */
    tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
              | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
    if (arg & 0xffff0000)
        tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                  | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
#else
    tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
    if (arg & 0x0000ff00)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 8) & 0xff) | 0xc00);
    if (arg & 0x00ff0000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 16) & 0xff) | 0x800);
    if (arg & 0xff000000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 24) & 0xff) | 0x400);
#endif
}
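/* Worked example (illustrative): on pre-v7 cores, loading arg = 0x12345678
 * with no short-cut available emits
 *   mov r0, #0x78
 *   orr r0, r0, #0x5600      @ imm8 0x56 ror 24
 *   orr r0, r0, #0x340000    @ imm8 0x34 ror 16
 *   orr r0, r0, #0x12000000  @ imm8 0x12 ror 8
 * where 0xc00/0x800/0x400 above are the rotate fields that place each
 * byte at bits 8, 16 and 24 respectively.  */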
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, 8, SHIFT_IMM_LSL(0));
    }
}
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
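/* The operand shuffling above works around an architectural restriction:
 * on older ARM cores MUL/UMULL/SMULL may not use the same register as a
 * destination and as the Rm operand, so these emitters either swap the
 * commutative operands or route Rm through the scratch register r8.  */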
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
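/* Offset dispatch example (illustrative): tcg_out_ld32u with offset 8 fits
 * the 12-bit ldr immediate and becomes a single "ldr rd, [rn, #8]", while
 * offset 0x2000 overflows it, so the offset is first materialized in the
 * scratch register r8 via tcg_out_movi32 and a register-offset
 * "ldr rd, [rn, r8]" is emitted instead.  The halfword forms switch over
 * at the smaller +/-0xff ldrh/strh limit.  */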
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
    }
}
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
    }

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif
    /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg);  */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
    tcg_out_bx(s, cond, arg);
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    case 3:
        if (data_reg != 0)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        if (data_reg2 != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
        break;
    }

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
#endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);

            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
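/* Slow-path patching note (illustrative): the "tcg_out_b(s, COND_EQ, 8)"
 * above emits a beq whose 24-bit field is initially 0, and label_ptr
 * remembers where it lives.  Once the helper-call sequence has been
 * emitted, "*label_ptr += (s->code_ptr - label_ptr - 8) >> 2" adds the
 * word distance to the end of that sequence, so the TLB-hit case jumps
 * straight past the slow path.  */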
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
#endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);

            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
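/* Calling-convention note (illustrative): the register shuffling above
 * lines up with helpers of the form __stl_mmu(addr, val, mem_index).
 * With a 32-bit guest address the value goes in r1 (r1:r2 for 64-bit
 * data) and mem_index follows; with a 64-bit guest address in r0:r1 the
 * value moves to r2 (r2:r3 for 64-bit data), and a qemu_st64's mem_index
 * no longer fits in registers, so it is pushed on the stack and popped
 * (add sp, sp, #0x10) after the call.  */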
static uint8_t *tb_ret_addr;
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
#ifdef SAVE_LR
        if (args[0] >> 8)
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
        else
            tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
        if (args[0] >> 8)
            tcg_out32(s, args[0]);
#else
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
#endif
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, 15, 15, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
            tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;
    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;
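    /* Shift-count note: in the operand-2 encoding, an immediate LSR/ASR
     * amount of 0 actually means "shift by 32", so a TCG shift count of
     * zero must be emitted as LSL #0 (a plain mov) instead -- hence the
     * nested conditionals above.  */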
    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[0], rotl(args[1], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) || a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) || a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) || a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, COND_AL, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, COND_AL, args, 3);
        break;
    case INDEX_op_ext8s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxtb */
        tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(24));
#endif
        break;
    case INDEX_op_ext16s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxth */
        tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(16));
#endif
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "x" } },
    { INDEX_op_qemu_ld8s, { "r", "x" } },
    { INDEX_op_qemu_ld16u, { "r", "x" } },
    { INDEX_op_qemu_ld16s, { "r", "x" } },
    { INDEX_op_qemu_ld32, { "r", "x" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x" } },

    { INDEX_op_qemu_st8, { "x", "x" } },
    { INDEX_op_qemu_st16, { "x", "x" } },
    { INDEX_op_qemu_st32, { "x", "x" } },
    { INDEX_op_qemu_st64, { "x", "D", "x" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },

    { INDEX_op_qemu_st8, { "x", "x", "X" } },
    { INDEX_op_qemu_st16, { "x", "x", "X" } },
    { INDEX_op_qemu_st32, { "x", "x", "X" } },
    { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
#endif

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                    ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                    ((2 << TCG_REG_R3) - 1) |
                    (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
#ifdef SAVE_LR
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}
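/* Control-flow sketch (illustrative): the prologue saves the callee-saved
 * registers, then "bx r0" enters the translated block whose address the
 * caller passed in r0.  A block finishes with INDEX_op_exit_tb, whose
 * tcg_out_goto lands on tb_ret_addr -- the ldmia just above -- which
 * restores the saved registers and returns to the caller with the exit
 * status in r0.  */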