/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "tcg-be-ldst.h"

/* The __ARM_ARCH define is provided by gcc 4.8.  Construct it otherwise.  */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
     || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
     || defined(__ARM_ARCH_7EM__)
#  define __ARM_ARCH 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
       || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
       || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
#  define __ARM_ARCH 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
       || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
       || defined(__ARM_ARCH_5TEJ__)
#  define __ARM_ARCH 5
# else
#  define __ARM_ARCH 4
# endif
#endif

static int arm_arch = __ARM_ARCH;

#if defined(__ARM_ARCH_5T__) \
    || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
# define use_armv5t_instructions 1
#else
# define use_armv5t_instructions use_armv6_instructions
#endif

#define use_armv6_instructions  (__ARM_ARCH >= 6 || arm_arch >= 6)
#define use_armv7_instructions  (__ARM_ARCH >= 7 || arm_arch >= 7)
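
/* Explanatory note (illustrative, not in the original source): when the
   compiler itself targets ARMv6/v7, __ARM_ARCH makes these macros
   compile-time constants and the pre-v6 fallback paths below fold away;
   otherwise they test the arm_arch variable, which starts at the
   compile-time guess and may be raised by runtime detection elsewhere
   in this file. */
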
#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12

static inline void reloc_abs32(void *code_ptr, intptr_t target)
{
    *(uint32_t *) code_ptr = target;
}

static inline void reloc_pc24(void *code_ptr, intptr_t target)
{
    uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value, right-rotated by an even amount between 0 and 30.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
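
/* Worked example (illustrative comment, not in the original source):
   encode_imm(0x3fc00), i.e. 0xff << 10.  ctz32 gives 10 (already even),
   (0x3fc00 >> 10) == 0xff fits in 8 bits, so the function returns
   32 - 10 = 22: the constant is the 8-bit value 0xff rotated right by 22.
   Callers then emit rotl(imm, rot) | (rot << 7), which places 22 / 2 = 11
   in the 4-bit rotate field of the instruction's operand-2 encoding. */
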
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}

#define TO_CPSR (1 << 20)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,
} ARMInsn;

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
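
/* Encoding note (illustrative, not in the original source): these build the
   shifter operand of a data-processing instruction.  SHIFT_IMM_LSL(0)
   encodes a plain register operand (rm, no shift); SHIFT_IMM_ROR(16)
   encodes "rm, ror #16", with the 5-bit immediate in bits 11:7 and the
   shift type in bits 6:5.  The _REG variants set bit 4 and place the
   shift-amount register in bits 11:8. */
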
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation. */
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
}

static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation. */
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0b);
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    if (use_armv7_instructions) {
        /* Architected nop introduced in v6k.  */
        /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
           also Just So Happened to do nothing on pre-v6k so that we
           don't need to conditionalize it?  */
        tcg_out32(s, 0xe320f000);
    } else {
        /* Prior to that the assembler uses mov r0, r0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, opc, rn;

    /* For armv7, make sure not to use movw+movt when mov/mvn would do.
       Speed things up by only checking when movt would be required.
       Prior to armv7, have one go at fully rotated immediates before
       doing the decomposition thing below.  */
    if (!use_armv7_instructions || (arg & 0xffff0000)) {
        rot = encode_imm(arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                            rotl(arg, rot) | (rot << 7));
            return;
        }
        rot = encode_imm(~arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                            rotl(~arg, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* TODO: This is very suboptimal, we can easily have a constant
       pool somewhere after all the instructions.  */
    opc = ARITH_MOV;
    rn = 0;
    /* If we have lots of leading 1's, we can shorten the sequence by
       beginning with mvn and then clearing higher bits with eor.  */
    if (clz32(~arg) > clz32(arg)) {
        opc = ARITH_MVN, arg = ~arg;
    }
    do {
        int i = ctz32(arg) & ~1;
        rot = ((32 - i) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
        arg &= ~(0xff << i);

        opc = ARITH_EOR;
        rn = rd;
    } while (arg);
}
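
/* Worked example of the pre-v7 fallback (illustrative, not in the original
   source): arg = 0xfffff0f0.  Since clz32(~arg) = 20 > clz32(arg) = 0, we
   start with MVN and arg becomes 0x00000f0f.  First iteration: i = 0, emit
   "mvn rd, #0x0f" (rd = 0xfffffff0) and arg becomes 0xf00.  Second
   iteration: i = 8, rotate field (32 - 8) / 2 = 12 (i.e. ror #24), emit
   "eor rd, rd, #0xf00", yielding 0xfffff0f0 in two instructions. */
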
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}

bool tcg_target_deposit_valid(int ofs, int len)
{
    /* ??? Without bfi, we could improve over generic code by combining
       the right-shift from a non-zero ofs with the orr.  We do run into
       problems when rd == rs, and the mask generated from ofs+len doesn't
       fit into an immediate.  We would have to be careful not to pessimize
       wrt the optimizations performed on the expanded code.  */
    return use_armv7_instructions;
}

static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}
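
/* Field-encoding example (illustrative, not in the original source):
   depositing an 8-bit field at ofs = 8 puts lsb = 8 in bits 11:7 and
   msb = ofs + len - 1 = 15 in bits 20:16, i.e. "bfi rd, rn, #8, #8".
   With a constant-zero source (const_a1), rn is forced to 15 and the
   same encoding decodes as bfc. */
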
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t disp = addr - (tcg_target_long) s->code_ptr;

    if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }

    tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
    if (use_armv5t_instructions) {
        tcg_out_bx(s, cond, TCG_REG_TMP);
    } else {
        if (addr & 1) {
            tcg_abort();
        }
        tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
    }
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5t_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}

#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 are real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);                      \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
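
/* Expansion example (illustrative, not in the original source):
   tcg_out_arg_reg32(s, 5, r) takes the "else" arm above, storing the value
   at sp + (5 - 4) * 4, i.e. the second outgoing stack slot, and returns 6.
   For argreg < 4 the value simply lands in r0..r3 via the MOV_ARG hook;
   the EXT_ARG hook lets the 8/16-bit and immediate variants materialize
   the value in TCG_REG_TMP before the stack store. */
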
static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
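
/* EABI example (illustrative, not in the original source): with argreg = 1
   on entry, the alignment bump moves the pair to r2:r3; with argreg = 3 it
   moves past the registers entirely, so the pair goes to an 8-aligned stack
   slot - via a single strd when the source registers already form an
   even/odd pair on ARMv6+, else as two 32-bit stores. */
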
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We're expecting to use an 8-bit immediate and to mask.  */
QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8);

/* We're expecting to use an 8-bit immediate add + 8-bit ldrd offset.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0xffff);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               TCGMemOp s_bits, int mem_index, bool is_load)
{
    TCGReg base = TCG_AREG0;
    int cmp_off =
        (is_load
         ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
         : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);

    /* Should generate something like the following:
     *   shr    tmp, addrlo, #TARGET_PAGE_BITS                    (1)
     *   add    r2, env, #high
     *   and    r0, tmp, #(CPU_TLB_SIZE - 1)                      (2)
     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS               (3)
     *   ldr    r0, [r2, #cmp]                                    (4)
     *   tst    addrlo, #s_mask
     *   ldr    r2, [r2, #add]                                    (5)
     *   cmpeq  r0, tmp, lsl #TARGET_PAGE_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));

    /* We checked that the offset is contained within 16 bits above.  */
    if (add_off > 0xfff || (use_armv6_instructions && cmp_off > 0xff)) {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                        (24 << 7) | (cmp_off >> 8));
        base = TCG_REG_R2;
        add_off -= cmp_off & 0xff00;
        cmp_off &= 0xff;
    }

    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));

    /* Load the tlb comparator.  Use ldrd if needed and available,
       but due to how the pointer needs setting up, ldm isn't useful.
       Base arm5 doesn't have ldrd, but armv5te does.  */
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
    } else {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4);
        }
    }

    /* Check alignment.  */
    if (s_bits) {
        tcg_out_dat_imm(s, COND_AL, ARITH_TST,
                        0, addrlo, (1 << s_bits) - 1);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off);

    tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
                    TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
    }

    return TCG_REG_R2;
}

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, int mem_index,
                                uint8_t *raddr, uint8_t *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOp opc = lb->opc;
    uintptr_t func;

    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN];
    } else {
        func = (uintptr_t)qemu_ld_helpers[opc];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOp opc = lb->opc;

    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, (uintptr_t)qemu_st_helpers[opc]);
}
#endif /* SOFTMMU */

static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    uint8_t *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    opc = *args++;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl_noaddr(s, COND_NE);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    uint8_t *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    opc = *args++;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl_noaddr(s, COND_NE);

    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        if (use_armv7_instructions || check_fit_imm(args[0])) {
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
        } else {
            uint8_t *ld_ptr = s->code_ptr;
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
            tcg_out32(s, args[0]);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[2], const_args[2]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
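    /*
     * setcond materializes the boolean without a branch: one MOV
     * predicated on the condition writes 1, and a second predicated
     * on the inverted condition writes 0.
     */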
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[2], args[4], const_args[4]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;
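    /*
     * The second argument to tcg_out_qemu_ld/st selects 32- vs 64-bit
     * data; under CONFIG_SOFTMMU those helpers also emit the TLB
     * lookup and the slow-path helper call.
     */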
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    default:
        tcg_abort();
    }
}
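/*
 * Constraint letters below, as parsed by target_parse_constraint earlier
 * in this file: "r" is any core register; "I" a valid ARM immediate; "K"
 * a constant whose bitwise inverse is a valid immediate; "N" one whose
 * negation is; "Z" the constant zero; "l" and "s" restrict qemu_ld/st
 * operands to registers not clobbered by the TLB lookup; "0" ties the
 * operand to output 0.
 */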
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rIN" } },
    { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rIK" } },
    { INDEX_op_andc_i32, { "r", "r", "rIK" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rIN" } },
    { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
    { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
    { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld_i32, { "r", "l" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "l" } },
    { INDEX_op_qemu_st_i32, { "s", "s" } },
    { INDEX_op_qemu_st_i64, { "s", "s", "s" } },
#else
    { INDEX_op_qemu_ld_i32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "l", "l" } },
    { INDEX_op_qemu_st_i32, { "s", "s", "s" } },
    { INDEX_op_qemu_st_i64, { "s", "s", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time. */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
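    /* On ARM Linux, AT_PLATFORM is a string such as "v7l"; the digit
       after the 'v' gives the architecture revision. */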
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit. */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
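/* PUSH_SIZE counts r4-r11 (8 registers) plus lr: 9 words, 36 bytes.
   Adding TCG_TARGET_STACK_ALIGN - 1 and masking with -TCG_TARGET_STACK_ALIGN
   rounds the frame size up to the required stack alignment. */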
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
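    /* In the register mask 0x4ff0, bits 4-11 select r4-r11 and bit 14
       selects lr; the epilogue's mask 0x8ff0 sets bit 15 (pc) instead,
       so the final load is also the return. */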
    /* Reserve callee argument and tcg temp space. */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
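    /* The bx above jumps to the translated code whose address arrives
       as the second call argument; everything below is the common
       return path that generated code branches back to. */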
    tb_ret_addr = s->code_ptr;

    /* Epilogue.  We branch here via tb_ret_addr. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2-byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
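/* uleb128 stores 7 bits per byte, least-significant group first, with
   the high bit of each byte flagging a continuation.  FRAME_SIZE is
   encoded below as a fixed two-byte sequence: the low 7 bits with the
   continuation bit set, then the remaining bits; the build assert
   above guarantees the value fits. */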
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x7c,             /* sleb128 -4 */
    .cie.return_column = 14,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue. */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}