/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
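
/* Note on the sequence above: tmp1 receives the product of the
 * sign-extended low halfwords, and the two arithmetic right shifts then
 * let b accumulate the product of the high halfwords, so the pair
 * (a, b) returns both 16x16->32 products with a single extra temporary.
 */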
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
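
/* The xor/sub pair above is the usual branch-free sign extension of a
 * width-bit field: masking keeps the field, then XORing with the sign
 * bit and subtracting it sign-extends.  E.g. for width == 4 the field
 * value 0xe becomes (0xe ^ 0x8) - 0x8 = -2.
 */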
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
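
/* tcg_gen_mulu2_i32 yields the full 64-bit product as separate low/high
 * 32-bit halves; concatenating them afterwards keeps the multiply itself
 * in 32-bit operations, which is cheaper on 32-bit hosts than widening
 * both operands to 64 bits first.
 */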
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
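
/* Clearing bit 15 of both addends before the 32-bit add guarantees that
 * no carry propagates from the low halfword into the high one; the final
 * XOR with tmp restores the carry-less sum of the two original bit-15
 * values, giving two independent 16-bit additions in one 32-bit op.
 */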
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
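
/* Flag convention used throughout: cpu_ZF is compared against zero and
 * cpu_NF/cpu_VF are tested on bit 31 only, so the full 32-bit result can
 * be stored into NF and ZF unmodified.  The signed overflow rule encoded
 * above is VF = (result ^ t0) & ~(t0 ^ t1): overflow happens exactly when
 * the operands had equal signs and the result's sign differs.
 */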
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
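
/* Subtract-with-carry is folded onto the adder: in two's complement,
 * T0 - T1 + CF - 1 equals T0 + ~T1 + CF, so complementing t1 and reusing
 * gen_adc_CC produces both the result and the correct flags.
 */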
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
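
/* In the immediate forms above, an encoded shift of 0 is special per the
 * ARM ARM: LSR #0 and ASR #0 denote shifts by 32, and ROR #0 denotes RRX,
 * a 33-bit rotate through the carry flag; hence the shift == 0 branch of
 * case 3, which shifts the old carry in at bit 31.
 */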
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(su);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(su);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
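
/* Conditions 8-13 above avoid materialising a boolean: since only bit 31
 * of NF and VF is significant, NF ^ VF is negative exactly when N != V,
 * so ge/lt reduce to one signed compare, while hi/gt use a local "inv"
 * label to fall through when the extra Z test fails.
 */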
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC);                          \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_st_i32(val, addr, index, OPC);                          \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
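
/* The instantiations above expand to gen_aa32_ld8u/ld8s/ld16u/ld16s/ld32u
 * and gen_aa32_st8/st16/st32, used by the decoders below.  The MO_TE*
 * memops request target-endian accesses; on builds where
 * TARGET_LONG_BITS != 32 the AArch32 virtual address is first
 * zero-extended into the wider TCG address type.
 */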
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
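
/* Neon and VFP deliberately use separate float_status blocks: Neon
 * always runs in the "standard FP" mode (flush-to-zero, default NaN),
 * while VFP proper honours the FPSCR rounding and exception controls,
 * so helpers must be handed the pointer matching the instruction class.
 */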
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
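
/* A NEON D register aliases a pair of 32-bit VFP registers, so a 32-bit
 * pass is addressed as single-precision register reg * 2 + n; the
 * l.upper/l.lower split in vfp_reg_offset() keeps this correct on both
 * host endiannesses.
 */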
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
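
/* Shift amounts come either from one of the wCGR control registers
 * (bit 8 set) or from the bottom of a wR data register; the caller
 * supplies the mask for the element size.  A nonzero return means the
 * encoding is treated as UNDEFINED, matching the convention of the
 * disas_* routines below.
 */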
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
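/* Decoding sketch (illustrative only; vfp_sreg_decode_example is a
 * hypothetical helper, not part of the translator): the VFP_SREG_* macros
 * recombine a single-precision register number from the wide 4-bit field
 * plus one extra low bit.  For the _D variant (bigbit=12, smallbit=22),
 * insn[15:12] = 0xf with insn[22] = 1 decodes to s31.
 */
static inline int vfp_sreg_decode_example(uint32_t insn, int bigbit,
                                          int smallbit)
{
    /* top four bits from the wide field, bit 0 from the "small" bit */
    return (VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | ((insn >> smallbit) & 1);
}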
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
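/* Worked example (illustration): gen_neon_dup_u8(var, 0) turns
 * var = 0x000000ab into 0xabababab; gen_neon_dup_low16 turns 0x0000abcd
 * into 0xabcdabcd; gen_neon_dup_high16 turns 0xabcd0000 into 0xabcdabcd.
 */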
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
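/* Condition mapping note: insn[21:20] selects the VSEL condition
 * (0 -> EQ, 1 -> VS, 2 -> GE, 3 -> GT).  The "select the other operand"
 * half comes for free because tcg_gen_movcond_* writes frm to dest when
 * the comparison is false, so no explicit else path is generated.
 */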
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
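/* Usage note: the two gen_helper_set_rmode() calls bracket the rounding
 * operation: the helper installs the requested mode in the float_status
 * and hands back the previous mode in tcg_rmode, so the second call
 * restores whatever rounding mode was in force before.
 */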
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
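/* Usage sketch (fp_decode_rm_example is a hypothetical helper, mirroring
 * the callers below): for the VRINT/VCVT encodings handled in this file
 * the common rounding-mode field lives in insn[17:16], so the decode step
 * is just a table lookup.
 */
static inline int fp_decode_rm_example(uint32_t insn)
{
    return fp_decode_rm[extract32(insn, 16, 2)];   /* FPDecodeRM() */
}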
static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(env, s, insn);
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_MVFR))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;
                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }
            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
                        !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_feature(env, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }
                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
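/* Design note: tcg_gen_exit_tb((uintptr_t)tb + n) returns to the execution
 * loop with the jump-slot index n encoded in the low bits of the TB pointer,
 * which lets the loop patch this TB's exit to chain directly to the next TB
 * once it is translated; exit_tb(0) requests a normal lookup instead.  This
 * only applies when source and destination share a guest page, hence the
 * TARGET_PAGE_MASK check above.
 */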
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V4T))
        mask &= ~CPSR_T;
    if (!arm_feature(env, ARM_FEATURE_V5))
        mask &= ~CPSR_Q; /* V5TE in reality */
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
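/* Worked example (illustration): a privileged MSR with flags == 0x9
 * (the "c" and "f" fields) starts from mask 0xff0000ff; the feature tests
 * above then clear any bits the core does not implement (e.g. CPSR_GE on
 * pre-v6 cores) before the mask is applied.
 */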
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
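/* Expansion sketch: with size == 1 (16-bit elements) and u == 1,
 * GEN_NEON_INTEGER_OP(max) selects case 3 and emits
 * gen_helper_neon_max_u16(tmp, tmp, tmp2).  The macros assume the caller
 * has size, u, tmp and tmp2 in scope and may "return 1" on a bad size.
 */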
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
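/* Reading the table (sketch, assuming the standard VLD/VST element-type
 * layout reconstructed above): entry op == 0 describes a 4-register
 * structure with 4-way interleave, so four D registers are filled from
 * elements interleaved four-wide in memory; spacing selects single- or
 * double-register spacing between structure members.
 */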
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
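/* Semantics note: gen_neon_bsl() computes dest = (t & c) | (f & ~c), i.e.
 * each result bit comes from t where the control bit is set and from f
 * where it is clear.  For example c = 0xff00ff00, t = 0xaaaaaaaa,
 * f = 0x55555555 yields 0xaa55aa55; the VBSL/VBIT/VBIF encodings handled
 * later differ only in which operand plays each role.
 */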
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
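/* Example check (illustration): neon_3r_sizes[NEON_3R_VQDMULH_VQRDMULH]
 * is 0x6, so VQDMULH/VQRDMULH are valid only for 16-bit and 32-bit
 * elements; the decoder tests (neon_3r_sizes[op] & (1 << size)) and
 * UNDEFs on a miss.
 */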
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int pass;
    int pairwise;
    int u;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        if (size == 3 && op != NEON_3R_LOGIC) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VADD_VSUB:
                    if (u) {
                        tcg_gen_sub_i64(CPU_V001);
                    } else {
                        tcg_gen_add_i64(CPU_V001);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
5030 case NEON_3R_VQRSHL
:
5033 /* Shift instruction operands are reversed. */
5048 case NEON_3R_FLOAT_ARITH
:
5049 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5051 case NEON_3R_FLOAT_MINMAX
:
5052 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5054 case NEON_3R_FLOAT_CMP
:
5056 /* no encoding for U=0 C=1x */
5060 case NEON_3R_FLOAT_ACMP
:
5065 case NEON_3R_FLOAT_MISC
:
5066 /* VMAXNM/VMINNM in ARMv8 */
5067 if (u
&& !arm_feature(env
, ARM_FEATURE_V8
)) {
5072 if (u
&& (size
!= 0)) {
5073 /* UNDEF on invalid size for polynomial subcase */
5078 if (!arm_feature(env
, ARM_FEATURE_VFP4
) || u
) {
5086 if (pairwise
&& q
) {
5087 /* All the pairwise insns UNDEF if Q is set */
5091 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5096 tmp
= neon_load_reg(rn
, 0);
5097 tmp2
= neon_load_reg(rn
, 1);
5099 tmp
= neon_load_reg(rm
, 0);
5100 tmp2
= neon_load_reg(rm
, 1);
5104 tmp
= neon_load_reg(rn
, pass
);
5105 tmp2
= neon_load_reg(rm
, pass
);
5109 GEN_NEON_INTEGER_OP(hadd
);
5112 GEN_NEON_INTEGER_OP_ENV(qadd
);
5114 case NEON_3R_VRHADD
:
5115 GEN_NEON_INTEGER_OP(rhadd
);
5117 case NEON_3R_LOGIC
: /* Logic ops. */
5118 switch ((u
<< 2) | size
) {
5120 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
5123 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
5126 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5129 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
5132 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
5135 tmp3
= neon_load_reg(rd
, pass
);
5136 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
5137 tcg_temp_free_i32(tmp3
);
5140 tmp3
= neon_load_reg(rd
, pass
);
5141 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
5142 tcg_temp_free_i32(tmp3
);
5145 tmp3
= neon_load_reg(rd
, pass
);
5146 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
5147 tcg_temp_free_i32(tmp3
);
5152 GEN_NEON_INTEGER_OP(hsub
);
5155 GEN_NEON_INTEGER_OP_ENV(qsub
);
5158 GEN_NEON_INTEGER_OP(cgt
);
5161 GEN_NEON_INTEGER_OP(cge
);
5164 GEN_NEON_INTEGER_OP(shl
);
5167 GEN_NEON_INTEGER_OP_ENV(qshl
);
5170 GEN_NEON_INTEGER_OP(rshl
);
5172 case NEON_3R_VQRSHL
:
5173 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5176 GEN_NEON_INTEGER_OP(max
);
5179 GEN_NEON_INTEGER_OP(min
);
5182 GEN_NEON_INTEGER_OP(abd
);
5185 GEN_NEON_INTEGER_OP(abd
);
5186 tcg_temp_free_i32(tmp2
);
5187 tmp2
= neon_load_reg(rd
, pass
);
5188 gen_neon_add(size
, tmp
, tmp2
);
5190 case NEON_3R_VADD_VSUB
:
5191 if (!u
) { /* VADD */
5192 gen_neon_add(size
, tmp
, tmp2
);
5195 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
5196 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
5197 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
5202 case NEON_3R_VTST_VCEQ
:
5203 if (!u
) { /* VTST */
5205 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
5206 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
5207 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
5212 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5213 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5214 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5219 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
5221 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5222 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5223 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5226 tcg_temp_free_i32(tmp2
);
5227 tmp2
= neon_load_reg(rd
, pass
);
5229 gen_neon_rsb(size
, tmp
, tmp2
);
5231 gen_neon_add(size
, tmp
, tmp2
);
5235 if (u
) { /* polynomial */
5236 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5237 } else { /* Integer */
5239 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5240 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5241 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5247 GEN_NEON_INTEGER_OP(pmax
);
5250 GEN_NEON_INTEGER_OP(pmin
);
5252 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5253 if (!u
) { /* VQDMULH */
5256 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5259 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5263 } else { /* VQRDMULH */
5266 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5269 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5277 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5278 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5279 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5283 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5285 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5286 switch ((u
<< 2) | size
) {
5289 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5292 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5295 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5300 tcg_temp_free_ptr(fpstatus
);
5303 case NEON_3R_FLOAT_MULTIPLY
:
5305 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5306 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5308 tcg_temp_free_i32(tmp2
);
5309 tmp2
= neon_load_reg(rd
, pass
);
5311 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5313 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5316 tcg_temp_free_ptr(fpstatus
);
5319 case NEON_3R_FLOAT_CMP
:
5321 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5323 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5326 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5328 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5331 tcg_temp_free_ptr(fpstatus
);
5334 case NEON_3R_FLOAT_ACMP
:
5336 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5338 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5340 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5342 tcg_temp_free_ptr(fpstatus
);
5345 case NEON_3R_FLOAT_MINMAX
:
5347 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5349 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5351 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5353 tcg_temp_free_ptr(fpstatus
);
5356 case NEON_3R_FLOAT_MISC
:
5359 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5361 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5363 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5365 tcg_temp_free_ptr(fpstatus
);
5368 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5370 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5376 /* VFMA, VFMS: fused multiply-add */
5377 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5378 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5381 gen_helper_vfp_negs(tmp
, tmp
);
5383 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5384 tcg_temp_free_i32(tmp3
);
5385 tcg_temp_free_ptr(fpstatus
);
5391 tcg_temp_free_i32(tmp2
);
5393 /* Save the result. For elementwise operations we can put it
5394 straight into the destination register. For pairwise operations
5395 we have to be careful to avoid clobbering the source operands. */
5396 if (pairwise
&& rd
== rm
) {
5397 neon_store_scratch(pass
, tmp
);
5399 neon_store_reg(rd
, pass
, tmp
);
5403 if (pairwise
&& rd
== rm
) {
5404 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5405 tmp
= neon_load_scratch(pass
);
5406 neon_store_reg(rd
, pass
, tmp
);
5409 /* End of 3 register same size operations. */
5410 } else if (insn
& (1 << 4)) {
5411 if ((insn
& 0x00380080) != 0) {
5412 /* Two registers and shift. */
5413 op
= (insn
>> 8) & 0xf;
5414 if (insn
& (1 << 7)) {
5422 while ((insn
& (1 << (size
+ 19))) == 0)
5425 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5426 /* To avoid excessive duplication of ops we implement shift
5427 by immediate using the variable shift operations. */
5429 /* Shift by immediate:
5430 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5431 if (q
&& ((rd
| rm
) & 1)) {
5434 if (!u
&& (op
== 4 || op
== 6)) {
5437 /* Right shifts are encoded as N - shift, where N is the
5438 element size in bits. */
5440 shift
= shift
- (1 << (size
+ 3));
5448 imm
= (uint8_t) shift
;
5453 imm
= (uint16_t) shift
;
5464 for (pass
= 0; pass
< count
; pass
++) {
5466 neon_load_reg64(cpu_V0
, rm
+ pass
);
5467 tcg_gen_movi_i64(cpu_V1
, imm
);
5472 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5474 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5479 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5481 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5484 case 5: /* VSHL, VSLI */
5485 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5487 case 6: /* VQSHLU */
5488 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5493 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5496 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5501 if (op
== 1 || op
== 3) {
5503 neon_load_reg64(cpu_V1
, rd
+ pass
);
5504 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5505 } else if (op
== 4 || (op
== 5 && u
)) {
5507 neon_load_reg64(cpu_V1
, rd
+ pass
);
5509 if (shift
< -63 || shift
> 63) {
5513 mask
= 0xffffffffffffffffull
>> -shift
;
5515 mask
= 0xffffffffffffffffull
<< shift
;
5518 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
5519 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5521 neon_store_reg64(cpu_V0
, rd
+ pass
);
5522 } else { /* size < 3 */
5523 /* Operands in T0 and T1. */
5524 tmp
= neon_load_reg(rm
, pass
);
5525 tmp2
= tcg_temp_new_i32();
5526 tcg_gen_movi_i32(tmp2
, imm
);
5530 GEN_NEON_INTEGER_OP(shl
);
5534 GEN_NEON_INTEGER_OP(rshl
);
5537 case 5: /* VSHL, VSLI */
5539 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
5540 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
5541 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
5545 case 6: /* VQSHLU */
5548 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5552 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5556 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5564 GEN_NEON_INTEGER_OP_ENV(qshl
);
5567 tcg_temp_free_i32(tmp2
);
5569 if (op
== 1 || op
== 3) {
5571 tmp2
= neon_load_reg(rd
, pass
);
5572 gen_neon_add(size
, tmp
, tmp2
);
5573 tcg_temp_free_i32(tmp2
);
5574 } else if (op
== 4 || (op
== 5 && u
)) {
5579 mask
= 0xff >> -shift
;
5581 mask
= (uint8_t)(0xff << shift
);
5587 mask
= 0xffff >> -shift
;
5589 mask
= (uint16_t)(0xffff << shift
);
5593 if (shift
< -31 || shift
> 31) {
5597 mask
= 0xffffffffu
>> -shift
;
5599 mask
= 0xffffffffu
<< shift
;
5605 tmp2
= neon_load_reg(rd
, pass
);
5606 tcg_gen_andi_i32(tmp
, tmp
, mask
);
5607 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
5608 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5609 tcg_temp_free_i32(tmp2
);
5611 neon_store_reg(rd
, pass
, tmp
);
5614 } else if (op
< 10) {
5615 /* Shift by immediate and narrow:
5616 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5617 int input_unsigned
= (op
== 8) ? !u
: u
;
5621 shift
= shift
- (1 << (size
+ 3));
5624 tmp64
= tcg_const_i64(shift
);
5625 neon_load_reg64(cpu_V0
, rm
);
5626 neon_load_reg64(cpu_V1
, rm
+ 1);
5627 for (pass
= 0; pass
< 2; pass
++) {
5635 if (input_unsigned
) {
5636 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5638 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5641 if (input_unsigned
) {
5642 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5644 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5647 tmp
= tcg_temp_new_i32();
5648 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5649 neon_store_reg(rd
, pass
, tmp
);
5651 tcg_temp_free_i64(tmp64
);
5654 imm
= (uint16_t)shift
;
5658 imm
= (uint32_t)shift
;
5660 tmp2
= tcg_const_i32(imm
);
5661 tmp4
= neon_load_reg(rm
+ 1, 0);
5662 tmp5
= neon_load_reg(rm
+ 1, 1);
5663 for (pass
= 0; pass
< 2; pass
++) {
5665 tmp
= neon_load_reg(rm
, 0);
5669 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5672 tmp3
= neon_load_reg(rm
, 1);
5676 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5678 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5679 tcg_temp_free_i32(tmp
);
5680 tcg_temp_free_i32(tmp3
);
5681 tmp
= tcg_temp_new_i32();
5682 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5683 neon_store_reg(rd
, pass
, tmp
);
5685 tcg_temp_free_i32(tmp2
);
5687 } else if (op
== 10) {
5689 if (q
|| (rd
& 1)) {
5692 tmp
= neon_load_reg(rm
, 0);
5693 tmp2
= neon_load_reg(rm
, 1);
5694 for (pass
= 0; pass
< 2; pass
++) {
5698 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5701 /* The shift is less than the width of the source
5702 type, so we can just shift the whole register. */
5703 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5704 /* Widen the result of shift: we need to clear
5705 * the potential overflow bits resulting from
5706 * left bits of the narrow input appearing as
5707 * right bits of left the neighbour narrow
5709 if (size
< 2 || !u
) {
5712 imm
= (0xffu
>> (8 - shift
));
5714 } else if (size
== 1) {
5715 imm
= 0xffff >> (16 - shift
);
5718 imm
= 0xffffffff >> (32 - shift
);
5721 imm64
= imm
| (((uint64_t)imm
) << 32);
5725 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5728 neon_store_reg64(cpu_V0
, rd
+ pass
);
5730 } else if (op
>= 14) {
5731 /* VCVT fixed-point. */
5732 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5735 /* We have already masked out the must-be-1 top bit of imm6,
5736 * hence this 32-shift where the ARM ARM has 64-imm6.
5739 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5740 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
5743 gen_vfp_ulto(0, shift
, 1);
5745 gen_vfp_slto(0, shift
, 1);
5748 gen_vfp_toul(0, shift
, 1);
5750 gen_vfp_tosl(0, shift
, 1);
5752 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
5757 } else { /* (insn & 0x00380080) == 0 */
5759 if (q
&& (rd
& 1)) {
5763 op
= (insn
>> 8) & 0xf;
5764 /* One register and immediate. */
5765 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5766 invert
= (insn
& (1 << 5)) != 0;
5767 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5768 * We choose to not special-case this and will behave as if a
5769 * valid constant encoding of 0 had been given.
5788 imm
= (imm
<< 8) | (imm
<< 24);
5791 imm
= (imm
<< 8) | 0xff;
5794 imm
= (imm
<< 16) | 0xffff;
5797 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5805 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5806 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5812 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5813 if (op
& 1 && op
< 12) {
5814 tmp
= neon_load_reg(rd
, pass
);
5816 /* The immediate value has already been inverted, so
5818 tcg_gen_andi_i32(tmp
, tmp
, imm
);
5820 tcg_gen_ori_i32(tmp
, tmp
, imm
);
5824 tmp
= tcg_temp_new_i32();
5825 if (op
== 14 && invert
) {
5829 for (n
= 0; n
< 4; n
++) {
5830 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
5831 val
|= 0xff << (n
* 8);
5833 tcg_gen_movi_i32(tmp
, val
);
5835 tcg_gen_movi_i32(tmp
, imm
);
5838 neon_store_reg(rd
, pass
, tmp
);
5841 } else { /* (insn & 0x00800010 == 0x00800000) */
5843 op
= (insn
>> 8) & 0xf;
5844 if ((insn
& (1 << 6)) == 0) {
5845 /* Three registers of different lengths. */
5849 /* undefreq: bit 0 : UNDEF if size != 0
5850 * bit 1 : UNDEF if size == 0
5851 * bit 2 : UNDEF if U == 1
5852 * Note that [1:0] set implies 'always UNDEF'
5855 /* prewiden, src1_wide, src2_wide, undefreq */
5856 static const int neon_3reg_wide
[16][4] = {
5857 {1, 0, 0, 0}, /* VADDL */
5858 {1, 1, 0, 0}, /* VADDW */
5859 {1, 0, 0, 0}, /* VSUBL */
5860 {1, 1, 0, 0}, /* VSUBW */
5861 {0, 1, 1, 0}, /* VADDHN */
5862 {0, 0, 0, 0}, /* VABAL */
5863 {0, 1, 1, 0}, /* VSUBHN */
5864 {0, 0, 0, 0}, /* VABDL */
5865 {0, 0, 0, 0}, /* VMLAL */
5866 {0, 0, 0, 6}, /* VQDMLAL */
5867 {0, 0, 0, 0}, /* VMLSL */
5868 {0, 0, 0, 6}, /* VQDMLSL */
5869 {0, 0, 0, 0}, /* Integer VMULL */
5870 {0, 0, 0, 2}, /* VQDMULL */
5871 {0, 0, 0, 5}, /* Polynomial VMULL */
5872 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5875 prewiden
= neon_3reg_wide
[op
][0];
5876 src1_wide
= neon_3reg_wide
[op
][1];
5877 src2_wide
= neon_3reg_wide
[op
][2];
5878 undefreq
= neon_3reg_wide
[op
][3];
5880 if (((undefreq
& 1) && (size
!= 0)) ||
5881 ((undefreq
& 2) && (size
== 0)) ||
5882 ((undefreq
& 4) && u
)) {
5885 if ((src1_wide
&& (rn
& 1)) ||
5886 (src2_wide
&& (rm
& 1)) ||
5887 (!src2_wide
&& (rd
& 1))) {
5891 /* Avoid overlapping operands. Wide source operands are
5892 always aligned so will never overlap with wide
5893 destinations in problematic ways. */
5894 if (rd
== rm
&& !src2_wide
) {
5895 tmp
= neon_load_reg(rm
, 1);
5896 neon_store_scratch(2, tmp
);
5897 } else if (rd
== rn
&& !src1_wide
) {
5898 tmp
= neon_load_reg(rn
, 1);
5899 neon_store_scratch(2, tmp
);
5901 TCGV_UNUSED_I32(tmp3
);
5902 for (pass
= 0; pass
< 2; pass
++) {
5904 neon_load_reg64(cpu_V0
, rn
+ pass
);
5905 TCGV_UNUSED_I32(tmp
);
5907 if (pass
== 1 && rd
== rn
) {
5908 tmp
= neon_load_scratch(2);
5910 tmp
= neon_load_reg(rn
, pass
);
5913 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5917 neon_load_reg64(cpu_V1
, rm
+ pass
);
5918 TCGV_UNUSED_I32(tmp2
);
5920 if (pass
== 1 && rd
== rm
) {
5921 tmp2
= neon_load_scratch(2);
5923 tmp2
= neon_load_reg(rm
, pass
);
5926 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5930 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5931 gen_neon_addl(size
);
5933 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5934 gen_neon_subl(size
);
5936 case 5: case 7: /* VABAL, VABDL */
5937 switch ((size
<< 1) | u
) {
5939 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5942 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5945 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5948 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5951 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5954 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5958 tcg_temp_free_i32(tmp2
);
5959 tcg_temp_free_i32(tmp
);
5961 case 8: case 9: case 10: case 11: case 12: case 13:
5962 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5963 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5965 case 14: /* Polynomial VMULL */
5966 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5967 tcg_temp_free_i32(tmp2
);
5968 tcg_temp_free_i32(tmp
);
5970 default: /* 15 is RESERVED: caught earlier */
5975 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5976 neon_store_reg64(cpu_V0
, rd
+ pass
);
5977 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5979 neon_load_reg64(cpu_V1
, rd
+ pass
);
5981 case 10: /* VMLSL */
5982 gen_neon_negl(cpu_V0
, size
);
5984 case 5: case 8: /* VABAL, VMLAL */
5985 gen_neon_addl(size
);
5987 case 9: case 11: /* VQDMLAL, VQDMLSL */
5988 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5990 gen_neon_negl(cpu_V0
, size
);
5992 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5997 neon_store_reg64(cpu_V0
, rd
+ pass
);
5998 } else if (op
== 4 || op
== 6) {
5999 /* Narrowing operation. */
6000 tmp
= tcg_temp_new_i32();
6004 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6007 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6010 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6011 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
6018 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6021 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6024 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6025 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6026 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
6034 neon_store_reg(rd
, 0, tmp3
);
6035 neon_store_reg(rd
, 1, tmp
);
6038 /* Write back the result. */
6039 neon_store_reg64(cpu_V0
, rd
+ pass
);
6043 /* Two registers and a scalar. NB that for ops of this form
6044 * the ARM ARM labels bit 24 as Q, but it is in our variable
6051 case 1: /* Float VMLA scalar */
6052 case 5: /* Floating point VMLS scalar */
6053 case 9: /* Floating point VMUL scalar */
6058 case 0: /* Integer VMLA scalar */
6059 case 4: /* Integer VMLS scalar */
6060 case 8: /* Integer VMUL scalar */
6061 case 12: /* VQDMULH scalar */
6062 case 13: /* VQRDMULH scalar */
6063 if (u
&& ((rd
| rn
) & 1)) {
6066 tmp
= neon_get_scalar(size
, rm
);
6067 neon_store_scratch(0, tmp
);
6068 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6069 tmp
= neon_load_scratch(0);
6070 tmp2
= neon_load_reg(rn
, pass
);
6073 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6075 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6077 } else if (op
== 13) {
6079 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6081 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6083 } else if (op
& 1) {
6084 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6085 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6086 tcg_temp_free_ptr(fpstatus
);
6089 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6090 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6091 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6095 tcg_temp_free_i32(tmp2
);
6098 tmp2
= neon_load_reg(rd
, pass
);
6101 gen_neon_add(size
, tmp
, tmp2
);
6105 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6106 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6107 tcg_temp_free_ptr(fpstatus
);
6111 gen_neon_rsb(size
, tmp
, tmp2
);
6115 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6116 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6117 tcg_temp_free_ptr(fpstatus
);
6123 tcg_temp_free_i32(tmp2
);
6125 neon_store_reg(rd
, pass
, tmp
);
6128 case 3: /* VQDMLAL scalar */
6129 case 7: /* VQDMLSL scalar */
6130 case 11: /* VQDMULL scalar */
6135 case 2: /* VMLAL sclar */
6136 case 6: /* VMLSL scalar */
6137 case 10: /* VMULL scalar */
6141 tmp2
= neon_get_scalar(size
, rm
);
6142 /* We need a copy of tmp2 because gen_neon_mull
6143 * deletes it during pass 0. */
6144 tmp4
= tcg_temp_new_i32();
6145 tcg_gen_mov_i32(tmp4
, tmp2
);
6146 tmp3
= neon_load_reg(rn
, 1);
6148 for (pass
= 0; pass
< 2; pass
++) {
6150 tmp
= neon_load_reg(rn
, 0);
6155 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6157 neon_load_reg64(cpu_V1
, rd
+ pass
);
6161 gen_neon_negl(cpu_V0
, size
);
6164 gen_neon_addl(size
);
6167 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6169 gen_neon_negl(cpu_V0
, size
);
6171 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6177 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6182 neon_store_reg64(cpu_V0
, rd
+ pass
);
6187 default: /* 14 and 15 are RESERVED */
6191 } else { /* size == 3 */
6194 imm
= (insn
>> 8) & 0xf;
6199 if (q
&& ((rd
| rn
| rm
) & 1)) {
6204 neon_load_reg64(cpu_V0
, rn
);
6206 neon_load_reg64(cpu_V1
, rn
+ 1);
6208 } else if (imm
== 8) {
6209 neon_load_reg64(cpu_V0
, rn
+ 1);
6211 neon_load_reg64(cpu_V1
, rm
);
6214 tmp64
= tcg_temp_new_i64();
6216 neon_load_reg64(cpu_V0
, rn
);
6217 neon_load_reg64(tmp64
, rn
+ 1);
6219 neon_load_reg64(cpu_V0
, rn
+ 1);
6220 neon_load_reg64(tmp64
, rm
);
6222 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6223 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6224 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6226 neon_load_reg64(cpu_V1
, rm
);
6228 neon_load_reg64(cpu_V1
, rm
+ 1);
6231 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6232 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6233 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6234 tcg_temp_free_i64(tmp64
);
6237 neon_load_reg64(cpu_V0
, rn
);
6238 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6239 neon_load_reg64(cpu_V1
, rm
);
6240 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6241 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6243 neon_store_reg64(cpu_V0
, rd
);
6245 neon_store_reg64(cpu_V1
, rd
+ 1);
6247 } else if ((insn
& (1 << 11)) == 0) {
6248 /* Two register misc. */
6249 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6250 size
= (insn
>> 18) & 3;
6251 /* UNDEF for unknown op values and bad op-size combinations */
6252 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6255 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6256 q
&& ((rm
| rd
) & 1)) {
6260 case NEON_2RM_VREV64
:
6261 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6262 tmp
= neon_load_reg(rm
, pass
* 2);
6263 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6265 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6266 case 1: gen_swap_half(tmp
); break;
6267 case 2: /* no-op */ break;
6270 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6272 neon_store_reg(rd
, pass
* 2, tmp2
);
6275 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6276 case 1: gen_swap_half(tmp2
); break;
6279 neon_store_reg(rd
, pass
* 2, tmp2
);
6283 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6284 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6285 for (pass
= 0; pass
< q
+ 1; pass
++) {
6286 tmp
= neon_load_reg(rm
, pass
* 2);
6287 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6288 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6289 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6291 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6292 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6293 case 2: tcg_gen_add_i64(CPU_V001
); break;
6296 if (op
>= NEON_2RM_VPADAL
) {
6298 neon_load_reg64(cpu_V1
, rd
+ pass
);
6299 gen_neon_addl(size
);
6301 neon_store_reg64(cpu_V0
, rd
+ pass
);
6307 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6308 tmp
= neon_load_reg(rm
, n
);
6309 tmp2
= neon_load_reg(rd
, n
+ 1);
6310 neon_store_reg(rm
, n
, tmp2
);
6311 neon_store_reg(rd
, n
+ 1, tmp
);
6318 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6323 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6327 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6328 /* also VQMOVUN; op field and mnemonics don't line up */
6332 TCGV_UNUSED_I32(tmp2
);
6333 for (pass
= 0; pass
< 2; pass
++) {
6334 neon_load_reg64(cpu_V0
, rm
+ pass
);
6335 tmp
= tcg_temp_new_i32();
6336 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6341 neon_store_reg(rd
, 0, tmp2
);
6342 neon_store_reg(rd
, 1, tmp
);
6346 case NEON_2RM_VSHLL
:
6347 if (q
|| (rd
& 1)) {
6350 tmp
= neon_load_reg(rm
, 0);
6351 tmp2
= neon_load_reg(rm
, 1);
6352 for (pass
= 0; pass
< 2; pass
++) {
6355 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6356 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6357 neon_store_reg64(cpu_V0
, rd
+ pass
);
6360 case NEON_2RM_VCVT_F16_F32
:
6361 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
6365 tmp
= tcg_temp_new_i32();
6366 tmp2
= tcg_temp_new_i32();
6367 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6368 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6369 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6370 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6371 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6372 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6373 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6374 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6375 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6376 neon_store_reg(rd
, 0, tmp2
);
6377 tmp2
= tcg_temp_new_i32();
6378 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6379 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6380 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6381 neon_store_reg(rd
, 1, tmp2
);
6382 tcg_temp_free_i32(tmp
);
6384 case NEON_2RM_VCVT_F32_F16
:
6385 if (!arm_feature(env
, ARM_FEATURE_VFP_FP16
) ||
6389 tmp3
= tcg_temp_new_i32();
6390 tmp
= neon_load_reg(rm
, 0);
6391 tmp2
= neon_load_reg(rm
, 1);
6392 tcg_gen_ext16u_i32(tmp3
, tmp
);
6393 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6394 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6395 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6396 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6397 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6398 tcg_temp_free_i32(tmp
);
6399 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6400 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6401 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6402 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6403 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6404 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6405 tcg_temp_free_i32(tmp2
);
6406 tcg_temp_free_i32(tmp3
);
6408 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6409 if (!arm_feature(env
, ARM_FEATURE_V8_AES
)
6410 || ((rm
| rd
) & 1)) {
6413 tmp
= tcg_const_i32(rd
);
6414 tmp2
= tcg_const_i32(rm
);
6416 /* Bit 6 is the lowest opcode bit; it distinguishes between
6417 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6419 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6421 if (op
== NEON_2RM_AESE
) {
6422 gen_helper_crypto_aese(cpu_env
, tmp
, tmp2
, tmp3
);
6424 gen_helper_crypto_aesmc(cpu_env
, tmp
, tmp2
, tmp3
);
6426 tcg_temp_free_i32(tmp
);
6427 tcg_temp_free_i32(tmp2
);
6428 tcg_temp_free_i32(tmp3
);
6432 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6433 if (neon_2rm_is_float_op(op
)) {
6434 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
6435 neon_reg_offset(rm
, pass
));
6436 TCGV_UNUSED_I32(tmp
);
6438 tmp
= neon_load_reg(rm
, pass
);
6441 case NEON_2RM_VREV32
:
6443 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6444 case 1: gen_swap_half(tmp
); break;
6448 case NEON_2RM_VREV16
:
6453 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6454 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6455 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6461 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6462 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6463 case 2: gen_helper_clz(tmp
, tmp
); break;
6468 gen_helper_neon_cnt_u8(tmp
, tmp
);
6471 tcg_gen_not_i32(tmp
, tmp
);
6473 case NEON_2RM_VQABS
:
6476 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6479 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6482 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6487 case NEON_2RM_VQNEG
:
6490 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6493 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6496 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6501 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6502 tmp2
= tcg_const_i32(0);
6504 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6505 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6506 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6509 tcg_temp_free_i32(tmp2
);
6510 if (op
== NEON_2RM_VCLE0
) {
6511 tcg_gen_not_i32(tmp
, tmp
);
6514 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6515 tmp2
= tcg_const_i32(0);
6517 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6518 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6519 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6522 tcg_temp_free_i32(tmp2
);
6523 if (op
== NEON_2RM_VCLT0
) {
6524 tcg_gen_not_i32(tmp
, tmp
);
6527 case NEON_2RM_VCEQ0
:
6528 tmp2
= tcg_const_i32(0);
6530 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6531 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6532 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6535 tcg_temp_free_i32(tmp2
);
6539 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
6540 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
6541 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
6546 tmp2
= tcg_const_i32(0);
6547 gen_neon_rsb(size
, tmp
, tmp2
);
6548 tcg_temp_free_i32(tmp2
);
6550 case NEON_2RM_VCGT0_F
:
6552 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6553 tmp2
= tcg_const_i32(0);
6554 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6555 tcg_temp_free_i32(tmp2
);
6556 tcg_temp_free_ptr(fpstatus
);
6559 case NEON_2RM_VCGE0_F
:
6561 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6562 tmp2
= tcg_const_i32(0);
6563 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6564 tcg_temp_free_i32(tmp2
);
6565 tcg_temp_free_ptr(fpstatus
);
6568 case NEON_2RM_VCEQ0_F
:
6570 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6571 tmp2
= tcg_const_i32(0);
6572 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6573 tcg_temp_free_i32(tmp2
);
6574 tcg_temp_free_ptr(fpstatus
);
6577 case NEON_2RM_VCLE0_F
:
6579 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6580 tmp2
= tcg_const_i32(0);
6581 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6582 tcg_temp_free_i32(tmp2
);
6583 tcg_temp_free_ptr(fpstatus
);
6586 case NEON_2RM_VCLT0_F
:
6588 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6589 tmp2
= tcg_const_i32(0);
6590 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6591 tcg_temp_free_i32(tmp2
);
6592 tcg_temp_free_ptr(fpstatus
);
6595 case NEON_2RM_VABS_F
:
6598 case NEON_2RM_VNEG_F
:
6602 tmp2
= neon_load_reg(rd
, pass
);
6603 neon_store_reg(rm
, pass
, tmp2
);
6606 tmp2
= neon_load_reg(rd
, pass
);
6608 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6609 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6612 neon_store_reg(rm
, pass
, tmp2
);
6614 case NEON_2RM_VRINTN
:
6615 case NEON_2RM_VRINTA
:
6616 case NEON_2RM_VRINTM
:
6617 case NEON_2RM_VRINTP
:
6618 case NEON_2RM_VRINTZ
:
6621 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6624 if (op
== NEON_2RM_VRINTZ
) {
6625 rmode
= FPROUNDING_ZERO
;
6627 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
6630 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6631 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6633 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
6634 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6636 tcg_temp_free_ptr(fpstatus
);
6637 tcg_temp_free_i32(tcg_rmode
);
6640 case NEON_2RM_VRINTX
:
6642 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6643 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
6644 tcg_temp_free_ptr(fpstatus
);
6647 case NEON_2RM_VCVTAU
:
6648 case NEON_2RM_VCVTAS
:
6649 case NEON_2RM_VCVTNU
:
6650 case NEON_2RM_VCVTNS
:
6651 case NEON_2RM_VCVTPU
:
6652 case NEON_2RM_VCVTPS
:
6653 case NEON_2RM_VCVTMU
:
6654 case NEON_2RM_VCVTMS
:
6656 bool is_signed
= !extract32(insn
, 7, 1);
6657 TCGv_ptr fpst
= get_fpstatus_ptr(1);
6658 TCGv_i32 tcg_rmode
, tcg_shift
;
6659 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
6661 tcg_shift
= tcg_const_i32(0);
6662 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6663 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6667 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
6670 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
6674 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6676 tcg_temp_free_i32(tcg_rmode
);
6677 tcg_temp_free_i32(tcg_shift
);
6678 tcg_temp_free_ptr(fpst
);
6681 case NEON_2RM_VRECPE
:
6682 gen_helper_recpe_u32(tmp
, tmp
, cpu_env
);
6684 case NEON_2RM_VRSQRTE
:
6685 gen_helper_rsqrte_u32(tmp
, tmp
, cpu_env
);
6687 case NEON_2RM_VRECPE_F
:
6688 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6690 case NEON_2RM_VRSQRTE_F
:
6691 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
6693 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6696 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6699 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6700 gen_vfp_tosiz(0, 1);
6702 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6703 gen_vfp_touiz(0, 1);
6706 /* Reserved op values were caught by the
6707 * neon_2rm_sizes[] check earlier.
6711 if (neon_2rm_is_float_op(op
)) {
6712 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
6713 neon_reg_offset(rd
, pass
));
6715 neon_store_reg(rd
, pass
, tmp
);
6720 } else if ((insn
& (1 << 10)) == 0) {
6722 int n
= ((insn
>> 8) & 3) + 1;
6723 if ((rn
+ n
) > 32) {
6724 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6725 * helper function running off the end of the register file.
6730 if (insn
& (1 << 6)) {
6731 tmp
= neon_load_reg(rd
, 0);
6733 tmp
= tcg_temp_new_i32();
6734 tcg_gen_movi_i32(tmp
, 0);
6736 tmp2
= neon_load_reg(rm
, 0);
6737 tmp4
= tcg_const_i32(rn
);
6738 tmp5
= tcg_const_i32(n
);
6739 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
6740 tcg_temp_free_i32(tmp
);
6741 if (insn
& (1 << 6)) {
6742 tmp
= neon_load_reg(rd
, 1);
6744 tmp
= tcg_temp_new_i32();
6745 tcg_gen_movi_i32(tmp
, 0);
6747 tmp3
= neon_load_reg(rm
, 1);
6748 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
6749 tcg_temp_free_i32(tmp5
);
6750 tcg_temp_free_i32(tmp4
);
6751 neon_store_reg(rd
, 0, tmp2
);
6752 neon_store_reg(rd
, 1, tmp3
);
6753 tcg_temp_free_i32(tmp
);
6754 } else if ((insn
& 0x380) == 0) {
6756 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6759 if (insn
& (1 << 19)) {
6760 tmp
= neon_load_reg(rm
, 1);
6762 tmp
= neon_load_reg(rm
, 0);
6764 if (insn
& (1 << 16)) {
6765 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
6766 } else if (insn
& (1 << 17)) {
6767 if ((insn
>> 18) & 1)
6768 gen_neon_dup_high16(tmp
);
6770 gen_neon_dup_low16(tmp
);
6772 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6773 tmp2
= tcg_temp_new_i32();
6774 tcg_gen_mov_i32(tmp2
, tmp
);
6775 neon_store_reg(rd
, pass
, tmp2
);
6777 tcg_temp_free_i32(tmp
);
6786 static int disas_coproc_insn(CPUARMState
* env
, DisasContext
*s
, uint32_t insn
)
6788 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
6789 const ARMCPRegInfo
*ri
;
6791 cpnum
= (insn
>> 8) & 0xf;
6792 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
6793 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
6796 /* First check for coprocessor space used for actual instructions */
6800 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
6801 return disas_iwmmxt_insn(env
, s
, insn
);
6802 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
6803 return disas_dsp_insn(env
, s
, insn
);
6810 /* Otherwise treat as a generic register access */
6811 is64
= (insn
& (1 << 25)) == 0;
6812 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6820 opc1
= (insn
>> 4) & 0xf;
6822 rt2
= (insn
>> 16) & 0xf;
6824 crn
= (insn
>> 16) & 0xf;
6825 opc1
= (insn
>> 21) & 7;
6826 opc2
= (insn
>> 5) & 7;
6829 isread
= (insn
>> 20) & 1;
6830 rt
= (insn
>> 12) & 0xf;
6832 ri
= get_arm_cp_reginfo(s
->cp_regs
,
6833 ENCODE_CP_REG(cpnum
, is64
, crn
, crm
, opc1
, opc2
));
6835 /* Check access permissions */
6836 if (!cp_access_ok(s
->current_pl
, ri
, isread
)) {
6840 /* Handle special cases first */
6841 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
6848 gen_set_pc_im(s
, s
->pc
);
6849 s
->is_jmp
= DISAS_WFI
;
6855 if (use_icount
&& (ri
->type
& ARM_CP_IO
)) {
6864 if (ri
->type
& ARM_CP_CONST
) {
6865 tmp64
= tcg_const_i64(ri
->resetvalue
);
6866 } else if (ri
->readfn
) {
6868 gen_set_pc_im(s
, s
->pc
);
6869 tmp64
= tcg_temp_new_i64();
6870 tmpptr
= tcg_const_ptr(ri
);
6871 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
6872 tcg_temp_free_ptr(tmpptr
);
6874 tmp64
= tcg_temp_new_i64();
6875 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6877 tmp
= tcg_temp_new_i32();
6878 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6879 store_reg(s
, rt
, tmp
);
6880 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6881 tmp
= tcg_temp_new_i32();
6882 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6883 tcg_temp_free_i64(tmp64
);
6884 store_reg(s
, rt2
, tmp
);
6887 if (ri
->type
& ARM_CP_CONST
) {
6888 tmp
= tcg_const_i32(ri
->resetvalue
);
6889 } else if (ri
->readfn
) {
6891 gen_set_pc_im(s
, s
->pc
);
6892 tmp
= tcg_temp_new_i32();
6893 tmpptr
= tcg_const_ptr(ri
);
6894 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
6895 tcg_temp_free_ptr(tmpptr
);
6897 tmp
= load_cpu_offset(ri
->fieldoffset
);
6900 /* Destination register of r15 for 32 bit loads sets
6901 * the condition codes from the high 4 bits of the value
6904 tcg_temp_free_i32(tmp
);
6906 store_reg(s
, rt
, tmp
);
6911 if (ri
->type
& ARM_CP_CONST
) {
6912 /* If not forbidden by access permissions, treat as WI */
6917 TCGv_i32 tmplo
, tmphi
;
6918 TCGv_i64 tmp64
= tcg_temp_new_i64();
6919 tmplo
= load_reg(s
, rt
);
6920 tmphi
= load_reg(s
, rt2
);
6921 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6922 tcg_temp_free_i32(tmplo
);
6923 tcg_temp_free_i32(tmphi
);
6925 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6926 gen_set_pc_im(s
, s
->pc
);
6927 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6928 tcg_temp_free_ptr(tmpptr
);
6930 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6932 tcg_temp_free_i64(tmp64
);
6937 gen_set_pc_im(s
, s
->pc
);
6938 tmp
= load_reg(s
, rt
);
6939 tmpptr
= tcg_const_ptr(ri
);
6940 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6941 tcg_temp_free_ptr(tmpptr
);
6942 tcg_temp_free_i32(tmp
);
6944 TCGv_i32 tmp
= load_reg(s
, rt
);
6945 store_cpu_offset(tmp
, ri
->fieldoffset
);
6950 if (use_icount
&& (ri
->type
& ARM_CP_IO
)) {
6951 /* I/O operations must end the TB here (whether read or write) */
6954 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
6955 /* We default to ending the TB on a coprocessor register write,
6956 * but allow this to be suppressed by the register definition
6957 * (usually only necessary to work around guest bugs).
6969 /* Store a 64-bit value to a register pair. Clobbers val. */
6970 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
6973 tmp
= tcg_temp_new_i32();
6974 tcg_gen_trunc_i64_i32(tmp
, val
);
6975 store_reg(s
, rlow
, tmp
);
6976 tmp
= tcg_temp_new_i32();
6977 tcg_gen_shri_i64(val
, val
, 32);
6978 tcg_gen_trunc_i64_i32(tmp
, val
);
6979 store_reg(s
, rhigh
, tmp
);
6982 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6983 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
6988 /* Load value and extend to 64 bits. */
6989 tmp
= tcg_temp_new_i64();
6990 tmp2
= load_reg(s
, rlow
);
6991 tcg_gen_extu_i32_i64(tmp
, tmp2
);
6992 tcg_temp_free_i32(tmp2
);
6993 tcg_gen_add_i64(val
, val
, tmp
);
6994 tcg_temp_free_i64(tmp
);
6997 /* load and add a 64-bit value from a register pair. */
6998 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
7004 /* Load 64-bit value rd:rn. */
7005 tmpl
= load_reg(s
, rlow
);
7006 tmph
= load_reg(s
, rhigh
);
7007 tmp
= tcg_temp_new_i64();
7008 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
7009 tcg_temp_free_i32(tmpl
);
7010 tcg_temp_free_i32(tmph
);
7011 tcg_gen_add_i64(val
, val
, tmp
);
7012 tcg_temp_free_i64(tmp
);
7015 /* Set N and Z flags from hi|lo. */
7016 static void gen_logicq_cc(TCGv_i32 lo
, TCGv_i32 hi
)
7018 tcg_gen_mov_i32(cpu_NF
, hi
);
7019 tcg_gen_or_i32(cpu_ZF
, lo
, hi
);
7022 /* Load/Store exclusive instructions are implemented by remembering
7023 the value/address loaded, and seeing if these are the same
7024 when the store is performed. This should be sufficient to implement
7025 the architecturally mandated semantics, and avoids having to monitor
7028 In system emulation mode only one CPU will be running at once, so
7029 this sequence is effectively atomic. In user emulation mode we
7030 throw an exception and handle the atomic operation elsewhere. */
7031 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
7032 TCGv_i32 addr
, int size
)
7034 TCGv_i32 tmp
= tcg_temp_new_i32();
7038 gen_aa32_ld8u(tmp
, addr
, IS_USER(s
));
7041 gen_aa32_ld16u(tmp
, addr
, IS_USER(s
));
7045 gen_aa32_ld32u(tmp
, addr
, IS_USER(s
));
7052 TCGv_i32 tmp2
= tcg_temp_new_i32();
7053 TCGv_i32 tmp3
= tcg_temp_new_i32();
7055 tcg_gen_addi_i32(tmp2
, addr
, 4);
7056 gen_aa32_ld32u(tmp3
, tmp2
, IS_USER(s
));
7057 tcg_temp_free_i32(tmp2
);
7058 tcg_gen_concat_i32_i64(cpu_exclusive_val
, tmp
, tmp3
);
7059 store_reg(s
, rt2
, tmp3
);
7061 tcg_gen_extu_i32_i64(cpu_exclusive_val
, tmp
);
7064 store_reg(s
, rt
, tmp
);
7065 tcg_gen_extu_i32_i64(cpu_exclusive_addr
, addr
);
7068 static void gen_clrex(DisasContext
*s
)
7070 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7073 #ifdef CONFIG_USER_ONLY
7074 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
7075 TCGv_i32 addr
, int size
)
7077 tcg_gen_extu_i32_i64(cpu_exclusive_test
, addr
);
7078 tcg_gen_movi_i32(cpu_exclusive_info
,
7079 size
| (rd
<< 4) | (rt
<< 8) | (rt2
<< 12));
7080 gen_exception_insn(s
, 4, EXCP_STREX
);
7083 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
7084 TCGv_i32 addr
, int size
)
7087 TCGv_i64 val64
, extaddr
;
7091 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7097 fail_label
= gen_new_label();
7098 done_label
= gen_new_label();
7099 extaddr
= tcg_temp_new_i64();
7100 tcg_gen_extu_i32_i64(extaddr
, addr
);
7101 tcg_gen_brcond_i64(TCG_COND_NE
, extaddr
, cpu_exclusive_addr
, fail_label
);
7102 tcg_temp_free_i64(extaddr
);
7104 tmp
= tcg_temp_new_i32();
7107 gen_aa32_ld8u(tmp
, addr
, IS_USER(s
));
7110 gen_aa32_ld16u(tmp
, addr
, IS_USER(s
));
7114 gen_aa32_ld32u(tmp
, addr
, IS_USER(s
));
7120 val64
= tcg_temp_new_i64();
7122 TCGv_i32 tmp2
= tcg_temp_new_i32();
7123 TCGv_i32 tmp3
= tcg_temp_new_i32();
7124 tcg_gen_addi_i32(tmp2
, addr
, 4);
7125 gen_aa32_ld32u(tmp3
, tmp2
, IS_USER(s
));
7126 tcg_temp_free_i32(tmp2
);
7127 tcg_gen_concat_i32_i64(val64
, tmp
, tmp3
);
7128 tcg_temp_free_i32(tmp3
);
7130 tcg_gen_extu_i32_i64(val64
, tmp
);
7132 tcg_temp_free_i32(tmp
);
7134 tcg_gen_brcond_i64(TCG_COND_NE
, val64
, cpu_exclusive_val
, fail_label
);
7135 tcg_temp_free_i64(val64
);
7137 tmp
= load_reg(s
, rt
);
7140 gen_aa32_st8(tmp
, addr
, IS_USER(s
));
7143 gen_aa32_st16(tmp
, addr
, IS_USER(s
));
7147 gen_aa32_st32(tmp
, addr
, IS_USER(s
));
7152 tcg_temp_free_i32(tmp
);
7154 tcg_gen_addi_i32(addr
, addr
, 4);
7155 tmp
= load_reg(s
, rt2
);
7156 gen_aa32_st32(tmp
, addr
, IS_USER(s
));
7157 tcg_temp_free_i32(tmp
);
7159 tcg_gen_movi_i32(cpu_R
[rd
], 0);
7160 tcg_gen_br(done_label
);
7161 gen_set_label(fail_label
);
7162 tcg_gen_movi_i32(cpu_R
[rd
], 1);
7163 gen_set_label(done_label
);
7164 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7171 * @mode: mode field from insn (which stack to store to)
7172 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7173 * @writeback: true if writeback bit set
7175 * Generate code for the SRS (Store Return State) insn.
7177 static void gen_srs(DisasContext
*s
,
7178 uint32_t mode
, uint32_t amode
, bool writeback
)
7181 TCGv_i32 addr
= tcg_temp_new_i32();
7182 TCGv_i32 tmp
= tcg_const_i32(mode
);
7183 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7184 tcg_temp_free_i32(tmp
);
7201 tcg_gen_addi_i32(addr
, addr
, offset
);
7202 tmp
= load_reg(s
, 14);
7203 gen_aa32_st32(tmp
, addr
, 0);
7204 tcg_temp_free_i32(tmp
);
7205 tmp
= load_cpu_field(spsr
);
7206 tcg_gen_addi_i32(addr
, addr
, 4);
7207 gen_aa32_st32(tmp
, addr
, 0);
7208 tcg_temp_free_i32(tmp
);
7226 tcg_gen_addi_i32(addr
, addr
, offset
);
7227 tmp
= tcg_const_i32(mode
);
7228 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7229 tcg_temp_free_i32(tmp
);
7231 tcg_temp_free_i32(addr
);
7234 static void disas_arm_insn(CPUARMState
* env
, DisasContext
*s
)
7236 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
7243 insn
= arm_ldl_code(env
, s
->pc
, s
->bswap_code
);
7246 /* M variants do not implement ARM mode. */
7251 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7252 * choose to UNDEF. In ARMv5 and above the space is used
7253 * for miscellaneous unconditional instructions.
7257 /* Unconditional instructions. */
7258 if (((insn
>> 25) & 7) == 1) {
7259 /* NEON Data processing. */
7260 if (!arm_feature(env
, ARM_FEATURE_NEON
))
7263 if (disas_neon_data_insn(env
, s
, insn
))
7267 if ((insn
& 0x0f100000) == 0x04000000) {
7268 /* NEON load/store. */
7269 if (!arm_feature(env
, ARM_FEATURE_NEON
))
7272 if (disas_neon_ls_insn(env
, s
, insn
))
7276 if ((insn
& 0x0f000e10) == 0x0e000a00) {
7278 if (disas_vfp_insn(env
, s
, insn
)) {
7283 if (((insn
& 0x0f30f000) == 0x0510f000) ||
7284 ((insn
& 0x0f30f010) == 0x0710f000)) {
7285 if ((insn
& (1 << 22)) == 0) {
7287 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
7291 /* Otherwise PLD; v5TE+ */
7295 if (((insn
& 0x0f70f000) == 0x0450f000) ||
7296 ((insn
& 0x0f70f010) == 0x0650f000)) {
7298 return; /* PLI; V7 */
7300 if (((insn
& 0x0f700000) == 0x04100000) ||
7301 ((insn
& 0x0f700010) == 0x06100000)) {
7302 if (!arm_feature(env
, ARM_FEATURE_V7MP
)) {
7305 return; /* v7MP: Unallocated memory hint: must NOP */
7308 if ((insn
& 0x0ffffdff) == 0x01010000) {
7311 if (((insn
>> 9) & 1) != s
->bswap_code
) {
7312 /* Dynamic endianness switching not implemented. */
7313 qemu_log_mask(LOG_UNIMP
, "arm: unimplemented setend\n");
7317 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
7318 switch ((insn
>> 4) & 0xf) {
7327 /* We don't emulate caches so these are a no-op. */
7332 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
7338 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
7340 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
7346 rn
= (insn
>> 16) & 0xf;
7347 addr
= load_reg(s
, rn
);
7348 i
= (insn
>> 23) & 3;
7350 case 0: offset
= -4; break; /* DA */
7351 case 1: offset
= 0; break; /* IA */
7352 case 2: offset
= -8; break; /* DB */
7353 case 3: offset
= 4; break; /* IB */
7357 tcg_gen_addi_i32(addr
, addr
, offset
);
7358 /* Load PC into tmp and CPSR into tmp2. */
7359 tmp
= tcg_temp_new_i32();
7360 gen_aa32_ld32u(tmp
, addr
, 0);
7361 tcg_gen_addi_i32(addr
, addr
, 4);
7362 tmp2
= tcg_temp_new_i32();
7363 gen_aa32_ld32u(tmp2
, addr
, 0);
7364 if (insn
& (1 << 21)) {
7365 /* Base writeback. */
7367 case 0: offset
= -8; break;
7368 case 1: offset
= 4; break;
7369 case 2: offset
= -4; break;
7370 case 3: offset
= 0; break;
7374 tcg_gen_addi_i32(addr
, addr
, offset
);
7375 store_reg(s
, rn
, addr
);
7377 tcg_temp_free_i32(addr
);
7379 gen_rfe(s
, tmp
, tmp2
);
7381 } else if ((insn
& 0x0e000000) == 0x0a000000) {
7382 /* branch link and change to thumb (blx <offset>) */
7385 val
= (uint32_t)s
->pc
;
7386 tmp
= tcg_temp_new_i32();
7387 tcg_gen_movi_i32(tmp
, val
);
7388 store_reg(s
, 14, tmp
);
7389 /* Sign-extend the 24-bit offset */
7390 offset
= (((int32_t)insn
) << 8) >> 8;
7391 /* offset * 4 + bit24 * 2 + (thumb bit) */
7392 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
7393 /* pipeline offset */
7395 /* protected by ARCH(5); above, near the start of uncond block */
7398 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
7399 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
7400 /* iWMMXt register transfer. */
7401 if (env
->cp15
.c15_cpar
& (1 << 1))
7402 if (!disas_iwmmxt_insn(env
, s
, insn
))
7405 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
7406 /* Coprocessor double register transfer. */
7408 } else if ((insn
& 0x0f000010) == 0x0e000010) {
7409 /* Additional coprocessor register transfer. */
7410 } else if ((insn
& 0x0ff10020) == 0x01000000) {
7413 /* cps (privileged) */
7417 if (insn
& (1 << 19)) {
7418 if (insn
& (1 << 8))
7420 if (insn
& (1 << 7))
7422 if (insn
& (1 << 6))
7424 if (insn
& (1 << 18))
7427 if (insn
& (1 << 17)) {
7429 val
|= (insn
& 0x1f);
7432 gen_set_psr_im(s
, mask
, 0, val
);
7439 /* if not always execute, we generate a conditional jump to
7441 s
->condlabel
= gen_new_label();
7442 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
7445 if ((insn
& 0x0f900000) == 0x03000000) {
7446 if ((insn
& (1 << 21)) == 0) {
7448 rd
= (insn
>> 12) & 0xf;
7449 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
7450 if ((insn
& (1 << 22)) == 0) {
7452 tmp
= tcg_temp_new_i32();
7453 tcg_gen_movi_i32(tmp
, val
);
7456 tmp
= load_reg(s
, rd
);
7457 tcg_gen_ext16u_i32(tmp
, tmp
);
7458 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
7460 store_reg(s
, rd
, tmp
);
7462 if (((insn
>> 12) & 0xf) != 0xf)
7464 if (((insn
>> 16) & 0xf) == 0) {
7465 gen_nop_hint(s
, insn
& 0xff);
7467 /* CPSR = immediate */
7469 shift
= ((insn
>> 8) & 0xf) * 2;
7471 val
= (val
>> shift
) | (val
<< (32 - shift
));
7472 i
= ((insn
& (1 << 22)) != 0);
7473 if (gen_set_psr_im(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, val
))
7477 } else if ((insn
& 0x0f900000) == 0x01000000
7478 && (insn
& 0x00000090) != 0x00000090) {
7479 /* miscellaneous instructions */
7480 op1
= (insn
>> 21) & 3;
7481 sh
= (insn
>> 4) & 0xf;
7484 case 0x0: /* move program status register */
7487 tmp
= load_reg(s
, rm
);
7488 i
= ((op1
& 2) != 0);
7489 if (gen_set_psr(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
7493 rd
= (insn
>> 12) & 0xf;
7497 tmp
= load_cpu_field(spsr
);
7499 tmp
= tcg_temp_new_i32();
7500 gen_helper_cpsr_read(tmp
, cpu_env
);
7502 store_reg(s
, rd
, tmp
);
7507 /* branch/exchange thumb (bx). */
7509 tmp
= load_reg(s
, rm
);
7511 } else if (op1
== 3) {
7514 rd
= (insn
>> 12) & 0xf;
7515 tmp
= load_reg(s
, rm
);
7516 gen_helper_clz(tmp
, tmp
);
7517 store_reg(s
, rd
, tmp
);
7525 /* Trivial implementation equivalent to bx. */
7526 tmp
= load_reg(s
, rm
);
7537 /* branch link/exchange thumb (blx) */
7538 tmp
= load_reg(s
, rm
);
7539 tmp2
= tcg_temp_new_i32();
7540 tcg_gen_movi_i32(tmp2
, s
->pc
);
7541 store_reg(s
, 14, tmp2
);
7544 case 0x5: /* saturating add/subtract */
7546 rd
= (insn
>> 12) & 0xf;
7547 rn
= (insn
>> 16) & 0xf;
7548 tmp
= load_reg(s
, rm
);
7549 tmp2
= load_reg(s
, rn
);
7551 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
7553 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
7555 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
7556 tcg_temp_free_i32(tmp2
);
7557 store_reg(s
, rd
, tmp
);
7560 /* SMC instruction (op1 == 3)
7561 and undefined instructions (op1 == 0 || op1 == 2)
7568 gen_exception_insn(s
, 4, EXCP_BKPT
);
7570 case 0x8: /* signed multiply */
7575 rs
= (insn
>> 8) & 0xf;
7576 rn
= (insn
>> 12) & 0xf;
7577 rd
= (insn
>> 16) & 0xf;
7579 /* (32 * 16) >> 16 */
7580 tmp
= load_reg(s
, rm
);
7581 tmp2
= load_reg(s
, rs
);
7583 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7586 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7587 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7588 tmp
= tcg_temp_new_i32();
7589 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7590 tcg_temp_free_i64(tmp64
);
7591 if ((sh
& 2) == 0) {
7592 tmp2
= load_reg(s
, rn
);
7593 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7594 tcg_temp_free_i32(tmp2
);
7596 store_reg(s
, rd
, tmp
);
7599 tmp
= load_reg(s
, rm
);
7600 tmp2
= load_reg(s
, rs
);
7601 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
7602 tcg_temp_free_i32(tmp2
);
7604 tmp64
= tcg_temp_new_i64();
7605 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7606 tcg_temp_free_i32(tmp
);
7607 gen_addq(s
, tmp64
, rn
, rd
);
7608 gen_storeq_reg(s
, rn
, rd
, tmp64
);
7609 tcg_temp_free_i64(tmp64
);
7612 tmp2
= load_reg(s
, rn
);
7613 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
7614 tcg_temp_free_i32(tmp2
);
7616 store_reg(s
, rd
, tmp
);
7623 } else if (((insn
& 0x0e000000) == 0 &&
7624 (insn
& 0x00000090) != 0x90) ||
7625 ((insn
& 0x0e000000) == (1 << 25))) {
7626 int set_cc
, logic_cc
, shiftop
;
7628 op1
= (insn
>> 21) & 0xf;
7629 set_cc
= (insn
>> 20) & 1;
7630 logic_cc
= table_logic_cc
[op1
] & set_cc
;
7632 /* data processing instruction */
7633 if (insn
& (1 << 25)) {
7634 /* immediate operand */
7636 shift
= ((insn
>> 8) & 0xf) * 2;
7638 val
= (val
>> shift
) | (val
<< (32 - shift
));
7640 tmp2
= tcg_temp_new_i32();
7641 tcg_gen_movi_i32(tmp2
, val
);
7642 if (logic_cc
&& shift
) {
7643 gen_set_CF_bit31(tmp2
);
7648 tmp2
= load_reg(s
, rm
);
7649 shiftop
= (insn
>> 5) & 3;
7650 if (!(insn
& (1 << 4))) {
7651 shift
= (insn
>> 7) & 0x1f;
7652 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
7654 rs
= (insn
>> 8) & 0xf;
7655 tmp
= load_reg(s
, rs
);
7656 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
7659 if (op1
!= 0x0f && op1
!= 0x0d) {
7660 rn
= (insn
>> 16) & 0xf;
7661 tmp
= load_reg(s
, rn
);
7663 TCGV_UNUSED_I32(tmp
);
7665 rd
= (insn
>> 12) & 0xf;
7668 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7672 store_reg_bx(env
, s
, rd
, tmp
);
7675 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7679 store_reg_bx(env
, s
, rd
, tmp
);
7682 if (set_cc
&& rd
== 15) {
7683 /* SUBS r15, ... is used for exception return. */
7687 gen_sub_CC(tmp
, tmp
, tmp2
);
7688 gen_exception_return(s
, tmp
);
7691 gen_sub_CC(tmp
, tmp
, tmp2
);
7693 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7695 store_reg_bx(env
, s
, rd
, tmp
);
7700 gen_sub_CC(tmp
, tmp2
, tmp
);
7702 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7704 store_reg_bx(env
, s
, rd
, tmp
);
7708 gen_add_CC(tmp
, tmp
, tmp2
);
7710 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7712 store_reg_bx(env
, s
, rd
, tmp
);
7716 gen_adc_CC(tmp
, tmp
, tmp2
);
7718 gen_add_carry(tmp
, tmp
, tmp2
);
7720 store_reg_bx(env
, s
, rd
, tmp
);
7724 gen_sbc_CC(tmp
, tmp
, tmp2
);
7726 gen_sub_carry(tmp
, tmp
, tmp2
);
7728 store_reg_bx(env
, s
, rd
, tmp
);
7732 gen_sbc_CC(tmp
, tmp2
, tmp
);
7734 gen_sub_carry(tmp
, tmp2
, tmp
);
7736 store_reg_bx(env
, s
, rd
, tmp
);
7740 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
7743 tcg_temp_free_i32(tmp
);
7747 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
7750 tcg_temp_free_i32(tmp
);
7754 gen_sub_CC(tmp
, tmp
, tmp2
);
7756 tcg_temp_free_i32(tmp
);
7760 gen_add_CC(tmp
, tmp
, tmp2
);
7762 tcg_temp_free_i32(tmp
);
7765 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
7769 store_reg_bx(env
, s
, rd
, tmp
);
7772 if (logic_cc
&& rd
== 15) {
7773 /* MOVS r15, ... is used for exception return. */
7777 gen_exception_return(s
, tmp2
);
7782 store_reg_bx(env
, s
, rd
, tmp2
);
7786 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
7790 store_reg_bx(env
, s
, rd
, tmp
);
7794 tcg_gen_not_i32(tmp2
, tmp2
);
7798 store_reg_bx(env
, s
, rd
, tmp2
);
7801 if (op1
!= 0x0f && op1
!= 0x0d) {
7802 tcg_temp_free_i32(tmp2
);
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch(op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            tcg_temp_free_i32(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    case 4:
                        /* 64 bit mul double accumulate (UMAAL) */
                        ARCH(6);
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        gen_addq_lo(s, tmp64, rn);
                        gen_addq_lo(s, tmp64, rd);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        tcg_temp_free_i64(tmp64);
                        break;
                    case 8: case 9: case 10: case 11:
                    case 12: case 13: case 14: case 15:
                        /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
                        } else {
                            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
                        }
                        if (insn & (1 << 21)) { /* mult accumulate */
                            TCGv_i32 al = load_reg(s, rn);
                            TCGv_i32 ah = load_reg(s, rd);
                            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
                            tcg_temp_free_i32(al);
                            tcg_temp_free_i32(ah);
                        }
                        if (insn & (1 << 20)) {
                            gen_logicq_cc(tmp, tmp2);
                        }
                        store_reg(s, rn, tmp);
                        store_reg(s, rd, tmp2);
                        break;
                    default:
                        goto illegal_op;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        int op2 = (insn >> 8) & 3;
                        op1 = (insn >> 21) & 0x3;

                        switch (op2) {
                        case 0: /* lda/stl */
                            if (op1 == 1) {
                                goto illegal_op;
                            }
                            ARCH(8);
                            break;
                        case 1: /* reserved */
                            goto illegal_op;
                        case 2: /* ldaex/stlex */
                            ARCH(8);
                            break;
                        case 3: /* ldrex/strex */
                            if (op1) {
                                ARCH(6K);
                            } else {
                                ARCH(6);
                            }
                            break;
                        }

                        addr = tcg_temp_local_new_i32();
                        load_reg_var(s, addr, rn);

                        /* Since the emulation does not have barriers,
                           the acquire/release semantics need no special
                           treatment */
                        if (op2 == 0) {
                            if (insn & (1 << 20)) {
                                tmp = tcg_temp_new_i32();
                                switch (op1) {
                                case 0: /* lda */
                                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                                    break;
                                case 2: /* ldab */
                                    gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                    break;
                                case 3: /* ldah */
                                    gen_aa32_ld16u(tmp, addr, IS_USER(s));
                                    break;
                                default:
                                    abort();
                                }
                                store_reg(s, rd, tmp);
                            } else {
                                rm = insn & 0xf;
                                tmp = load_reg(s, rm);
                                switch (op1) {
                                case 0: /* stl */
                                    gen_aa32_st32(tmp, addr, IS_USER(s));
                                    break;
                                case 2: /* stlb */
                                    gen_aa32_st8(tmp, addr, IS_USER(s));
                                    break;
                                case 3: /* stlh */
                                    gen_aa32_st16(tmp, addr, IS_USER(s));
                                    break;
                                default:
                                    abort();
                                }
                                tcg_temp_free_i32(tmp);
                            }
                        } else if (insn & (1 << 20)) {
                            switch (op1) {
                            case 0: /* ldrex */
                                gen_load_exclusive(s, rd, 15, addr, 2);
                                break;
                            case 1: /* ldrexd */
                                gen_load_exclusive(s, rd, rd + 1, addr, 3);
                                break;
                            case 2: /* ldrexb */
                                gen_load_exclusive(s, rd, 15, addr, 0);
                                break;
                            case 3: /* ldrexh */
                                gen_load_exclusive(s, rd, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        } else {
                            rm = insn & 0xf;
                            switch (op1) {
                            case 0: /* strex */
                                gen_store_exclusive(s, rd, rm, 15, addr, 2);
                                break;
                            case 1: /* strexd */
                                gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
                                break;
                            case 2: /* strexb */
                                gen_store_exclusive(s, rd, rm, 15, addr, 0);
                                break;
                            case 3: /* strexh */
                                gen_store_exclusive(s, rd, rm, 15, addr, 1);
                                break;
                            default:
                                abort();
                            }
                        }
                        tcg_temp_free_i32(addr);
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;

                        /* ??? This is not really atomic.  However we know
                           we never have multiple CPUs running in parallel,
                           so it is good enough.  */
                        addr = load_reg(s, rn);
                        tmp = load_reg(s, rm);
                        tmp2 = tcg_temp_new_i32();
                        if (insn & (1 << 22)) {
                            gen_aa32_ld8u(tmp2, addr, IS_USER(s));
                            gen_aa32_st8(tmp, addr, IS_USER(s));
                        } else {
                            gen_aa32_ld32u(tmp2, addr, IS_USER(s));
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(addr);
                        store_reg(s, rd, tmp2);
                    }
                }
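                /* For reference: gen_load_exclusive()/gen_store_exclusive()
                 * model the exclusive monitor with the cpu_exclusive_addr /
                 * cpu_exclusive_val globals declared at the top of this file
                 * rather than with a real hardware monitor.  A rough,
                 * simplified sketch of the store-exclusive check
                 * (illustrative pseudo-C only):
                 *
                 *   if (addr == exclusive_addr && mem[addr] == exclusive_val) {
                 *       mem[addr] = new_val;   // success: Rd = 0
                 *       rd = 0;
                 *   } else {
                 *       rd = 1;                // failure: no store performed
                 *   }
                 *   exclusive_addr = -1;       // monitor always cleared
                 *
                 * This is sufficient for a single emulated CPU, matching the
                 * SWP comment above about the lack of true atomicity.
                 */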
            } else {
                int address_offset;
                int load;
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                address_offset = 0;
                if (insn & (1 << 20)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    switch(sh) {
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld8s(tmp, addr, IS_USER(s));
                        break;
                    default:
                    case 3:
                        gen_aa32_ld16s(tmp, addr, IS_USER(s));
                        break;
                    }
                    load = 1;
                } else if (sh & 2) {
                    ARCH(5TE);
                    /* doubleword */
                    if (sh & 1) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                        load = 0;
                    } else {
                        /* load */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        rd++;
                        load = 1;
                    }
                    address_offset = -4;
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_aa32_st16(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    load = 0;
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap. */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        tmp2 = tcg_const_i32(sh);
                        if (insn & (1 << 22))
                            gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                        else
                            gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = tcg_temp_new_i32();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        tcg_temp_free_i32(tmp3);
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rotri_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
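                    /* For reference, the two pack variants above compute
                     * (illustrative C, with sh = shift amount):
                     *
                     *   pkhbt: rd = (rn & 0x0000ffff) | ((rm << sh) & 0xffff0000);
                     *   pkhtb: rd = (rn & 0xffff0000) | (((int32_t)rm >> sh) & 0xffff);
                     *
                     * i.e. one halfword comes from each source register, the
                     * second one optionally shifted first.
                     */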
                case 2: /* Multiplies (Type 3).  */
                    switch ((insn >> 20) & 0x7) {
                    case 5:
                        if (((insn >> 6) ^ (insn >> 7)) & 1) {
                            /* op2 not 00x or 11x : UNDEF */
                            goto illegal_op;
                        }
                        /* Signed multiply most significant [accumulate].
                           (SMMUL, SMMLA, SMMLS) */
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);

                        if (rd != 15) {
                            tmp = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tmp64 = gen_subq_msw(tmp64, tmp);
                            } else {
                                tmp64 = gen_addq_msw(tmp64, tmp);
                            }
                        }
                        if (insn & (1 << 5)) {
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        }
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        tcg_temp_free_i64(tmp64);
                        store_reg(s, rn, tmp);
                        break;
                    case 0:
                    case 4:
                        /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
                        if (insn & (1 << 7)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        if (insn & (1 << 6)) {
                            /* This subtraction cannot overflow. */
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            /* This addition cannot overflow 32 bits;
                             * however it may overflow considered as a signed
                             * operation, in which case we must set the Q flag.
                             */
                            gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                            tcg_temp_free_i64(tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                                tcg_temp_free_i32(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                        break;
                    case 1:
                    case 3:
                        /* SDIV, UDIV */
                        if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
                            goto illegal_op;
                        }
                        if (((insn >> 5) & 7) || (rd != 15)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        if (insn & (1 << 21)) {
                            gen_helper_udiv(tmp, tmp, tmp2);
                        } else {
                            gen_helper_sdiv(tmp, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        store_reg(s, rn, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
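                    /* For reference: with bit 5 set (the rounding "R"
                     * variants), the SMMUL/SMMLA/SMMLS code above rounds
                     * instead of truncating.  Sketch of the computation
                     * (illustrative C only):
                     *
                     *   int64_t p = (int64_t)(int32_t)rm_val * (int32_t)rs_val;
                     *   if (accumulate) p +/- = (int64_t)rd_val << 32;
                     *   if (round)      p += 0x80000000u;
                     *   result = (uint32_t)(p >> 32);
                     *
                     * Adding 0x80000000 before the 32-bit right shift is
                     * exactly "round to nearest" on the discarded low half.
                     */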
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
                            tcg_temp_free_i32(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                gen_ubfx(tmp, shift, (1u << i) - 1);
                            } else {
                                gen_sbfx(tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
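            /* For reference, the two bitfield extractions above are
             * (illustrative C, lsb = shift, width = i):
             *
             *   ubfx: rd = (rm >> lsb) & ((1u << width) - 1);
             *   sbfx: rd = ((int32_t)(rm << (32 - lsb - width))) >> (32 - width);
             *
             * which is why gen_ubfx() takes a precomputed mask while
             * gen_sbfx() takes the field width.
             */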
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
            {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 22)) {
                    gen_aa32_ld8u(tmp, tmp2, i);
                } else {
                    gen_aa32_ld32u(tmp, tmp2, i);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22)) {
                    gen_aa32_st8(tmp, tmp2, i);
                } else {
                    gen_aa32_st32(tmp, tmp2, i);
                }
                tcg_temp_free_i32(tmp);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                tcg_temp_free_i32(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                store_reg_from_load(env, s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, user, loaded_base;
                TCGv_i32 loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                user = 0;
                if (insn & (1 << 22)) {
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if ((insn & (1 << 15)) == 0)
                        user = 1;
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                TCGV_UNUSED_I32(loaded_var);
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            /* load */
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            if (user) {
                                tmp2 = tcg_const_i32(i);
                                gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                                tcg_temp_free_i32(tmp2);
                                tcg_temp_free_i32(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg_from_load(env, s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = tcg_temp_new_i32();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = tcg_temp_new_i32();
                                tmp2 = tcg_const_i32(i);
                                gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                                tcg_temp_free_i32(tmp2);
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    tcg_temp_free_i32(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
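        /* For reference: the pre/post increment/decrement cases above all
         * reduce to computing the lowest address touched, then walking up
         * in word steps.  Sketch (illustrative C, n = popcount of the
         * register list, base = Rn):
         *
         *   IA (P=0,U=1): start = base;
         *   IB (P=1,U=1): start = base + 4;
         *   DA (P=0,U=0): start = base - n * 4 + 4;
         *   DB (P=1,U=0): start = base - n * 4;
         *
         * The writeback value is base +/- n*4 regardless of walk order,
         * which is what the second P/U switch restores before store_reg().
         */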
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = sextract32(insn << 2, 0, 26);
                val += offset + 4;
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            if (((insn >> 8) & 0xe) == 10) {
                /* VFP.  */
                if (disas_vfp_insn(env, s, insn)) {
                    goto illegal_op;
                }
            } else if (disas_coproc_insn(env, s, insn)) {
                /* Coprocessor.  */
                goto illegal_op;
            }
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_exception_insn(s, 4, EXCP_UDEF);
            break;
        }
    }
}
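/* Host-side reference for the A32 branch offset computed in the 0xa/0xb
 * case above: the 24-bit immediate is shifted left two bits and sign
 * extended to 26 bits, then applied relative to PC+8 (s->pc already
 * points 4 past the instruction, hence the "+ 4" there).  This helper
 * is an illustrative sketch only and is not called by the decoder.
 */
static inline int32_t a32_branch_offset_ref(uint32_t insn)
{
    /* Equivalent to sextract32(insn << 2, 0, 26).  */
    return ((int32_t)(insn << 8)) >> 6;
}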
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
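/* Reference model for the ADC flag behaviour used by gen_adc_CC() in the
 * helper above: carry-out comes from the 33-bit sum and V from the usual
 * signed-overflow rule.  Illustrative sketch only; the TCG code computes
 * the same values with add2/xor operations instead.
 */
static inline uint32_t adc_flags_ref(uint32_t a, uint32_t b, int carry_in,
                                     int *c_out, int *v_out)
{
    uint64_t sum = (uint64_t)a + b + carry_in;
    uint32_t res = (uint32_t)sum;

    *c_out = sum >> 32;                         /* unsigned carry */
    *v_out = (~(a ^ b) & (a ^ res)) >> 31;      /* signed overflow */
    return res;
}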
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            ARCH(5);
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(tmp, addr, IS_USER(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(tmp, addr, IS_USER(s));
                }
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    break;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    if (insn & (1 << 20)) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u(tmp, addr, IS_USER(s));
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8(tmp, addr, IS_USER(s));
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            break;
                        case 2: /* stl */
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || IS_M(env)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp2, addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                TCGV_UNUSED_I32(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:

        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(env, s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    qemu_log_mask(LOG_UNIMP,
                                  "arm: unimplemented secure monitor call\n");
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                                op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                arm_gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
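            /* For reference, the scattered fields above assemble a 21-bit
             * conditional branch offset (illustrative C):
             *
             *   offset = (S << 20) | (J2 << 19) | (J1 << 18)
             *          | (imm6 << 12) | (imm11 << 1);
             *   offset = sign_extend(offset, 21);
             *
             * with S = insn[26], J1 = insn[13], J2 = insn[11],
             * imm6 = insn[21:16] and imm11 = insn[10:0].
             */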
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
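        /* For reference, the four special "shift" values above expand the
         * 8-bit constant XY as (illustrative C):
         *
         *   0: 0x000000XY        2: 0xXY00XY00
         *   1: 0x00XY00XY        3: 0xXYXYXYXY
         *
         * and every other value encodes a rotated constant:
         *
         *   imm = ror32(0x80 | (imm & 0x7f), (shift << 1) | (imm >> 7));
         *
         * in which case shifter_out is set so the carry flag can be updated
         * from bit 31, mirroring the A32 immediate form.
         */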
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            tmp = tcg_temp_new_i32();
            switch (op) {
            case 0:
                gen_aa32_ld8u(tmp, addr, user);
                break;
            case 4:
                gen_aa32_ld8s(tmp, addr, user);
                break;
            case 1:
                gen_aa32_ld16u(tmp, addr, user);
                break;
            case 5:
                gen_aa32_ld16s(tmp, addr, user);
                break;
            case 2:
                gen_aa32_ld32u(tmp, addr, user);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0:
                gen_aa32_st8(tmp, addr, user);
                break;
            case 1:
                gen_aa32_st16(tmp, addr, user);
                break;
            case 2:
                gen_aa32_st32(tmp, addr, user);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            tcg_temp_free_i32(tmp);
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
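/* Host-side reference for the legacy two-halfword Thumb BL/BLX sequence
 * handled at the top of disas_thumb2_insn(): the first halfword carries
 * the sign-extended high part of the offset, the second the low 11 bits.
 * Illustrative sketch only; not called by the decoder.
 */
static inline int32_t thumb_bl_offset_ref(uint16_t hw1, uint16_t hw2)
{
    int32_t offset = ((int32_t)((uint32_t)hw1 << 21)) >> 9; /* hw1[10:0] -> [22:12] */
    offset |= (hw2 & 0x7ff) << 1;                           /* hw2[10:0] -> [11:1] */
    return offset;
}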
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_aa32_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_aa32_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, IS_USER(s));
            break;
        case 4: /* ldr */
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u(tmp, addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u(tmp, addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, IS_USER(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (IS_M(env)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;
    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
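/* Reference model of the IT-state advance performed after each Thumb
 * instruction in the translation loop below: the mask shifts left one bit
 * per instruction, feeding its top bit into the low condition bit, until
 * the block is exhausted.  Illustrative sketch only; not called by the
 * translator.
 */
static inline void it_advance_ref(uint32_t *cond, uint32_t *mask)
{
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0;
    }
}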
10526 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10527 basic block 'tb'. If search_pc is TRUE, also generate PC
10528 information for each intermediate instruction. */
10529 static inline void gen_intermediate_code_internal(ARMCPU
*cpu
,
10530 TranslationBlock
*tb
,
10533 CPUState
*cs
= CPU(cpu
);
10534 CPUARMState
*env
= &cpu
->env
;
10535 DisasContext dc1
, *dc
= &dc1
;
10537 uint16_t *gen_opc_end
;
10539 target_ulong pc_start
;
10540 target_ulong next_page_start
;
10544 /* generate intermediate code */
10546 /* The A64 decoder has its own top level loop, because it doesn't need
10547 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10549 if (ARM_TBFLAG_AARCH64_STATE(tb
->flags
)) {
10550 gen_intermediate_code_internal_a64(cpu
, tb
, search_pc
);
10558 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
10560 dc
->is_jmp
= DISAS_NEXT
;
10562 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
10566 dc
->thumb
= ARM_TBFLAG_THUMB(tb
->flags
);
10567 dc
->bswap_code
= ARM_TBFLAG_BSWAP_CODE(tb
->flags
);
10568 dc
->condexec_mask
= (ARM_TBFLAG_CONDEXEC(tb
->flags
) & 0xf) << 1;
10569 dc
->condexec_cond
= ARM_TBFLAG_CONDEXEC(tb
->flags
) >> 4;
10570 #if !defined(CONFIG_USER_ONLY)
10571 dc
->user
= (ARM_TBFLAG_PRIV(tb
->flags
) == 0);
10573 dc
->vfp_enabled
= ARM_TBFLAG_VFPEN(tb
->flags
);
10574 dc
->vec_len
= ARM_TBFLAG_VECLEN(tb
->flags
);
10575 dc
->vec_stride
= ARM_TBFLAG_VECSTRIDE(tb
->flags
);
10576 dc
->cp_regs
= cpu
->cp_regs
;
10577 dc
->current_pl
= arm_current_pl(env
);
10579 cpu_F0s
= tcg_temp_new_i32();
10580 cpu_F1s
= tcg_temp_new_i32();
10581 cpu_F0d
= tcg_temp_new_i64();
10582 cpu_F1d
= tcg_temp_new_i64();
10585 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10586 cpu_M0
= tcg_temp_new_i64();
10587 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
10590 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
10591 if (max_insns
== 0)
10592 max_insns
= CF_COUNT_MASK
;
10596 tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */
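
    /* Worked example, for illustration only: for "ITE EQ" the IT insn
     * sets ITSTATE to firstcond=0b0000 (EQ) with mask=0b1100, so here
     * dc->condexec_cond == 0b0000 and dc->condexec_mask == 0b11000
     * (the 4-bit mask pre-shifted left by one, as set up above). The
     * per-insn advance in the Thumb decode path below shifts the mask
     * left and feeds its top bit into the condition LSB: the first insn
     * is translated under EQ, the second under NE (0b0001), and once the
     * mask reaches zero the condition is cleared and the block is over.
     */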
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
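
        /* The gen_opc_* entries filled in above map each generated opcode
         * index back to the guest PC and the condexec (IT) state, which
         * restore_state_to_opc() uses to recover CPU state when a TB
         * faults part-way through execution.
         */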
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx "\n",
                    dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
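
    /* Besides the conditions documented above, the loop also terminates
     * when the opcode buffer is close to full, when single-stepping, and
     * when the instruction budget for this TB (max_insns) is exhausted.
     */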
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
}
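
/* The two entry points above differ only in search_pc: the _pc variant
 * repopulates the gen_opc_* side tables so that an opcode index recorded
 * at fault time can be mapped back to guest PC and condexec state by
 * restore_state_to_opc() below.
 */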
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
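
/* Restore guest CPU state from the side tables produced by a search_pc
 * retranslation: pc_pos indexes gen_opc_pc[] (and, for AArch32,
 * gen_opc_condexec_bits[]) to recover the PC and the condexec (IT) state
 * in force at the faulting instruction.
 */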
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    if (is_a64(env)) {
        env->pc = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = gen_opc_condexec_bits[pc_pos];
    }
}