target-arm/translate.c (qemu.git)
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "internals.h"
29 #include "disas/disas.h"
30 #include "tcg-op.h"
31 #include "qemu/log.h"
32 #include "qemu/bitops.h"
33 #include "arm_ldst.h"
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
38 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
39 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
40 /* currently all emulated v5 cores are also v5TE, so don't bother */
41 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
42 #define ENABLE_ARCH_5J 0
43 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
44 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
45 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
46 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
47 #define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
49 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
51 #include "translate.h"
52 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(s) 1
56 #else
57 #define IS_USER(s) (s->user)
58 #endif
60 TCGv_ptr cpu_env;
61 /* We reuse the same 64-bit temporaries for efficiency. */
62 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
63 static TCGv_i32 cpu_R[16];
64 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65 static TCGv_i64 cpu_exclusive_addr;
66 static TCGv_i64 cpu_exclusive_val;
67 #ifdef CONFIG_USER_ONLY
68 static TCGv_i64 cpu_exclusive_test;
69 static TCGv_i32 cpu_exclusive_info;
70 #endif
72 /* FIXME: These should be removed. */
73 static TCGv_i32 cpu_F0s, cpu_F1s;
74 static TCGv_i64 cpu_F0d, cpu_F1d;
76 #include "exec/gen-icount.h"
78 static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
82 /* initialize TCG globals. */
83 void arm_translate_init(void)
85 int i;
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
91 offsetof(CPUARMState, regs[i]),
92 regnames[i]);
94 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
99 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
101 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103 #ifdef CONFIG_USER_ONLY
104 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
105 offsetof(CPUARMState, exclusive_test), "exclusive_test");
106 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUARMState, exclusive_info), "exclusive_info");
108 #endif
110 a64_translate_init();
113 static inline TCGv_i32 load_cpu_offset(int offset)
115 TCGv_i32 tmp = tcg_temp_new_i32();
116 tcg_gen_ld_i32(tmp, cpu_env, offset);
117 return tmp;
120 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
122 static inline void store_cpu_offset(TCGv_i32 var, int offset)
124 tcg_gen_st_i32(var, cpu_env, offset);
125 tcg_temp_free_i32(var);
128 #define store_cpu_field(var, name) \
129 store_cpu_offset(var, offsetof(CPUARMState, name))
131 /* Set a variable to the value of a CPU register. */
132 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
134 if (reg == 15) {
135 uint32_t addr;
136 /* normally, since s->pc has already been advanced past this insn, only one more insn's worth needs to be added to form the architectural PC value */
137 if (s->thumb)
138 addr = (long)s->pc + 2;
139 else
140 addr = (long)s->pc + 4;
141 tcg_gen_movi_i32(var, addr);
142 } else {
143 tcg_gen_mov_i32(var, cpu_R[reg]);
147 /* Create a new temporary and set it to the value of a CPU register. */
148 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
150 TCGv_i32 tmp = tcg_temp_new_i32();
151 load_reg_var(s, tmp, reg);
152 return tmp;
155 /* Set a CPU register. The source must be a temporary and will be
156 marked as dead. */
157 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
159 if (reg == 15) {
160 tcg_gen_andi_i32(var, var, ~1);
161 s->is_jmp = DISAS_JUMP;
163 tcg_gen_mov_i32(cpu_R[reg], var);
164 tcg_temp_free_i32(var);
167 /* Value extensions. */
168 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
169 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
170 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
171 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
173 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
174 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
177 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
179 TCGv_i32 tmp_mask = tcg_const_i32(mask);
180 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
181 tcg_temp_free_i32(tmp_mask);
183 /* Set NZCV flags from the high 4 bits of var. */
184 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
186 static void gen_exception_internal(int excp)
188 TCGv_i32 tcg_excp = tcg_const_i32(excp);
190 assert(excp_is_internal(excp));
191 gen_helper_exception_internal(cpu_env, tcg_excp);
192 tcg_temp_free_i32(tcg_excp);
195 static void gen_exception(int excp, uint32_t syndrome)
197 TCGv_i32 tcg_excp = tcg_const_i32(excp);
198 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
200 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
201 tcg_temp_free_i32(tcg_syn);
202 tcg_temp_free_i32(tcg_excp);
205 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
207 TCGv_i32 tmp1 = tcg_temp_new_i32();
208 TCGv_i32 tmp2 = tcg_temp_new_i32();
209 tcg_gen_ext16s_i32(tmp1, a);
210 tcg_gen_ext16s_i32(tmp2, b);
211 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
212 tcg_temp_free_i32(tmp2);
213 tcg_gen_sari_i32(a, a, 16);
214 tcg_gen_sari_i32(b, b, 16);
215 tcg_gen_mul_i32(b, b, a);
216 tcg_gen_mov_i32(a, tmp1);
217 tcg_temp_free_i32(tmp1);
220 /* Byteswap each halfword. */
221 static void gen_rev16(TCGv_i32 var)
223 TCGv_i32 tmp = tcg_temp_new_i32();
224 tcg_gen_shri_i32(tmp, var, 8);
225 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
226 tcg_gen_shli_i32(var, var, 8);
227 tcg_gen_andi_i32(var, var, 0xff00ff00);
228 tcg_gen_or_i32(var, var, tmp);
229 tcg_temp_free_i32(tmp);
232 /* Byteswap low halfword and sign extend. */
233 static void gen_revsh(TCGv_i32 var)
235 tcg_gen_ext16u_i32(var, var);
236 tcg_gen_bswap16_i32(var, var);
237 tcg_gen_ext16s_i32(var, var);
240 /* Unsigned bitfield extract. */
241 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
243 if (shift)
244 tcg_gen_shri_i32(var, var, shift);
245 tcg_gen_andi_i32(var, var, mask);
248 /* Signed bitfield extract. */
249 static void gen_sbfx(TCGv_i32 var, int shift, int width)
251 uint32_t signbit;
253 if (shift)
254 tcg_gen_sari_i32(var, var, shift);
255 if (shift + width < 32) {
256 signbit = 1u << (width - 1);
257 tcg_gen_andi_i32(var, var, (1u << width) - 1);
258 tcg_gen_xori_i32(var, var, signbit);
259 tcg_gen_subi_i32(var, var, signbit);
263 /* Return (b << 32) + a. Mark inputs as dead */
264 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
266 TCGv_i64 tmp64 = tcg_temp_new_i64();
268 tcg_gen_extu_i32_i64(tmp64, b);
269 tcg_temp_free_i32(b);
270 tcg_gen_shli_i64(tmp64, tmp64, 32);
271 tcg_gen_add_i64(a, tmp64, a);
273 tcg_temp_free_i64(tmp64);
274 return a;
277 /* Return (b << 32) - a. Mark inputs as dead. */
278 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
280 TCGv_i64 tmp64 = tcg_temp_new_i64();
282 tcg_gen_extu_i32_i64(tmp64, b);
283 tcg_temp_free_i32(b);
284 tcg_gen_shli_i64(tmp64, tmp64, 32);
285 tcg_gen_sub_i64(a, tmp64, a);
287 tcg_temp_free_i64(tmp64);
288 return a;
291 /* 32x32->64 multiply. Marks inputs as dead. */
292 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
294 TCGv_i32 lo = tcg_temp_new_i32();
295 TCGv_i32 hi = tcg_temp_new_i32();
296 TCGv_i64 ret;
298 tcg_gen_mulu2_i32(lo, hi, a, b);
299 tcg_temp_free_i32(a);
300 tcg_temp_free_i32(b);
302 ret = tcg_temp_new_i64();
303 tcg_gen_concat_i32_i64(ret, lo, hi);
304 tcg_temp_free_i32(lo);
305 tcg_temp_free_i32(hi);
307 return ret;
310 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
314 TCGv_i64 ret;
316 tcg_gen_muls2_i32(lo, hi, a, b);
317 tcg_temp_free_i32(a);
318 tcg_temp_free_i32(b);
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
325 return ret;
328 /* Swap low and high halfwords. */
329 static void gen_swap_half(TCGv_i32 var)
331 TCGv_i32 tmp = tcg_temp_new_i32();
332 tcg_gen_shri_i32(tmp, var, 16);
333 tcg_gen_shli_i32(var, var, 16);
334 tcg_gen_or_i32(var, var, tmp);
335 tcg_temp_free_i32(tmp);
338 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
339 tmp = (t0 ^ t1) & 0x8000;
340 t0 &= ~0x8000;
341 t1 &= ~0x8000;
342 t0 = (t0 + t1) ^ tmp;
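   (Clearing bit 15 of both operands stops a carry from the low halfword
   propagating into the high one; the final XOR with the saved bit-15
   values restores the correct bit 15 of the result.)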
345 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
347 TCGv_i32 tmp = tcg_temp_new_i32();
348 tcg_gen_xor_i32(tmp, t0, t1);
349 tcg_gen_andi_i32(tmp, tmp, 0x8000);
350 tcg_gen_andi_i32(t0, t0, ~0x8000);
351 tcg_gen_andi_i32(t1, t1, ~0x8000);
352 tcg_gen_add_i32(t0, t0, t1);
353 tcg_gen_xor_i32(t0, t0, tmp);
354 tcg_temp_free_i32(tmp);
355 tcg_temp_free_i32(t1);
358 /* Set CF to the top bit of var. */
359 static void gen_set_CF_bit31(TCGv_i32 var)
361 tcg_gen_shri_i32(cpu_CF, var, 31);
364 /* Set N and Z flags from var. */
365 static inline void gen_logic_CC(TCGv_i32 var)
367 tcg_gen_mov_i32(cpu_NF, var);
368 tcg_gen_mov_i32(cpu_ZF, var);
371 /* T0 += T1 + CF. */
372 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
374 tcg_gen_add_i32(t0, t0, t1);
375 tcg_gen_add_i32(t0, t0, cpu_CF);
378 /* dest = T0 + T1 + CF. */
379 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
381 tcg_gen_add_i32(dest, t0, t1);
382 tcg_gen_add_i32(dest, dest, cpu_CF);
385 /* dest = T0 - T1 + CF - 1. */
386 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
388 tcg_gen_sub_i32(dest, t0, t1);
389 tcg_gen_add_i32(dest, dest, cpu_CF);
390 tcg_gen_subi_i32(dest, dest, 1);
393 /* dest = T0 + T1. Compute C, N, V and Z flags */
394 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
396 TCGv_i32 tmp = tcg_temp_new_i32();
397 tcg_gen_movi_i32(tmp, 0);
398 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
399 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
400 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
403 tcg_temp_free_i32(tmp);
404 tcg_gen_mov_i32(dest, cpu_NF);
407 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
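/* Two code paths below: when the TCG backend has add2_i32, the carry chain
 * is built from two add2 ops (add the incoming carry to t0 first, then t1,
 * accumulating the carry-out in cpu_CF); otherwise the operands are widened
 * to 64 bits, added there, and bit 32 of the sum is split back out into
 * cpu_CF.
 */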
408 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
410 TCGv_i32 tmp = tcg_temp_new_i32();
411 if (TCG_TARGET_HAS_add2_i32) {
412 tcg_gen_movi_i32(tmp, 0);
413 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
414 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
415 } else {
416 TCGv_i64 q0 = tcg_temp_new_i64();
417 TCGv_i64 q1 = tcg_temp_new_i64();
418 tcg_gen_extu_i32_i64(q0, t0);
419 tcg_gen_extu_i32_i64(q1, t1);
420 tcg_gen_add_i64(q0, q0, q1);
421 tcg_gen_extu_i32_i64(q1, cpu_CF);
422 tcg_gen_add_i64(q0, q0, q1);
423 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
424 tcg_temp_free_i64(q0);
425 tcg_temp_free_i64(q1);
427 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
428 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
429 tcg_gen_xor_i32(tmp, t0, t1);
430 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
431 tcg_temp_free_i32(tmp);
432 tcg_gen_mov_i32(dest, cpu_NF);
435 /* dest = T0 - T1. Compute C, N, V and Z flags */
436 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
438 TCGv_i32 tmp;
439 tcg_gen_sub_i32(cpu_NF, t0, t1);
440 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
441 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
442 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
443 tmp = tcg_temp_new_i32();
444 tcg_gen_xor_i32(tmp, t0, t1);
445 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
446 tcg_temp_free_i32(tmp);
447 tcg_gen_mov_i32(dest, cpu_NF);
450 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
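/* Subtract-with-carry is implemented as ADC(t0, ~t1): with ARM's inverted
 * borrow convention, T0 + ~T1 + CF gives exactly the same result and NZCV
 * flags as the SBC definition, so gen_adc_CC can be reused.
 */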
451 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
453 TCGv_i32 tmp = tcg_temp_new_i32();
454 tcg_gen_not_i32(tmp, t1);
455 gen_adc_CC(dest, t0, tmp);
456 tcg_temp_free_i32(tmp);
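/* Register-controlled LSL/LSR: only the low byte of the shift register is
 * used, and shift amounts of 32 or more must yield zero.  The movcond
 * below substitutes a zero source operand when (t1 & 0xff) > 0x1f, then
 * shifts by the low five bits.
 */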
459 #define GEN_SHIFT(name) \
460 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
462 TCGv_i32 tmp1, tmp2, tmp3; \
463 tmp1 = tcg_temp_new_i32(); \
464 tcg_gen_andi_i32(tmp1, t1, 0xff); \
465 tmp2 = tcg_const_i32(0); \
466 tmp3 = tcg_const_i32(0x1f); \
467 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
468 tcg_temp_free_i32(tmp3); \
469 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
470 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
471 tcg_temp_free_i32(tmp2); \
472 tcg_temp_free_i32(tmp1); \
474 GEN_SHIFT(shl)
475 GEN_SHIFT(shr)
476 #undef GEN_SHIFT
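/* Register-controlled ASR differs: amounts of 32 or more behave like a
 * shift by 31 (every bit becomes a copy of the sign bit), so the shift
 * count is clamped to 0x1f instead of forcing the result to zero.
 */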
478 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
480 TCGv_i32 tmp1, tmp2;
481 tmp1 = tcg_temp_new_i32();
482 tcg_gen_andi_i32(tmp1, t1, 0xff);
483 tmp2 = tcg_const_i32(0x1f);
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
485 tcg_temp_free_i32(tmp2);
486 tcg_gen_sar_i32(dest, t0, tmp1);
487 tcg_temp_free_i32(tmp1);
490 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
492 TCGv_i32 c0 = tcg_const_i32(0);
493 TCGv_i32 tmp = tcg_temp_new_i32();
494 tcg_gen_neg_i32(tmp, src);
495 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
496 tcg_temp_free_i32(c0);
497 tcg_temp_free_i32(tmp);
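/* Set CF to the shifter carry-out: bit 'shift' of var (bit 0 when shift is
 * 0).  For shift == 31 the single remaining bit needs no masking.
 */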
500 static void shifter_out_im(TCGv_i32 var, int shift)
502 if (shift == 0) {
503 tcg_gen_andi_i32(cpu_CF, var, 1);
504 } else {
505 tcg_gen_shri_i32(cpu_CF, var, shift);
506 if (shift != 31) {
507 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
512 /* Shift by immediate. Includes special handling for shift == 0. */
513 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
514 int shift, int flags)
516 switch (shiftop) {
517 case 0: /* LSL */
518 if (shift != 0) {
519 if (flags)
520 shifter_out_im(var, 32 - shift);
521 tcg_gen_shli_i32(var, var, shift);
523 break;
524 case 1: /* LSR */
525 if (shift == 0) {
526 if (flags) {
527 tcg_gen_shri_i32(cpu_CF, var, 31);
529 tcg_gen_movi_i32(var, 0);
530 } else {
531 if (flags)
532 shifter_out_im(var, shift - 1);
533 tcg_gen_shri_i32(var, var, shift);
535 break;
536 case 2: /* ASR */
537 if (shift == 0)
538 shift = 32;
539 if (flags)
540 shifter_out_im(var, shift - 1);
541 if (shift == 32)
542 shift = 31;
543 tcg_gen_sari_i32(var, var, shift);
544 break;
545 case 3: /* ROR/RRX */
546 if (shift != 0) {
547 if (flags)
548 shifter_out_im(var, shift - 1);
549 tcg_gen_rotri_i32(var, var, shift); break;
550 } else {
551 TCGv_i32 tmp = tcg_temp_new_i32();
552 tcg_gen_shli_i32(tmp, cpu_CF, 31);
553 if (flags)
554 shifter_out_im(var, 0);
555 tcg_gen_shri_i32(var, var, 1);
556 tcg_gen_or_i32(var, var, tmp);
557 tcg_temp_free_i32(tmp);
562 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
563 TCGv_i32 shift, int flags)
565 if (flags) {
566 switch (shiftop) {
567 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
568 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
569 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
570 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
572 } else {
573 switch (shiftop) {
574 case 0:
575 gen_shl(var, var, shift);
576 break;
577 case 1:
578 gen_shr(var, var, shift);
579 break;
580 case 2:
581 gen_sar(var, var, shift);
582 break;
583 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
584 tcg_gen_rotr_i32(var, var, shift); break;
587 tcg_temp_free_i32(shift);
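/* ARMv6 parallel add/subtract.  PAS_OP dispatches on the operation field
 * to the add16/addsubx/subaddx/sub16/add8/sub8 helpers; the prefix picks
 * the variant: s/u (signed/unsigned, which also update the GE flags and
 * therefore receive a pointer to env->GE), q/uq (saturating) and sh/uh
 * (halving).
 */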
590 #define PAS_OP(pfx) \
591 switch (op2) { \
592 case 0: gen_pas_helper(glue(pfx,add16)); break; \
593 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
594 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
595 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
596 case 4: gen_pas_helper(glue(pfx,add8)); break; \
597 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
599 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
601 TCGv_ptr tmp;
603 switch (op1) {
604 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
605 case 1:
606 tmp = tcg_temp_new_ptr();
607 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
608 PAS_OP(s)
609 tcg_temp_free_ptr(tmp);
610 break;
611 case 5:
612 tmp = tcg_temp_new_ptr();
613 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
614 PAS_OP(u)
615 tcg_temp_free_ptr(tmp);
616 break;
617 #undef gen_pas_helper
618 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
619 case 2:
620 PAS_OP(q);
621 break;
622 case 3:
623 PAS_OP(sh);
624 break;
625 case 6:
626 PAS_OP(uq);
627 break;
628 case 7:
629 PAS_OP(uh);
630 break;
631 #undef gen_pas_helper
634 #undef PAS_OP
636 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
637 #define PAS_OP(pfx) \
638 switch (op1) { \
639 case 0: gen_pas_helper(glue(pfx,add8)); break; \
640 case 1: gen_pas_helper(glue(pfx,add16)); break; \
641 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
642 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
643 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
644 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
646 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
648 TCGv_ptr tmp;
650 switch (op2) {
651 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
652 case 0:
653 tmp = tcg_temp_new_ptr();
654 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
655 PAS_OP(s)
656 tcg_temp_free_ptr(tmp);
657 break;
658 case 4:
659 tmp = tcg_temp_new_ptr();
660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
661 PAS_OP(u)
662 tcg_temp_free_ptr(tmp);
663 break;
664 #undef gen_pas_helper
665 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
666 case 1:
667 PAS_OP(q);
668 break;
669 case 2:
670 PAS_OP(sh);
671 break;
672 case 5:
673 PAS_OP(uq);
674 break;
675 case 6:
676 PAS_OP(uh);
677 break;
678 #undef gen_pas_helper
681 #undef PAS_OP
684 * generate a conditional branch based on ARM condition code cc.
685 * This is common between ARM and Aarch64 targets.
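 * The flags are stored in the format used throughout this file: cpu_ZF is
 * zero iff the Z flag is set, cpu_NF and cpu_VF hold N and V in bit 31,
 * and cpu_CF holds C as 0 or 1.  That is why eq/ne compare ZF with zero,
 * mi/pl and vs/vc use signed compares against zero, and ge/lt XOR NF with
 * VF and test the sign of the result.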
687 void arm_gen_test_cc(int cc, int label)
689 TCGv_i32 tmp;
690 int inv;
692 switch (cc) {
693 case 0: /* eq: Z */
694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
695 break;
696 case 1: /* ne: !Z */
697 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
698 break;
699 case 2: /* cs: C */
700 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
701 break;
702 case 3: /* cc: !C */
703 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
704 break;
705 case 4: /* mi: N */
706 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
707 break;
708 case 5: /* pl: !N */
709 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
710 break;
711 case 6: /* vs: V */
712 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
713 break;
714 case 7: /* vc: !V */
715 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
716 break;
717 case 8: /* hi: C && !Z */
718 inv = gen_new_label();
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
720 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
721 gen_set_label(inv);
722 break;
723 case 9: /* ls: !C || Z */
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
725 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
726 break;
727 case 10: /* ge: N == V -> N ^ V == 0 */
728 tmp = tcg_temp_new_i32();
729 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
730 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
731 tcg_temp_free_i32(tmp);
732 break;
733 case 11: /* lt: N != V -> N ^ V != 0 */
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
736 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
737 tcg_temp_free_i32(tmp);
738 break;
739 case 12: /* gt: !Z && N == V */
740 inv = gen_new_label();
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
745 tcg_temp_free_i32(tmp);
746 gen_set_label(inv);
747 break;
748 case 13: /* le: Z || N != V */
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
750 tmp = tcg_temp_new_i32();
751 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
752 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
753 tcg_temp_free_i32(tmp);
754 break;
755 default:
756 fprintf(stderr, "Bad condition code 0x%x\n", cc);
757 abort();
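/* Indexed by the data-processing opcode field: 1 for the logical
 * operations (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN), whose flag-setting
 * forms update only N and Z from the result (C comes from the shifter, V
 * is unchanged), 0 for the arithmetic ones, which compute full NZCV.
 */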
761 static const uint8_t table_logic_cc[16] = {
762 1, /* and */
763 1, /* xor */
764 0, /* sub */
765 0, /* rsb */
766 0, /* add */
767 0, /* adc */
768 0, /* sbc */
769 0, /* rsc */
770 1, /* andl */
771 1, /* xorl */
772 0, /* cmp */
773 0, /* cmn */
774 1, /* orr */
775 1, /* mov */
776 1, /* bic */
777 1, /* mvn */
780 /* Set PC and Thumb state from an immediate address. */
781 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
783 TCGv_i32 tmp;
785 s->is_jmp = DISAS_UPDATE;
786 if (s->thumb != (addr & 1)) {
787 tmp = tcg_temp_new_i32();
788 tcg_gen_movi_i32(tmp, addr & 1);
789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
790 tcg_temp_free_i32(tmp);
792 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
795 /* Set PC and Thumb state from var. var is marked as dead. */
796 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
798 s->is_jmp = DISAS_UPDATE;
799 tcg_gen_andi_i32(cpu_R[15], var, ~1);
800 tcg_gen_andi_i32(var, var, 1);
801 store_cpu_field(var, thumb);
804 /* Variant of store_reg which uses branch&exchange logic when storing
805 to r15 in ARM architecture v7 and above. The source must be a temporary
806 and will be marked as dead. */
807 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
808 int reg, TCGv_i32 var)
810 if (reg == 15 && ENABLE_ARCH_7) {
811 gen_bx(s, var);
812 } else {
813 store_reg(s, reg, var);
817 /* Variant of store_reg which uses branch&exchange logic when storing
818 * to r15 in ARM architecture v5T and above. This is used for storing
819 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
820 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
821 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
822 int reg, TCGv_i32 var)
824 if (reg == 15 && ENABLE_ARCH_5) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
831 /* Abstractions of "generate code to do a guest load/store for
832 * AArch32", where a vaddr is always 32 bits (and is zero
833 * extended if we're a 64 bit core) and data is also
834 * 32 bits unless specifically doing a 64 bit access.
835 * These functions work like tcg_gen_qemu_{ld,st}* except
836 * that the address argument is TCGv_i32 rather than TCGv.
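 * When TARGET_LONG_BITS == 32 the i32 address can be handed straight to
 * the qemu_ld/st ops; for a 64-bit target address space it is first
 * zero-extended into a 64-bit temporary.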
838 #if TARGET_LONG_BITS == 32
840 #define DO_GEN_LD(SUFF, OPC) \
841 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
843 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
846 #define DO_GEN_ST(SUFF, OPC) \
847 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
849 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
852 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
854 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
857 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
859 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
862 #else
864 #define DO_GEN_LD(SUFF, OPC) \
865 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
867 TCGv addr64 = tcg_temp_new(); \
868 tcg_gen_extu_i32_i64(addr64, addr); \
869 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
870 tcg_temp_free(addr64); \
873 #define DO_GEN_ST(SUFF, OPC) \
874 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
876 TCGv addr64 = tcg_temp_new(); \
877 tcg_gen_extu_i32_i64(addr64, addr); \
878 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
879 tcg_temp_free(addr64); \
882 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
884 TCGv addr64 = tcg_temp_new();
885 tcg_gen_extu_i32_i64(addr64, addr);
886 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
887 tcg_temp_free(addr64);
890 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
892 TCGv addr64 = tcg_temp_new();
893 tcg_gen_extu_i32_i64(addr64, addr);
894 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
895 tcg_temp_free(addr64);
898 #endif
900 DO_GEN_LD(8s, MO_SB)
901 DO_GEN_LD(8u, MO_UB)
902 DO_GEN_LD(16s, MO_TESW)
903 DO_GEN_LD(16u, MO_TEUW)
904 DO_GEN_LD(32u, MO_TEUL)
905 DO_GEN_ST(8, MO_UB)
906 DO_GEN_ST(16, MO_TEUW)
907 DO_GEN_ST(32, MO_TEUL)
909 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
911 tcg_gen_movi_i32(cpu_R[15], val);
914 static inline void
915 gen_set_condexec (DisasContext *s)
917 if (s->condexec_mask) {
918 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
919 TCGv_i32 tmp = tcg_temp_new_i32();
920 tcg_gen_movi_i32(tmp, val);
921 store_cpu_field(tmp, condexec_bits);
925 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
927 gen_set_condexec(s);
928 gen_set_pc_im(s, s->pc - offset);
929 gen_exception_internal(excp);
930 s->is_jmp = DISAS_JUMP;
933 static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
935 gen_set_condexec(s);
936 gen_set_pc_im(s, s->pc - offset);
937 gen_exception(excp, syn);
938 s->is_jmp = DISAS_JUMP;
941 /* Force a TB lookup after an instruction that changes the CPU state. */
942 static inline void gen_lookup_tb(DisasContext *s)
944 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
945 s->is_jmp = DISAS_UPDATE;
948 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
949 TCGv_i32 var)
951 int val, rm, shift, shiftop;
952 TCGv_i32 offset;
954 if (!(insn & (1 << 25))) {
955 /* immediate */
956 val = insn & 0xfff;
957 if (!(insn & (1 << 23)))
958 val = -val;
959 if (val != 0)
960 tcg_gen_addi_i32(var, var, val);
961 } else {
962 /* shift/register */
963 rm = (insn) & 0xf;
964 shift = (insn >> 7) & 0x1f;
965 shiftop = (insn >> 5) & 3;
966 offset = load_reg(s, rm);
967 gen_arm_shift_im(offset, shiftop, shift, 0);
968 if (!(insn & (1 << 23)))
969 tcg_gen_sub_i32(var, var, offset);
970 else
971 tcg_gen_add_i32(var, var, offset);
972 tcg_temp_free_i32(offset);
976 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
977 int extra, TCGv_i32 var)
979 int val, rm;
980 TCGv_i32 offset;
982 if (insn & (1 << 22)) {
983 /* immediate */
984 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
985 if (!(insn & (1 << 23)))
986 val = -val;
987 val += extra;
988 if (val != 0)
989 tcg_gen_addi_i32(var, var, val);
990 } else {
991 /* register */
992 if (extra)
993 tcg_gen_addi_i32(var, var, extra);
994 rm = (insn) & 0xf;
995 offset = load_reg(s, rm);
996 if (!(insn & (1 << 23)))
997 tcg_gen_sub_i32(var, var, offset);
998 else
999 tcg_gen_add_i32(var, var, offset);
1000 tcg_temp_free_i32(offset);
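/* Return a TCGv_ptr to the float_status to use: vfp.standard_fp_status
 * for Neon operations, vfp.fp_status for VFP ones.  The caller must free
 * the returned temporary.
 */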
1004 static TCGv_ptr get_fpstatus_ptr(int neon)
1006 TCGv_ptr statusptr = tcg_temp_new_ptr();
1007 int offset;
1008 if (neon) {
1009 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1010 } else {
1011 offset = offsetof(CPUARMState, vfp.fp_status);
1013 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1014 return statusptr;
1017 #define VFP_OP2(name) \
1018 static inline void gen_vfp_##name(int dp) \
1020 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1021 if (dp) { \
1022 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1023 } else { \
1024 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1026 tcg_temp_free_ptr(fpst); \
1029 VFP_OP2(add)
1030 VFP_OP2(sub)
1031 VFP_OP2(mul)
1032 VFP_OP2(div)
1034 #undef VFP_OP2
1036 static inline void gen_vfp_F1_mul(int dp)
1038 /* Like gen_vfp_mul() but put result in F1 */
1039 TCGv_ptr fpst = get_fpstatus_ptr(0);
1040 if (dp) {
1041 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1042 } else {
1043 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1045 tcg_temp_free_ptr(fpst);
1048 static inline void gen_vfp_F1_neg(int dp)
1050 /* Like gen_vfp_neg() but put result in F1 */
1051 if (dp) {
1052 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1053 } else {
1054 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1058 static inline void gen_vfp_abs(int dp)
1060 if (dp)
1061 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1062 else
1063 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1066 static inline void gen_vfp_neg(int dp)
1068 if (dp)
1069 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1070 else
1071 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1074 static inline void gen_vfp_sqrt(int dp)
1076 if (dp)
1077 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1078 else
1079 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1082 static inline void gen_vfp_cmp(int dp)
1084 if (dp)
1085 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1086 else
1087 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1090 static inline void gen_vfp_cmpe(int dp)
1092 if (dp)
1093 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1094 else
1095 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1098 static inline void gen_vfp_F1_ld0(int dp)
1100 if (dp)
1101 tcg_gen_movi_i64(cpu_F1d, 0);
1102 else
1103 tcg_gen_movi_i32(cpu_F1s, 0);
1106 #define VFP_GEN_ITOF(name) \
1107 static inline void gen_vfp_##name(int dp, int neon) \
1109 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1110 if (dp) { \
1111 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1112 } else { \
1113 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1115 tcg_temp_free_ptr(statusptr); \
1118 VFP_GEN_ITOF(uito)
1119 VFP_GEN_ITOF(sito)
1120 #undef VFP_GEN_ITOF
1122 #define VFP_GEN_FTOI(name) \
1123 static inline void gen_vfp_##name(int dp, int neon) \
1125 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1126 if (dp) { \
1127 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1128 } else { \
1129 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1131 tcg_temp_free_ptr(statusptr); \
1134 VFP_GEN_FTOI(toui)
1135 VFP_GEN_FTOI(touiz)
1136 VFP_GEN_FTOI(tosi)
1137 VFP_GEN_FTOI(tosiz)
1138 #undef VFP_GEN_FTOI
1140 #define VFP_GEN_FIX(name, round) \
1141 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1143 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1144 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1145 if (dp) { \
1146 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1147 statusptr); \
1148 } else { \
1149 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1150 statusptr); \
1152 tcg_temp_free_i32(tmp_shift); \
1153 tcg_temp_free_ptr(statusptr); \
1155 VFP_GEN_FIX(tosh, _round_to_zero)
1156 VFP_GEN_FIX(tosl, _round_to_zero)
1157 VFP_GEN_FIX(touh, _round_to_zero)
1158 VFP_GEN_FIX(toul, _round_to_zero)
1159 VFP_GEN_FIX(shto, )
1160 VFP_GEN_FIX(slto, )
1161 VFP_GEN_FIX(uhto, )
1162 VFP_GEN_FIX(ulto, )
1163 #undef VFP_GEN_FIX
1165 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1167 if (dp) {
1168 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
1169 } else {
1170 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
1174 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1176 if (dp) {
1177 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
1178 } else {
1179 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
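/* Byte offset of a VFP register within CPUARMState: double-precision
 * registers map directly onto vfp.regs[], while single-precision register
 * 'reg' is the lower or upper 32-bit half of vfp.regs[reg >> 1].
 */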
1183 static inline long
1184 vfp_reg_offset (int dp, int reg)
1186 if (dp)
1187 return offsetof(CPUARMState, vfp.regs[reg]);
1188 else if (reg & 1) {
1189 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1190 + offsetof(CPU_DoubleU, l.upper);
1191 } else {
1192 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1193 + offsetof(CPU_DoubleU, l.lower);
1197 /* Return the offset of a 32-bit piece of a NEON register.
1198 zero is the least significant end of the register. */
1199 static inline long
1200 neon_reg_offset (int reg, int n)
1202 int sreg;
1203 sreg = reg * 2 + n;
1204 return vfp_reg_offset(0, sreg);
1207 static TCGv_i32 neon_load_reg(int reg, int pass)
1209 TCGv_i32 tmp = tcg_temp_new_i32();
1210 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1211 return tmp;
1214 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1216 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1217 tcg_temp_free_i32(var);
1220 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1222 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1225 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1227 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1230 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1231 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1232 #define tcg_gen_st_f32 tcg_gen_st_i32
1233 #define tcg_gen_st_f64 tcg_gen_st_i64
1235 static inline void gen_mov_F0_vreg(int dp, int reg)
1237 if (dp)
1238 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1239 else
1240 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1243 static inline void gen_mov_F1_vreg(int dp, int reg)
1245 if (dp)
1246 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1247 else
1248 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1251 static inline void gen_mov_vreg_F0(int dp, int reg)
1253 if (dp)
1254 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1255 else
1256 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1259 #define ARM_CP_RW_BIT (1 << 20)
1261 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1263 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1266 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1268 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1271 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1273 TCGv_i32 var = tcg_temp_new_i32();
1274 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1275 return var;
1278 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1280 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1281 tcg_temp_free_i32(var);
1284 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1286 iwmmxt_store_reg(cpu_M0, rn);
1289 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1291 iwmmxt_load_reg(cpu_M0, rn);
1294 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1296 iwmmxt_load_reg(cpu_V1, rn);
1297 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1300 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1302 iwmmxt_load_reg(cpu_V1, rn);
1303 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1306 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1308 iwmmxt_load_reg(cpu_V1, rn);
1309 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
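/* The IWMMXT_OP* macros expand to gen_op_iwmmxt_<name>_M0_wRn() wrappers
 * that load wRn into cpu_V1 and apply the matching helper to the cpu_M0
 * accumulator (the _ENV variants also pass cpu_env so the helper can
 * reach CPU state such as the flag and saturation registers, and _ENV1
 * generates single-operand forms that act on M0 alone).
 */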
1312 #define IWMMXT_OP(name) \
1313 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1315 iwmmxt_load_reg(cpu_V1, rn); \
1316 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1319 #define IWMMXT_OP_ENV(name) \
1320 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1322 iwmmxt_load_reg(cpu_V1, rn); \
1323 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1326 #define IWMMXT_OP_ENV_SIZE(name) \
1327 IWMMXT_OP_ENV(name##b) \
1328 IWMMXT_OP_ENV(name##w) \
1329 IWMMXT_OP_ENV(name##l)
1331 #define IWMMXT_OP_ENV1(name) \
1332 static inline void gen_op_iwmmxt_##name##_M0(void) \
1334 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1337 IWMMXT_OP(maddsq)
1338 IWMMXT_OP(madduq)
1339 IWMMXT_OP(sadb)
1340 IWMMXT_OP(sadw)
1341 IWMMXT_OP(mulslw)
1342 IWMMXT_OP(mulshw)
1343 IWMMXT_OP(mululw)
1344 IWMMXT_OP(muluhw)
1345 IWMMXT_OP(macsw)
1346 IWMMXT_OP(macuw)
1348 IWMMXT_OP_ENV_SIZE(unpackl)
1349 IWMMXT_OP_ENV_SIZE(unpackh)
1351 IWMMXT_OP_ENV1(unpacklub)
1352 IWMMXT_OP_ENV1(unpackluw)
1353 IWMMXT_OP_ENV1(unpacklul)
1354 IWMMXT_OP_ENV1(unpackhub)
1355 IWMMXT_OP_ENV1(unpackhuw)
1356 IWMMXT_OP_ENV1(unpackhul)
1357 IWMMXT_OP_ENV1(unpacklsb)
1358 IWMMXT_OP_ENV1(unpacklsw)
1359 IWMMXT_OP_ENV1(unpacklsl)
1360 IWMMXT_OP_ENV1(unpackhsb)
1361 IWMMXT_OP_ENV1(unpackhsw)
1362 IWMMXT_OP_ENV1(unpackhsl)
1364 IWMMXT_OP_ENV_SIZE(cmpeq)
1365 IWMMXT_OP_ENV_SIZE(cmpgtu)
1366 IWMMXT_OP_ENV_SIZE(cmpgts)
1368 IWMMXT_OP_ENV_SIZE(mins)
1369 IWMMXT_OP_ENV_SIZE(minu)
1370 IWMMXT_OP_ENV_SIZE(maxs)
1371 IWMMXT_OP_ENV_SIZE(maxu)
1373 IWMMXT_OP_ENV_SIZE(subn)
1374 IWMMXT_OP_ENV_SIZE(addn)
1375 IWMMXT_OP_ENV_SIZE(subu)
1376 IWMMXT_OP_ENV_SIZE(addu)
1377 IWMMXT_OP_ENV_SIZE(subs)
1378 IWMMXT_OP_ENV_SIZE(adds)
1380 IWMMXT_OP_ENV(avgb0)
1381 IWMMXT_OP_ENV(avgb1)
1382 IWMMXT_OP_ENV(avgw0)
1383 IWMMXT_OP_ENV(avgw1)
1385 IWMMXT_OP_ENV(packuw)
1386 IWMMXT_OP_ENV(packul)
1387 IWMMXT_OP_ENV(packuq)
1388 IWMMXT_OP_ENV(packsw)
1389 IWMMXT_OP_ENV(packsl)
1390 IWMMXT_OP_ENV(packsq)
1392 static void gen_op_iwmmxt_set_mup(void)
1394 TCGv_i32 tmp;
1395 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1396 tcg_gen_ori_i32(tmp, tmp, 2);
1397 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1400 static void gen_op_iwmmxt_set_cup(void)
1402 TCGv_i32 tmp;
1403 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1404 tcg_gen_ori_i32(tmp, tmp, 1);
1405 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1408 static void gen_op_iwmmxt_setpsr_nz(void)
1410 TCGv_i32 tmp = tcg_temp_new_i32();
1411 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1412 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1415 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1417 iwmmxt_load_reg(cpu_V1, rn);
1418 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1419 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1422 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1423 TCGv_i32 dest)
1425 int rd;
1426 uint32_t offset;
1427 TCGv_i32 tmp;
1429 rd = (insn >> 16) & 0xf;
1430 tmp = load_reg(s, rd);
1432 offset = (insn & 0xff) << ((insn >> 7) & 2);
1433 if (insn & (1 << 24)) {
1434 /* Pre indexed */
1435 if (insn & (1 << 23))
1436 tcg_gen_addi_i32(tmp, tmp, offset);
1437 else
1438 tcg_gen_addi_i32(tmp, tmp, -offset);
1439 tcg_gen_mov_i32(dest, tmp);
1440 if (insn & (1 << 21))
1441 store_reg(s, rd, tmp);
1442 else
1443 tcg_temp_free_i32(tmp);
1444 } else if (insn & (1 << 21)) {
1445 /* Post indexed */
1446 tcg_gen_mov_i32(dest, tmp);
1447 if (insn & (1 << 23))
1448 tcg_gen_addi_i32(tmp, tmp, offset);
1449 else
1450 tcg_gen_addi_i32(tmp, tmp, -offset);
1451 store_reg(s, rd, tmp);
1452 } else if (!(insn & (1 << 23)))
1453 return 1;
1454 return 0;
1457 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1459 int rd = (insn >> 0) & 0xf;
1460 TCGv_i32 tmp;
1462 if (insn & (1 << 8)) {
1463 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1464 return 1;
1465 } else {
1466 tmp = iwmmxt_load_creg(rd);
1468 } else {
1469 tmp = tcg_temp_new_i32();
1470 iwmmxt_load_reg(cpu_V0, rd);
1471 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1473 tcg_gen_andi_i32(tmp, tmp, mask);
1474 tcg_gen_mov_i32(dest, tmp);
1475 tcg_temp_free_i32(tmp);
1476 return 0;
1479 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1480    (i.e. an undefined instruction). */
1481 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1483 int rd, wrd;
1484 int rdhi, rdlo, rd0, rd1, i;
1485 TCGv_i32 addr;
1486 TCGv_i32 tmp, tmp2, tmp3;
1488 if ((insn & 0x0e000e00) == 0x0c000000) {
1489 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1490 wrd = insn & 0xf;
1491 rdlo = (insn >> 12) & 0xf;
1492 rdhi = (insn >> 16) & 0xf;
1493 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1494 iwmmxt_load_reg(cpu_V0, wrd);
1495 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1496 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1497 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1498 } else { /* TMCRR */
1499 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1500 iwmmxt_store_reg(cpu_V0, wrd);
1501 gen_op_iwmmxt_set_mup();
1503 return 0;
1506 wrd = (insn >> 12) & 0xf;
1507 addr = tcg_temp_new_i32();
1508 if (gen_iwmmxt_address(s, insn, addr)) {
1509 tcg_temp_free_i32(addr);
1510 return 1;
1512 if (insn & ARM_CP_RW_BIT) {
1513 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1514 tmp = tcg_temp_new_i32();
1515 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1516 iwmmxt_store_creg(wrd, tmp);
1517 } else {
1518 i = 1;
1519 if (insn & (1 << 8)) {
1520 if (insn & (1 << 22)) { /* WLDRD */
1521 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
1522 i = 0;
1523 } else { /* WLDRW wRd */
1524 tmp = tcg_temp_new_i32();
1525 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1527 } else {
1528 tmp = tcg_temp_new_i32();
1529 if (insn & (1 << 22)) { /* WLDRH */
1530 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
1531 } else { /* WLDRB */
1532 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
1535 if (i) {
1536 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1537 tcg_temp_free_i32(tmp);
1539 gen_op_iwmmxt_movq_wRn_M0(wrd);
1541 } else {
1542 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1543 tmp = iwmmxt_load_creg(wrd);
1544 gen_aa32_st32(tmp, addr, get_mem_index(s));
1545 } else {
1546 gen_op_iwmmxt_movq_M0_wRn(wrd);
1547 tmp = tcg_temp_new_i32();
1548 if (insn & (1 << 8)) {
1549 if (insn & (1 << 22)) { /* WSTRD */
1550 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1553 gen_aa32_st32(tmp, addr, get_mem_index(s));
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1558 gen_aa32_st16(tmp, addr, get_mem_index(s));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1561 gen_aa32_st8(tmp, addr, get_mem_index(s));
1565 tcg_temp_free_i32(tmp);
1567 tcg_temp_free_i32(addr);
1568 return 0;
1571 if ((insn & 0x0f000000) != 0x0e000000)
1572 return 1;
1574 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1575 case 0x000: /* WOR */
1576 wrd = (insn >> 12) & 0xf;
1577 rd0 = (insn >> 0) & 0xf;
1578 rd1 = (insn >> 16) & 0xf;
1579 gen_op_iwmmxt_movq_M0_wRn(rd0);
1580 gen_op_iwmmxt_orq_M0_wRn(rd1);
1581 gen_op_iwmmxt_setpsr_nz();
1582 gen_op_iwmmxt_movq_wRn_M0(wrd);
1583 gen_op_iwmmxt_set_mup();
1584 gen_op_iwmmxt_set_cup();
1585 break;
1586 case 0x011: /* TMCR */
1587 if (insn & 0xf)
1588 return 1;
1589 rd = (insn >> 12) & 0xf;
1590 wrd = (insn >> 16) & 0xf;
1591 switch (wrd) {
1592 case ARM_IWMMXT_wCID:
1593 case ARM_IWMMXT_wCASF:
1594 break;
1595 case ARM_IWMMXT_wCon:
1596 gen_op_iwmmxt_set_cup();
1597 /* Fall through. */
1598 case ARM_IWMMXT_wCSSF:
1599 tmp = iwmmxt_load_creg(wrd);
1600 tmp2 = load_reg(s, rd);
1601 tcg_gen_andc_i32(tmp, tmp, tmp2);
1602 tcg_temp_free_i32(tmp2);
1603 iwmmxt_store_creg(wrd, tmp);
1604 break;
1605 case ARM_IWMMXT_wCGR0:
1606 case ARM_IWMMXT_wCGR1:
1607 case ARM_IWMMXT_wCGR2:
1608 case ARM_IWMMXT_wCGR3:
1609 gen_op_iwmmxt_set_cup();
1610 tmp = load_reg(s, rd);
1611 iwmmxt_store_creg(wrd, tmp);
1612 break;
1613 default:
1614 return 1;
1616 break;
1617 case 0x100: /* WXOR */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 0) & 0xf;
1620 rd1 = (insn >> 16) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1623 gen_op_iwmmxt_setpsr_nz();
1624 gen_op_iwmmxt_movq_wRn_M0(wrd);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1627 break;
1628 case 0x111: /* TMRC */
1629 if (insn & 0xf)
1630 return 1;
1631 rd = (insn >> 12) & 0xf;
1632 wrd = (insn >> 16) & 0xf;
1633 tmp = iwmmxt_load_creg(wrd);
1634 store_reg(s, rd, tmp);
1635 break;
1636 case 0x300: /* WANDN */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 0) & 0xf;
1639 rd1 = (insn >> 16) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
1641 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1642 gen_op_iwmmxt_andq_M0_wRn(rd1);
1643 gen_op_iwmmxt_setpsr_nz();
1644 gen_op_iwmmxt_movq_wRn_M0(wrd);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1647 break;
1648 case 0x200: /* WAND */
1649 wrd = (insn >> 12) & 0xf;
1650 rd0 = (insn >> 0) & 0xf;
1651 rd1 = (insn >> 16) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0);
1653 gen_op_iwmmxt_andq_M0_wRn(rd1);
1654 gen_op_iwmmxt_setpsr_nz();
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x810: case 0xa10: /* WMADD */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 0) & 0xf;
1662 rd1 = (insn >> 16) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 if (insn & (1 << 21))
1665 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1666 else
1667 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 switch ((insn >> 22) & 3) {
1677 case 0:
1678 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1679 break;
1680 case 1:
1681 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1685 break;
1686 case 3:
1687 return 1;
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 switch ((insn >> 22) & 3) {
1699 case 0:
1700 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1701 break;
1702 case 1:
1703 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1704 break;
1705 case 2:
1706 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1707 break;
1708 case 3:
1709 return 1;
1711 gen_op_iwmmxt_movq_wRn_M0(wrd);
1712 gen_op_iwmmxt_set_mup();
1713 gen_op_iwmmxt_set_cup();
1714 break;
1715 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1716 wrd = (insn >> 12) & 0xf;
1717 rd0 = (insn >> 16) & 0xf;
1718 rd1 = (insn >> 0) & 0xf;
1719 gen_op_iwmmxt_movq_M0_wRn(rd0);
1720 if (insn & (1 << 22))
1721 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1722 else
1723 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1724 if (!(insn & (1 << 20)))
1725 gen_op_iwmmxt_addl_M0_wRn(wrd);
1726 gen_op_iwmmxt_movq_wRn_M0(wrd);
1727 gen_op_iwmmxt_set_mup();
1728 break;
1729 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1730 wrd = (insn >> 12) & 0xf;
1731 rd0 = (insn >> 16) & 0xf;
1732 rd1 = (insn >> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0);
1734 if (insn & (1 << 21)) {
1735 if (insn & (1 << 20))
1736 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1737 else
1738 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1739 } else {
1740 if (insn & (1 << 20))
1741 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1742 else
1743 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1745 gen_op_iwmmxt_movq_wRn_M0(wrd);
1746 gen_op_iwmmxt_set_mup();
1747 break;
1748 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1749 wrd = (insn >> 12) & 0xf;
1750 rd0 = (insn >> 16) & 0xf;
1751 rd1 = (insn >> 0) & 0xf;
1752 gen_op_iwmmxt_movq_M0_wRn(rd0);
1753 if (insn & (1 << 21))
1754 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1755 else
1756 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1757 if (!(insn & (1 << 20))) {
1758 iwmmxt_load_reg(cpu_V1, wrd);
1759 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1761 gen_op_iwmmxt_movq_wRn_M0(wrd);
1762 gen_op_iwmmxt_set_mup();
1763 break;
1764 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1765 wrd = (insn >> 12) & 0xf;
1766 rd0 = (insn >> 16) & 0xf;
1767 rd1 = (insn >> 0) & 0xf;
1768 gen_op_iwmmxt_movq_M0_wRn(rd0);
1769 switch ((insn >> 22) & 3) {
1770 case 0:
1771 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1772 break;
1773 case 1:
1774 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1775 break;
1776 case 2:
1777 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1778 break;
1779 case 3:
1780 return 1;
1782 gen_op_iwmmxt_movq_wRn_M0(wrd);
1783 gen_op_iwmmxt_set_mup();
1784 gen_op_iwmmxt_set_cup();
1785 break;
1786 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1787 wrd = (insn >> 12) & 0xf;
1788 rd0 = (insn >> 16) & 0xf;
1789 rd1 = (insn >> 0) & 0xf;
1790 gen_op_iwmmxt_movq_M0_wRn(rd0);
1791 if (insn & (1 << 22)) {
1792 if (insn & (1 << 20))
1793 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1794 else
1795 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1796 } else {
1797 if (insn & (1 << 20))
1798 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1799 else
1800 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 gen_op_iwmmxt_set_cup();
1805 break;
1806 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1807 wrd = (insn >> 12) & 0xf;
1808 rd0 = (insn >> 16) & 0xf;
1809 rd1 = (insn >> 0) & 0xf;
1810 gen_op_iwmmxt_movq_M0_wRn(rd0);
1811 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1812 tcg_gen_andi_i32(tmp, tmp, 7);
1813 iwmmxt_load_reg(cpu_V1, rd1);
1814 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1815 tcg_temp_free_i32(tmp);
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 break;
1819 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1820 if (((insn >> 6) & 3) == 3)
1821 return 1;
1822 rd = (insn >> 12) & 0xf;
1823 wrd = (insn >> 16) & 0xf;
1824 tmp = load_reg(s, rd);
1825 gen_op_iwmmxt_movq_M0_wRn(wrd);
1826 switch ((insn >> 6) & 3) {
1827 case 0:
1828 tmp2 = tcg_const_i32(0xff);
1829 tmp3 = tcg_const_i32((insn & 7) << 3);
1830 break;
1831 case 1:
1832 tmp2 = tcg_const_i32(0xffff);
1833 tmp3 = tcg_const_i32((insn & 3) << 4);
1834 break;
1835 case 2:
1836 tmp2 = tcg_const_i32(0xffffffff);
1837 tmp3 = tcg_const_i32((insn & 1) << 5);
1838 break;
1839 default:
1840 TCGV_UNUSED_I32(tmp2);
1841 TCGV_UNUSED_I32(tmp3);
1843 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1844 tcg_temp_free_i32(tmp3);
1845 tcg_temp_free_i32(tmp2);
1846 tcg_temp_free_i32(tmp);
1847 gen_op_iwmmxt_movq_wRn_M0(wrd);
1848 gen_op_iwmmxt_set_mup();
1849 break;
1850 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1851 rd = (insn >> 12) & 0xf;
1852 wrd = (insn >> 16) & 0xf;
1853 if (rd == 15 || ((insn >> 22) & 3) == 3)
1854 return 1;
1855 gen_op_iwmmxt_movq_M0_wRn(wrd);
1856 tmp = tcg_temp_new_i32();
1857 switch ((insn >> 22) & 3) {
1858 case 0:
1859 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1860 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1861 if (insn & 8) {
1862 tcg_gen_ext8s_i32(tmp, tmp);
1863 } else {
1864 tcg_gen_andi_i32(tmp, tmp, 0xff);
1866 break;
1867 case 1:
1868 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1869 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1870 if (insn & 8) {
1871 tcg_gen_ext16s_i32(tmp, tmp);
1872 } else {
1873 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1875 break;
1876 case 2:
1877 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1878 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1879 break;
1881 store_reg(s, rd, tmp);
1882 break;
1883 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1884 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1885 return 1;
1886 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1887 switch ((insn >> 22) & 3) {
1888 case 0:
1889 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1890 break;
1891 case 1:
1892 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1893 break;
1894 case 2:
1895 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1896 break;
1898 tcg_gen_shli_i32(tmp, tmp, 28);
1899 gen_set_nzcv(tmp);
1900 tcg_temp_free_i32(tmp);
1901 break;
1902 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1903 if (((insn >> 6) & 3) == 3)
1904 return 1;
1905 rd = (insn >> 12) & 0xf;
1906 wrd = (insn >> 16) & 0xf;
1907 tmp = load_reg(s, rd);
1908 switch ((insn >> 6) & 3) {
1909 case 0:
1910 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1911 break;
1912 case 1:
1913 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1914 break;
1915 case 2:
1916 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1917 break;
1919 tcg_temp_free_i32(tmp);
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
1923 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1924 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1925 return 1;
1926 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1927 tmp2 = tcg_temp_new_i32();
1928 tcg_gen_mov_i32(tmp2, tmp);
1929 switch ((insn >> 22) & 3) {
1930 case 0:
1931 for (i = 0; i < 7; i ++) {
1932 tcg_gen_shli_i32(tmp2, tmp2, 4);
1933 tcg_gen_and_i32(tmp, tmp, tmp2);
1935 break;
1936 case 1:
1937 for (i = 0; i < 3; i ++) {
1938 tcg_gen_shli_i32(tmp2, tmp2, 8);
1939 tcg_gen_and_i32(tmp, tmp, tmp2);
1941 break;
1942 case 2:
1943 tcg_gen_shli_i32(tmp2, tmp2, 16);
1944 tcg_gen_and_i32(tmp, tmp, tmp2);
1945 break;
1947 gen_set_nzcv(tmp);
1948 tcg_temp_free_i32(tmp2);
1949 tcg_temp_free_i32(tmp);
1950 break;
1951 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1952 wrd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 16) & 0xf;
1954 gen_op_iwmmxt_movq_M0_wRn(rd0);
1955 switch ((insn >> 22) & 3) {
1956 case 0:
1957 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1958 break;
1959 case 1:
1960 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1961 break;
1962 case 2:
1963 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1964 break;
1965 case 3:
1966 return 1;
1968 gen_op_iwmmxt_movq_wRn_M0(wrd);
1969 gen_op_iwmmxt_set_mup();
1970 break;
1971 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1972 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1973 return 1;
1974 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1975 tmp2 = tcg_temp_new_i32();
1976 tcg_gen_mov_i32(tmp2, tmp);
1977 switch ((insn >> 22) & 3) {
1978 case 0:
1979 for (i = 0; i < 7; i ++) {
1980 tcg_gen_shli_i32(tmp2, tmp2, 4);
1981 tcg_gen_or_i32(tmp, tmp, tmp2);
1983 break;
1984 case 1:
1985 for (i = 0; i < 3; i ++) {
1986 tcg_gen_shli_i32(tmp2, tmp2, 8);
1987 tcg_gen_or_i32(tmp, tmp, tmp2);
1989 break;
1990 case 2:
1991 tcg_gen_shli_i32(tmp2, tmp2, 16);
1992 tcg_gen_or_i32(tmp, tmp, tmp2);
1993 break;
1995 gen_set_nzcv(tmp);
1996 tcg_temp_free_i32(tmp2);
1997 tcg_temp_free_i32(tmp);
1998 break;
1999 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2000 rd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
2002 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2003 return 1;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 tmp = tcg_temp_new_i32();
2006 switch ((insn >> 22) & 3) {
2007 case 0:
2008 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2009 break;
2010 case 1:
2011 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2012 break;
2013 case 2:
2014 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2015 break;
2017 store_reg(s, rd, tmp);
2018 break;
2019 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2020 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2021 wrd = (insn >> 12) & 0xf;
2022 rd0 = (insn >> 16) & 0xf;
2023 rd1 = (insn >> 0) & 0xf;
2024 gen_op_iwmmxt_movq_M0_wRn(rd0);
2025 switch ((insn >> 22) & 3) {
2026 case 0:
2027 if (insn & (1 << 21))
2028 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2029 else
2030 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2031 break;
2032 case 1:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2035 else
2036 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2037 break;
2038 case 2:
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2043 break;
2044 case 3:
2045 return 1;
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 gen_op_iwmmxt_set_cup();
2050 break;
2051 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2052 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0);
2056 switch ((insn >> 22) & 3) {
2057 case 0:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_unpacklsb_M0();
2060 else
2061 gen_op_iwmmxt_unpacklub_M0();
2062 break;
2063 case 1:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpacklsw_M0();
2066 else
2067 gen_op_iwmmxt_unpackluw_M0();
2068 break;
2069 case 2:
2070 if (insn & (1 << 21))
2071 gen_op_iwmmxt_unpacklsl_M0();
2072 else
2073 gen_op_iwmmxt_unpacklul_M0();
2074 break;
2075 case 3:
2076 return 1;
2078 gen_op_iwmmxt_movq_wRn_M0(wrd);
2079 gen_op_iwmmxt_set_mup();
2080 gen_op_iwmmxt_set_cup();
2081 break;
2082 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2083 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2084 wrd = (insn >> 12) & 0xf;
2085 rd0 = (insn >> 16) & 0xf;
2086 gen_op_iwmmxt_movq_M0_wRn(rd0);
2087 switch ((insn >> 22) & 3) {
2088 case 0:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_unpackhsb_M0();
2091 else
2092 gen_op_iwmmxt_unpackhub_M0();
2093 break;
2094 case 1:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_unpackhsw_M0();
2097 else
2098 gen_op_iwmmxt_unpackhuw_M0();
2099 break;
2100 case 2:
2101 if (insn & (1 << 21))
2102 gen_op_iwmmxt_unpackhsl_M0();
2103 else
2104 gen_op_iwmmxt_unpackhul_M0();
2105 break;
2106 case 3:
2107 return 1;
2108 }
2109 gen_op_iwmmxt_movq_wRn_M0(wrd);
2110 gen_op_iwmmxt_set_mup();
2111 gen_op_iwmmxt_set_cup();
2112 break;
2113 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2114 case 0x214: case 0x614: case 0xa14: case 0xe14:
2115 if (((insn >> 22) & 3) == 0)
2116 return 1;
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
2120 tmp = tcg_temp_new_i32();
2121 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2122 tcg_temp_free_i32(tmp);
2123 return 1;
2124 }
2125 switch ((insn >> 22) & 3) {
2126 case 1:
2127 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2128 break;
2129 case 2:
2130 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2131 break;
2132 case 3:
2133 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2134 break;
2135 }
2136 tcg_temp_free_i32(tmp);
2137 gen_op_iwmmxt_movq_wRn_M0(wrd);
2138 gen_op_iwmmxt_set_mup();
2139 gen_op_iwmmxt_set_cup();
2140 break;
2141 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2142 case 0x014: case 0x414: case 0x814: case 0xc14:
2143 if (((insn >> 22) & 3) == 0)
2144 return 1;
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 tmp = tcg_temp_new_i32();
2149 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2150 tcg_temp_free_i32(tmp);
2151 return 1;
2152 }
2153 switch ((insn >> 22) & 3) {
2154 case 1:
2155 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2156 break;
2157 case 2:
2158 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2159 break;
2160 case 3:
2161 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2162 break;
2163 }
2164 tcg_temp_free_i32(tmp);
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2170 case 0x114: case 0x514: case 0x914: case 0xd14:
2171 if (((insn >> 22) & 3) == 0)
2172 return 1;
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 tmp = tcg_temp_new_i32();
2177 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2178 tcg_temp_free_i32(tmp);
2179 return 1;
2180 }
2181 switch ((insn >> 22) & 3) {
2182 case 1:
2183 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2184 break;
2185 case 2:
2186 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2187 break;
2188 case 3:
2189 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2190 break;
2191 }
2192 tcg_temp_free_i32(tmp);
2193 gen_op_iwmmxt_movq_wRn_M0(wrd);
2194 gen_op_iwmmxt_set_mup();
2195 gen_op_iwmmxt_set_cup();
2196 break;
2197 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2198 case 0x314: case 0x714: case 0xb14: case 0xf14:
2199 if (((insn >> 22) & 3) == 0)
2200 return 1;
2201 wrd = (insn >> 12) & 0xf;
2202 rd0 = (insn >> 16) & 0xf;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
2204 tmp = tcg_temp_new_i32();
2205 switch ((insn >> 22) & 3) {
2206 case 1:
2207 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2208 tcg_temp_free_i32(tmp);
2209 return 1;
2210 }
2211 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2212 break;
2213 case 2:
2214 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2215 tcg_temp_free_i32(tmp);
2216 return 1;
2217 }
2218 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2219 break;
2220 case 3:
2221 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2222 tcg_temp_free_i32(tmp);
2223 return 1;
2224 }
2225 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2226 break;
2227 }
2228 tcg_temp_free_i32(tmp);
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2234 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2235 wrd = (insn >> 12) & 0xf;
2236 rd0 = (insn >> 16) & 0xf;
2237 rd1 = (insn >> 0) & 0xf;
2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
2239 switch ((insn >> 22) & 3) {
2240 case 0:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_minub_M0_wRn(rd1);
2245 break;
2246 case 1:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2251 break;
2252 case 2:
2253 if (insn & (1 << 21))
2254 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2255 else
2256 gen_op_iwmmxt_minul_M0_wRn(rd1);
2257 break;
2258 case 3:
2259 return 1;
2260 }
2261 gen_op_iwmmxt_movq_wRn_M0(wrd);
2262 gen_op_iwmmxt_set_mup();
2263 break;
2264 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2265 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
2270 switch ((insn >> 22) & 3) {
2271 case 0:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2276 break;
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 return 1;
2291 }
2292 gen_op_iwmmxt_movq_wRn_M0(wrd);
2293 gen_op_iwmmxt_set_mup();
2294 break;
2295 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2296 case 0x402: case 0x502: case 0x602: case 0x702:
2297 wrd = (insn >> 12) & 0xf;
2298 rd0 = (insn >> 16) & 0xf;
2299 rd1 = (insn >> 0) & 0xf;
2300 gen_op_iwmmxt_movq_M0_wRn(rd0);
2301 tmp = tcg_const_i32((insn >> 20) & 3);
2302 iwmmxt_load_reg(cpu_V1, rd1);
2303 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2304 tcg_temp_free_i32(tmp);
2305 gen_op_iwmmxt_movq_wRn_M0(wrd);
2306 gen_op_iwmmxt_set_mup();
2307 break;
2308 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2309 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2310 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2311 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2312 wrd = (insn >> 12) & 0xf;
2313 rd0 = (insn >> 16) & 0xf;
2314 rd1 = (insn >> 0) & 0xf;
2315 gen_op_iwmmxt_movq_M0_wRn(rd0);
2316 switch ((insn >> 20) & 0xf) {
2317 case 0x0:
2318 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2319 break;
2320 case 0x1:
2321 gen_op_iwmmxt_subub_M0_wRn(rd1);
2322 break;
2323 case 0x3:
2324 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2325 break;
2326 case 0x4:
2327 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2328 break;
2329 case 0x5:
2330 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2331 break;
2332 case 0x7:
2333 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2334 break;
2335 case 0x8:
2336 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2337 break;
2338 case 0x9:
2339 gen_op_iwmmxt_subul_M0_wRn(rd1);
2340 break;
2341 case 0xb:
2342 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2343 break;
2344 default:
2345 return 1;
2346 }
2347 gen_op_iwmmxt_movq_wRn_M0(wrd);
2348 gen_op_iwmmxt_set_mup();
2349 gen_op_iwmmxt_set_cup();
2350 break;
2351 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2352 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2353 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2354 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0);
2358 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2359 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2360 tcg_temp_free_i32(tmp);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 gen_op_iwmmxt_set_cup();
2364 break;
2365 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2366 case 0x418: case 0x518: case 0x618: case 0x718:
2367 case 0x818: case 0x918: case 0xa18: case 0xb18:
2368 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 rd1 = (insn >> 0) & 0xf;
2372 gen_op_iwmmxt_movq_M0_wRn(rd0);
2373 switch ((insn >> 20) & 0xf) {
2374 case 0x0:
2375 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2376 break;
2377 case 0x1:
2378 gen_op_iwmmxt_addub_M0_wRn(rd1);
2379 break;
2380 case 0x3:
2381 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2382 break;
2383 case 0x4:
2384 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2385 break;
2386 case 0x5:
2387 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2388 break;
2389 case 0x7:
2390 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2391 break;
2392 case 0x8:
2393 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2394 break;
2395 case 0x9:
2396 gen_op_iwmmxt_addul_M0_wRn(rd1);
2397 break;
2398 case 0xb:
2399 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2400 break;
2401 default:
2402 return 1;
2403 }
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2407 break;
2408 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2409 case 0x408: case 0x508: case 0x608: case 0x708:
2410 case 0x808: case 0x908: case 0xa08: case 0xb08:
2411 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2412 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2413 return 1;
2414 wrd = (insn >> 12) & 0xf;
2415 rd0 = (insn >> 16) & 0xf;
2416 rd1 = (insn >> 0) & 0xf;
2417 gen_op_iwmmxt_movq_M0_wRn(rd0);
2418 switch ((insn >> 22) & 3) {
2419 case 1:
2420 if (insn & (1 << 21))
2421 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2422 else
2423 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2424 break;
2425 case 2:
2426 if (insn & (1 << 21))
2427 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2428 else
2429 gen_op_iwmmxt_packul_M0_wRn(rd1);
2430 break;
2431 case 3:
2432 if (insn & (1 << 21))
2433 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2434 else
2435 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2436 break;
2437 }
2438 gen_op_iwmmxt_movq_wRn_M0(wrd);
2439 gen_op_iwmmxt_set_mup();
2440 gen_op_iwmmxt_set_cup();
2441 break;
2442 case 0x201: case 0x203: case 0x205: case 0x207:
2443 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2444 case 0x211: case 0x213: case 0x215: case 0x217:
2445 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2446 wrd = (insn >> 5) & 0xf;
2447 rd0 = (insn >> 12) & 0xf;
2448 rd1 = (insn >> 0) & 0xf;
2449 if (rd0 == 0xf || rd1 == 0xf)
2450 return 1;
2451 gen_op_iwmmxt_movq_M0_wRn(wrd);
2452 tmp = load_reg(s, rd0);
2453 tmp2 = load_reg(s, rd1);
2454 switch ((insn >> 16) & 0xf) {
2455 case 0x0: /* TMIA */
2456 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2457 break;
2458 case 0x8: /* TMIAPH */
2459 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2460 break;
2461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2462 if (insn & (1 << 16))
2463 tcg_gen_shri_i32(tmp, tmp, 16);
2464 if (insn & (1 << 17))
2465 tcg_gen_shri_i32(tmp2, tmp2, 16);
2466 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2467 break;
2468 default:
2469 tcg_temp_free_i32(tmp2);
2470 tcg_temp_free_i32(tmp);
2471 return 1;
2472 }
2473 tcg_temp_free_i32(tmp2);
2474 tcg_temp_free_i32(tmp);
2475 gen_op_iwmmxt_movq_wRn_M0(wrd);
2476 gen_op_iwmmxt_set_mup();
2477 break;
2478 default:
2479 return 1;
2480 }
2482 return 0;
2483 }
2485 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2486 (ie. an undefined instruction). */
2487 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2488 {
2489 int acc, rd0, rd1, rdhi, rdlo;
2490 TCGv_i32 tmp, tmp2;
2492 if ((insn & 0x0ff00f10) == 0x0e200010) {
2493 /* Multiply with Internal Accumulate Format */
2494 rd0 = (insn >> 12) & 0xf;
2495 rd1 = insn & 0xf;
2496 acc = (insn >> 5) & 7;
2498 if (acc != 0)
2499 return 1;
2501 tmp = load_reg(s, rd0);
2502 tmp2 = load_reg(s, rd1);
2503 switch ((insn >> 16) & 0xf) {
2504 case 0x0: /* MIA */
2505 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2506 break;
2507 case 0x8: /* MIAPH */
2508 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2509 break;
2510 case 0xc: /* MIABB */
2511 case 0xd: /* MIABT */
2512 case 0xe: /* MIATB */
2513 case 0xf: /* MIATT */
2514 if (insn & (1 << 16))
2515 tcg_gen_shri_i32(tmp, tmp, 16);
2516 if (insn & (1 << 17))
2517 tcg_gen_shri_i32(tmp2, tmp2, 16);
2518 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2519 break;
2520 default:
2521 return 1;
2522 }
2523 tcg_temp_free_i32(tmp2);
2524 tcg_temp_free_i32(tmp);
2526 gen_op_iwmmxt_movq_wRn_M0(acc);
2527 return 0;
2528 }
2530 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2531 /* Internal Accumulator Access Format */
2532 rdhi = (insn >> 16) & 0xf;
2533 rdlo = (insn >> 12) & 0xf;
2534 acc = insn & 7;
2536 if (acc != 0)
2537 return 1;
2539 if (insn & ARM_CP_RW_BIT) { /* MRA */
2540 iwmmxt_load_reg(cpu_V0, acc);
2541 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2542 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2543 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2544 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2545 } else { /* MAR */
2546 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2547 iwmmxt_store_reg(cpu_V0, acc);
2548 }
2549 return 0;
2550 }
2552 return 1;
2553 }
2555 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2556 #define VFP_SREG(insn, bigbit, smallbit) \
2557 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2558 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2559 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2560 reg = (((insn) >> (bigbit)) & 0x0f) \
2561 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2562 } else { \
2563 if (insn & (1 << (smallbit))) \
2564 return 1; \
2565 reg = ((insn) >> (bigbit)) & 0x0f; \
2566 }} while (0)
2568 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2569 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2570 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2571 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2572 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2573 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
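/* The VFP_SREG_* and VFP_DREG_* macros above reassemble the 5-bit VFP
 * register index from its split encoding: for single-precision registers
 * the extra bit is the low bit of the index, while for double-precision
 * registers (VFP3 and later, D0-D31) it is the high bit; without VFP3 the
 * extra bit must be zero, otherwise the caller returns 1 (UNDEF).
 */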
2575 /* Move between integer and VFP cores. */
2576 static TCGv_i32 gen_vfp_mrs(void)
2577 {
2578 TCGv_i32 tmp = tcg_temp_new_i32();
2579 tcg_gen_mov_i32(tmp, cpu_F0s);
2580 return tmp;
2581 }
2583 static void gen_vfp_msr(TCGv_i32 tmp)
2584 {
2585 tcg_gen_mov_i32(cpu_F0s, tmp);
2586 tcg_temp_free_i32(tmp);
2587 }
2589 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2590 {
2591 TCGv_i32 tmp = tcg_temp_new_i32();
2592 if (shift)
2593 tcg_gen_shri_i32(var, var, shift);
2594 tcg_gen_ext8u_i32(var, var);
2595 tcg_gen_shli_i32(tmp, var, 8);
2596 tcg_gen_or_i32(var, var, tmp);
2597 tcg_gen_shli_i32(tmp, var, 16);
2598 tcg_gen_or_i32(var, var, tmp);
2599 tcg_temp_free_i32(tmp);
2600 }
2602 static void gen_neon_dup_low16(TCGv_i32 var)
2603 {
2604 TCGv_i32 tmp = tcg_temp_new_i32();
2605 tcg_gen_ext16u_i32(var, var);
2606 tcg_gen_shli_i32(tmp, var, 16);
2607 tcg_gen_or_i32(var, var, tmp);
2608 tcg_temp_free_i32(tmp);
2609 }
2611 static void gen_neon_dup_high16(TCGv_i32 var)
2612 {
2613 TCGv_i32 tmp = tcg_temp_new_i32();
2614 tcg_gen_andi_i32(var, var, 0xffff0000);
2615 tcg_gen_shri_i32(tmp, var, 16);
2616 tcg_gen_or_i32(var, var, tmp);
2617 tcg_temp_free_i32(tmp);
2618 }
2620 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2621 {
2622 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2623 TCGv_i32 tmp = tcg_temp_new_i32();
2624 switch (size) {
2625 case 0:
2626 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2627 gen_neon_dup_u8(tmp, 0);
2628 break;
2629 case 1:
2630 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2631 gen_neon_dup_low16(tmp);
2632 break;
2633 case 2:
2634 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2635 break;
2636 default: /* Avoid compiler warnings. */
2637 abort();
2638 }
2639 return tmp;
2640 }
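/* VSEL: select between the Vn and Vm source registers according to the
 * condition encoded in bits [21:20] (eq, vs, ge or gt). The selection is
 * done with movcond on the cached NZCV flag variables rather than with a
 * branch, for both the single- and double-precision forms below.
 */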
2642 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2643 uint32_t dp)
2644 {
2645 uint32_t cc = extract32(insn, 20, 2);
2647 if (dp) {
2648 TCGv_i64 frn, frm, dest;
2649 TCGv_i64 tmp, zero, zf, nf, vf;
2651 zero = tcg_const_i64(0);
2653 frn = tcg_temp_new_i64();
2654 frm = tcg_temp_new_i64();
2655 dest = tcg_temp_new_i64();
2657 zf = tcg_temp_new_i64();
2658 nf = tcg_temp_new_i64();
2659 vf = tcg_temp_new_i64();
2661 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2662 tcg_gen_ext_i32_i64(nf, cpu_NF);
2663 tcg_gen_ext_i32_i64(vf, cpu_VF);
2665 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2666 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2667 switch (cc) {
2668 case 0: /* eq: Z */
2669 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2670 frn, frm);
2671 break;
2672 case 1: /* vs: V */
2673 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2674 frn, frm);
2675 break;
2676 case 2: /* ge: N == V -> N ^ V == 0 */
2677 tmp = tcg_temp_new_i64();
2678 tcg_gen_xor_i64(tmp, vf, nf);
2679 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2680 frn, frm);
2681 tcg_temp_free_i64(tmp);
2682 break;
2683 case 3: /* gt: !Z && N == V */
2684 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2685 frn, frm);
2686 tmp = tcg_temp_new_i64();
2687 tcg_gen_xor_i64(tmp, vf, nf);
2688 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2689 dest, frm);
2690 tcg_temp_free_i64(tmp);
2691 break;
2692 }
2693 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2694 tcg_temp_free_i64(frn);
2695 tcg_temp_free_i64(frm);
2696 tcg_temp_free_i64(dest);
2698 tcg_temp_free_i64(zf);
2699 tcg_temp_free_i64(nf);
2700 tcg_temp_free_i64(vf);
2702 tcg_temp_free_i64(zero);
2703 } else {
2704 TCGv_i32 frn, frm, dest;
2705 TCGv_i32 tmp, zero;
2707 zero = tcg_const_i32(0);
2709 frn = tcg_temp_new_i32();
2710 frm = tcg_temp_new_i32();
2711 dest = tcg_temp_new_i32();
2712 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2713 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2714 switch (cc) {
2715 case 0: /* eq: Z */
2716 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2717 frn, frm);
2718 break;
2719 case 1: /* vs: V */
2720 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2721 frn, frm);
2722 break;
2723 case 2: /* ge: N == V -> N ^ V == 0 */
2724 tmp = tcg_temp_new_i32();
2725 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2726 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2727 frn, frm);
2728 tcg_temp_free_i32(tmp);
2729 break;
2730 case 3: /* gt: !Z && N == V */
2731 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2732 frn, frm);
2733 tmp = tcg_temp_new_i32();
2734 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2735 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2736 dest, frm);
2737 tcg_temp_free_i32(tmp);
2738 break;
2739 }
2740 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2741 tcg_temp_free_i32(frn);
2742 tcg_temp_free_i32(frm);
2743 tcg_temp_free_i32(dest);
2745 tcg_temp_free_i32(zero);
2746 }
2748 return 0;
2749 }
2751 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2752 uint32_t rm, uint32_t dp)
2753 {
2754 uint32_t vmin = extract32(insn, 6, 1);
2755 TCGv_ptr fpst = get_fpstatus_ptr(0);
2757 if (dp) {
2758 TCGv_i64 frn, frm, dest;
2760 frn = tcg_temp_new_i64();
2761 frm = tcg_temp_new_i64();
2762 dest = tcg_temp_new_i64();
2764 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2765 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2766 if (vmin) {
2767 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2768 } else {
2769 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2770 }
2771 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2772 tcg_temp_free_i64(frn);
2773 tcg_temp_free_i64(frm);
2774 tcg_temp_free_i64(dest);
2775 } else {
2776 TCGv_i32 frn, frm, dest;
2778 frn = tcg_temp_new_i32();
2779 frm = tcg_temp_new_i32();
2780 dest = tcg_temp_new_i32();
2782 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2783 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2784 if (vmin) {
2785 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2786 } else {
2787 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2788 }
2789 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2790 tcg_temp_free_i32(frn);
2791 tcg_temp_free_i32(frm);
2792 tcg_temp_free_i32(dest);
2793 }
2795 tcg_temp_free_ptr(fpst);
2796 return 0;
2797 }
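/* VRINT{A,N,P,M}: round to integral in floating-point, using the rounding
 * mode passed in by the caller (looked up in fp_decode_rm below). The FPSCR
 * rounding mode is swapped out and restored around the operation by the two
 * gen_helper_set_rmode() calls.
 */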
2799 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2800 int rounding)
2801 {
2802 TCGv_ptr fpst = get_fpstatus_ptr(0);
2803 TCGv_i32 tcg_rmode;
2805 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2806 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2808 if (dp) {
2809 TCGv_i64 tcg_op;
2810 TCGv_i64 tcg_res;
2811 tcg_op = tcg_temp_new_i64();
2812 tcg_res = tcg_temp_new_i64();
2813 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2814 gen_helper_rintd(tcg_res, tcg_op, fpst);
2815 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2816 tcg_temp_free_i64(tcg_op);
2817 tcg_temp_free_i64(tcg_res);
2818 } else {
2819 TCGv_i32 tcg_op;
2820 TCGv_i32 tcg_res;
2821 tcg_op = tcg_temp_new_i32();
2822 tcg_res = tcg_temp_new_i32();
2823 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2824 gen_helper_rints(tcg_res, tcg_op, fpst);
2825 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2826 tcg_temp_free_i32(tcg_op);
2827 tcg_temp_free_i32(tcg_res);
2828 }
2830 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2831 tcg_temp_free_i32(tcg_rmode);
2833 tcg_temp_free_ptr(fpst);
2834 return 0;
2835 }
2837 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2838 int rounding)
2839 {
2840 bool is_signed = extract32(insn, 7, 1);
2841 TCGv_ptr fpst = get_fpstatus_ptr(0);
2842 TCGv_i32 tcg_rmode, tcg_shift;
2844 tcg_shift = tcg_const_i32(0);
2846 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2847 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2849 if (dp) {
2850 TCGv_i64 tcg_double, tcg_res;
2851 TCGv_i32 tcg_tmp;
2852 /* Rd is encoded as a single precision register even when the source
2853 * is double precision.
2854 */
2855 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2856 tcg_double = tcg_temp_new_i64();
2857 tcg_res = tcg_temp_new_i64();
2858 tcg_tmp = tcg_temp_new_i32();
2859 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2860 if (is_signed) {
2861 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2862 } else {
2863 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2864 }
2865 tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
2866 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2867 tcg_temp_free_i32(tcg_tmp);
2868 tcg_temp_free_i64(tcg_res);
2869 tcg_temp_free_i64(tcg_double);
2870 } else {
2871 TCGv_i32 tcg_single, tcg_res;
2872 tcg_single = tcg_temp_new_i32();
2873 tcg_res = tcg_temp_new_i32();
2874 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2875 if (is_signed) {
2876 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2877 } else {
2878 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2879 }
2880 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
2881 tcg_temp_free_i32(tcg_res);
2882 tcg_temp_free_i32(tcg_single);
2883 }
2885 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2886 tcg_temp_free_i32(tcg_rmode);
2888 tcg_temp_free_i32(tcg_shift);
2890 tcg_temp_free_ptr(fpst);
2892 return 0;
2893 }
2895 /* Table for converting the most common AArch32 encoding of
2896 * rounding mode to arm_fprounding order (which matches the
2897 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
2898 */
2899 static const uint8_t fp_decode_rm[] = {
2900 FPROUNDING_TIEAWAY,
2901 FPROUNDING_TIEEVEN,
2902 FPROUNDING_POSINF,
2903 FPROUNDING_NEGINF,
2904 };
2906 static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2907 {
2908 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2910 if (!arm_feature(env, ARM_FEATURE_V8)) {
2911 return 1;
2912 }
2914 if (dp) {
2915 VFP_DREG_D(rd, insn);
2916 VFP_DREG_N(rn, insn);
2917 VFP_DREG_M(rm, insn);
2918 } else {
2919 rd = VFP_SREG_D(insn);
2920 rn = VFP_SREG_N(insn);
2921 rm = VFP_SREG_M(insn);
2922 }
2924 if ((insn & 0x0f800e50) == 0x0e000a00) {
2925 return handle_vsel(insn, rd, rn, rm, dp);
2926 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2927 return handle_vminmaxnm(insn, rd, rn, rm, dp);
2928 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2929 /* VRINTA, VRINTN, VRINTP, VRINTM */
2930 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2931 return handle_vrint(insn, rd, rm, dp, rounding);
2932 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2933 /* VCVTA, VCVTN, VCVTP, VCVTM */
2934 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2935 return handle_vcvt(insn, rd, rm, dp, rounding);
2936 }
2937 return 1;
2938 }
2940 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2941 (ie. an undefined instruction). */
2942 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2944 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2945 int dp, veclen;
2946 TCGv_i32 addr;
2947 TCGv_i32 tmp;
2948 TCGv_i32 tmp2;
2950 if (!arm_feature(env, ARM_FEATURE_VFP))
2951 return 1;
2953 /* FIXME: this access check should not take precedence over UNDEF
2954 * for invalid encodings; we will generate incorrect syndrome information
2955 * for attempts to execute invalid vfp/neon encodings with FP disabled.
2956 */
2957 if (!s->cpacr_fpen) {
2958 gen_exception_insn(s, 4, EXCP_UDEF,
2959 syn_fp_access_trap(1, 0xe, s->thumb));
2960 return 0;
2963 if (!s->vfp_enabled) {
2964 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2965 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2966 return 1;
2967 rn = (insn >> 16) & 0xf;
2968 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
2969 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
2970 return 1;
2974 if (extract32(insn, 28, 4) == 0xf) {
2975 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2976 * only used in v8 and above.
2977 */
2978 return disas_vfp_v8_insn(env, s, insn);
2981 dp = ((insn & 0xf00) == 0xb00);
2982 switch ((insn >> 24) & 0xf) {
2983 case 0xe:
2984 if (insn & (1 << 4)) {
2985 /* single register transfer */
2986 rd = (insn >> 12) & 0xf;
2987 if (dp) {
2988 int size;
2989 int pass;
2991 VFP_DREG_N(rn, insn);
2992 if (insn & 0xf)
2993 return 1;
2994 if (insn & 0x00c00060
2995 && !arm_feature(env, ARM_FEATURE_NEON))
2996 return 1;
2998 pass = (insn >> 21) & 1;
2999 if (insn & (1 << 22)) {
3000 size = 0;
3001 offset = ((insn >> 5) & 3) * 8;
3002 } else if (insn & (1 << 5)) {
3003 size = 1;
3004 offset = (insn & (1 << 6)) ? 16 : 0;
3005 } else {
3006 size = 2;
3007 offset = 0;
3009 if (insn & ARM_CP_RW_BIT) {
3010 /* vfp->arm */
3011 tmp = neon_load_reg(rn, pass);
3012 switch (size) {
3013 case 0:
3014 if (offset)
3015 tcg_gen_shri_i32(tmp, tmp, offset);
3016 if (insn & (1 << 23))
3017 gen_uxtb(tmp);
3018 else
3019 gen_sxtb(tmp);
3020 break;
3021 case 1:
3022 if (insn & (1 << 23)) {
3023 if (offset) {
3024 tcg_gen_shri_i32(tmp, tmp, 16);
3025 } else {
3026 gen_uxth(tmp);
3028 } else {
3029 if (offset) {
3030 tcg_gen_sari_i32(tmp, tmp, 16);
3031 } else {
3032 gen_sxth(tmp);
3035 break;
3036 case 2:
3037 break;
3039 store_reg(s, rd, tmp);
3040 } else {
3041 /* arm->vfp */
3042 tmp = load_reg(s, rd);
3043 if (insn & (1 << 23)) {
3044 /* VDUP */
3045 if (size == 0) {
3046 gen_neon_dup_u8(tmp, 0);
3047 } else if (size == 1) {
3048 gen_neon_dup_low16(tmp);
3050 for (n = 0; n <= pass * 2; n++) {
3051 tmp2 = tcg_temp_new_i32();
3052 tcg_gen_mov_i32(tmp2, tmp);
3053 neon_store_reg(rn, n, tmp2);
3055 neon_store_reg(rn, n, tmp);
3056 } else {
3057 /* VMOV */
3058 switch (size) {
3059 case 0:
3060 tmp2 = neon_load_reg(rn, pass);
3061 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3062 tcg_temp_free_i32(tmp2);
3063 break;
3064 case 1:
3065 tmp2 = neon_load_reg(rn, pass);
3066 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3067 tcg_temp_free_i32(tmp2);
3068 break;
3069 case 2:
3070 break;
3072 neon_store_reg(rn, pass, tmp);
3075 } else { /* !dp */
3076 if ((insn & 0x6f) != 0x00)
3077 return 1;
3078 rn = VFP_SREG_N(insn);
3079 if (insn & ARM_CP_RW_BIT) {
3080 /* vfp->arm */
3081 if (insn & (1 << 21)) {
3082 /* system register */
3083 rn >>= 1;
3085 switch (rn) {
3086 case ARM_VFP_FPSID:
3087 /* VFP2 allows access to FSID from userspace.
3088 VFP3 restricts all id registers to privileged
3089 accesses. */
3090 if (IS_USER(s)
3091 && arm_feature(env, ARM_FEATURE_VFP3))
3092 return 1;
3093 tmp = load_cpu_field(vfp.xregs[rn]);
3094 break;
3095 case ARM_VFP_FPEXC:
3096 if (IS_USER(s))
3097 return 1;
3098 tmp = load_cpu_field(vfp.xregs[rn]);
3099 break;
3100 case ARM_VFP_FPINST:
3101 case ARM_VFP_FPINST2:
3102 /* Not present in VFP3. */
3103 if (IS_USER(s)
3104 || arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
3106 tmp = load_cpu_field(vfp.xregs[rn]);
3107 break;
3108 case ARM_VFP_FPSCR:
3109 if (rd == 15) {
3110 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3111 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3112 } else {
3113 tmp = tcg_temp_new_i32();
3114 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3116 break;
3117 case ARM_VFP_MVFR2:
3118 if (!arm_feature(env, ARM_FEATURE_V8)) {
3119 return 1;
3121 /* fall through */
3122 case ARM_VFP_MVFR0:
3123 case ARM_VFP_MVFR1:
3124 if (IS_USER(s)
3125 || !arm_feature(env, ARM_FEATURE_MVFR))
3126 return 1;
3127 tmp = load_cpu_field(vfp.xregs[rn]);
3128 break;
3129 default:
3130 return 1;
3132 } else {
3133 gen_mov_F0_vreg(0, rn);
3134 tmp = gen_vfp_mrs();
3136 if (rd == 15) {
3137 /* Set the 4 flag bits in the CPSR. */
3138 gen_set_nzcv(tmp);
3139 tcg_temp_free_i32(tmp);
3140 } else {
3141 store_reg(s, rd, tmp);
3143 } else {
3144 /* arm->vfp */
3145 if (insn & (1 << 21)) {
3146 rn >>= 1;
3147 /* system register */
3148 switch (rn) {
3149 case ARM_VFP_FPSID:
3150 case ARM_VFP_MVFR0:
3151 case ARM_VFP_MVFR1:
3152 /* Writes are ignored. */
3153 break;
3154 case ARM_VFP_FPSCR:
3155 tmp = load_reg(s, rd);
3156 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3157 tcg_temp_free_i32(tmp);
3158 gen_lookup_tb(s);
3159 break;
3160 case ARM_VFP_FPEXC:
3161 if (IS_USER(s))
3162 return 1;
3163 /* TODO: VFP subarchitecture support.
3164 * For now, keep the EN bit only */
3165 tmp = load_reg(s, rd);
3166 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3167 store_cpu_field(tmp, vfp.xregs[rn]);
3168 gen_lookup_tb(s);
3169 break;
3170 case ARM_VFP_FPINST:
3171 case ARM_VFP_FPINST2:
3172 tmp = load_reg(s, rd);
3173 store_cpu_field(tmp, vfp.xregs[rn]);
3174 break;
3175 default:
3176 return 1;
3178 } else {
3179 tmp = load_reg(s, rd);
3180 gen_vfp_msr(tmp);
3181 gen_mov_vreg_F0(0, rn);
3185 } else {
3186 /* data processing */
3187 /* The opcode is in bits 23, 21, 20 and 6. */
3188 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3189 if (dp) {
3190 if (op == 15) {
3191 /* rn is opcode */
3192 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3193 } else {
3194 /* rn is register number */
3195 VFP_DREG_N(rn, insn);
3198 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3199 ((rn & 0x1e) == 0x6))) {
3200 /* Integer or single/half precision destination. */
3201 rd = VFP_SREG_D(insn);
3202 } else {
3203 VFP_DREG_D(rd, insn);
3205 if (op == 15 &&
3206 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3207 ((rn & 0x1e) == 0x4))) {
3208 /* VCVT from int or half precision is always from S reg
3209 * regardless of dp bit. VCVT with immediate frac_bits
3210 * has same format as SREG_M.
3211 */
3212 rm = VFP_SREG_M(insn);
3213 } else {
3214 VFP_DREG_M(rm, insn);
3216 } else {
3217 rn = VFP_SREG_N(insn);
3218 if (op == 15 && rn == 15) {
3219 /* Double precision destination. */
3220 VFP_DREG_D(rd, insn);
3221 } else {
3222 rd = VFP_SREG_D(insn);
3224 /* NB that we implicitly rely on the encoding for the frac_bits
3225 * in VCVT of fixed to float being the same as that of an SREG_M
3226 */
3227 rm = VFP_SREG_M(insn);
3230 veclen = s->vec_len;
3231 if (op == 15 && rn > 3)
3232 veclen = 0;
3234 /* Shut up compiler warnings. */
3235 delta_m = 0;
3236 delta_d = 0;
3237 bank_mask = 0;
3239 if (veclen > 0) {
3240 if (dp)
3241 bank_mask = 0xc;
3242 else
3243 bank_mask = 0x18;
3245 /* Figure out what type of vector operation this is. */
3246 if ((rd & bank_mask) == 0) {
3247 /* scalar */
3248 veclen = 0;
3249 } else {
3250 if (dp)
3251 delta_d = (s->vec_stride >> 1) + 1;
3252 else
3253 delta_d = s->vec_stride + 1;
3255 if ((rm & bank_mask) == 0) {
3256 /* mixed scalar/vector */
3257 delta_m = 0;
3258 } else {
3259 /* vector */
3260 delta_m = delta_d;
3265 /* Load the initial operands. */
3266 if (op == 15) {
3267 switch (rn) {
3268 case 16:
3269 case 17:
3270 /* Integer source */
3271 gen_mov_F0_vreg(0, rm);
3272 break;
3273 case 8:
3274 case 9:
3275 /* Compare */
3276 gen_mov_F0_vreg(dp, rd);
3277 gen_mov_F1_vreg(dp, rm);
3278 break;
3279 case 10:
3280 case 11:
3281 /* Compare with zero */
3282 gen_mov_F0_vreg(dp, rd);
3283 gen_vfp_F1_ld0(dp);
3284 break;
3285 case 20:
3286 case 21:
3287 case 22:
3288 case 23:
3289 case 28:
3290 case 29:
3291 case 30:
3292 case 31:
3293 /* Source and destination the same. */
3294 gen_mov_F0_vreg(dp, rd);
3295 break;
3296 case 4:
3297 case 5:
3298 case 6:
3299 case 7:
3300 /* VCVTB, VCVTT: only present with the halfprec extension
3301 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3302 * (we choose to UNDEF)
3303 */
3304 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3305 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3306 return 1;
3308 if (!extract32(rn, 1, 1)) {
3309 /* Half precision source. */
3310 gen_mov_F0_vreg(0, rm);
3311 break;
3313 /* Otherwise fall through */
3314 default:
3315 /* One source operand. */
3316 gen_mov_F0_vreg(dp, rm);
3317 break;
3319 } else {
3320 /* Two source operands. */
3321 gen_mov_F0_vreg(dp, rn);
3322 gen_mov_F1_vreg(dp, rm);
3325 for (;;) {
3326 /* Perform the calculation. */
3327 switch (op) {
3328 case 0: /* VMLA: fd + (fn * fm) */
3329 /* Note that order of inputs to the add matters for NaNs */
3330 gen_vfp_F1_mul(dp);
3331 gen_mov_F0_vreg(dp, rd);
3332 gen_vfp_add(dp);
3333 break;
3334 case 1: /* VMLS: fd + -(fn * fm) */
3335 gen_vfp_mul(dp);
3336 gen_vfp_F1_neg(dp);
3337 gen_mov_F0_vreg(dp, rd);
3338 gen_vfp_add(dp);
3339 break;
3340 case 2: /* VNMLS: -fd + (fn * fm) */
3341 /* Note that it isn't valid to replace (-A + B) with (B - A)
3342 * or similar plausible looking simplifications
3343 * because this will give wrong results for NaNs.
3344 */
3345 gen_vfp_F1_mul(dp);
3346 gen_mov_F0_vreg(dp, rd);
3347 gen_vfp_neg(dp);
3348 gen_vfp_add(dp);
3349 break;
3350 case 3: /* VNMLA: -fd + -(fn * fm) */
3351 gen_vfp_mul(dp);
3352 gen_vfp_F1_neg(dp);
3353 gen_mov_F0_vreg(dp, rd);
3354 gen_vfp_neg(dp);
3355 gen_vfp_add(dp);
3356 break;
3357 case 4: /* mul: fn * fm */
3358 gen_vfp_mul(dp);
3359 break;
3360 case 5: /* nmul: -(fn * fm) */
3361 gen_vfp_mul(dp);
3362 gen_vfp_neg(dp);
3363 break;
3364 case 6: /* add: fn + fm */
3365 gen_vfp_add(dp);
3366 break;
3367 case 7: /* sub: fn - fm */
3368 gen_vfp_sub(dp);
3369 break;
3370 case 8: /* div: fn / fm */
3371 gen_vfp_div(dp);
3372 break;
3373 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3374 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3375 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3376 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3377 /* These are fused multiply-add, and must be done as one
3378 * floating point operation with no rounding between the
3379 * multiplication and addition steps.
3380 * NB that doing the negations here as separate steps is
3381 * correct : an input NaN should come out with its sign bit
3382 * flipped if it is a negated-input.
3383 */
3384 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3385 return 1;
3387 if (dp) {
3388 TCGv_ptr fpst;
3389 TCGv_i64 frd;
3390 if (op & 1) {
3391 /* VFNMS, VFMS */
3392 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3394 frd = tcg_temp_new_i64();
3395 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3396 if (op & 2) {
3397 /* VFNMA, VFNMS */
3398 gen_helper_vfp_negd(frd, frd);
3400 fpst = get_fpstatus_ptr(0);
3401 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3402 cpu_F1d, frd, fpst);
3403 tcg_temp_free_ptr(fpst);
3404 tcg_temp_free_i64(frd);
3405 } else {
3406 TCGv_ptr fpst;
3407 TCGv_i32 frd;
3408 if (op & 1) {
3409 /* VFNMS, VFMS */
3410 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3412 frd = tcg_temp_new_i32();
3413 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3414 if (op & 2) {
3415 gen_helper_vfp_negs(frd, frd);
3417 fpst = get_fpstatus_ptr(0);
3418 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3419 cpu_F1s, frd, fpst);
3420 tcg_temp_free_ptr(fpst);
3421 tcg_temp_free_i32(frd);
3423 break;
3424 case 14: /* fconst */
3425 if (!arm_feature(env, ARM_FEATURE_VFP3))
3426 return 1;
3428 n = (insn << 12) & 0x80000000;
3429 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3430 if (dp) {
3431 if (i & 0x40)
3432 i |= 0x3f80;
3433 else
3434 i |= 0x4000;
3435 n |= i << 16;
3436 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3437 } else {
3438 if (i & 0x40)
3439 i |= 0x780;
3440 else
3441 i |= 0x800;
3442 n |= i << 19;
3443 tcg_gen_movi_i32(cpu_F0s, n);
3445 break;
3446 case 15: /* extension space */
3447 switch (rn) {
3448 case 0: /* cpy */
3449 /* no-op */
3450 break;
3451 case 1: /* abs */
3452 gen_vfp_abs(dp);
3453 break;
3454 case 2: /* neg */
3455 gen_vfp_neg(dp);
3456 break;
3457 case 3: /* sqrt */
3458 gen_vfp_sqrt(dp);
3459 break;
3460 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3461 tmp = gen_vfp_mrs();
3462 tcg_gen_ext16u_i32(tmp, tmp);
3463 if (dp) {
3464 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3465 cpu_env);
3466 } else {
3467 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3468 cpu_env);
3470 tcg_temp_free_i32(tmp);
3471 break;
3472 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3473 tmp = gen_vfp_mrs();
3474 tcg_gen_shri_i32(tmp, tmp, 16);
3475 if (dp) {
3476 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3477 cpu_env);
3478 } else {
3479 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3480 cpu_env);
3482 tcg_temp_free_i32(tmp);
3483 break;
3484 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3485 tmp = tcg_temp_new_i32();
3486 if (dp) {
3487 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3488 cpu_env);
3489 } else {
3490 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3491 cpu_env);
3493 gen_mov_F0_vreg(0, rd);
3494 tmp2 = gen_vfp_mrs();
3495 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3496 tcg_gen_or_i32(tmp, tmp, tmp2);
3497 tcg_temp_free_i32(tmp2);
3498 gen_vfp_msr(tmp);
3499 break;
3500 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3501 tmp = tcg_temp_new_i32();
3502 if (dp) {
3503 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3504 cpu_env);
3505 } else {
3506 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3507 cpu_env);
3509 tcg_gen_shli_i32(tmp, tmp, 16);
3510 gen_mov_F0_vreg(0, rd);
3511 tmp2 = gen_vfp_mrs();
3512 tcg_gen_ext16u_i32(tmp2, tmp2);
3513 tcg_gen_or_i32(tmp, tmp, tmp2);
3514 tcg_temp_free_i32(tmp2);
3515 gen_vfp_msr(tmp);
3516 break;
3517 case 8: /* cmp */
3518 gen_vfp_cmp(dp);
3519 break;
3520 case 9: /* cmpe */
3521 gen_vfp_cmpe(dp);
3522 break;
3523 case 10: /* cmpz */
3524 gen_vfp_cmp(dp);
3525 break;
3526 case 11: /* cmpez */
3527 gen_vfp_F1_ld0(dp);
3528 gen_vfp_cmpe(dp);
3529 break;
3530 case 12: /* vrintr */
3532 TCGv_ptr fpst = get_fpstatus_ptr(0);
3533 if (dp) {
3534 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3535 } else {
3536 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3538 tcg_temp_free_ptr(fpst);
3539 break;
3541 case 13: /* vrintz */
3543 TCGv_ptr fpst = get_fpstatus_ptr(0);
3544 TCGv_i32 tcg_rmode;
3545 tcg_rmode = tcg_const_i32(float_round_to_zero);
3546 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3547 if (dp) {
3548 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3549 } else {
3550 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3552 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3553 tcg_temp_free_i32(tcg_rmode);
3554 tcg_temp_free_ptr(fpst);
3555 break;
3557 case 14: /* vrintx */
3559 TCGv_ptr fpst = get_fpstatus_ptr(0);
3560 if (dp) {
3561 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3562 } else {
3563 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3565 tcg_temp_free_ptr(fpst);
3566 break;
3568 case 15: /* single<->double conversion */
3569 if (dp)
3570 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3571 else
3572 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3573 break;
3574 case 16: /* fuito */
3575 gen_vfp_uito(dp, 0);
3576 break;
3577 case 17: /* fsito */
3578 gen_vfp_sito(dp, 0);
3579 break;
3580 case 20: /* fshto */
3581 if (!arm_feature(env, ARM_FEATURE_VFP3))
3582 return 1;
3583 gen_vfp_shto(dp, 16 - rm, 0);
3584 break;
3585 case 21: /* fslto */
3586 if (!arm_feature(env, ARM_FEATURE_VFP3))
3587 return 1;
3588 gen_vfp_slto(dp, 32 - rm, 0);
3589 break;
3590 case 22: /* fuhto */
3591 if (!arm_feature(env, ARM_FEATURE_VFP3))
3592 return 1;
3593 gen_vfp_uhto(dp, 16 - rm, 0);
3594 break;
3595 case 23: /* fulto */
3596 if (!arm_feature(env, ARM_FEATURE_VFP3))
3597 return 1;
3598 gen_vfp_ulto(dp, 32 - rm, 0);
3599 break;
3600 case 24: /* ftoui */
3601 gen_vfp_toui(dp, 0);
3602 break;
3603 case 25: /* ftouiz */
3604 gen_vfp_touiz(dp, 0);
3605 break;
3606 case 26: /* ftosi */
3607 gen_vfp_tosi(dp, 0);
3608 break;
3609 case 27: /* ftosiz */
3610 gen_vfp_tosiz(dp, 0);
3611 break;
3612 case 28: /* ftosh */
3613 if (!arm_feature(env, ARM_FEATURE_VFP3))
3614 return 1;
3615 gen_vfp_tosh(dp, 16 - rm, 0);
3616 break;
3617 case 29: /* ftosl */
3618 if (!arm_feature(env, ARM_FEATURE_VFP3))
3619 return 1;
3620 gen_vfp_tosl(dp, 32 - rm, 0);
3621 break;
3622 case 30: /* ftouh */
3623 if (!arm_feature(env, ARM_FEATURE_VFP3))
3624 return 1;
3625 gen_vfp_touh(dp, 16 - rm, 0);
3626 break;
3627 case 31: /* ftoul */
3628 if (!arm_feature(env, ARM_FEATURE_VFP3))
3629 return 1;
3630 gen_vfp_toul(dp, 32 - rm, 0);
3631 break;
3632 default: /* undefined */
3633 return 1;
3635 break;
3636 default: /* undefined */
3637 return 1;
3640 /* Write back the result. */
3641 if (op == 15 && (rn >= 8 && rn <= 11)) {
3642 /* Comparison, do nothing. */
3643 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3644 (rn & 0x1e) == 0x6)) {
3645 /* VCVT double to int: always integer result.
3646 * VCVT double to half precision is always a single
3647 * precision result.
3648 */
3649 gen_mov_vreg_F0(0, rd);
3650 } else if (op == 15 && rn == 15) {
3651 /* conversion */
3652 gen_mov_vreg_F0(!dp, rd);
3653 } else {
3654 gen_mov_vreg_F0(dp, rd);
3657 /* break out of the loop if we have finished */
3658 if (veclen == 0)
3659 break;
3661 if (op == 15 && delta_m == 0) {
3662 /* single source one-many */
3663 while (veclen--) {
3664 rd = ((rd + delta_d) & (bank_mask - 1))
3665 | (rd & bank_mask);
3666 gen_mov_vreg_F0(dp, rd);
3668 break;
3670 /* Setup the next operands. */
3671 veclen--;
3672 rd = ((rd + delta_d) & (bank_mask - 1))
3673 | (rd & bank_mask);
3675 if (op == 15) {
3676 /* One source operand. */
3677 rm = ((rm + delta_m) & (bank_mask - 1))
3678 | (rm & bank_mask);
3679 gen_mov_F0_vreg(dp, rm);
3680 } else {
3681 /* Two source operands. */
3682 rn = ((rn + delta_d) & (bank_mask - 1))
3683 | (rn & bank_mask);
3684 gen_mov_F0_vreg(dp, rn);
3685 if (delta_m) {
3686 rm = ((rm + delta_m) & (bank_mask - 1))
3687 | (rm & bank_mask);
3688 gen_mov_F1_vreg(dp, rm);
3693 break;
3694 case 0xc:
3695 case 0xd:
3696 if ((insn & 0x03e00000) == 0x00400000) {
3697 /* two-register transfer */
3698 rn = (insn >> 16) & 0xf;
3699 rd = (insn >> 12) & 0xf;
3700 if (dp) {
3701 VFP_DREG_M(rm, insn);
3702 } else {
3703 rm = VFP_SREG_M(insn);
3706 if (insn & ARM_CP_RW_BIT) {
3707 /* vfp->arm */
3708 if (dp) {
3709 gen_mov_F0_vreg(0, rm * 2);
3710 tmp = gen_vfp_mrs();
3711 store_reg(s, rd, tmp);
3712 gen_mov_F0_vreg(0, rm * 2 + 1);
3713 tmp = gen_vfp_mrs();
3714 store_reg(s, rn, tmp);
3715 } else {
3716 gen_mov_F0_vreg(0, rm);
3717 tmp = gen_vfp_mrs();
3718 store_reg(s, rd, tmp);
3719 gen_mov_F0_vreg(0, rm + 1);
3720 tmp = gen_vfp_mrs();
3721 store_reg(s, rn, tmp);
3723 } else {
3724 /* arm->vfp */
3725 if (dp) {
3726 tmp = load_reg(s, rd);
3727 gen_vfp_msr(tmp);
3728 gen_mov_vreg_F0(0, rm * 2);
3729 tmp = load_reg(s, rn);
3730 gen_vfp_msr(tmp);
3731 gen_mov_vreg_F0(0, rm * 2 + 1);
3732 } else {
3733 tmp = load_reg(s, rd);
3734 gen_vfp_msr(tmp);
3735 gen_mov_vreg_F0(0, rm);
3736 tmp = load_reg(s, rn);
3737 gen_vfp_msr(tmp);
3738 gen_mov_vreg_F0(0, rm + 1);
3741 } else {
3742 /* Load/store */
3743 rn = (insn >> 16) & 0xf;
3744 if (dp)
3745 VFP_DREG_D(rd, insn);
3746 else
3747 rd = VFP_SREG_D(insn);
3748 if ((insn & 0x01200000) == 0x01000000) {
3749 /* Single load/store */
3750 offset = (insn & 0xff) << 2;
3751 if ((insn & (1 << 23)) == 0)
3752 offset = -offset;
3753 if (s->thumb && rn == 15) {
3754 /* This is actually UNPREDICTABLE */
3755 addr = tcg_temp_new_i32();
3756 tcg_gen_movi_i32(addr, s->pc & ~2);
3757 } else {
3758 addr = load_reg(s, rn);
3760 tcg_gen_addi_i32(addr, addr, offset);
3761 if (insn & (1 << 20)) {
3762 gen_vfp_ld(s, dp, addr);
3763 gen_mov_vreg_F0(dp, rd);
3764 } else {
3765 gen_mov_F0_vreg(dp, rd);
3766 gen_vfp_st(s, dp, addr);
3768 tcg_temp_free_i32(addr);
3769 } else {
3770 /* load/store multiple */
3771 int w = insn & (1 << 21);
3772 if (dp)
3773 n = (insn >> 1) & 0x7f;
3774 else
3775 n = insn & 0xff;
3777 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3778 /* P == U , W == 1 => UNDEF */
3779 return 1;
3781 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3782 /* UNPREDICTABLE cases for bad immediates: we choose to
3783 * UNDEF to avoid generating huge numbers of TCG ops
3784 */
3785 return 1;
3787 if (rn == 15 && w) {
3788 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3789 return 1;
3792 if (s->thumb && rn == 15) {
3793 /* This is actually UNPREDICTABLE */
3794 addr = tcg_temp_new_i32();
3795 tcg_gen_movi_i32(addr, s->pc & ~2);
3796 } else {
3797 addr = load_reg(s, rn);
3799 if (insn & (1 << 24)) /* pre-decrement */
3800 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3802 if (dp)
3803 offset = 8;
3804 else
3805 offset = 4;
3806 for (i = 0; i < n; i++) {
3807 if (insn & ARM_CP_RW_BIT) {
3808 /* load */
3809 gen_vfp_ld(s, dp, addr);
3810 gen_mov_vreg_F0(dp, rd + i);
3811 } else {
3812 /* store */
3813 gen_mov_F0_vreg(dp, rd + i);
3814 gen_vfp_st(s, dp, addr);
3816 tcg_gen_addi_i32(addr, addr, offset);
3818 if (w) {
3819 /* writeback */
3820 if (insn & (1 << 24))
3821 offset = -offset * n;
3822 else if (dp && (insn & 1))
3823 offset = 4;
3824 else
3825 offset = 0;
3827 if (offset != 0)
3828 tcg_gen_addi_i32(addr, addr, offset);
3829 store_reg(s, rn, addr);
3830 } else {
3831 tcg_temp_free_i32(addr);
3835 break;
3836 default:
3837 /* Should never happen. */
3838 return 1;
3840 return 0;
3843 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3844 {
3845 TranslationBlock *tb;
3847 tb = s->tb;
3848 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3849 tcg_gen_goto_tb(n);
3850 gen_set_pc_im(s, dest);
3851 tcg_gen_exit_tb((uintptr_t)tb + n);
3852 } else {
3853 gen_set_pc_im(s, dest);
3854 tcg_gen_exit_tb(0);
3855 }
3856 }
3858 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3859 {
3860 if (unlikely(s->singlestep_enabled)) {
3861 /* An indirect jump so that we still trigger the debug exception. */
3862 if (s->thumb)
3863 dest |= 1;
3864 gen_bx_im(s, dest);
3865 } else {
3866 gen_goto_tb(s, 0, dest);
3867 s->is_jmp = DISAS_TB_JUMP;
3868 }
3869 }
3871 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3872 {
3873 if (x)
3874 tcg_gen_sari_i32(t0, t0, 16);
3875 else
3876 gen_sxth(t0);
3877 if (y)
3878 tcg_gen_sari_i32(t1, t1, 16);
3879 else
3880 gen_sxth(t1);
3881 tcg_gen_mul_i32(t0, t0, t1);
3882 }
3884 /* Return the mask of PSR bits set by a MSR instruction. */
3885 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3886 uint32_t mask;
3888 mask = 0;
3889 if (flags & (1 << 0))
3890 mask |= 0xff;
3891 if (flags & (1 << 1))
3892 mask |= 0xff00;
3893 if (flags & (1 << 2))
3894 mask |= 0xff0000;
3895 if (flags & (1 << 3))
3896 mask |= 0xff000000;
3898 /* Mask out undefined bits. */
3899 mask &= ~CPSR_RESERVED;
3900 if (!arm_feature(env, ARM_FEATURE_V4T))
3901 mask &= ~CPSR_T;
3902 if (!arm_feature(env, ARM_FEATURE_V5))
3903 mask &= ~CPSR_Q; /* V5TE in reality*/
3904 if (!arm_feature(env, ARM_FEATURE_V6))
3905 mask &= ~(CPSR_E | CPSR_GE);
3906 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3907 mask &= ~CPSR_IT;
3908 /* Mask out execution state bits. */
3909 if (!spsr)
3910 mask &= ~CPSR_EXEC;
3911 /* Mask out privileged bits. */
3912 if (IS_USER(s))
3913 mask &= CPSR_USER;
3914 return mask;
3915 }
3917 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3918 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3919 {
3920 TCGv_i32 tmp;
3921 if (spsr) {
3922 /* ??? This is also undefined in system mode. */
3923 if (IS_USER(s))
3924 return 1;
3926 tmp = load_cpu_field(spsr);
3927 tcg_gen_andi_i32(tmp, tmp, ~mask);
3928 tcg_gen_andi_i32(t0, t0, mask);
3929 tcg_gen_or_i32(tmp, tmp, t0);
3930 store_cpu_field(tmp, spsr);
3931 } else {
3932 gen_set_cpsr(t0, mask);
3933 }
3934 tcg_temp_free_i32(t0);
3935 gen_lookup_tb(s);
3936 return 0;
3937 }
3939 /* Returns nonzero if access to the PSR is not permitted. */
3940 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3941 {
3942 TCGv_i32 tmp;
3943 tmp = tcg_temp_new_i32();
3944 tcg_gen_movi_i32(tmp, val);
3945 return gen_set_psr(s, mask, spsr, tmp);
3946 }
3948 /* Generate an old-style exception return. Marks pc as dead. */
3949 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3950 {
3951 TCGv_i32 tmp;
3952 store_reg(s, 15, pc);
3953 tmp = load_cpu_field(spsr);
3954 gen_set_cpsr(tmp, 0xffffffff);
3955 tcg_temp_free_i32(tmp);
3956 s->is_jmp = DISAS_UPDATE;
3957 }
3959 /* Generate a v6 exception return. Marks both values as dead. */
3960 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3961 {
3962 gen_set_cpsr(cpsr, 0xffffffff);
3963 tcg_temp_free_i32(cpsr);
3964 store_reg(s, 15, pc);
3965 s->is_jmp = DISAS_UPDATE;
3966 }
3968 static void gen_nop_hint(DisasContext *s, int val)
3969 {
3970 switch (val) {
3971 case 3: /* wfi */
3972 gen_set_pc_im(s, s->pc);
3973 s->is_jmp = DISAS_WFI;
3974 break;
3975 case 2: /* wfe */
3976 gen_set_pc_im(s, s->pc);
3977 s->is_jmp = DISAS_WFE;
3978 break;
3979 case 4: /* sev */
3980 case 5: /* sevl */
3981 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3982 default: /* nop */
3983 break;
3984 }
3985 }
3987 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3989 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3990 {
3991 switch (size) {
3992 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3993 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3994 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3995 default: abort();
3996 }
3997 }
3999 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4000 {
4001 switch (size) {
4002 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4003 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4004 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4005 default: return;
4006 }
4007 }
4009 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4010 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4011 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4012 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4013 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
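/* GEN_NEON_INTEGER_OP(_ENV) below emit the Neon integer helper selected by
 * the element size (0/1/2 for 8/16/32-bit elements) and the signedness bit
 * u; the _ENV variant additionally passes cpu_env for helpers that need CPU
 * state (e.g. the saturation flag).
 */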
4015 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4016 switch ((size << 1) | u) { \
4017 case 0: \
4018 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4019 break; \
4020 case 1: \
4021 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4022 break; \
4023 case 2: \
4024 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4025 break; \
4026 case 3: \
4027 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4028 break; \
4029 case 4: \
4030 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4031 break; \
4032 case 5: \
4033 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4034 break; \
4035 default: return 1; \
4036 }} while (0)
4038 #define GEN_NEON_INTEGER_OP(name) do { \
4039 switch ((size << 1) | u) { \
4040 case 0: \
4041 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4042 break; \
4043 case 1: \
4044 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4045 break; \
4046 case 2: \
4047 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4048 break; \
4049 case 3: \
4050 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4051 break; \
4052 case 4: \
4053 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4054 break; \
4055 case 5: \
4056 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4057 break; \
4058 default: return 1; \
4059 }} while (0)
4061 static TCGv_i32 neon_load_scratch(int scratch)
4062 {
4063 TCGv_i32 tmp = tcg_temp_new_i32();
4064 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4065 return tmp;
4066 }
4068 static void neon_store_scratch(int scratch, TCGv_i32 var)
4069 {
4070 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4071 tcg_temp_free_i32(var);
4072 }
4074 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4075 {
4076 TCGv_i32 tmp;
4077 if (size == 1) {
4078 tmp = neon_load_reg(reg & 7, reg >> 4);
4079 if (reg & 8) {
4080 gen_neon_dup_high16(tmp);
4081 } else {
4082 gen_neon_dup_low16(tmp);
4084 } else {
4085 tmp = neon_load_reg(reg & 15, reg >> 4);
4086 }
4087 return tmp;
4088 }
4090 static int gen_neon_unzip(int rd, int rm, int size, int q)
4091 {
4092 TCGv_i32 tmp, tmp2;
4093 if (!q && size == 2) {
4094 return 1;
4095 }
4096 tmp = tcg_const_i32(rd);
4097 tmp2 = tcg_const_i32(rm);
4098 if (q) {
4099 switch (size) {
4100 case 0:
4101 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4102 break;
4103 case 1:
4104 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4105 break;
4106 case 2:
4107 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4108 break;
4109 default:
4110 abort();
4111 }
4112 } else {
4113 switch (size) {
4114 case 0:
4115 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4116 break;
4117 case 1:
4118 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4119 break;
4120 default:
4121 abort();
4122 }
4123 }
4124 tcg_temp_free_i32(tmp);
4125 tcg_temp_free_i32(tmp2);
4126 return 0;
4127 }
4129 static int gen_neon_zip(int rd, int rm, int size, int q)
4130 {
4131 TCGv_i32 tmp, tmp2;
4132 if (!q && size == 2) {
4133 return 1;
4134 }
4135 tmp = tcg_const_i32(rd);
4136 tmp2 = tcg_const_i32(rm);
4137 if (q) {
4138 switch (size) {
4139 case 0:
4140 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4141 break;
4142 case 1:
4143 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4144 break;
4145 case 2:
4146 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4147 break;
4148 default:
4149 abort();
4150 }
4151 } else {
4152 switch (size) {
4153 case 0:
4154 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4155 break;
4156 case 1:
4157 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4158 break;
4159 default:
4160 abort();
4161 }
4162 }
4163 tcg_temp_free_i32(tmp);
4164 tcg_temp_free_i32(tmp2);
4165 return 0;
4166 }
4168 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4169 {
4170 TCGv_i32 rd, tmp;
4172 rd = tcg_temp_new_i32();
4173 tmp = tcg_temp_new_i32();
4175 tcg_gen_shli_i32(rd, t0, 8);
4176 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4177 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4178 tcg_gen_or_i32(rd, rd, tmp);
4180 tcg_gen_shri_i32(t1, t1, 8);
4181 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4182 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4183 tcg_gen_or_i32(t1, t1, tmp);
4184 tcg_gen_mov_i32(t0, rd);
4186 tcg_temp_free_i32(tmp);
4187 tcg_temp_free_i32(rd);
4188 }
4190 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4191 {
4192 TCGv_i32 rd, tmp;
4194 rd = tcg_temp_new_i32();
4195 tmp = tcg_temp_new_i32();
4197 tcg_gen_shli_i32(rd, t0, 16);
4198 tcg_gen_andi_i32(tmp, t1, 0xffff);
4199 tcg_gen_or_i32(rd, rd, tmp);
4200 tcg_gen_shri_i32(t1, t1, 16);
4201 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4202 tcg_gen_or_i32(t1, t1, tmp);
4203 tcg_gen_mov_i32(t0, rd);
4205 tcg_temp_free_i32(tmp);
4206 tcg_temp_free_i32(rd);
4207 }
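/* Layout of the VLD/VST "multiple structures" forms, indexed by the op
 * field (bits [11:8], values 0..10): number of registers transferred,
 * interleave factor and register spacing, as used by disas_neon_ls_insn()
 * below.
 */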
4210 static struct {
4211 int nregs;
4212 int interleave;
4213 int spacing;
4214 } neon_ls_element_type[11] = {
4215 {4, 4, 1},
4216 {4, 4, 2},
4217 {4, 1, 1},
4218 {4, 2, 1},
4219 {3, 3, 1},
4220 {3, 3, 2},
4221 {3, 1, 1},
4222 {1, 1, 1},
4223 {2, 2, 1},
4224 {2, 2, 2},
4225 {2, 1, 1}
4226 };
4228 /* Translate a NEON load/store element instruction. Return nonzero if the
4229 instruction is invalid. */
4230 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
4231 {
4232 int rd, rn, rm;
4233 int op;
4234 int nregs;
4235 int interleave;
4236 int spacing;
4237 int stride;
4238 int size;
4239 int reg;
4240 int pass;
4241 int load;
4242 int shift;
4243 int n;
4244 TCGv_i32 addr;
4245 TCGv_i32 tmp;
4246 TCGv_i32 tmp2;
4247 TCGv_i64 tmp64;
4249 /* FIXME: this access check should not take precedence over UNDEF
4250 * for invalid encodings; we will generate incorrect syndrome information
4251 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4252 */
4253 if (!s->cpacr_fpen) {
4254 gen_exception_insn(s, 4, EXCP_UDEF,
4255 syn_fp_access_trap(1, 0xe, s->thumb));
4256 return 0;
4257 }
4259 if (!s->vfp_enabled)
4260 return 1;
4261 VFP_DREG_D(rd, insn);
4262 rn = (insn >> 16) & 0xf;
4263 rm = insn & 0xf;
4264 load = (insn & (1 << 21)) != 0;
4265 if ((insn & (1 << 23)) == 0) {
4266 /* Load store all elements. */
4267 op = (insn >> 8) & 0xf;
4268 size = (insn >> 6) & 3;
4269 if (op > 10)
4270 return 1;
4271 /* Catch UNDEF cases for bad values of align field */
4272 switch (op & 0xc) {
4273 case 4:
4274 if (((insn >> 5) & 1) == 1) {
4275 return 1;
4276 }
4277 break;
4278 case 8:
4279 if (((insn >> 4) & 3) == 3) {
4280 return 1;
4281 }
4282 break;
4283 default:
4284 break;
4285 }
4286 nregs = neon_ls_element_type[op].nregs;
4287 interleave = neon_ls_element_type[op].interleave;
4288 spacing = neon_ls_element_type[op].spacing;
4289 if (size == 3 && (interleave | spacing) != 1)
4290 return 1;
4291 addr = tcg_temp_new_i32();
4292 load_reg_var(s, addr, rn);
4293 stride = (1 << size) * interleave;
4294 for (reg = 0; reg < nregs; reg++) {
4295 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4296 load_reg_var(s, addr, rn);
4297 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4298 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4299 load_reg_var(s, addr, rn);
4300 tcg_gen_addi_i32(addr, addr, 1 << size);
4302 if (size == 3) {
4303 tmp64 = tcg_temp_new_i64();
4304 if (load) {
4305 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4306 neon_store_reg64(tmp64, rd);
4307 } else {
4308 neon_load_reg64(tmp64, rd);
4309 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4311 tcg_temp_free_i64(tmp64);
4312 tcg_gen_addi_i32(addr, addr, stride);
4313 } else {
4314 for (pass = 0; pass < 2; pass++) {
4315 if (size == 2) {
4316 if (load) {
4317 tmp = tcg_temp_new_i32();
4318 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4319 neon_store_reg(rd, pass, tmp);
4320 } else {
4321 tmp = neon_load_reg(rd, pass);
4322 gen_aa32_st32(tmp, addr, get_mem_index(s));
4323 tcg_temp_free_i32(tmp);
4325 tcg_gen_addi_i32(addr, addr, stride);
4326 } else if (size == 1) {
4327 if (load) {
4328 tmp = tcg_temp_new_i32();
4329 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4330 tcg_gen_addi_i32(addr, addr, stride);
4331 tmp2 = tcg_temp_new_i32();
4332 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4333 tcg_gen_addi_i32(addr, addr, stride);
4334 tcg_gen_shli_i32(tmp2, tmp2, 16);
4335 tcg_gen_or_i32(tmp, tmp, tmp2);
4336 tcg_temp_free_i32(tmp2);
4337 neon_store_reg(rd, pass, tmp);
4338 } else {
4339 tmp = neon_load_reg(rd, pass);
4340 tmp2 = tcg_temp_new_i32();
4341 tcg_gen_shri_i32(tmp2, tmp, 16);
4342 gen_aa32_st16(tmp, addr, get_mem_index(s));
4343 tcg_temp_free_i32(tmp);
4344 tcg_gen_addi_i32(addr, addr, stride);
4345 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4346 tcg_temp_free_i32(tmp2);
4347 tcg_gen_addi_i32(addr, addr, stride);
4349 } else /* size == 0 */ {
4350 if (load) {
4351 TCGV_UNUSED_I32(tmp2);
4352 for (n = 0; n < 4; n++) {
4353 tmp = tcg_temp_new_i32();
4354 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4355 tcg_gen_addi_i32(addr, addr, stride);
4356 if (n == 0) {
4357 tmp2 = tmp;
4358 } else {
4359 tcg_gen_shli_i32(tmp, tmp, n * 8);
4360 tcg_gen_or_i32(tmp2, tmp2, tmp);
4361 tcg_temp_free_i32(tmp);
4364 neon_store_reg(rd, pass, tmp2);
4365 } else {
4366 tmp2 = neon_load_reg(rd, pass);
4367 for (n = 0; n < 4; n++) {
4368 tmp = tcg_temp_new_i32();
4369 if (n == 0) {
4370 tcg_gen_mov_i32(tmp, tmp2);
4371 } else {
4372 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4374 gen_aa32_st8(tmp, addr, get_mem_index(s));
4375 tcg_temp_free_i32(tmp);
4376 tcg_gen_addi_i32(addr, addr, stride);
4378 tcg_temp_free_i32(tmp2);
4383 rd += spacing;
4385 tcg_temp_free_i32(addr);
4386 stride = nregs * 8;
4387 } else {
4388 size = (insn >> 10) & 3;
4389 if (size == 3) {
4390 /* Load single element to all lanes. */
4391 int a = (insn >> 4) & 1;
4392 if (!load) {
4393 return 1;
4395 size = (insn >> 6) & 3;
4396 nregs = ((insn >> 8) & 3) + 1;
4398 if (size == 3) {
4399 if (nregs != 4 || a == 0) {
4400 return 1;
4402 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4403 size = 2;
4405 if (nregs == 1 && a == 1 && size == 0) {
4406 return 1;
4408 if (nregs == 3 && a == 1) {
4409 return 1;
4411 addr = tcg_temp_new_i32();
4412 load_reg_var(s, addr, rn);
4413 if (nregs == 1) {
4414 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4415 tmp = gen_load_and_replicate(s, addr, size);
4416 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4417 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4418 if (insn & (1 << 5)) {
4419 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4420 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4422 tcg_temp_free_i32(tmp);
4423 } else {
4424 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4425 stride = (insn & (1 << 5)) ? 2 : 1;
4426 for (reg = 0; reg < nregs; reg++) {
4427 tmp = gen_load_and_replicate(s, addr, size);
4428 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4429 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4430 tcg_temp_free_i32(tmp);
4431 tcg_gen_addi_i32(addr, addr, 1 << size);
4432 rd += stride;
4435 tcg_temp_free_i32(addr);
4436 stride = (1 << size) * nregs;
4437 } else {
4438 /* Single element. */
4439 int idx = (insn >> 4) & 0xf;
4440 pass = (insn >> 7) & 1;
4441 switch (size) {
4442 case 0:
4443 shift = ((insn >> 5) & 3) * 8;
4444 stride = 1;
4445 break;
4446 case 1:
4447 shift = ((insn >> 6) & 1) * 16;
4448 stride = (insn & (1 << 5)) ? 2 : 1;
4449 break;
4450 case 2:
4451 shift = 0;
4452 stride = (insn & (1 << 6)) ? 2 : 1;
4453 break;
4454 default:
4455 abort();
4457 nregs = ((insn >> 8) & 3) + 1;
4458 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4459 switch (nregs) {
4460 case 1:
4461 if (((idx & (1 << size)) != 0) ||
4462 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4463 return 1;
4465 break;
4466 case 3:
4467 if ((idx & 1) != 0) {
4468 return 1;
4470 /* fall through */
4471 case 2:
4472 if (size == 2 && (idx & 2) != 0) {
4473 return 1;
4475 break;
4476 case 4:
4477 if ((size == 2) && ((idx & 3) == 3)) {
4478 return 1;
4480 break;
4481 default:
4482 abort();
4484 if ((rd + stride * (nregs - 1)) > 31) {
4485 /* Attempts to write off the end of the register file
4486 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4487 * the neon_load_reg()/neon_store_reg() accesses would run off the end of the array.
4489 return 1;
4491 addr = tcg_temp_new_i32();
4492 load_reg_var(s, addr, rn);
4493 for (reg = 0; reg < nregs; reg++) {
4494 if (load) {
4495 tmp = tcg_temp_new_i32();
4496 switch (size) {
4497 case 0:
4498 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4499 break;
4500 case 1:
4501 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4502 break;
4503 case 2:
4504 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4505 break;
4506 default: /* Avoid compiler warnings. */
4507 abort();
4509 if (size != 2) {
4510 tmp2 = neon_load_reg(rd, pass);
4511 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4512 shift, size ? 16 : 8);
4513 tcg_temp_free_i32(tmp2);
4515 neon_store_reg(rd, pass, tmp);
4516 } else { /* Store */
4517 tmp = neon_load_reg(rd, pass);
4518 if (shift)
4519 tcg_gen_shri_i32(tmp, tmp, shift);
4520 switch (size) {
4521 case 0:
4522 gen_aa32_st8(tmp, addr, get_mem_index(s));
4523 break;
4524 case 1:
4525 gen_aa32_st16(tmp, addr, get_mem_index(s));
4526 break;
4527 case 2:
4528 gen_aa32_st32(tmp, addr, get_mem_index(s));
4529 break;
4531 tcg_temp_free_i32(tmp);
4533 rd += stride;
4534 tcg_gen_addi_i32(addr, addr, 1 << size);
4536 tcg_temp_free_i32(addr);
4537 stride = nregs * (1 << size);
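/* Base register writeback: Rm == 15 means no writeback, Rm == 13 requests a
 * post-increment by the number of bytes just transferred (left in 'stride'
 * at this point), and any other Rm post-indexes the base by that register.
 */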
4540 if (rm != 15) {
4541 TCGv_i32 base;
4543 base = load_reg(s, rn);
4544 if (rm == 13) {
4545 tcg_gen_addi_i32(base, base, stride);
4546 } else {
4547 TCGv_i32 index;
4548 index = load_reg(s, rm);
4549 tcg_gen_add_i32(base, base, index);
4550 tcg_temp_free_i32(index);
4552 store_reg(s, rn, base);
4554 return 0;
4557 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4558 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4560 tcg_gen_and_i32(t, t, c);
4561 tcg_gen_andc_i32(f, f, c);
4562 tcg_gen_or_i32(dest, t, f);
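/* This computes dest = (t & c) | (f & ~c).  The VBSL, VBIT and VBIF cases in
 * disas_neon_data_insn() all reuse this helper and differ only in which of
 * the Vd/Vn/Vm values are passed as the t, f and c operands.
 */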
4565 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4567 switch (size) {
4568 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4569 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4570 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4571 default: abort();
4575 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4577 switch (size) {
4578 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4579 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4580 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4581 default: abort();
4585 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4587 switch (size) {
4588 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4589 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4590 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4591 default: abort();
4595 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4597 switch (size) {
4598 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4599 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4600 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4601 default: abort();
4605 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4606 int q, int u)
4608 if (q) {
4609 if (u) {
4610 switch (size) {
4611 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4612 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4613 default: abort();
4615 } else {
4616 switch (size) {
4617 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4618 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4619 default: abort();
4622 } else {
4623 if (u) {
4624 switch (size) {
4625 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4626 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4627 default: abort();
4629 } else {
4630 switch (size) {
4631 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4632 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4633 default: abort();
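/* Note that for these narrowing shifts the 'q' argument selects the rounding
 * forms (insn bit 6 encodes rounding here rather than a quadword operation)
 * and 'u' selects unsigned input elements.
 */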
4639 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4641 if (u) {
4642 switch (size) {
4643 case 0: gen_helper_neon_widen_u8(dest, src); break;
4644 case 1: gen_helper_neon_widen_u16(dest, src); break;
4645 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4646 default: abort();
4648 } else {
4649 switch (size) {
4650 case 0: gen_helper_neon_widen_s8(dest, src); break;
4651 case 1: gen_helper_neon_widen_s16(dest, src); break;
4652 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4653 default: abort();
4656 tcg_temp_free_i32(src);
4659 static inline void gen_neon_addl(int size)
4661 switch (size) {
4662 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4663 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4664 case 2: tcg_gen_add_i64(CPU_V001); break;
4665 default: abort();
4669 static inline void gen_neon_subl(int size)
4671 switch (size) {
4672 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4673 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4674 case 2: tcg_gen_sub_i64(CPU_V001); break;
4675 default: abort();
4679 static inline void gen_neon_negl(TCGv_i64 var, int size)
4681 switch (size) {
4682 case 0: gen_helper_neon_negl_u16(var, var); break;
4683 case 1: gen_helper_neon_negl_u32(var, var); break;
4684 case 2:
4685 tcg_gen_neg_i64(var, var);
4686 break;
4687 default: abort();
4691 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4693 switch (size) {
4694 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4695 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4696 default: abort();
4700 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4701 int size, int u)
4703 TCGv_i64 tmp;
4705 switch ((size << 1) | u) {
4706 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4707 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4708 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4709 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4710 case 4:
4711 tmp = gen_muls_i64_i32(a, b);
4712 tcg_gen_mov_i64(dest, tmp);
4713 tcg_temp_free_i64(tmp);
4714 break;
4715 case 5:
4716 tmp = gen_mulu_i64_i32(a, b);
4717 tcg_gen_mov_i64(dest, tmp);
4718 tcg_temp_free_i64(tmp);
4719 break;
4720 default: abort();
4723 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4724 Don't forget to clean them now. */
4725 if (size < 2) {
4726 tcg_temp_free_i32(a);
4727 tcg_temp_free_i32(b);
4731 static void gen_neon_narrow_op(int op, int u, int size,
4732 TCGv_i32 dest, TCGv_i64 src)
4734 if (op) {
4735 if (u) {
4736 gen_neon_unarrow_sats(size, dest, src);
4737 } else {
4738 gen_neon_narrow(size, dest, src);
4740 } else {
4741 if (u) {
4742 gen_neon_narrow_satu(size, dest, src);
4743 } else {
4744 gen_neon_narrow_sats(size, dest, src);
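/* The (op, u) pair selects the narrowing flavour:
 *   op=1 u=0: plain truncating narrow           (VMOVN, VSHRN)
 *   op=1 u=1: signed-to-unsigned saturating     (VQMOVUN, VQSHRUN)
 *   op=0 u=0: signed saturating                 (VQMOVN.S, VQSHRN.S)
 *   op=0 u=1: unsigned saturating               (VQMOVN.U, VQSHRN.U)
 * The rounding variants take the same paths after a rounding shift.
 */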
4749 /* Symbolic constants for op fields for Neon 3-register same-length.
4750 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4751 * table A7-9.
4753 #define NEON_3R_VHADD 0
4754 #define NEON_3R_VQADD 1
4755 #define NEON_3R_VRHADD 2
4756 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4757 #define NEON_3R_VHSUB 4
4758 #define NEON_3R_VQSUB 5
4759 #define NEON_3R_VCGT 6
4760 #define NEON_3R_VCGE 7
4761 #define NEON_3R_VSHL 8
4762 #define NEON_3R_VQSHL 9
4763 #define NEON_3R_VRSHL 10
4764 #define NEON_3R_VQRSHL 11
4765 #define NEON_3R_VMAX 12
4766 #define NEON_3R_VMIN 13
4767 #define NEON_3R_VABD 14
4768 #define NEON_3R_VABA 15
4769 #define NEON_3R_VADD_VSUB 16
4770 #define NEON_3R_VTST_VCEQ 17
4771 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4772 #define NEON_3R_VMUL 19
4773 #define NEON_3R_VPMAX 20
4774 #define NEON_3R_VPMIN 21
4775 #define NEON_3R_VQDMULH_VQRDMULH 22
4776 #define NEON_3R_VPADD 23
4777 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4778 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4779 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4780 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4781 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4782 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4783 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4784 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
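/* The op value is assembled in disas_neon_data_insn() as
 *   op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1)
 * i.e. bits [11:8] followed by bit 4.  For example, integer VADD/VSUB has
 * bits [11:8] = 0b1000 and bit 4 = 0, giving NEON_3R_VADD_VSUB (16), with
 * the U bit (bit 24) then selecting VADD or VSUB.
 */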
4786 static const uint8_t neon_3r_sizes[] = {
4787 [NEON_3R_VHADD] = 0x7,
4788 [NEON_3R_VQADD] = 0xf,
4789 [NEON_3R_VRHADD] = 0x7,
4790 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4791 [NEON_3R_VHSUB] = 0x7,
4792 [NEON_3R_VQSUB] = 0xf,
4793 [NEON_3R_VCGT] = 0x7,
4794 [NEON_3R_VCGE] = 0x7,
4795 [NEON_3R_VSHL] = 0xf,
4796 [NEON_3R_VQSHL] = 0xf,
4797 [NEON_3R_VRSHL] = 0xf,
4798 [NEON_3R_VQRSHL] = 0xf,
4799 [NEON_3R_VMAX] = 0x7,
4800 [NEON_3R_VMIN] = 0x7,
4801 [NEON_3R_VABD] = 0x7,
4802 [NEON_3R_VABA] = 0x7,
4803 [NEON_3R_VADD_VSUB] = 0xf,
4804 [NEON_3R_VTST_VCEQ] = 0x7,
4805 [NEON_3R_VML] = 0x7,
4806 [NEON_3R_VMUL] = 0x7,
4807 [NEON_3R_VPMAX] = 0x7,
4808 [NEON_3R_VPMIN] = 0x7,
4809 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4810 [NEON_3R_VPADD] = 0x7,
4811 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4812 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4813 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4814 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4815 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4816 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4817 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4818 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
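/* Each entry has bit n set if element size n is allowed, mirroring the
 * neon_2rm_sizes[] table below: 0x7 permits 8/16/32-bit elements, 0xf also
 * permits 64-bit ones.  For example, 0x6 for VQDMULH/VQRDMULH reflects that
 * those instructions have no byte-sized form.
 */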
4821 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4822 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4823 * table A7-13.
4825 #define NEON_2RM_VREV64 0
4826 #define NEON_2RM_VREV32 1
4827 #define NEON_2RM_VREV16 2
4828 #define NEON_2RM_VPADDL 4
4829 #define NEON_2RM_VPADDL_U 5
4830 #define NEON_2RM_AESE 6 /* Includes AESD */
4831 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4832 #define NEON_2RM_VCLS 8
4833 #define NEON_2RM_VCLZ 9
4834 #define NEON_2RM_VCNT 10
4835 #define NEON_2RM_VMVN 11
4836 #define NEON_2RM_VPADAL 12
4837 #define NEON_2RM_VPADAL_U 13
4838 #define NEON_2RM_VQABS 14
4839 #define NEON_2RM_VQNEG 15
4840 #define NEON_2RM_VCGT0 16
4841 #define NEON_2RM_VCGE0 17
4842 #define NEON_2RM_VCEQ0 18
4843 #define NEON_2RM_VCLE0 19
4844 #define NEON_2RM_VCLT0 20
4845 #define NEON_2RM_SHA1H 21
4846 #define NEON_2RM_VABS 22
4847 #define NEON_2RM_VNEG 23
4848 #define NEON_2RM_VCGT0_F 24
4849 #define NEON_2RM_VCGE0_F 25
4850 #define NEON_2RM_VCEQ0_F 26
4851 #define NEON_2RM_VCLE0_F 27
4852 #define NEON_2RM_VCLT0_F 28
4853 #define NEON_2RM_VABS_F 30
4854 #define NEON_2RM_VNEG_F 31
4855 #define NEON_2RM_VSWP 32
4856 #define NEON_2RM_VTRN 33
4857 #define NEON_2RM_VUZP 34
4858 #define NEON_2RM_VZIP 35
4859 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4860 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4861 #define NEON_2RM_VSHLL 38
4862 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4863 #define NEON_2RM_VRINTN 40
4864 #define NEON_2RM_VRINTX 41
4865 #define NEON_2RM_VRINTA 42
4866 #define NEON_2RM_VRINTZ 43
4867 #define NEON_2RM_VCVT_F16_F32 44
4868 #define NEON_2RM_VRINTM 45
4869 #define NEON_2RM_VCVT_F32_F16 46
4870 #define NEON_2RM_VRINTP 47
4871 #define NEON_2RM_VCVTAU 48
4872 #define NEON_2RM_VCVTAS 49
4873 #define NEON_2RM_VCVTNU 50
4874 #define NEON_2RM_VCVTNS 51
4875 #define NEON_2RM_VCVTPU 52
4876 #define NEON_2RM_VCVTPS 53
4877 #define NEON_2RM_VCVTMU 54
4878 #define NEON_2RM_VCVTMS 55
4879 #define NEON_2RM_VRECPE 56
4880 #define NEON_2RM_VRSQRTE 57
4881 #define NEON_2RM_VRECPE_F 58
4882 #define NEON_2RM_VRSQRTE_F 59
4883 #define NEON_2RM_VCVT_FS 60
4884 #define NEON_2RM_VCVT_FU 61
4885 #define NEON_2RM_VCVT_SF 62
4886 #define NEON_2RM_VCVT_UF 63
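/* The op value is assembled in disas_neon_data_insn() as
 *   op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf)
 * i.e. bits [17:16] followed by bits [10:7].  For example, VSWP has
 * bits [17:16] = 0b10 and bits [10:7] = 0b0000, giving NEON_2RM_VSWP (32).
 */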
4888 static int neon_2rm_is_float_op(int op)
4890 /* Return true if this neon 2reg-misc op is float-to-float */
4891 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4892 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
4893 op == NEON_2RM_VRINTM ||
4894 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
4895 op >= NEON_2RM_VRECPE_F);
4898 /* Each entry in this array has bit n set if the insn allows
4899 * size value n (otherwise it will UNDEF). Since unallocated
4900 * op values will have no bits set they always UNDEF.
4902 static const uint8_t neon_2rm_sizes[] = {
4903 [NEON_2RM_VREV64] = 0x7,
4904 [NEON_2RM_VREV32] = 0x3,
4905 [NEON_2RM_VREV16] = 0x1,
4906 [NEON_2RM_VPADDL] = 0x7,
4907 [NEON_2RM_VPADDL_U] = 0x7,
4908 [NEON_2RM_AESE] = 0x1,
4909 [NEON_2RM_AESMC] = 0x1,
4910 [NEON_2RM_VCLS] = 0x7,
4911 [NEON_2RM_VCLZ] = 0x7,
4912 [NEON_2RM_VCNT] = 0x1,
4913 [NEON_2RM_VMVN] = 0x1,
4914 [NEON_2RM_VPADAL] = 0x7,
4915 [NEON_2RM_VPADAL_U] = 0x7,
4916 [NEON_2RM_VQABS] = 0x7,
4917 [NEON_2RM_VQNEG] = 0x7,
4918 [NEON_2RM_VCGT0] = 0x7,
4919 [NEON_2RM_VCGE0] = 0x7,
4920 [NEON_2RM_VCEQ0] = 0x7,
4921 [NEON_2RM_VCLE0] = 0x7,
4922 [NEON_2RM_VCLT0] = 0x7,
4923 [NEON_2RM_SHA1H] = 0x4,
4924 [NEON_2RM_VABS] = 0x7,
4925 [NEON_2RM_VNEG] = 0x7,
4926 [NEON_2RM_VCGT0_F] = 0x4,
4927 [NEON_2RM_VCGE0_F] = 0x4,
4928 [NEON_2RM_VCEQ0_F] = 0x4,
4929 [NEON_2RM_VCLE0_F] = 0x4,
4930 [NEON_2RM_VCLT0_F] = 0x4,
4931 [NEON_2RM_VABS_F] = 0x4,
4932 [NEON_2RM_VNEG_F] = 0x4,
4933 [NEON_2RM_VSWP] = 0x1,
4934 [NEON_2RM_VTRN] = 0x7,
4935 [NEON_2RM_VUZP] = 0x7,
4936 [NEON_2RM_VZIP] = 0x7,
4937 [NEON_2RM_VMOVN] = 0x7,
4938 [NEON_2RM_VQMOVN] = 0x7,
4939 [NEON_2RM_VSHLL] = 0x7,
4940 [NEON_2RM_SHA1SU1] = 0x4,
4941 [NEON_2RM_VRINTN] = 0x4,
4942 [NEON_2RM_VRINTX] = 0x4,
4943 [NEON_2RM_VRINTA] = 0x4,
4944 [NEON_2RM_VRINTZ] = 0x4,
4945 [NEON_2RM_VCVT_F16_F32] = 0x2,
4946 [NEON_2RM_VRINTM] = 0x4,
4947 [NEON_2RM_VCVT_F32_F16] = 0x2,
4948 [NEON_2RM_VRINTP] = 0x4,
4949 [NEON_2RM_VCVTAU] = 0x4,
4950 [NEON_2RM_VCVTAS] = 0x4,
4951 [NEON_2RM_VCVTNU] = 0x4,
4952 [NEON_2RM_VCVTNS] = 0x4,
4953 [NEON_2RM_VCVTPU] = 0x4,
4954 [NEON_2RM_VCVTPS] = 0x4,
4955 [NEON_2RM_VCVTMU] = 0x4,
4956 [NEON_2RM_VCVTMS] = 0x4,
4957 [NEON_2RM_VRECPE] = 0x4,
4958 [NEON_2RM_VRSQRTE] = 0x4,
4959 [NEON_2RM_VRECPE_F] = 0x4,
4960 [NEON_2RM_VRSQRTE_F] = 0x4,
4961 [NEON_2RM_VCVT_FS] = 0x4,
4962 [NEON_2RM_VCVT_FU] = 0x4,
4963 [NEON_2RM_VCVT_SF] = 0x4,
4964 [NEON_2RM_VCVT_UF] = 0x4,
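/* For example, [NEON_2RM_VREV32] = 0x3 allows only byte and halfword
 * elements (reversing 32-bit elements within a word would be a no-op), and
 * the 0x4 entries are the single-precision float ops, which only exist for
 * size == 2.
 */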
4967 /* Translate a NEON data processing instruction. Return nonzero if the
4968 instruction is invalid.
4969 We process data in a mixture of 32-bit and 64-bit chunks.
4970 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4972 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4974 int op;
4975 int q;
4976 int rd, rn, rm;
4977 int size;
4978 int shift;
4979 int pass;
4980 int count;
4981 int pairwise;
4982 int u;
4983 uint32_t imm, mask;
4984 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4985 TCGv_i64 tmp64;
4987 /* FIXME: this access check should not take precedence over UNDEF
4988 * for invalid encodings; we will generate incorrect syndrome information
4989 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4991 if (!s->cpacr_fpen) {
4992 gen_exception_insn(s, 4, EXCP_UDEF,
4993 syn_fp_access_trap(1, 0xe, s->thumb));
4994 return 0;
4997 if (!s->vfp_enabled)
4998 return 1;
4999 q = (insn & (1 << 6)) != 0;
5000 u = (insn >> 24) & 1;
5001 VFP_DREG_D(rd, insn);
5002 VFP_DREG_N(rn, insn);
5003 VFP_DREG_M(rm, insn);
5004 size = (insn >> 20) & 3;
5005 if ((insn & (1 << 23)) == 0) {
5006 /* Three register same length. */
5007 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5008 /* Catch invalid op and bad size combinations: UNDEF */
5009 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5010 return 1;
5012 /* All insns of this form UNDEF for either this condition or the
5013 * superset of cases "Q==1"; we catch the latter later.
5015 if (q && ((rd | rn | rm) & 1)) {
5016 return 1;
5019 * The SHA-1/SHA-256 3-register instructions require special treatment
5020 * here, as their size field is overloaded as an op type selector, and
5021 * they all consume their input in a single pass.
5023 if (op == NEON_3R_SHA) {
5024 if (!q) {
5025 return 1;
5027 if (!u) { /* SHA-1 */
5028 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
5029 return 1;
5031 tmp = tcg_const_i32(rd);
5032 tmp2 = tcg_const_i32(rn);
5033 tmp3 = tcg_const_i32(rm);
5034 tmp4 = tcg_const_i32(size);
5035 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5036 tcg_temp_free_i32(tmp4);
5037 } else { /* SHA-256 */
5038 if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
5039 return 1;
5041 tmp = tcg_const_i32(rd);
5042 tmp2 = tcg_const_i32(rn);
5043 tmp3 = tcg_const_i32(rm);
5044 switch (size) {
5045 case 0:
5046 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5047 break;
5048 case 1:
5049 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5050 break;
5051 case 2:
5052 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5053 break;
5056 tcg_temp_free_i32(tmp);
5057 tcg_temp_free_i32(tmp2);
5058 tcg_temp_free_i32(tmp3);
5059 return 0;
5061 if (size == 3 && op != NEON_3R_LOGIC) {
5062 /* 64-bit element instructions. */
5063 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5064 neon_load_reg64(cpu_V0, rn + pass);
5065 neon_load_reg64(cpu_V1, rm + pass);
5066 switch (op) {
5067 case NEON_3R_VQADD:
5068 if (u) {
5069 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5070 cpu_V0, cpu_V1);
5071 } else {
5072 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5073 cpu_V0, cpu_V1);
5075 break;
5076 case NEON_3R_VQSUB:
5077 if (u) {
5078 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5079 cpu_V0, cpu_V1);
5080 } else {
5081 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5082 cpu_V0, cpu_V1);
5084 break;
5085 case NEON_3R_VSHL:
5086 if (u) {
5087 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5088 } else {
5089 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5091 break;
5092 case NEON_3R_VQSHL:
5093 if (u) {
5094 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5095 cpu_V1, cpu_V0);
5096 } else {
5097 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5098 cpu_V1, cpu_V0);
5100 break;
5101 case NEON_3R_VRSHL:
5102 if (u) {
5103 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5104 } else {
5105 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5107 break;
5108 case NEON_3R_VQRSHL:
5109 if (u) {
5110 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5111 cpu_V1, cpu_V0);
5112 } else {
5113 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5114 cpu_V1, cpu_V0);
5116 break;
5117 case NEON_3R_VADD_VSUB:
5118 if (u) {
5119 tcg_gen_sub_i64(CPU_V001);
5120 } else {
5121 tcg_gen_add_i64(CPU_V001);
5123 break;
5124 default:
5125 abort();
5127 neon_store_reg64(cpu_V0, rd + pass);
5129 return 0;
5131 pairwise = 0;
5132 switch (op) {
5133 case NEON_3R_VSHL:
5134 case NEON_3R_VQSHL:
5135 case NEON_3R_VRSHL:
5136 case NEON_3R_VQRSHL:
5138 int rtmp;
5139 /* Shift instruction operands are reversed. */
5140 rtmp = rn;
5141 rn = rm;
5142 rm = rtmp;
5144 break;
5145 case NEON_3R_VPADD:
5146 if (u) {
5147 return 1;
5149 /* Fall through */
5150 case NEON_3R_VPMAX:
5151 case NEON_3R_VPMIN:
5152 pairwise = 1;
5153 break;
5154 case NEON_3R_FLOAT_ARITH:
5155 pairwise = (u && size < 2); /* if VPADD (float) */
5156 break;
5157 case NEON_3R_FLOAT_MINMAX:
5158 pairwise = u; /* if VPMIN/VPMAX (float) */
5159 break;
5160 case NEON_3R_FLOAT_CMP:
5161 if (!u && size) {
5162 /* no encoding for U=0 C=1x */
5163 return 1;
5165 break;
5166 case NEON_3R_FLOAT_ACMP:
5167 if (!u) {
5168 return 1;
5170 break;
5171 case NEON_3R_FLOAT_MISC:
5172 /* VMAXNM/VMINNM in ARMv8 */
5173 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
5174 return 1;
5176 break;
5177 case NEON_3R_VMUL:
5178 if (u && (size != 0)) {
5179 /* UNDEF on invalid size for polynomial subcase */
5180 return 1;
5182 break;
5183 case NEON_3R_VFM:
5184 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5185 return 1;
5187 break;
5188 default:
5189 break;
5192 if (pairwise && q) {
5193 /* All the pairwise insns UNDEF if Q is set */
5194 return 1;
5197 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5199 if (pairwise) {
5200 /* Pairwise. */
5201 if (pass < 1) {
5202 tmp = neon_load_reg(rn, 0);
5203 tmp2 = neon_load_reg(rn, 1);
5204 } else {
5205 tmp = neon_load_reg(rm, 0);
5206 tmp2 = neon_load_reg(rm, 1);
5208 } else {
5209 /* Elementwise. */
5210 tmp = neon_load_reg(rn, pass);
5211 tmp2 = neon_load_reg(rm, pass);
5213 switch (op) {
5214 case NEON_3R_VHADD:
5215 GEN_NEON_INTEGER_OP(hadd);
5216 break;
5217 case NEON_3R_VQADD:
5218 GEN_NEON_INTEGER_OP_ENV(qadd);
5219 break;
5220 case NEON_3R_VRHADD:
5221 GEN_NEON_INTEGER_OP(rhadd);
5222 break;
5223 case NEON_3R_LOGIC: /* Logic ops. */
5224 switch ((u << 2) | size) {
5225 case 0: /* VAND */
5226 tcg_gen_and_i32(tmp, tmp, tmp2);
5227 break;
5228 case 1: /* BIC */
5229 tcg_gen_andc_i32(tmp, tmp, tmp2);
5230 break;
5231 case 2: /* VORR */
5232 tcg_gen_or_i32(tmp, tmp, tmp2);
5233 break;
5234 case 3: /* VORN */
5235 tcg_gen_orc_i32(tmp, tmp, tmp2);
5236 break;
5237 case 4: /* VEOR */
5238 tcg_gen_xor_i32(tmp, tmp, tmp2);
5239 break;
5240 case 5: /* VBSL */
5241 tmp3 = neon_load_reg(rd, pass);
5242 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5243 tcg_temp_free_i32(tmp3);
5244 break;
5245 case 6: /* VBIT */
5246 tmp3 = neon_load_reg(rd, pass);
5247 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5248 tcg_temp_free_i32(tmp3);
5249 break;
5250 case 7: /* VBIF */
5251 tmp3 = neon_load_reg(rd, pass);
5252 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5253 tcg_temp_free_i32(tmp3);
5254 break;
5256 break;
5257 case NEON_3R_VHSUB:
5258 GEN_NEON_INTEGER_OP(hsub);
5259 break;
5260 case NEON_3R_VQSUB:
5261 GEN_NEON_INTEGER_OP_ENV(qsub);
5262 break;
5263 case NEON_3R_VCGT:
5264 GEN_NEON_INTEGER_OP(cgt);
5265 break;
5266 case NEON_3R_VCGE:
5267 GEN_NEON_INTEGER_OP(cge);
5268 break;
5269 case NEON_3R_VSHL:
5270 GEN_NEON_INTEGER_OP(shl);
5271 break;
5272 case NEON_3R_VQSHL:
5273 GEN_NEON_INTEGER_OP_ENV(qshl);
5274 break;
5275 case NEON_3R_VRSHL:
5276 GEN_NEON_INTEGER_OP(rshl);
5277 break;
5278 case NEON_3R_VQRSHL:
5279 GEN_NEON_INTEGER_OP_ENV(qrshl);
5280 break;
5281 case NEON_3R_VMAX:
5282 GEN_NEON_INTEGER_OP(max);
5283 break;
5284 case NEON_3R_VMIN:
5285 GEN_NEON_INTEGER_OP(min);
5286 break;
5287 case NEON_3R_VABD:
5288 GEN_NEON_INTEGER_OP(abd);
5289 break;
5290 case NEON_3R_VABA:
5291 GEN_NEON_INTEGER_OP(abd);
5292 tcg_temp_free_i32(tmp2);
5293 tmp2 = neon_load_reg(rd, pass);
5294 gen_neon_add(size, tmp, tmp2);
5295 break;
5296 case NEON_3R_VADD_VSUB:
5297 if (!u) { /* VADD */
5298 gen_neon_add(size, tmp, tmp2);
5299 } else { /* VSUB */
5300 switch (size) {
5301 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5302 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5303 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5304 default: abort();
5307 break;
5308 case NEON_3R_VTST_VCEQ:
5309 if (!u) { /* VTST */
5310 switch (size) {
5311 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5312 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5313 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5314 default: abort();
5316 } else { /* VCEQ */
5317 switch (size) {
5318 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5319 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5320 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5321 default: abort();
5324 break;
5325 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5326 switch (size) {
5327 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5328 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5329 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5330 default: abort();
5332 tcg_temp_free_i32(tmp2);
5333 tmp2 = neon_load_reg(rd, pass);
5334 if (u) { /* VMLS */
5335 gen_neon_rsb(size, tmp, tmp2);
5336 } else { /* VMLA */
5337 gen_neon_add(size, tmp, tmp2);
5339 break;
5340 case NEON_3R_VMUL:
5341 if (u) { /* polynomial */
5342 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5343 } else { /* Integer */
5344 switch (size) {
5345 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5346 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5347 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5348 default: abort();
5351 break;
5352 case NEON_3R_VPMAX:
5353 GEN_NEON_INTEGER_OP(pmax);
5354 break;
5355 case NEON_3R_VPMIN:
5356 GEN_NEON_INTEGER_OP(pmin);
5357 break;
5358 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5359 if (!u) { /* VQDMULH */
5360 switch (size) {
5361 case 1:
5362 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5363 break;
5364 case 2:
5365 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5366 break;
5367 default: abort();
5369 } else { /* VQRDMULH */
5370 switch (size) {
5371 case 1:
5372 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5373 break;
5374 case 2:
5375 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5376 break;
5377 default: abort();
5380 break;
5381 case NEON_3R_VPADD:
5382 switch (size) {
5383 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5384 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5385 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5386 default: abort();
5388 break;
5389 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5391 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5392 switch ((u << 2) | size) {
5393 case 0: /* VADD */
5394 case 4: /* VPADD */
5395 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5396 break;
5397 case 2: /* VSUB */
5398 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5399 break;
5400 case 6: /* VABD */
5401 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5402 break;
5403 default:
5404 abort();
5406 tcg_temp_free_ptr(fpstatus);
5407 break;
5409 case NEON_3R_FLOAT_MULTIPLY:
5411 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5412 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5413 if (!u) {
5414 tcg_temp_free_i32(tmp2);
5415 tmp2 = neon_load_reg(rd, pass);
5416 if (size == 0) {
5417 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5418 } else {
5419 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5422 tcg_temp_free_ptr(fpstatus);
5423 break;
5425 case NEON_3R_FLOAT_CMP:
5427 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5428 if (!u) {
5429 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5430 } else {
5431 if (size == 0) {
5432 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5433 } else {
5434 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5437 tcg_temp_free_ptr(fpstatus);
5438 break;
5440 case NEON_3R_FLOAT_ACMP:
5442 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5443 if (size == 0) {
5444 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5445 } else {
5446 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5448 tcg_temp_free_ptr(fpstatus);
5449 break;
5451 case NEON_3R_FLOAT_MINMAX:
5453 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5454 if (size == 0) {
5455 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5456 } else {
5457 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5459 tcg_temp_free_ptr(fpstatus);
5460 break;
5462 case NEON_3R_FLOAT_MISC:
5463 if (u) {
5464 /* VMAXNM/VMINNM */
5465 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5466 if (size == 0) {
5467 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5468 } else {
5469 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5471 tcg_temp_free_ptr(fpstatus);
5472 } else {
5473 if (size == 0) {
5474 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5475 } else {
5476 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5479 break;
5480 case NEON_3R_VFM:
5482 /* VFMA, VFMS: fused multiply-add */
5483 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5484 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5485 if (size) {
5486 /* VFMS */
5487 gen_helper_vfp_negs(tmp, tmp);
5489 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5490 tcg_temp_free_i32(tmp3);
5491 tcg_temp_free_ptr(fpstatus);
5492 break;
5494 default:
5495 abort();
5497 tcg_temp_free_i32(tmp2);
5499 /* Save the result. For elementwise operations we can put it
5500 straight into the destination register. For pairwise operations
5501 we have to be careful to avoid clobbering the source operands. */
5502 if (pairwise && rd == rm) {
5503 neon_store_scratch(pass, tmp);
5504 } else {
5505 neon_store_reg(rd, pass, tmp);
5508 } /* for pass */
5509 if (pairwise && rd == rm) {
5510 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5511 tmp = neon_load_scratch(pass);
5512 neon_store_reg(rd, pass, tmp);
5515 /* End of 3 register same size operations. */
5516 } else if (insn & (1 << 4)) {
5517 if ((insn & 0x00380080) != 0) {
5518 /* Two registers and shift. */
5519 op = (insn >> 8) & 0xf;
5520 if (insn & (1 << 7)) {
5521 /* 64-bit shift. */
5522 if (op > 7) {
5523 return 1;
5525 size = 3;
5526 } else {
5527 size = 2;
5528 while ((insn & (1 << (size + 19))) == 0)
5529 size--;
5531 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5532 /* To avoid excessive duplication of ops we implement shift
5533 by immediate using the variable shift operations. */
5534 if (op < 8) {
5535 /* Shift by immediate:
5536 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5537 if (q && ((rd | rm) & 1)) {
5538 return 1;
5540 if (!u && (op == 4 || op == 6)) {
5541 return 1;
5543 /* Right shifts are encoded as N - shift, where N is the
5544 element size in bits. */
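/* For example, VSHR.S8 #3 is encoded with imm6 = 13; the masked field
 * arrives here as 5 and becomes 5 - 8 = -3, and the variable shift helpers
 * used below treat a negative shift count as a shift right.
 */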
5545 if (op <= 4)
5546 shift = shift - (1 << (size + 3));
5547 if (size == 3) {
5548 count = q + 1;
5549 } else {
5550 count = q ? 4: 2;
5552 switch (size) {
5553 case 0:
5554 imm = (uint8_t) shift;
5555 imm |= imm << 8;
5556 imm |= imm << 16;
5557 break;
5558 case 1:
5559 imm = (uint16_t) shift;
5560 imm |= imm << 16;
5561 break;
5562 case 2:
5563 case 3:
5564 imm = shift;
5565 break;
5566 default:
5567 abort();
5570 for (pass = 0; pass < count; pass++) {
5571 if (size == 3) {
5572 neon_load_reg64(cpu_V0, rm + pass);
5573 tcg_gen_movi_i64(cpu_V1, imm);
5574 switch (op) {
5575 case 0: /* VSHR */
5576 case 1: /* VSRA */
5577 if (u)
5578 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5579 else
5580 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5581 break;
5582 case 2: /* VRSHR */
5583 case 3: /* VRSRA */
5584 if (u)
5585 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5586 else
5587 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5588 break;
5589 case 4: /* VSRI */
5590 case 5: /* VSHL, VSLI */
5591 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5592 break;
5593 case 6: /* VQSHLU */
5594 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5595 cpu_V0, cpu_V1);
5596 break;
5597 case 7: /* VQSHL */
5598 if (u) {
5599 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5600 cpu_V0, cpu_V1);
5601 } else {
5602 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5603 cpu_V0, cpu_V1);
5605 break;
5607 if (op == 1 || op == 3) {
5608 /* Accumulate. */
5609 neon_load_reg64(cpu_V1, rd + pass);
5610 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5611 } else if (op == 4 || (op == 5 && u)) {
5612 /* Insert */
5613 neon_load_reg64(cpu_V1, rd + pass);
5614 uint64_t mask;
5615 if (shift < -63 || shift > 63) {
5616 mask = 0;
5617 } else {
5618 if (op == 4) {
5619 mask = 0xffffffffffffffffull >> -shift;
5620 } else {
5621 mask = 0xffffffffffffffffull << shift;
5624 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5625 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5627 neon_store_reg64(cpu_V0, rd + pass);
5628 } else { /* size < 3 */
5629 /* Operands in T0 and T1. */
5630 tmp = neon_load_reg(rm, pass);
5631 tmp2 = tcg_temp_new_i32();
5632 tcg_gen_movi_i32(tmp2, imm);
5633 switch (op) {
5634 case 0: /* VSHR */
5635 case 1: /* VSRA */
5636 GEN_NEON_INTEGER_OP(shl);
5637 break;
5638 case 2: /* VRSHR */
5639 case 3: /* VRSRA */
5640 GEN_NEON_INTEGER_OP(rshl);
5641 break;
5642 case 4: /* VSRI */
5643 case 5: /* VSHL, VSLI */
5644 switch (size) {
5645 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5646 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5647 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5648 default: abort();
5650 break;
5651 case 6: /* VQSHLU */
5652 switch (size) {
5653 case 0:
5654 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5655 tmp, tmp2);
5656 break;
5657 case 1:
5658 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5659 tmp, tmp2);
5660 break;
5661 case 2:
5662 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5663 tmp, tmp2);
5664 break;
5665 default:
5666 abort();
5668 break;
5669 case 7: /* VQSHL */
5670 GEN_NEON_INTEGER_OP_ENV(qshl);
5671 break;
5673 tcg_temp_free_i32(tmp2);
5675 if (op == 1 || op == 3) {
5676 /* Accumulate. */
5677 tmp2 = neon_load_reg(rd, pass);
5678 gen_neon_add(size, tmp, tmp2);
5679 tcg_temp_free_i32(tmp2);
5680 } else if (op == 4 || (op == 5 && u)) {
5681 /* Insert */
5682 switch (size) {
5683 case 0:
5684 if (op == 4)
5685 mask = 0xff >> -shift;
5686 else
5687 mask = (uint8_t)(0xff << shift);
5688 mask |= mask << 8;
5689 mask |= mask << 16;
5690 break;
5691 case 1:
5692 if (op == 4)
5693 mask = 0xffff >> -shift;
5694 else
5695 mask = (uint16_t)(0xffff << shift);
5696 mask |= mask << 16;
5697 break;
5698 case 2:
5699 if (shift < -31 || shift > 31) {
5700 mask = 0;
5701 } else {
5702 if (op == 4)
5703 mask = 0xffffffffu >> -shift;
5704 else
5705 mask = 0xffffffffu << shift;
5707 break;
5708 default:
5709 abort();
5711 tmp2 = neon_load_reg(rd, pass);
5712 tcg_gen_andi_i32(tmp, tmp, mask);
5713 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5714 tcg_gen_or_i32(tmp, tmp, tmp2);
5715 tcg_temp_free_i32(tmp2);
5717 neon_store_reg(rd, pass, tmp);
5719 } /* for pass */
5720 } else if (op < 10) {
5721 /* Shift by immediate and narrow:
5722 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5723 int input_unsigned = (op == 8) ? !u : u;
5724 if (rm & 1) {
5725 return 1;
5727 shift = shift - (1 << (size + 3));
5728 size++;
5729 if (size == 3) {
5730 tmp64 = tcg_const_i64(shift);
5731 neon_load_reg64(cpu_V0, rm);
5732 neon_load_reg64(cpu_V1, rm + 1);
5733 for (pass = 0; pass < 2; pass++) {
5734 TCGv_i64 in;
5735 if (pass == 0) {
5736 in = cpu_V0;
5737 } else {
5738 in = cpu_V1;
5740 if (q) {
5741 if (input_unsigned) {
5742 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5743 } else {
5744 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5746 } else {
5747 if (input_unsigned) {
5748 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5749 } else {
5750 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5753 tmp = tcg_temp_new_i32();
5754 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5755 neon_store_reg(rd, pass, tmp);
5756 } /* for pass */
5757 tcg_temp_free_i64(tmp64);
5758 } else {
5759 if (size == 1) {
5760 imm = (uint16_t)shift;
5761 imm |= imm << 16;
5762 } else {
5763 /* size == 2 */
5764 imm = (uint32_t)shift;
5766 tmp2 = tcg_const_i32(imm);
5767 tmp4 = neon_load_reg(rm + 1, 0);
5768 tmp5 = neon_load_reg(rm + 1, 1);
5769 for (pass = 0; pass < 2; pass++) {
5770 if (pass == 0) {
5771 tmp = neon_load_reg(rm, 0);
5772 } else {
5773 tmp = tmp4;
5775 gen_neon_shift_narrow(size, tmp, tmp2, q,
5776 input_unsigned);
5777 if (pass == 0) {
5778 tmp3 = neon_load_reg(rm, 1);
5779 } else {
5780 tmp3 = tmp5;
5782 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5783 input_unsigned);
5784 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5785 tcg_temp_free_i32(tmp);
5786 tcg_temp_free_i32(tmp3);
5787 tmp = tcg_temp_new_i32();
5788 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5789 neon_store_reg(rd, pass, tmp);
5790 } /* for pass */
5791 tcg_temp_free_i32(tmp2);
5793 } else if (op == 10) {
5794 /* VSHLL, VMOVL */
5795 if (q || (rd & 1)) {
5796 return 1;
5798 tmp = neon_load_reg(rm, 0);
5799 tmp2 = neon_load_reg(rm, 1);
5800 for (pass = 0; pass < 2; pass++) {
5801 if (pass == 1)
5802 tmp = tmp2;
5804 gen_neon_widen(cpu_V0, tmp, size, u);
5806 if (shift != 0) {
5807 /* The shift is less than the width of the source
5808 type, so we can just shift the whole register. */
5809 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5810 /* Widen the result of shift: we need to clear
5811 * the potential overflow bits resulting from
5812 * left bits of the narrow input appearing as
5813 * right bits of the left neighbour narrow
5814 * input. */
5815 if (size < 2 || !u) {
5816 uint64_t imm64;
5817 if (size == 0) {
5818 imm = (0xffu >> (8 - shift));
5819 imm |= imm << 16;
5820 } else if (size == 1) {
5821 imm = 0xffff >> (16 - shift);
5822 } else {
5823 /* size == 2 */
5824 imm = 0xffffffff >> (32 - shift);
5826 if (size < 2) {
5827 imm64 = imm | (((uint64_t)imm) << 32);
5828 } else {
5829 imm64 = imm;
5831 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5834 neon_store_reg64(cpu_V0, rd + pass);
5836 } else if (op >= 14) {
5837 /* VCVT fixed-point. */
5838 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5839 return 1;
5841 /* We have already masked out the must-be-1 top bit of imm6,
5842 * hence this 32-shift where the ARM ARM has 64-imm6.
5844 shift = 32 - shift;
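/* For example, imm6 = 0b111100 (60) encodes 64 - 60 = 4 fraction bits; the
 * masked field arrives here as 28 and 32 - 28 gives the same 4.
 */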
5845 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5846 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5847 if (!(op & 1)) {
5848 if (u)
5849 gen_vfp_ulto(0, shift, 1);
5850 else
5851 gen_vfp_slto(0, shift, 1);
5852 } else {
5853 if (u)
5854 gen_vfp_toul(0, shift, 1);
5855 else
5856 gen_vfp_tosl(0, shift, 1);
5858 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5860 } else {
5861 return 1;
5863 } else { /* (insn & 0x00380080) == 0 */
5864 int invert;
5865 if (q && (rd & 1)) {
5866 return 1;
5869 op = (insn >> 8) & 0xf;
5870 /* One register and immediate. */
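/* The next line reassembles the 8-bit 'abcdefgh' constant from its three
 * fields: a = bit 24 (our 'u'), bcd = bits [18:16], efgh = bits [3:0].  The
 * cmode value in 'op' then selects how it is expanded below.
 */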
5871 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5872 invert = (insn & (1 << 5)) != 0;
5873 /* Note that for op = 2,3,4,5,6,7,10,11,12,13 an immediate of 0 is UNPREDICTABLE.
5874 * We choose not to special-case this and will behave as if a
5875 * valid constant encoding of 0 had been given.
5877 switch (op) {
5878 case 0: case 1:
5879 /* no-op */
5880 break;
5881 case 2: case 3:
5882 imm <<= 8;
5883 break;
5884 case 4: case 5:
5885 imm <<= 16;
5886 break;
5887 case 6: case 7:
5888 imm <<= 24;
5889 break;
5890 case 8: case 9:
5891 imm |= imm << 16;
5892 break;
5893 case 10: case 11:
5894 imm = (imm << 8) | (imm << 24);
5895 break;
5896 case 12:
5897 imm = (imm << 8) | 0xff;
5898 break;
5899 case 13:
5900 imm = (imm << 16) | 0xffff;
5901 break;
5902 case 14:
5903 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5904 if (invert)
5905 imm = ~imm;
5906 break;
5907 case 15:
5908 if (invert) {
5909 return 1;
5911 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5912 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5913 break;
5915 if (invert)
5916 imm = ~imm;
5918 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5919 if (op & 1 && op < 12) {
5920 tmp = neon_load_reg(rd, pass);
5921 if (invert) {
5922 /* The immediate value has already been inverted, so
5923 BIC becomes AND. */
5924 tcg_gen_andi_i32(tmp, tmp, imm);
5925 } else {
5926 tcg_gen_ori_i32(tmp, tmp, imm);
5928 } else {
5929 /* VMOV, VMVN. */
5930 tmp = tcg_temp_new_i32();
5931 if (op == 14 && invert) {
5932 int n;
5933 uint32_t val;
5934 val = 0;
5935 for (n = 0; n < 4; n++) {
5936 if (imm & (1 << (n + (pass & 1) * 4)))
5937 val |= 0xff << (n * 8);
5939 tcg_gen_movi_i32(tmp, val);
5940 } else {
5941 tcg_gen_movi_i32(tmp, imm);
5944 neon_store_reg(rd, pass, tmp);
5947 } else { /* ((insn & 0x00800010) == 0x00800000) */
5948 if (size != 3) {
5949 op = (insn >> 8) & 0xf;
5950 if ((insn & (1 << 6)) == 0) {
5951 /* Three registers of different lengths. */
5952 int src1_wide;
5953 int src2_wide;
5954 int prewiden;
5955 /* undefreq: bit 0 : UNDEF if size == 0
5956 * bit 1 : UNDEF if size == 1
5957 * bit 2 : UNDEF if size == 2
5958 * bit 3 : UNDEF if U == 1
5959 * Note that [2:0] set implies 'always UNDEF'
5961 int undefreq;
5962 /* prewiden, src1_wide, src2_wide, undefreq */
5963 static const int neon_3reg_wide[16][4] = {
5964 {1, 0, 0, 0}, /* VADDL */
5965 {1, 1, 0, 0}, /* VADDW */
5966 {1, 0, 0, 0}, /* VSUBL */
5967 {1, 1, 0, 0}, /* VSUBW */
5968 {0, 1, 1, 0}, /* VADDHN */
5969 {0, 0, 0, 0}, /* VABAL */
5970 {0, 1, 1, 0}, /* VSUBHN */
5971 {0, 0, 0, 0}, /* VABDL */
5972 {0, 0, 0, 0}, /* VMLAL */
5973 {0, 0, 0, 9}, /* VQDMLAL */
5974 {0, 0, 0, 0}, /* VMLSL */
5975 {0, 0, 0, 9}, /* VQDMLSL */
5976 {0, 0, 0, 0}, /* Integer VMULL */
5977 {0, 0, 0, 1}, /* VQDMULL */
5978 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5979 {0, 0, 0, 7}, /* Reserved: always UNDEF */
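/* For example, the VQDMLAL row {0, 0, 0, 9} has undefreq = 9 = bits 0 and 3,
 * i.e. the instruction is UNDEF for byte elements or for U == 1, since
 * VQDMLAL has neither an 8-bit nor an unsigned form.
 */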
5982 prewiden = neon_3reg_wide[op][0];
5983 src1_wide = neon_3reg_wide[op][1];
5984 src2_wide = neon_3reg_wide[op][2];
5985 undefreq = neon_3reg_wide[op][3];
5987 if ((undefreq & (1 << size)) ||
5988 ((undefreq & 8) && u)) {
5989 return 1;
5991 if ((src1_wide && (rn & 1)) ||
5992 (src2_wide && (rm & 1)) ||
5993 (!src2_wide && (rd & 1))) {
5994 return 1;
5997 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5998 * outside the loop below as it only performs a single pass.
6000 if (op == 14 && size == 2) {
6001 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6003 if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
6004 return 1;
6006 tcg_rn = tcg_temp_new_i64();
6007 tcg_rm = tcg_temp_new_i64();
6008 tcg_rd = tcg_temp_new_i64();
6009 neon_load_reg64(tcg_rn, rn);
6010 neon_load_reg64(tcg_rm, rm);
6011 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6012 neon_store_reg64(tcg_rd, rd);
6013 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6014 neon_store_reg64(tcg_rd, rd + 1);
6015 tcg_temp_free_i64(tcg_rn);
6016 tcg_temp_free_i64(tcg_rm);
6017 tcg_temp_free_i64(tcg_rd);
6018 return 0;
6021 /* Avoid overlapping operands. Wide source operands are
6022 always aligned so will never overlap with wide
6023 destinations in problematic ways. */
6024 if (rd == rm && !src2_wide) {
6025 tmp = neon_load_reg(rm, 1);
6026 neon_store_scratch(2, tmp);
6027 } else if (rd == rn && !src1_wide) {
6028 tmp = neon_load_reg(rn, 1);
6029 neon_store_scratch(2, tmp);
6031 TCGV_UNUSED_I32(tmp3);
6032 for (pass = 0; pass < 2; pass++) {
6033 if (src1_wide) {
6034 neon_load_reg64(cpu_V0, rn + pass);
6035 TCGV_UNUSED_I32(tmp);
6036 } else {
6037 if (pass == 1 && rd == rn) {
6038 tmp = neon_load_scratch(2);
6039 } else {
6040 tmp = neon_load_reg(rn, pass);
6042 if (prewiden) {
6043 gen_neon_widen(cpu_V0, tmp, size, u);
6046 if (src2_wide) {
6047 neon_load_reg64(cpu_V1, rm + pass);
6048 TCGV_UNUSED_I32(tmp2);
6049 } else {
6050 if (pass == 1 && rd == rm) {
6051 tmp2 = neon_load_scratch(2);
6052 } else {
6053 tmp2 = neon_load_reg(rm, pass);
6055 if (prewiden) {
6056 gen_neon_widen(cpu_V1, tmp2, size, u);
6059 switch (op) {
6060 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6061 gen_neon_addl(size);
6062 break;
6063 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6064 gen_neon_subl(size);
6065 break;
6066 case 5: case 7: /* VABAL, VABDL */
6067 switch ((size << 1) | u) {
6068 case 0:
6069 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6070 break;
6071 case 1:
6072 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6073 break;
6074 case 2:
6075 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6076 break;
6077 case 3:
6078 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6079 break;
6080 case 4:
6081 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6082 break;
6083 case 5:
6084 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6085 break;
6086 default: abort();
6088 tcg_temp_free_i32(tmp2);
6089 tcg_temp_free_i32(tmp);
6090 break;
6091 case 8: case 9: case 10: case 11: case 12: case 13:
6092 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6093 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6094 break;
6095 case 14: /* Polynomial VMULL */
6096 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6097 tcg_temp_free_i32(tmp2);
6098 tcg_temp_free_i32(tmp);
6099 break;
6100 default: /* 15 is RESERVED: caught earlier */
6101 abort();
6103 if (op == 13) {
6104 /* VQDMULL */
6105 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6106 neon_store_reg64(cpu_V0, rd + pass);
6107 } else if (op == 5 || (op >= 8 && op <= 11)) {
6108 /* Accumulate. */
6109 neon_load_reg64(cpu_V1, rd + pass);
6110 switch (op) {
6111 case 10: /* VMLSL */
6112 gen_neon_negl(cpu_V0, size);
6113 /* Fall through */
6114 case 5: case 8: /* VABAL, VMLAL */
6115 gen_neon_addl(size);
6116 break;
6117 case 9: case 11: /* VQDMLAL, VQDMLSL */
6118 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6119 if (op == 11) {
6120 gen_neon_negl(cpu_V0, size);
6122 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6123 break;
6124 default:
6125 abort();
6127 neon_store_reg64(cpu_V0, rd + pass);
6128 } else if (op == 4 || op == 6) {
6129 /* Narrowing operation. */
6130 tmp = tcg_temp_new_i32();
6131 if (!u) {
6132 switch (size) {
6133 case 0:
6134 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6135 break;
6136 case 1:
6137 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6138 break;
6139 case 2:
6140 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6141 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6142 break;
6143 default: abort();
6145 } else {
6146 switch (size) {
6147 case 0:
6148 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6149 break;
6150 case 1:
6151 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6152 break;
6153 case 2:
6154 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6155 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6156 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6157 break;
6158 default: abort();
6161 if (pass == 0) {
6162 tmp3 = tmp;
6163 } else {
6164 neon_store_reg(rd, 0, tmp3);
6165 neon_store_reg(rd, 1, tmp);
6167 } else {
6168 /* Write back the result. */
6169 neon_store_reg64(cpu_V0, rd + pass);
6172 } else {
6173 /* Two registers and a scalar. NB that for ops of this form
6174 * the ARM ARM labels bit 24 as Q, but it is in our variable
6175 * 'u', not 'q'.
6177 if (size == 0) {
6178 return 1;
6180 switch (op) {
6181 case 1: /* Float VMLA scalar */
6182 case 5: /* Floating point VMLS scalar */
6183 case 9: /* Floating point VMUL scalar */
6184 if (size == 1) {
6185 return 1;
6187 /* fall through */
6188 case 0: /* Integer VMLA scalar */
6189 case 4: /* Integer VMLS scalar */
6190 case 8: /* Integer VMUL scalar */
6191 case 12: /* VQDMULH scalar */
6192 case 13: /* VQRDMULH scalar */
6193 if (u && ((rd | rn) & 1)) {
6194 return 1;
6196 tmp = neon_get_scalar(size, rm);
6197 neon_store_scratch(0, tmp);
6198 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6199 tmp = neon_load_scratch(0);
6200 tmp2 = neon_load_reg(rn, pass);
6201 if (op == 12) {
6202 if (size == 1) {
6203 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6204 } else {
6205 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6207 } else if (op == 13) {
6208 if (size == 1) {
6209 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6210 } else {
6211 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6213 } else if (op & 1) {
6214 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6215 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6216 tcg_temp_free_ptr(fpstatus);
6217 } else {
6218 switch (size) {
6219 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6220 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6221 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6222 default: abort();
6225 tcg_temp_free_i32(tmp2);
6226 if (op < 8) {
6227 /* Accumulate. */
6228 tmp2 = neon_load_reg(rd, pass);
6229 switch (op) {
6230 case 0:
6231 gen_neon_add(size, tmp, tmp2);
6232 break;
6233 case 1:
6235 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6236 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6237 tcg_temp_free_ptr(fpstatus);
6238 break;
6240 case 4:
6241 gen_neon_rsb(size, tmp, tmp2);
6242 break;
6243 case 5:
6245 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6246 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6247 tcg_temp_free_ptr(fpstatus);
6248 break;
6250 default:
6251 abort();
6253 tcg_temp_free_i32(tmp2);
6255 neon_store_reg(rd, pass, tmp);
6257 break;
6258 case 3: /* VQDMLAL scalar */
6259 case 7: /* VQDMLSL scalar */
6260 case 11: /* VQDMULL scalar */
6261 if (u == 1) {
6262 return 1;
6264 /* fall through */
6265 case 2: /* VMLAL scalar */
6266 case 6: /* VMLSL scalar */
6267 case 10: /* VMULL scalar */
6268 if (rd & 1) {
6269 return 1;
6271 tmp2 = neon_get_scalar(size, rm);
6272 /* We need a copy of tmp2 because gen_neon_mull
6273 * frees it during pass 0. */
6274 tmp4 = tcg_temp_new_i32();
6275 tcg_gen_mov_i32(tmp4, tmp2);
6276 tmp3 = neon_load_reg(rn, 1);
6278 for (pass = 0; pass < 2; pass++) {
6279 if (pass == 0) {
6280 tmp = neon_load_reg(rn, 0);
6281 } else {
6282 tmp = tmp3;
6283 tmp2 = tmp4;
6285 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6286 if (op != 11) {
6287 neon_load_reg64(cpu_V1, rd + pass);
6289 switch (op) {
6290 case 6:
6291 gen_neon_negl(cpu_V0, size);
6292 /* Fall through */
6293 case 2:
6294 gen_neon_addl(size);
6295 break;
6296 case 3: case 7:
6297 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6298 if (op == 7) {
6299 gen_neon_negl(cpu_V0, size);
6301 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6302 break;
6303 case 10:
6304 /* no-op */
6305 break;
6306 case 11:
6307 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6308 break;
6309 default:
6310 abort();
6312 neon_store_reg64(cpu_V0, rd + pass);
6316 break;
6317 default: /* 14 and 15 are RESERVED */
6318 return 1;
6321 } else { /* size == 3 */
6322 if (!u) {
6323 /* Extract. */
6324 imm = (insn >> 8) & 0xf;
6326 if (imm > 7 && !q)
6327 return 1;
6329 if (q && ((rd | rn | rm) & 1)) {
6330 return 1;
6333 if (imm == 0) {
6334 neon_load_reg64(cpu_V0, rn);
6335 if (q) {
6336 neon_load_reg64(cpu_V1, rn + 1);
6338 } else if (imm == 8) {
6339 neon_load_reg64(cpu_V0, rn + 1);
6340 if (q) {
6341 neon_load_reg64(cpu_V1, rm);
6343 } else if (q) {
6344 tmp64 = tcg_temp_new_i64();
6345 if (imm < 8) {
6346 neon_load_reg64(cpu_V0, rn);
6347 neon_load_reg64(tmp64, rn + 1);
6348 } else {
6349 neon_load_reg64(cpu_V0, rn + 1);
6350 neon_load_reg64(tmp64, rm);
6352 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6353 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6354 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6355 if (imm < 8) {
6356 neon_load_reg64(cpu_V1, rm);
6357 } else {
6358 neon_load_reg64(cpu_V1, rm + 1);
6359 imm -= 8;
6361 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6362 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6363 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6364 tcg_temp_free_i64(tmp64);
6365 } else {
6366 /* BUGFIX */
6367 neon_load_reg64(cpu_V0, rn);
6368 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6369 neon_load_reg64(cpu_V1, rm);
6370 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6371 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6373 neon_store_reg64(cpu_V0, rd);
6374 if (q) {
6375 neon_store_reg64(cpu_V1, rd + 1);
6377 } else if ((insn & (1 << 11)) == 0) {
6378 /* Two register misc. */
6379 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6380 size = (insn >> 18) & 3;
6381 /* UNDEF for unknown op values and bad op-size combinations */
6382 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6383 return 1;
6385 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6386 q && ((rm | rd) & 1)) {
6387 return 1;
6389 switch (op) {
6390 case NEON_2RM_VREV64:
6391 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6392 tmp = neon_load_reg(rm, pass * 2);
6393 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6394 switch (size) {
6395 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6396 case 1: gen_swap_half(tmp); break;
6397 case 2: /* no-op */ break;
6398 default: abort();
6400 neon_store_reg(rd, pass * 2 + 1, tmp);
6401 if (size == 2) {
6402 neon_store_reg(rd, pass * 2, tmp2);
6403 } else {
6404 switch (size) {
6405 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6406 case 1: gen_swap_half(tmp2); break;
6407 default: abort();
6409 neon_store_reg(rd, pass * 2, tmp2);
6412 break;
6413 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6414 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6415 for (pass = 0; pass < q + 1; pass++) {
6416 tmp = neon_load_reg(rm, pass * 2);
6417 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6418 tmp = neon_load_reg(rm, pass * 2 + 1);
6419 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6420 switch (size) {
6421 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6422 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6423 case 2: tcg_gen_add_i64(CPU_V001); break;
6424 default: abort();
6426 if (op >= NEON_2RM_VPADAL) {
6427 /* Accumulate. */
6428 neon_load_reg64(cpu_V1, rd + pass);
6429 gen_neon_addl(size);
6431 neon_store_reg64(cpu_V0, rd + pass);
6433 break;
6434 case NEON_2RM_VTRN:
6435 if (size == 2) {
6436 int n;
6437 for (n = 0; n < (q ? 4 : 2); n += 2) {
6438 tmp = neon_load_reg(rm, n);
6439 tmp2 = neon_load_reg(rd, n + 1);
6440 neon_store_reg(rm, n, tmp2);
6441 neon_store_reg(rd, n + 1, tmp);
6443 } else {
6444 goto elementwise;
6446 break;
6447 case NEON_2RM_VUZP:
6448 if (gen_neon_unzip(rd, rm, size, q)) {
6449 return 1;
6451 break;
6452 case NEON_2RM_VZIP:
6453 if (gen_neon_zip(rd, rm, size, q)) {
6454 return 1;
6456 break;
6457 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6458 /* also VQMOVUN; op field and mnemonics don't line up */
6459 if (rm & 1) {
6460 return 1;
6462 TCGV_UNUSED_I32(tmp2);
6463 for (pass = 0; pass < 2; pass++) {
6464 neon_load_reg64(cpu_V0, rm + pass);
6465 tmp = tcg_temp_new_i32();
6466 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6467 tmp, cpu_V0);
6468 if (pass == 0) {
6469 tmp2 = tmp;
6470 } else {
6471 neon_store_reg(rd, 0, tmp2);
6472 neon_store_reg(rd, 1, tmp);
6475 break;
6476 case NEON_2RM_VSHLL:
6477 if (q || (rd & 1)) {
6478 return 1;
6480 tmp = neon_load_reg(rm, 0);
6481 tmp2 = neon_load_reg(rm, 1);
6482 for (pass = 0; pass < 2; pass++) {
6483 if (pass == 1)
6484 tmp = tmp2;
6485 gen_neon_widen(cpu_V0, tmp, size, 1);
6486 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
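/* This is the VSHLL-by-element-width form: the widened elements are
   shifted left by 8, 16 or 32 bits for size 0, 1 or 2 respectively. */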
6487 neon_store_reg64(cpu_V0, rd + pass);
6489 break;
6490 case NEON_2RM_VCVT_F16_F32:
6491 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6492 q || (rm & 1)) {
6493 return 1;
6495 tmp = tcg_temp_new_i32();
6496 tmp2 = tcg_temp_new_i32();
6497 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6498 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6499 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6500 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6501 tcg_gen_shli_i32(tmp2, tmp2, 16);
6502 tcg_gen_or_i32(tmp2, tmp2, tmp);
6503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6504 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6505 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6506 neon_store_reg(rd, 0, tmp2);
6507 tmp2 = tcg_temp_new_i32();
6508 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6509 tcg_gen_shli_i32(tmp2, tmp2, 16);
6510 tcg_gen_or_i32(tmp2, tmp2, tmp);
6511 neon_store_reg(rd, 1, tmp2);
6512 tcg_temp_free_i32(tmp);
6513 break;
6514 case NEON_2RM_VCVT_F32_F16:
6515 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6516 q || (rd & 1)) {
6517 return 1;
6519 tmp3 = tcg_temp_new_i32();
6520 tmp = neon_load_reg(rm, 0);
6521 tmp2 = neon_load_reg(rm, 1);
6522 tcg_gen_ext16u_i32(tmp3, tmp);
6523 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6524 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6525 tcg_gen_shri_i32(tmp3, tmp, 16);
6526 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6527 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6528 tcg_temp_free_i32(tmp);
6529 tcg_gen_ext16u_i32(tmp3, tmp2);
6530 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6531 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6532 tcg_gen_shri_i32(tmp3, tmp2, 16);
6533 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6534 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6535 tcg_temp_free_i32(tmp2);
6536 tcg_temp_free_i32(tmp3);
6537 break;
6538 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6539 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6540 || ((rm | rd) & 1)) {
6541 return 1;
6543 tmp = tcg_const_i32(rd);
6544 tmp2 = tcg_const_i32(rm);
6546 /* Bit 6 is the lowest opcode bit; it distinguishes between
6547 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6549 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6551 if (op == NEON_2RM_AESE) {
6552 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6553 } else {
6554 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6556 tcg_temp_free_i32(tmp);
6557 tcg_temp_free_i32(tmp2);
6558 tcg_temp_free_i32(tmp3);
6559 break;
6560 case NEON_2RM_SHA1H:
6561 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
6562 || ((rm | rd) & 1)) {
6563 return 1;
6565 tmp = tcg_const_i32(rd);
6566 tmp2 = tcg_const_i32(rm);
6568 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6570 tcg_temp_free_i32(tmp);
6571 tcg_temp_free_i32(tmp2);
6572 break;
6573 case NEON_2RM_SHA1SU1:
6574 if ((rm | rd) & 1) {
6575 return 1;
6577 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6578 if (q) {
6579 if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
6580 return 1;
6582 } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
6583 return 1;
6585 tmp = tcg_const_i32(rd);
6586 tmp2 = tcg_const_i32(rm);
6587 if (q) {
6588 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6589 } else {
6590 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6592 tcg_temp_free_i32(tmp);
6593 tcg_temp_free_i32(tmp2);
6594 break;
6595 default:
6596 elementwise:
6597 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6598 if (neon_2rm_is_float_op(op)) {
6599 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6600 neon_reg_offset(rm, pass));
6601 TCGV_UNUSED_I32(tmp);
6602 } else {
6603 tmp = neon_load_reg(rm, pass);
6605 switch (op) {
6606 case NEON_2RM_VREV32:
6607 switch (size) {
6608 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6609 case 1: gen_swap_half(tmp); break;
6610 default: abort();
6612 break;
6613 case NEON_2RM_VREV16:
6614 gen_rev16(tmp);
6615 break;
6616 case NEON_2RM_VCLS:
6617 switch (size) {
6618 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6619 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6620 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6621 default: abort();
6623 break;
6624 case NEON_2RM_VCLZ:
6625 switch (size) {
6626 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6627 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6628 case 2: gen_helper_clz(tmp, tmp); break;
6629 default: abort();
6631 break;
6632 case NEON_2RM_VCNT:
6633 gen_helper_neon_cnt_u8(tmp, tmp);
6634 break;
6635 case NEON_2RM_VMVN:
6636 tcg_gen_not_i32(tmp, tmp);
6637 break;
6638 case NEON_2RM_VQABS:
6639 switch (size) {
6640 case 0:
6641 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6642 break;
6643 case 1:
6644 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6645 break;
6646 case 2:
6647 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6648 break;
6649 default: abort();
6651 break;
6652 case NEON_2RM_VQNEG:
6653 switch (size) {
6654 case 0:
6655 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6656 break;
6657 case 1:
6658 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6659 break;
6660 case 2:
6661 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6662 break;
6663 default: abort();
6665 break;
6666 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6667 tmp2 = tcg_const_i32(0);
6668 switch(size) {
6669 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6670 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6671 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6672 default: abort();
6674 tcg_temp_free_i32(tmp2);
6675 if (op == NEON_2RM_VCLE0) {
6676 tcg_gen_not_i32(tmp, tmp);
6678 break;
6679 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6680 tmp2 = tcg_const_i32(0);
6681 switch(size) {
6682 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6683 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6684 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6685 default: abort();
6687 tcg_temp_free_i32(tmp2);
6688 if (op == NEON_2RM_VCLT0) {
6689 tcg_gen_not_i32(tmp, tmp);
6691 break;
6692 case NEON_2RM_VCEQ0:
6693 tmp2 = tcg_const_i32(0);
6694 switch(size) {
6695 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6696 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6697 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6698 default: abort();
6700 tcg_temp_free_i32(tmp2);
6701 break;
6702 case NEON_2RM_VABS:
6703 switch(size) {
6704 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6705 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6706 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6707 default: abort();
6709 break;
6710 case NEON_2RM_VNEG:
6711 tmp2 = tcg_const_i32(0);
6712 gen_neon_rsb(size, tmp, tmp2);
6713 tcg_temp_free_i32(tmp2);
6714 break;
6715 case NEON_2RM_VCGT0_F:
6717 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6718 tmp2 = tcg_const_i32(0);
6719 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6720 tcg_temp_free_i32(tmp2);
6721 tcg_temp_free_ptr(fpstatus);
6722 break;
6724 case NEON_2RM_VCGE0_F:
6726 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6727 tmp2 = tcg_const_i32(0);
6728 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6729 tcg_temp_free_i32(tmp2);
6730 tcg_temp_free_ptr(fpstatus);
6731 break;
6733 case NEON_2RM_VCEQ0_F:
6735 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6736 tmp2 = tcg_const_i32(0);
6737 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6738 tcg_temp_free_i32(tmp2);
6739 tcg_temp_free_ptr(fpstatus);
6740 break;
6742 case NEON_2RM_VCLE0_F:
6744 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6745 tmp2 = tcg_const_i32(0);
6746 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6747 tcg_temp_free_i32(tmp2);
6748 tcg_temp_free_ptr(fpstatus);
6749 break;
6751 case NEON_2RM_VCLT0_F:
6753 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6754 tmp2 = tcg_const_i32(0);
6755 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6756 tcg_temp_free_i32(tmp2);
6757 tcg_temp_free_ptr(fpstatus);
6758 break;
6760 case NEON_2RM_VABS_F:
6761 gen_vfp_abs(0);
6762 break;
6763 case NEON_2RM_VNEG_F:
6764 gen_vfp_neg(0);
6765 break;
6766 case NEON_2RM_VSWP:
6767 tmp2 = neon_load_reg(rd, pass);
6768 neon_store_reg(rm, pass, tmp2);
6769 break;
6770 case NEON_2RM_VTRN:
6771 tmp2 = neon_load_reg(rd, pass);
6772 switch (size) {
6773 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6774 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6775 default: abort();
6777 neon_store_reg(rm, pass, tmp2);
6778 break;
6779 case NEON_2RM_VRINTN:
6780 case NEON_2RM_VRINTA:
6781 case NEON_2RM_VRINTM:
6782 case NEON_2RM_VRINTP:
6783 case NEON_2RM_VRINTZ:
6785 TCGv_i32 tcg_rmode;
6786 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6787 int rmode;
6789 if (op == NEON_2RM_VRINTZ) {
6790 rmode = FPROUNDING_ZERO;
6791 } else {
6792 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6795 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6796 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6797 cpu_env);
6798 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6799 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6800 cpu_env);
6801 tcg_temp_free_ptr(fpstatus);
6802 tcg_temp_free_i32(tcg_rmode);
6803 break;
6805 case NEON_2RM_VRINTX:
6807 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6808 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6809 tcg_temp_free_ptr(fpstatus);
6810 break;
6812 case NEON_2RM_VCVTAU:
6813 case NEON_2RM_VCVTAS:
6814 case NEON_2RM_VCVTNU:
6815 case NEON_2RM_VCVTNS:
6816 case NEON_2RM_VCVTPU:
6817 case NEON_2RM_VCVTPS:
6818 case NEON_2RM_VCVTMU:
6819 case NEON_2RM_VCVTMS:
6821 bool is_signed = !extract32(insn, 7, 1);
6822 TCGv_ptr fpst = get_fpstatus_ptr(1);
6823 TCGv_i32 tcg_rmode, tcg_shift;
6824 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6826 tcg_shift = tcg_const_i32(0);
6827 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6828 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6829 cpu_env);
6831 if (is_signed) {
6832 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6833 tcg_shift, fpst);
6834 } else {
6835 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6836 tcg_shift, fpst);
6839 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6840 cpu_env);
6841 tcg_temp_free_i32(tcg_rmode);
6842 tcg_temp_free_i32(tcg_shift);
6843 tcg_temp_free_ptr(fpst);
6844 break;
6846 case NEON_2RM_VRECPE:
6848 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6849 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6850 tcg_temp_free_ptr(fpstatus);
6851 break;
6853 case NEON_2RM_VRSQRTE:
6855 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6856 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6857 tcg_temp_free_ptr(fpstatus);
6858 break;
6860 case NEON_2RM_VRECPE_F:
6862 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6863 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6864 tcg_temp_free_ptr(fpstatus);
6865 break;
6867 case NEON_2RM_VRSQRTE_F:
6869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6870 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6871 tcg_temp_free_ptr(fpstatus);
6872 break;
6874 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6875 gen_vfp_sito(0, 1);
6876 break;
6877 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6878 gen_vfp_uito(0, 1);
6879 break;
6880 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6881 gen_vfp_tosiz(0, 1);
6882 break;
6883 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6884 gen_vfp_touiz(0, 1);
6885 break;
6886 default:
6887 /* Reserved op values were caught by the
6888 * neon_2rm_sizes[] check earlier.
6890 abort();
6892 if (neon_2rm_is_float_op(op)) {
6893 tcg_gen_st_f32(cpu_F0s, cpu_env,
6894 neon_reg_offset(rd, pass));
6895 } else {
6896 neon_store_reg(rd, pass, tmp);
6899 break;
6901 } else if ((insn & (1 << 10)) == 0) {
6902 /* VTBL, VTBX. */
6903 int n = ((insn >> 8) & 3) + 1;
6904 if ((rn + n) > 32) {
6905 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6906 * helper function running off the end of the register file.
6908 return 1;
6910 n <<= 3;
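/* n is now the table length in bytes (one to four D registers of 8 bytes each). */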
6911 if (insn & (1 << 6)) {
6912 tmp = neon_load_reg(rd, 0);
6913 } else {
6914 tmp = tcg_temp_new_i32();
6915 tcg_gen_movi_i32(tmp, 0);
6917 tmp2 = neon_load_reg(rm, 0);
6918 tmp4 = tcg_const_i32(rn);
6919 tmp5 = tcg_const_i32(n);
6920 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6921 tcg_temp_free_i32(tmp);
6922 if (insn & (1 << 6)) {
6923 tmp = neon_load_reg(rd, 1);
6924 } else {
6925 tmp = tcg_temp_new_i32();
6926 tcg_gen_movi_i32(tmp, 0);
6928 tmp3 = neon_load_reg(rm, 1);
6929 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6930 tcg_temp_free_i32(tmp5);
6931 tcg_temp_free_i32(tmp4);
6932 neon_store_reg(rd, 0, tmp2);
6933 neon_store_reg(rd, 1, tmp3);
6934 tcg_temp_free_i32(tmp);
6935 } else if ((insn & 0x380) == 0) {
6936 /* VDUP */
6937 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6938 return 1;
6940 if (insn & (1 << 19)) {
6941 tmp = neon_load_reg(rm, 1);
6942 } else {
6943 tmp = neon_load_reg(rm, 0);
6945 if (insn & (1 << 16)) {
6946 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6947 } else if (insn & (1 << 17)) {
6948 if ((insn >> 18) & 1)
6949 gen_neon_dup_high16(tmp);
6950 else
6951 gen_neon_dup_low16(tmp);
6953 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6954 tmp2 = tcg_temp_new_i32();
6955 tcg_gen_mov_i32(tmp2, tmp);
6956 neon_store_reg(rd, pass, tmp2);
6958 tcg_temp_free_i32(tmp);
6959 } else {
6960 return 1;
6964 return 0;
6967 static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
6969 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6970 const ARMCPRegInfo *ri;
6972 cpnum = (insn >> 8) & 0xf;
6973 if (arm_feature(env, ARM_FEATURE_XSCALE)
6974 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6975 return 1;
6977 /* First check for coprocessor space used for actual instructions */
6978 switch (cpnum) {
6979 case 0:
6980 case 1:
6981 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6982 return disas_iwmmxt_insn(env, s, insn);
6983 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6984 return disas_dsp_insn(env, s, insn);
6986 return 1;
6987 default:
6988 break;
6991 /* Otherwise treat as a generic register access */
6992 is64 = (insn & (1 << 25)) == 0;
6993 if (!is64 && ((insn & (1 << 4)) == 0)) {
6994 /* cdp */
6995 return 1;
6998 crm = insn & 0xf;
6999 if (is64) {
7000 crn = 0;
7001 opc1 = (insn >> 4) & 0xf;
7002 opc2 = 0;
7003 rt2 = (insn >> 16) & 0xf;
7004 } else {
7005 crn = (insn >> 16) & 0xf;
7006 opc1 = (insn >> 21) & 7;
7007 opc2 = (insn >> 5) & 7;
7008 rt2 = 0;
7010 isread = (insn >> 20) & 1;
7011 rt = (insn >> 12) & 0xf;
7013 ri = get_arm_cp_reginfo(s->cp_regs,
7014 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
7015 if (ri) {
7016 /* Check access permissions */
7017 if (!cp_access_ok(s->current_pl, ri, isread)) {
7018 return 1;
7021 if (ri->accessfn) {
7022 /* Emit code to perform further access permissions checks at
7023 * runtime; this may result in an exception.
7025 TCGv_ptr tmpptr;
7026 TCGv_i32 tcg_syn;
7027 uint32_t syndrome;
7029 /* Note that since we are an implementation which takes an
7030 * exception on a trapped conditional instruction only if the
7031 * instruction passes its condition code check, we can take
7032 * advantage of the clause in the ARM ARM that allows us to set
7033 * the COND field in the instruction to 0xE in all cases.
7034 * We could fish the actual condition out of the insn (ARM)
7035 * or the condexec bits (Thumb) but it isn't necessary.
7037 switch (cpnum) {
7038 case 14:
7039 if (is64) {
7040 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7041 isread, s->thumb);
7042 } else {
7043 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7044 rt, isread, s->thumb);
7046 break;
7047 case 15:
7048 if (is64) {
7049 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7050 isread, s->thumb);
7051 } else {
7052 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7053 rt, isread, s->thumb);
7055 break;
7056 default:
7057 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7058 * so this can only happen if this is an ARMv7 or earlier CPU,
7059 * in which case the syndrome information won't actually be
7060 * guest visible.
7062 assert(!arm_feature(env, ARM_FEATURE_V8));
7063 syndrome = syn_uncategorized();
7064 break;
7067 gen_set_pc_im(s, s->pc);
7068 tmpptr = tcg_const_ptr(ri);
7069 tcg_syn = tcg_const_i32(syndrome);
7070 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
7071 tcg_temp_free_ptr(tmpptr);
7072 tcg_temp_free_i32(tcg_syn);
7075 /* Handle special cases first */
7076 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7077 case ARM_CP_NOP:
7078 return 0;
7079 case ARM_CP_WFI:
7080 if (isread) {
7081 return 1;
7083 gen_set_pc_im(s, s->pc);
7084 s->is_jmp = DISAS_WFI;
7085 return 0;
7086 default:
7087 break;
7090 if (use_icount && (ri->type & ARM_CP_IO)) {
7091 gen_io_start();
7094 if (isread) {
7095 /* Read */
7096 if (is64) {
7097 TCGv_i64 tmp64;
7098 TCGv_i32 tmp;
7099 if (ri->type & ARM_CP_CONST) {
7100 tmp64 = tcg_const_i64(ri->resetvalue);
7101 } else if (ri->readfn) {
7102 TCGv_ptr tmpptr;
7103 tmp64 = tcg_temp_new_i64();
7104 tmpptr = tcg_const_ptr(ri);
7105 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7106 tcg_temp_free_ptr(tmpptr);
7107 } else {
7108 tmp64 = tcg_temp_new_i64();
7109 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7111 tmp = tcg_temp_new_i32();
7112 tcg_gen_trunc_i64_i32(tmp, tmp64);
7113 store_reg(s, rt, tmp);
7114 tcg_gen_shri_i64(tmp64, tmp64, 32);
7115 tmp = tcg_temp_new_i32();
7116 tcg_gen_trunc_i64_i32(tmp, tmp64);
7117 tcg_temp_free_i64(tmp64);
7118 store_reg(s, rt2, tmp);
7119 } else {
7120 TCGv_i32 tmp;
7121 if (ri->type & ARM_CP_CONST) {
7122 tmp = tcg_const_i32(ri->resetvalue);
7123 } else if (ri->readfn) {
7124 TCGv_ptr tmpptr;
7125 tmp = tcg_temp_new_i32();
7126 tmpptr = tcg_const_ptr(ri);
7127 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7128 tcg_temp_free_ptr(tmpptr);
7129 } else {
7130 tmp = load_cpu_offset(ri->fieldoffset);
7132 if (rt == 15) {
7133 /* Destination register of r15 for 32-bit loads sets
7134 * the condition codes from the high 4 bits of the value
7136 gen_set_nzcv(tmp);
7137 tcg_temp_free_i32(tmp);
7138 } else {
7139 store_reg(s, rt, tmp);
7142 } else {
7143 /* Write */
7144 if (ri->type & ARM_CP_CONST) {
7145 /* If not forbidden by access permissions, treat as WI */
7146 return 0;
7149 if (is64) {
7150 TCGv_i32 tmplo, tmphi;
7151 TCGv_i64 tmp64 = tcg_temp_new_i64();
7152 tmplo = load_reg(s, rt);
7153 tmphi = load_reg(s, rt2);
7154 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7155 tcg_temp_free_i32(tmplo);
7156 tcg_temp_free_i32(tmphi);
7157 if (ri->writefn) {
7158 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7159 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7160 tcg_temp_free_ptr(tmpptr);
7161 } else {
7162 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7164 tcg_temp_free_i64(tmp64);
7165 } else {
7166 if (ri->writefn) {
7167 TCGv_i32 tmp;
7168 TCGv_ptr tmpptr;
7169 tmp = load_reg(s, rt);
7170 tmpptr = tcg_const_ptr(ri);
7171 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7172 tcg_temp_free_ptr(tmpptr);
7173 tcg_temp_free_i32(tmp);
7174 } else {
7175 TCGv_i32 tmp = load_reg(s, rt);
7176 store_cpu_offset(tmp, ri->fieldoffset);
7181 if (use_icount && (ri->type & ARM_CP_IO)) {
7182 /* I/O operations must end the TB here (whether read or write) */
7183 gen_io_end();
7184 gen_lookup_tb(s);
7185 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7186 /* We default to ending the TB on a coprocessor register write,
7187 * but allow this to be suppressed by the register definition
7188 * (usually only necessary to work around guest bugs).
7190 gen_lookup_tb(s);
7193 return 0;
7196 /* Unknown register; this might be a guest error or a QEMU
7197 * unimplemented feature.
7199 if (is64) {
7200 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7201 "64 bit system register cp:%d opc1: %d crm:%d\n",
7202 isread ? "read" : "write", cpnum, opc1, crm);
7203 } else {
7204 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7205 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7206 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7209 return 1;
7213 /* Store a 64-bit value to a register pair. Clobbers val. */
7214 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7216 TCGv_i32 tmp;
7217 tmp = tcg_temp_new_i32();
7218 tcg_gen_trunc_i64_i32(tmp, val);
7219 store_reg(s, rlow, tmp);
7220 tmp = tcg_temp_new_i32();
7221 tcg_gen_shri_i64(val, val, 32);
7222 tcg_gen_trunc_i64_i32(tmp, val);
7223 store_reg(s, rhigh, tmp);
7226 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7227 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7229 TCGv_i64 tmp;
7230 TCGv_i32 tmp2;
7232 /* Load value and extend to 64 bits. */
7233 tmp = tcg_temp_new_i64();
7234 tmp2 = load_reg(s, rlow);
7235 tcg_gen_extu_i32_i64(tmp, tmp2);
7236 tcg_temp_free_i32(tmp2);
7237 tcg_gen_add_i64(val, val, tmp);
7238 tcg_temp_free_i64(tmp);
7241 /* load and add a 64-bit value from a register pair. */
7242 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7244 TCGv_i64 tmp;
7245 TCGv_i32 tmpl;
7246 TCGv_i32 tmph;
7248 /* Load 64-bit value rd:rn. */
7249 tmpl = load_reg(s, rlow);
7250 tmph = load_reg(s, rhigh);
7251 tmp = tcg_temp_new_i64();
7252 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7253 tcg_temp_free_i32(tmpl);
7254 tcg_temp_free_i32(tmph);
7255 tcg_gen_add_i64(val, val, tmp);
7256 tcg_temp_free_i64(tmp);
7259 /* Set N and Z flags from hi|lo. */
7260 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7262 tcg_gen_mov_i32(cpu_NF, hi);
7263 tcg_gen_or_i32(cpu_ZF, lo, hi);
7266 /* Load/Store exclusive instructions are implemented by remembering
7267 the value/address loaded, and seeing if these are the same
7268 when the store is performed. This should be sufficient to implement
7269 the architecturally mandated semantics, and avoids having to monitor
7270 regular stores.
7272 In system emulation mode only one CPU will be running at once, so
7273 this sequence is effectively atomic. In user emulation mode we
7274 throw an exception and handle the atomic operation elsewhere. */
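/* As an illustration only (register choice arbitrary), the canonical
 * guest-side atomic-increment loop that these helpers have to support is
 *
 *     retry:  ldrex   r1, [r0]        @ load [r0] and mark it exclusive
 *             add     r1, r1, #1
 *             strex   r2, r1, [r0]    @ r2 = 0 on success, 1 on failure
 *             cmp     r2, #0
 *             bne     retry
 *
 * gen_load_exclusive() below records the address and value seen by the
 * LDREX; gen_store_exclusive() re-checks both before letting the STREX
 * commit.
 */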
7275 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7276 TCGv_i32 addr, int size)
7278 TCGv_i32 tmp = tcg_temp_new_i32();
7280 switch (size) {
7281 case 0:
7282 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7283 break;
7284 case 1:
7285 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7286 break;
7287 case 2:
7288 case 3:
7289 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7290 break;
7291 default:
7292 abort();
7295 if (size == 3) {
7296 TCGv_i32 tmp2 = tcg_temp_new_i32();
7297 TCGv_i32 tmp3 = tcg_temp_new_i32();
7299 tcg_gen_addi_i32(tmp2, addr, 4);
7300 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7301 tcg_temp_free_i32(tmp2);
7302 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7303 store_reg(s, rt2, tmp3);
7304 } else {
7305 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7308 store_reg(s, rt, tmp);
7309 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7312 static void gen_clrex(DisasContext *s)
7314 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7317 #ifdef CONFIG_USER_ONLY
7318 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7319 TCGv_i32 addr, int size)
7321 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7322 tcg_gen_movi_i32(cpu_exclusive_info,
7323 size | (rd << 4) | (rt << 8) | (rt2 << 12));
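/* Pack the operand size and register numbers so that the usermode
   EXCP_STREX handler can reconstruct and complete the store-exclusive. */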
7324 gen_exception_internal_insn(s, 4, EXCP_STREX);
7326 #else
7327 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7328 TCGv_i32 addr, int size)
7330 TCGv_i32 tmp;
7331 TCGv_i64 val64, extaddr;
7332 int done_label;
7333 int fail_label;
7335 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7336 [addr] = {Rt};
7337 {Rd} = 0;
7338 } else {
7339 {Rd} = 1;
7340 } */
7341 fail_label = gen_new_label();
7342 done_label = gen_new_label();
7343 extaddr = tcg_temp_new_i64();
7344 tcg_gen_extu_i32_i64(extaddr, addr);
7345 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7346 tcg_temp_free_i64(extaddr);
7348 tmp = tcg_temp_new_i32();
7349 switch (size) {
7350 case 0:
7351 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7352 break;
7353 case 1:
7354 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7355 break;
7356 case 2:
7357 case 3:
7358 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7359 break;
7360 default:
7361 abort();
7364 val64 = tcg_temp_new_i64();
7365 if (size == 3) {
7366 TCGv_i32 tmp2 = tcg_temp_new_i32();
7367 TCGv_i32 tmp3 = tcg_temp_new_i32();
7368 tcg_gen_addi_i32(tmp2, addr, 4);
7369 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7370 tcg_temp_free_i32(tmp2);
7371 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7372 tcg_temp_free_i32(tmp3);
7373 } else {
7374 tcg_gen_extu_i32_i64(val64, tmp);
7376 tcg_temp_free_i32(tmp);
7378 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7379 tcg_temp_free_i64(val64);
7381 tmp = load_reg(s, rt);
7382 switch (size) {
7383 case 0:
7384 gen_aa32_st8(tmp, addr, get_mem_index(s));
7385 break;
7386 case 1:
7387 gen_aa32_st16(tmp, addr, get_mem_index(s));
7388 break;
7389 case 2:
7390 case 3:
7391 gen_aa32_st32(tmp, addr, get_mem_index(s));
7392 break;
7393 default:
7394 abort();
7396 tcg_temp_free_i32(tmp);
7397 if (size == 3) {
7398 tcg_gen_addi_i32(addr, addr, 4);
7399 tmp = load_reg(s, rt2);
7400 gen_aa32_st32(tmp, addr, get_mem_index(s));
7401 tcg_temp_free_i32(tmp);
7403 tcg_gen_movi_i32(cpu_R[rd], 0);
7404 tcg_gen_br(done_label);
7405 gen_set_label(fail_label);
7406 tcg_gen_movi_i32(cpu_R[rd], 1);
7407 gen_set_label(done_label);
7408 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7410 #endif
7412 /* gen_srs:
7413 * @env: CPUARMState
7414 * @s: DisasContext
7415 * @mode: mode field from insn (which stack to store to)
7416 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7417 * @writeback: true if writeback bit set
7419 * Generate code for the SRS (Store Return State) insn.
7421 static void gen_srs(DisasContext *s,
7422 uint32_t mode, uint32_t amode, bool writeback)
7424 int32_t offset;
7425 TCGv_i32 addr = tcg_temp_new_i32();
7426 TCGv_i32 tmp = tcg_const_i32(mode);
7427 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7428 tcg_temp_free_i32(tmp);
7429 switch (amode) {
7430 case 0: /* DA */
7431 offset = -4;
7432 break;
7433 case 1: /* IA */
7434 offset = 0;
7435 break;
7436 case 2: /* DB */
7437 offset = -8;
7438 break;
7439 case 3: /* IB */
7440 offset = 4;
7441 break;
7442 default:
7443 abort();
7445 tcg_gen_addi_i32(addr, addr, offset);
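/* addr now points at the lower word of the two-word block: LR is stored
   first, then the SPSR at addr + 4. */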
7446 tmp = load_reg(s, 14);
7447 gen_aa32_st32(tmp, addr, get_mem_index(s));
7448 tcg_temp_free_i32(tmp);
7449 tmp = load_cpu_field(spsr);
7450 tcg_gen_addi_i32(addr, addr, 4);
7451 gen_aa32_st32(tmp, addr, get_mem_index(s));
7452 tcg_temp_free_i32(tmp);
7453 if (writeback) {
7454 switch (amode) {
7455 case 0:
7456 offset = -8;
7457 break;
7458 case 1:
7459 offset = 4;
7460 break;
7461 case 2:
7462 offset = -4;
7463 break;
7464 case 3:
7465 offset = 0;
7466 break;
7467 default:
7468 abort();
7470 tcg_gen_addi_i32(addr, addr, offset);
7471 tmp = tcg_const_i32(mode);
7472 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7473 tcg_temp_free_i32(tmp);
7475 tcg_temp_free_i32(addr);
7478 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
7480 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
7481 TCGv_i32 tmp;
7482 TCGv_i32 tmp2;
7483 TCGv_i32 tmp3;
7484 TCGv_i32 addr;
7485 TCGv_i64 tmp64;
7487 insn = arm_ldl_code(env, s->pc, s->bswap_code);
7488 s->pc += 4;
7490 /* M variants do not implement ARM mode. */
7491 if (IS_M(env))
7492 goto illegal_op;
7493 cond = insn >> 28;
7494 if (cond == 0xf){
7495 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7496 * choose to UNDEF. In ARMv5 and above the space is used
7497 * for miscellaneous unconditional instructions.
7499 ARCH(5);
7501 /* Unconditional instructions. */
7502 if (((insn >> 25) & 7) == 1) {
7503 /* NEON Data processing. */
7504 if (!arm_feature(env, ARM_FEATURE_NEON))
7505 goto illegal_op;
7507 if (disas_neon_data_insn(env, s, insn))
7508 goto illegal_op;
7509 return;
7511 if ((insn & 0x0f100000) == 0x04000000) {
7512 /* NEON load/store. */
7513 if (!arm_feature(env, ARM_FEATURE_NEON))
7514 goto illegal_op;
7516 if (disas_neon_ls_insn(env, s, insn))
7517 goto illegal_op;
7518 return;
7520 if ((insn & 0x0f000e10) == 0x0e000a00) {
7521 /* VFP. */
7522 if (disas_vfp_insn(env, s, insn)) {
7523 goto illegal_op;
7525 return;
7527 if (((insn & 0x0f30f000) == 0x0510f000) ||
7528 ((insn & 0x0f30f010) == 0x0710f000)) {
7529 if ((insn & (1 << 22)) == 0) {
7530 /* PLDW; v7MP */
7531 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7532 goto illegal_op;
7535 /* Otherwise PLD; v5TE+ */
7536 ARCH(5TE);
7537 return;
7539 if (((insn & 0x0f70f000) == 0x0450f000) ||
7540 ((insn & 0x0f70f010) == 0x0650f000)) {
7541 ARCH(7);
7542 return; /* PLI; V7 */
7544 if (((insn & 0x0f700000) == 0x04100000) ||
7545 ((insn & 0x0f700010) == 0x06100000)) {
7546 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7547 goto illegal_op;
7549 return; /* v7MP: Unallocated memory hint: must NOP */
7552 if ((insn & 0x0ffffdff) == 0x01010000) {
7553 ARCH(6);
7554 /* setend */
7555 if (((insn >> 9) & 1) != s->bswap_code) {
7556 /* Dynamic endianness switching not implemented. */
7557 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7558 goto illegal_op;
7560 return;
7561 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7562 switch ((insn >> 4) & 0xf) {
7563 case 1: /* clrex */
7564 ARCH(6K);
7565 gen_clrex(s);
7566 return;
7567 case 4: /* dsb */
7568 case 5: /* dmb */
7569 case 6: /* isb */
7570 ARCH(7);
7571 /* We don't emulate caches so these are a no-op. */
7572 return;
7573 default:
7574 goto illegal_op;
7576 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7577 /* srs */
7578 if (IS_USER(s)) {
7579 goto illegal_op;
7581 ARCH(6);
7582 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7583 return;
7584 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7585 /* rfe */
7586 int32_t offset;
7587 if (IS_USER(s))
7588 goto illegal_op;
7589 ARCH(6);
7590 rn = (insn >> 16) & 0xf;
7591 addr = load_reg(s, rn);
7592 i = (insn >> 23) & 3;
7593 switch (i) {
7594 case 0: offset = -4; break; /* DA */
7595 case 1: offset = 0; break; /* IA */
7596 case 2: offset = -8; break; /* DB */
7597 case 3: offset = 4; break; /* IB */
7598 default: abort();
7600 if (offset)
7601 tcg_gen_addi_i32(addr, addr, offset);
7602 /* Load PC into tmp and CPSR into tmp2. */
7603 tmp = tcg_temp_new_i32();
7604 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7605 tcg_gen_addi_i32(addr, addr, 4);
7606 tmp2 = tcg_temp_new_i32();
7607 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7608 if (insn & (1 << 21)) {
7609 /* Base writeback. */
7610 switch (i) {
7611 case 0: offset = -8; break;
7612 case 1: offset = 4; break;
7613 case 2: offset = -4; break;
7614 case 3: offset = 0; break;
7615 default: abort();
7617 if (offset)
7618 tcg_gen_addi_i32(addr, addr, offset);
7619 store_reg(s, rn, addr);
7620 } else {
7621 tcg_temp_free_i32(addr);
7623 gen_rfe(s, tmp, tmp2);
7624 return;
7625 } else if ((insn & 0x0e000000) == 0x0a000000) {
7626 /* branch link and change to thumb (blx <offset>) */
7627 int32_t offset;
7629 val = (uint32_t)s->pc;
7630 tmp = tcg_temp_new_i32();
7631 tcg_gen_movi_i32(tmp, val);
7632 store_reg(s, 14, tmp);
7633 /* Sign-extend the 24-bit offset */
7634 offset = (((int32_t)insn) << 8) >> 8;
7635 /* offset * 4 + bit24 * 2 + (thumb bit) */
7636 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7637 /* pipeline offset */
7638 val += 4;
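/* val is now this insn's address + 8 + (offset * 4) + (H * 2), with bit 0
   set so gen_bx_im() enters Thumb state; e.g. a zero offset with H clear
   targets the address 8 bytes past the BLX. */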
7639 /* protected by ARCH(5); above, near the start of uncond block */
7640 gen_bx_im(s, val);
7641 return;
7642 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7643 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7644 /* iWMMXt register transfer. */
7645 if (env->cp15.c15_cpar & (1 << 1))
7646 if (!disas_iwmmxt_insn(env, s, insn))
7647 return;
7649 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7650 /* Coprocessor double register transfer. */
7651 ARCH(5TE);
7652 } else if ((insn & 0x0f000010) == 0x0e000010) {
7653 /* Additional coprocessor register transfer. */
7654 } else if ((insn & 0x0ff10020) == 0x01000000) {
7655 uint32_t mask;
7656 uint32_t val;
7657 /* cps (privileged) */
7658 if (IS_USER(s))
7659 return;
7660 mask = val = 0;
7661 if (insn & (1 << 19)) {
7662 if (insn & (1 << 8))
7663 mask |= CPSR_A;
7664 if (insn & (1 << 7))
7665 mask |= CPSR_I;
7666 if (insn & (1 << 6))
7667 mask |= CPSR_F;
7668 if (insn & (1 << 18))
7669 val |= mask;
7671 if (insn & (1 << 17)) {
7672 mask |= CPSR_M;
7673 val |= (insn & 0x1f);
7675 if (mask) {
7676 gen_set_psr_im(s, mask, 0, val);
7678 return;
7680 goto illegal_op;
7682 if (cond != 0xe) {
7683 /* if not always execute, we generate a conditional jump to
7684 next instruction */
7685 s->condlabel = gen_new_label();
7686 arm_gen_test_cc(cond ^ 1, s->condlabel);
7687 s->condjmp = 1;
7689 if ((insn & 0x0f900000) == 0x03000000) {
7690 if ((insn & (1 << 21)) == 0) {
7691 ARCH(6T2);
7692 rd = (insn >> 12) & 0xf;
7693 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
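/* Reassemble the 16-bit immediate from the imm4 (bits 19:16) and
   imm12 (bits 11:0) fields. */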
7694 if ((insn & (1 << 22)) == 0) {
7695 /* MOVW */
7696 tmp = tcg_temp_new_i32();
7697 tcg_gen_movi_i32(tmp, val);
7698 } else {
7699 /* MOVT */
7700 tmp = load_reg(s, rd);
7701 tcg_gen_ext16u_i32(tmp, tmp);
7702 tcg_gen_ori_i32(tmp, tmp, val << 16);
7704 store_reg(s, rd, tmp);
7705 } else {
7706 if (((insn >> 12) & 0xf) != 0xf)
7707 goto illegal_op;
7708 if (((insn >> 16) & 0xf) == 0) {
7709 gen_nop_hint(s, insn & 0xff);
7710 } else {
7711 /* CPSR = immediate */
7712 val = insn & 0xff;
7713 shift = ((insn >> 8) & 0xf) * 2;
7714 if (shift)
7715 val = (val >> shift) | (val << (32 - shift));
7716 i = ((insn & (1 << 22)) != 0);
7717 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
7718 goto illegal_op;
7721 } else if ((insn & 0x0f900000) == 0x01000000
7722 && (insn & 0x00000090) != 0x00000090) {
7723 /* miscellaneous instructions */
7724 op1 = (insn >> 21) & 3;
7725 sh = (insn >> 4) & 0xf;
7726 rm = insn & 0xf;
7727 switch (sh) {
7728 case 0x0: /* move program status register */
7729 if (op1 & 1) {
7730 /* PSR = reg */
7731 tmp = load_reg(s, rm);
7732 i = ((op1 & 2) != 0);
7733 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
7734 goto illegal_op;
7735 } else {
7736 /* reg = PSR */
7737 rd = (insn >> 12) & 0xf;
7738 if (op1 & 2) {
7739 if (IS_USER(s))
7740 goto illegal_op;
7741 tmp = load_cpu_field(spsr);
7742 } else {
7743 tmp = tcg_temp_new_i32();
7744 gen_helper_cpsr_read(tmp, cpu_env);
7746 store_reg(s, rd, tmp);
7748 break;
7749 case 0x1:
7750 if (op1 == 1) {
7751 /* branch/exchange thumb (bx). */
7752 ARCH(4T);
7753 tmp = load_reg(s, rm);
7754 gen_bx(s, tmp);
7755 } else if (op1 == 3) {
7756 /* clz */
7757 ARCH(5);
7758 rd = (insn >> 12) & 0xf;
7759 tmp = load_reg(s, rm);
7760 gen_helper_clz(tmp, tmp);
7761 store_reg(s, rd, tmp);
7762 } else {
7763 goto illegal_op;
7765 break;
7766 case 0x2:
7767 if (op1 == 1) {
7768 ARCH(5J); /* bxj */
7769 /* Trivial implementation equivalent to bx. */
7770 tmp = load_reg(s, rm);
7771 gen_bx(s, tmp);
7772 } else {
7773 goto illegal_op;
7775 break;
7776 case 0x3:
7777 if (op1 != 1)
7778 goto illegal_op;
7780 ARCH(5);
7781 /* branch link/exchange thumb (blx) */
7782 tmp = load_reg(s, rm);
7783 tmp2 = tcg_temp_new_i32();
7784 tcg_gen_movi_i32(tmp2, s->pc);
7785 store_reg(s, 14, tmp2);
7786 gen_bx(s, tmp);
7787 break;
7788 case 0x4:
7790 /* crc32/crc32c */
7791 uint32_t c = extract32(insn, 8, 4);
7793 /* Check that this CPU supports ARMv8 CRC instructions.
7794 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7795 * Bits 8, 10 and 11 should be zero.
7797 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7798 (c & 0xd) != 0) {
7799 goto illegal_op;
7802 rn = extract32(insn, 16, 4);
7803 rd = extract32(insn, 12, 4);
7805 tmp = load_reg(s, rn);
7806 tmp2 = load_reg(s, rm);
7807 if (op1 == 0) {
7808 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7809 } else if (op1 == 1) {
7810 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7812 tmp3 = tcg_const_i32(1 << op1);
7813 if (c & 0x2) {
7814 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7815 } else {
7816 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7818 tcg_temp_free_i32(tmp2);
7819 tcg_temp_free_i32(tmp3);
7820 store_reg(s, rd, tmp);
7821 break;
7823 case 0x5: /* saturating add/subtract */
7824 ARCH(5TE);
7825 rd = (insn >> 12) & 0xf;
7826 rn = (insn >> 16) & 0xf;
7827 tmp = load_reg(s, rm);
7828 tmp2 = load_reg(s, rn);
7829 if (op1 & 2)
7830 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7831 if (op1 & 1)
7832 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
7833 else
7834 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7835 tcg_temp_free_i32(tmp2);
7836 store_reg(s, rd, tmp);
7837 break;
7838 case 7:
7840 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
7841 /* SMC instruction (op1 == 3)
7842 and undefined instructions (op1 == 0 || op1 == 2)
7843 will trap */
7844 if (op1 != 1) {
7845 goto illegal_op;
7847 /* bkpt */
7848 ARCH(5);
7849 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
7850 break;
7852 case 0x8: /* signed multiply */
7853 case 0xa:
7854 case 0xc:
7855 case 0xe:
7856 ARCH(5TE);
7857 rs = (insn >> 8) & 0xf;
7858 rn = (insn >> 12) & 0xf;
7859 rd = (insn >> 16) & 0xf;
7860 if (op1 == 1) {
7861 /* (32 * 16) >> 16 */
7862 tmp = load_reg(s, rm);
7863 tmp2 = load_reg(s, rs);
7864 if (sh & 4)
7865 tcg_gen_sari_i32(tmp2, tmp2, 16);
7866 else
7867 gen_sxth(tmp2);
7868 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7869 tcg_gen_shri_i64(tmp64, tmp64, 16);
7870 tmp = tcg_temp_new_i32();
7871 tcg_gen_trunc_i64_i32(tmp, tmp64);
7872 tcg_temp_free_i64(tmp64);
7873 if ((sh & 2) == 0) {
7874 tmp2 = load_reg(s, rn);
7875 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7876 tcg_temp_free_i32(tmp2);
7878 store_reg(s, rd, tmp);
7879 } else {
7880 /* 16 * 16 */
7881 tmp = load_reg(s, rm);
7882 tmp2 = load_reg(s, rs);
7883 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7884 tcg_temp_free_i32(tmp2);
7885 if (op1 == 2) {
7886 tmp64 = tcg_temp_new_i64();
7887 tcg_gen_ext_i32_i64(tmp64, tmp);
7888 tcg_temp_free_i32(tmp);
7889 gen_addq(s, tmp64, rn, rd);
7890 gen_storeq_reg(s, rn, rd, tmp64);
7891 tcg_temp_free_i64(tmp64);
7892 } else {
7893 if (op1 == 0) {
7894 tmp2 = load_reg(s, rn);
7895 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7896 tcg_temp_free_i32(tmp2);
7898 store_reg(s, rd, tmp);
7901 break;
7902 default:
7903 goto illegal_op;
7905 } else if (((insn & 0x0e000000) == 0 &&
7906 (insn & 0x00000090) != 0x90) ||
7907 ((insn & 0x0e000000) == (1 << 25))) {
7908 int set_cc, logic_cc, shiftop;
7910 op1 = (insn >> 21) & 0xf;
7911 set_cc = (insn >> 20) & 1;
7912 logic_cc = table_logic_cc[op1] & set_cc;
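/* logic_cc is set for the flag-setting logical ops (AND, EOR, TST, TEQ,
   ORR, MOV, BIC, MVN), where C comes from the shifter rather than from
   an adder. */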
7914 /* data processing instruction */
7915 if (insn & (1 << 25)) {
7916 /* immediate operand */
7917 val = insn & 0xff;
7918 shift = ((insn >> 8) & 0xf) * 2;
7919 if (shift) {
7920 val = (val >> shift) | (val << (32 - shift));
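/* e.g. imm12 == 0x4ff encodes ror(0xff, 8) == 0xff000000 */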
7922 tmp2 = tcg_temp_new_i32();
7923 tcg_gen_movi_i32(tmp2, val);
7924 if (logic_cc && shift) {
7925 gen_set_CF_bit31(tmp2);
7927 } else {
7928 /* register */
7929 rm = (insn) & 0xf;
7930 tmp2 = load_reg(s, rm);
7931 shiftop = (insn >> 5) & 3;
7932 if (!(insn & (1 << 4))) {
7933 shift = (insn >> 7) & 0x1f;
7934 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7935 } else {
7936 rs = (insn >> 8) & 0xf;
7937 tmp = load_reg(s, rs);
7938 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7941 if (op1 != 0x0f && op1 != 0x0d) {
7942 rn = (insn >> 16) & 0xf;
7943 tmp = load_reg(s, rn);
7944 } else {
7945 TCGV_UNUSED_I32(tmp);
7947 rd = (insn >> 12) & 0xf;
7948 switch(op1) {
7949 case 0x00:
7950 tcg_gen_and_i32(tmp, tmp, tmp2);
7951 if (logic_cc) {
7952 gen_logic_CC(tmp);
7954 store_reg_bx(env, s, rd, tmp);
7955 break;
7956 case 0x01:
7957 tcg_gen_xor_i32(tmp, tmp, tmp2);
7958 if (logic_cc) {
7959 gen_logic_CC(tmp);
7961 store_reg_bx(env, s, rd, tmp);
7962 break;
7963 case 0x02:
7964 if (set_cc && rd == 15) {
7965 /* SUBS r15, ... is used for exception return. */
7966 if (IS_USER(s)) {
7967 goto illegal_op;
7969 gen_sub_CC(tmp, tmp, tmp2);
7970 gen_exception_return(s, tmp);
7971 } else {
7972 if (set_cc) {
7973 gen_sub_CC(tmp, tmp, tmp2);
7974 } else {
7975 tcg_gen_sub_i32(tmp, tmp, tmp2);
7977 store_reg_bx(env, s, rd, tmp);
7979 break;
7980 case 0x03:
7981 if (set_cc) {
7982 gen_sub_CC(tmp, tmp2, tmp);
7983 } else {
7984 tcg_gen_sub_i32(tmp, tmp2, tmp);
7986 store_reg_bx(env, s, rd, tmp);
7987 break;
7988 case 0x04:
7989 if (set_cc) {
7990 gen_add_CC(tmp, tmp, tmp2);
7991 } else {
7992 tcg_gen_add_i32(tmp, tmp, tmp2);
7994 store_reg_bx(env, s, rd, tmp);
7995 break;
7996 case 0x05:
7997 if (set_cc) {
7998 gen_adc_CC(tmp, tmp, tmp2);
7999 } else {
8000 gen_add_carry(tmp, tmp, tmp2);
8002 store_reg_bx(env, s, rd, tmp);
8003 break;
8004 case 0x06:
8005 if (set_cc) {
8006 gen_sbc_CC(tmp, tmp, tmp2);
8007 } else {
8008 gen_sub_carry(tmp, tmp, tmp2);
8010 store_reg_bx(env, s, rd, tmp);
8011 break;
8012 case 0x07:
8013 if (set_cc) {
8014 gen_sbc_CC(tmp, tmp2, tmp);
8015 } else {
8016 gen_sub_carry(tmp, tmp2, tmp);
8018 store_reg_bx(env, s, rd, tmp);
8019 break;
8020 case 0x08:
8021 if (set_cc) {
8022 tcg_gen_and_i32(tmp, tmp, tmp2);
8023 gen_logic_CC(tmp);
8025 tcg_temp_free_i32(tmp);
8026 break;
8027 case 0x09:
8028 if (set_cc) {
8029 tcg_gen_xor_i32(tmp, tmp, tmp2);
8030 gen_logic_CC(tmp);
8032 tcg_temp_free_i32(tmp);
8033 break;
8034 case 0x0a:
8035 if (set_cc) {
8036 gen_sub_CC(tmp, tmp, tmp2);
8038 tcg_temp_free_i32(tmp);
8039 break;
8040 case 0x0b:
8041 if (set_cc) {
8042 gen_add_CC(tmp, tmp, tmp2);
8044 tcg_temp_free_i32(tmp);
8045 break;
8046 case 0x0c:
8047 tcg_gen_or_i32(tmp, tmp, tmp2);
8048 if (logic_cc) {
8049 gen_logic_CC(tmp);
8051 store_reg_bx(env, s, rd, tmp);
8052 break;
8053 case 0x0d:
8054 if (logic_cc && rd == 15) {
8055 /* MOVS r15, ... is used for exception return. */
8056 if (IS_USER(s)) {
8057 goto illegal_op;
8059 gen_exception_return(s, tmp2);
8060 } else {
8061 if (logic_cc) {
8062 gen_logic_CC(tmp2);
8064 store_reg_bx(env, s, rd, tmp2);
8066 break;
8067 case 0x0e:
8068 tcg_gen_andc_i32(tmp, tmp, tmp2);
8069 if (logic_cc) {
8070 gen_logic_CC(tmp);
8072 store_reg_bx(env, s, rd, tmp);
8073 break;
8074 default:
8075 case 0x0f:
8076 tcg_gen_not_i32(tmp2, tmp2);
8077 if (logic_cc) {
8078 gen_logic_CC(tmp2);
8080 store_reg_bx(env, s, rd, tmp2);
8081 break;
8083 if (op1 != 0x0f && op1 != 0x0d) {
8084 tcg_temp_free_i32(tmp2);
8086 } else {
8087 /* other instructions */
8088 op1 = (insn >> 24) & 0xf;
8089 switch(op1) {
8090 case 0x0:
8091 case 0x1:
8092 /* multiplies, extra load/stores */
8093 sh = (insn >> 5) & 3;
8094 if (sh == 0) {
8095 if (op1 == 0x0) {
8096 rd = (insn >> 16) & 0xf;
8097 rn = (insn >> 12) & 0xf;
8098 rs = (insn >> 8) & 0xf;
8099 rm = (insn) & 0xf;
8100 op1 = (insn >> 20) & 0xf;
8101 switch (op1) {
8102 case 0: case 1: case 2: case 3: case 6:
8103 /* 32 bit mul */
8104 tmp = load_reg(s, rs);
8105 tmp2 = load_reg(s, rm);
8106 tcg_gen_mul_i32(tmp, tmp, tmp2);
8107 tcg_temp_free_i32(tmp2);
8108 if (insn & (1 << 22)) {
8109 /* Subtract (mls) */
8110 ARCH(6T2);
8111 tmp2 = load_reg(s, rn);
8112 tcg_gen_sub_i32(tmp, tmp2, tmp);
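/* MLS: the result is the accumulator (rn here) minus the product. */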
8113 tcg_temp_free_i32(tmp2);
8114 } else if (insn & (1 << 21)) {
8115 /* Add */
8116 tmp2 = load_reg(s, rn);
8117 tcg_gen_add_i32(tmp, tmp, tmp2);
8118 tcg_temp_free_i32(tmp2);
8120 if (insn & (1 << 20))
8121 gen_logic_CC(tmp);
8122 store_reg(s, rd, tmp);
8123 break;
8124 case 4:
8125 /* 64 bit mul double accumulate (UMAAL) */
8126 ARCH(6);
8127 tmp = load_reg(s, rs);
8128 tmp2 = load_reg(s, rm);
8129 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8130 gen_addq_lo(s, tmp64, rn);
8131 gen_addq_lo(s, tmp64, rd);
8132 gen_storeq_reg(s, rn, rd, tmp64);
8133 tcg_temp_free_i64(tmp64);
8134 break;
8135 case 8: case 9: case 10: case 11:
8136 case 12: case 13: case 14: case 15:
8137 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8138 tmp = load_reg(s, rs);
8139 tmp2 = load_reg(s, rm);
8140 if (insn & (1 << 22)) {
8141 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8142 } else {
8143 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8145 if (insn & (1 << 21)) { /* mult accumulate */
8146 TCGv_i32 al = load_reg(s, rn);
8147 TCGv_i32 ah = load_reg(s, rd);
8148 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8149 tcg_temp_free_i32(al);
8150 tcg_temp_free_i32(ah);
8152 if (insn & (1 << 20)) {
8153 gen_logicq_cc(tmp, tmp2);
8155 store_reg(s, rn, tmp);
8156 store_reg(s, rd, tmp2);
8157 break;
8158 default:
8159 goto illegal_op;
8161 } else {
8162 rn = (insn >> 16) & 0xf;
8163 rd = (insn >> 12) & 0xf;
8164 if (insn & (1 << 23)) {
8165 /* load/store exclusive */
8166 int op2 = (insn >> 8) & 3;
8167 op1 = (insn >> 21) & 0x3;
8169 switch (op2) {
8170 case 0: /* lda/stl */
8171 if (op1 == 1) {
8172 goto illegal_op;
8174 ARCH(8);
8175 break;
8176 case 1: /* reserved */
8177 goto illegal_op;
8178 case 2: /* ldaex/stlex */
8179 ARCH(8);
8180 break;
8181 case 3: /* ldrex/strex */
8182 if (op1) {
8183 ARCH(6K);
8184 } else {
8185 ARCH(6);
8187 break;
8190 addr = tcg_temp_local_new_i32();
8191 load_reg_var(s, addr, rn);
8193 /* Since the emulation does not have barriers,
8194 the acquire/release semantics need no special
8195 handling */
8196 if (op2 == 0) {
8197 if (insn & (1 << 20)) {
8198 tmp = tcg_temp_new_i32();
8199 switch (op1) {
8200 case 0: /* lda */
8201 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8202 break;
8203 case 2: /* ldab */
8204 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8205 break;
8206 case 3: /* ldah */
8207 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8208 break;
8209 default:
8210 abort();
8212 store_reg(s, rd, tmp);
8213 } else {
8214 rm = insn & 0xf;
8215 tmp = load_reg(s, rm);
8216 switch (op1) {
8217 case 0: /* stl */
8218 gen_aa32_st32(tmp, addr, get_mem_index(s));
8219 break;
8220 case 2: /* stlb */
8221 gen_aa32_st8(tmp, addr, get_mem_index(s));
8222 break;
8223 case 3: /* stlh */
8224 gen_aa32_st16(tmp, addr, get_mem_index(s));
8225 break;
8226 default:
8227 abort();
8229 tcg_temp_free_i32(tmp);
8231 } else if (insn & (1 << 20)) {
8232 switch (op1) {
8233 case 0: /* ldrex */
8234 gen_load_exclusive(s, rd, 15, addr, 2);
8235 break;
8236 case 1: /* ldrexd */
8237 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8238 break;
8239 case 2: /* ldrexb */
8240 gen_load_exclusive(s, rd, 15, addr, 0);
8241 break;
8242 case 3: /* ldrexh */
8243 gen_load_exclusive(s, rd, 15, addr, 1);
8244 break;
8245 default:
8246 abort();
8248 } else {
8249 rm = insn & 0xf;
8250 switch (op1) {
8251 case 0: /* strex */
8252 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8253 break;
8254 case 1: /* strexd */
8255 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8256 break;
8257 case 2: /* strexb */
8258 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8259 break;
8260 case 3: /* strexh */
8261 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8262 break;
8263 default:
8264 abort();
8267 tcg_temp_free_i32(addr);
8268 } else {
8269 /* SWP instruction */
8270 rm = (insn) & 0xf;
8272 /* ??? This is not really atomic. However, we know
8273 we never have multiple CPUs running in parallel,
8274 so it is good enough. */
8275 addr = load_reg(s, rn);
8276 tmp = load_reg(s, rm);
8277 tmp2 = tcg_temp_new_i32();
8278 if (insn & (1 << 22)) {
8279 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8280 gen_aa32_st8(tmp, addr, get_mem_index(s));
8281 } else {
8282 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8283 gen_aa32_st32(tmp, addr, get_mem_index(s));
8285 tcg_temp_free_i32(tmp);
8286 tcg_temp_free_i32(addr);
8287 store_reg(s, rd, tmp2);
8290 } else {
8291 int address_offset;
8292 int load;
8293 /* Misc load/store */
8294 rn = (insn >> 16) & 0xf;
8295 rd = (insn >> 12) & 0xf;
8296 addr = load_reg(s, rn);
8297 if (insn & (1 << 24))
8298 gen_add_datah_offset(s, insn, 0, addr);
8299 address_offset = 0;
8300 if (insn & (1 << 20)) {
8301 /* load */
8302 tmp = tcg_temp_new_i32();
8303 switch(sh) {
8304 case 1:
8305 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8306 break;
8307 case 2:
8308 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8309 break;
8310 default:
8311 case 3:
8312 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8313 break;
8315 load = 1;
8316 } else if (sh & 2) {
8317 ARCH(5TE);
8318 /* doubleword */
8319 if (sh & 1) {
8320 /* store */
8321 tmp = load_reg(s, rd);
8322 gen_aa32_st32(tmp, addr, get_mem_index(s));
8323 tcg_temp_free_i32(tmp);
8324 tcg_gen_addi_i32(addr, addr, 4);
8325 tmp = load_reg(s, rd + 1);
8326 gen_aa32_st32(tmp, addr, get_mem_index(s));
8327 tcg_temp_free_i32(tmp);
8328 load = 0;
8329 } else {
8330 /* load */
8331 tmp = tcg_temp_new_i32();
8332 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8333 store_reg(s, rd, tmp);
8334 tcg_gen_addi_i32(addr, addr, 4);
8335 tmp = tcg_temp_new_i32();
8336 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8337 rd++;
8338 load = 1;
8340 address_offset = -4;
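/* -4 compensates for the addr += 4 between the two words, so the base
   writeback below uses the original base plus the normal offset. */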
8341 } else {
8342 /* store */
8343 tmp = load_reg(s, rd);
8344 gen_aa32_st16(tmp, addr, get_mem_index(s));
8345 tcg_temp_free_i32(tmp);
8346 load = 0;
8348 /* Perform base writeback before the loaded value to
8349 ensure correct behavior with overlapping index registers.
8350 ldrd with base writeback is undefined if the
8351 destination and index registers overlap. */
8352 if (!(insn & (1 << 24))) {
8353 gen_add_datah_offset(s, insn, address_offset, addr);
8354 store_reg(s, rn, addr);
8355 } else if (insn & (1 << 21)) {
8356 if (address_offset)
8357 tcg_gen_addi_i32(addr, addr, address_offset);
8358 store_reg(s, rn, addr);
8359 } else {
8360 tcg_temp_free_i32(addr);
8362 if (load) {
8363 /* Complete the load. */
8364 store_reg(s, rd, tmp);
8367 break;
8368 case 0x4:
8369 case 0x5:
8370 goto do_ldst;
8371 case 0x6:
8372 case 0x7:
8373 if (insn & (1 << 4)) {
8374 ARCH(6);
8375 /* ARMv6 Media instructions. */
8376 rm = insn & 0xf;
8377 rn = (insn >> 16) & 0xf;
8378 rd = (insn >> 12) & 0xf;
8379 rs = (insn >> 8) & 0xf;
8380 switch ((insn >> 23) & 3) {
8381 case 0: /* Parallel add/subtract. */
8382 op1 = (insn >> 20) & 7;
8383 tmp = load_reg(s, rn);
8384 tmp2 = load_reg(s, rm);
8385 sh = (insn >> 5) & 7;
8386 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8387 goto illegal_op;
8388 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8389 tcg_temp_free_i32(tmp2);
8390 store_reg(s, rd, tmp);
8391 break;
8392 case 1:
8393 if ((insn & 0x00700020) == 0) {
8394 /* Halfword pack. */
8395 tmp = load_reg(s, rn);
8396 tmp2 = load_reg(s, rm);
8397 shift = (insn >> 7) & 0x1f;
8398 if (insn & (1 << 6)) {
8399 /* pkhtb */
8400 if (shift == 0)
8401 shift = 31;
8402 tcg_gen_sari_i32(tmp2, tmp2, shift);
8403 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8404 tcg_gen_ext16u_i32(tmp2, tmp2);
8405 } else {
8406 /* pkhbt */
8407 if (shift)
8408 tcg_gen_shli_i32(tmp2, tmp2, shift);
8409 tcg_gen_ext16u_i32(tmp, tmp);
8410 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8412 tcg_gen_or_i32(tmp, tmp, tmp2);
8413 tcg_temp_free_i32(tmp2);
8414 store_reg(s, rd, tmp);
8415 } else if ((insn & 0x00200020) == 0x00200000) {
8416 /* [us]sat */
8417 tmp = load_reg(s, rm);
8418 shift = (insn >> 7) & 0x1f;
8419 if (insn & (1 << 6)) {
8420 if (shift == 0)
8421 shift = 31;
8422 tcg_gen_sari_i32(tmp, tmp, shift);
8423 } else {
8424 tcg_gen_shli_i32(tmp, tmp, shift);
8426 sh = (insn >> 16) & 0x1f;
8427 tmp2 = tcg_const_i32(sh);
8428 if (insn & (1 << 22))
8429 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8430 else
8431 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8432 tcg_temp_free_i32(tmp2);
8433 store_reg(s, rd, tmp);
8434 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8435 /* [us]sat16 */
8436 tmp = load_reg(s, rm);
8437 sh = (insn >> 16) & 0x1f;
8438 tmp2 = tcg_const_i32(sh);
8439 if (insn & (1 << 22))
8440 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8441 else
8442 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8443 tcg_temp_free_i32(tmp2);
8444 store_reg(s, rd, tmp);
8445 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8446 /* Select bytes. */
8447 tmp = load_reg(s, rn);
8448 tmp2 = load_reg(s, rm);
8449 tmp3 = tcg_temp_new_i32();
8450 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8451 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8452 tcg_temp_free_i32(tmp3);
8453 tcg_temp_free_i32(tmp2);
8454 store_reg(s, rd, tmp);
8455 } else if ((insn & 0x000003e0) == 0x00000060) {
8456 tmp = load_reg(s, rm);
8457 shift = (insn >> 10) & 3;
8458 /* ??? In many cases it's not necessary to do a
8459 rotate; a shift is sufficient. */
8460 if (shift != 0)
8461 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8462 op1 = (insn >> 20) & 7;
8463 switch (op1) {
8464 case 0: gen_sxtb16(tmp); break;
8465 case 2: gen_sxtb(tmp); break;
8466 case 3: gen_sxth(tmp); break;
8467 case 4: gen_uxtb16(tmp); break;
8468 case 6: gen_uxtb(tmp); break;
8469 case 7: gen_uxth(tmp); break;
8470 default: goto illegal_op;
8472 if (rn != 15) {
8473 tmp2 = load_reg(s, rn);
8474 if ((op1 & 3) == 0) {
8475 gen_add16(tmp, tmp2);
8476 } else {
8477 tcg_gen_add_i32(tmp, tmp, tmp2);
8478 tcg_temp_free_i32(tmp2);
8481 store_reg(s, rd, tmp);
8482 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8483 /* rev */
8484 tmp = load_reg(s, rm);
8485 if (insn & (1 << 22)) {
8486 if (insn & (1 << 7)) {
8487 gen_revsh(tmp);
8488 } else {
8489 ARCH(6T2);
8490 gen_helper_rbit(tmp, tmp);
8492 } else {
8493 if (insn & (1 << 7))
8494 gen_rev16(tmp);
8495 else
8496 tcg_gen_bswap32_i32(tmp, tmp);
8498 store_reg(s, rd, tmp);
8499 } else {
8500 goto illegal_op;
8502 break;
8503 case 2: /* Multiplies (Type 3). */
8504 switch ((insn >> 20) & 0x7) {
8505 case 5:
8506 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8507 /* op2 not 00x or 11x: UNDEF */
8508 goto illegal_op;
8510 /* Signed multiply most significant [accumulate].
8511 (SMMUL, SMMLA, SMMLS) */
8512 tmp = load_reg(s, rm);
8513 tmp2 = load_reg(s, rs);
8514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8516 if (rd != 15) {
8517 tmp = load_reg(s, rd);
8518 if (insn & (1 << 6)) {
8519 tmp64 = gen_subq_msw(tmp64, tmp);
8520 } else {
8521 tmp64 = gen_addq_msw(tmp64, tmp);
8524 if (insn & (1 << 5)) {
8525 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
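/* The rounding (R) variants add 0x80000000 so that the following shift
   right by 32 rounds to nearest rather than truncating. */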
8527 tcg_gen_shri_i64(tmp64, tmp64, 32);
8528 tmp = tcg_temp_new_i32();
8529 tcg_gen_trunc_i64_i32(tmp, tmp64);
8530 tcg_temp_free_i64(tmp64);
8531 store_reg(s, rn, tmp);
8532 break;
8533 case 0:
8534 case 4:
8535 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8536 if (insn & (1 << 7)) {
8537 goto illegal_op;
8539 tmp = load_reg(s, rm);
8540 tmp2 = load_reg(s, rs);
8541 if (insn & (1 << 5))
8542 gen_swap_half(tmp2);
8543 gen_smul_dual(tmp, tmp2);
8544 if (insn & (1 << 22)) {
8545 /* smlald, smlsld */
8546 TCGv_i64 tmp64_2;
8548 tmp64 = tcg_temp_new_i64();
8549 tmp64_2 = tcg_temp_new_i64();
8550 tcg_gen_ext_i32_i64(tmp64, tmp);
8551 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8552 tcg_temp_free_i32(tmp);
8553 tcg_temp_free_i32(tmp2);
8554 if (insn & (1 << 6)) {
8555 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8556 } else {
8557 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8559 tcg_temp_free_i64(tmp64_2);
8560 gen_addq(s, tmp64, rd, rn);
8561 gen_storeq_reg(s, rd, rn, tmp64);
8562 tcg_temp_free_i64(tmp64);
8563 } else {
8564 /* smuad, smusd, smlad, smlsd */
8565 if (insn & (1 << 6)) {
8566 /* This subtraction cannot overflow. */
8567 tcg_gen_sub_i32(tmp, tmp, tmp2);
8568 } else {
8569 /* This addition cannot overflow 32 bits;
8570 * however it may overflow when considered as a
8571 * signed operation, in which case we must set
8572 * the Q flag.
8574 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8576 tcg_temp_free_i32(tmp2);
8577 if (rd != 15)
8579 tmp2 = load_reg(s, rd);
8580 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8581 tcg_temp_free_i32(tmp2);
8583 store_reg(s, rn, tmp);
8585 break;
8586 case 1:
8587 case 3:
8588 /* SDIV, UDIV */
8589 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8590 goto illegal_op;
8592 if (((insn >> 5) & 7) || (rd != 15)) {
8593 goto illegal_op;
8595 tmp = load_reg(s, rm);
8596 tmp2 = load_reg(s, rs);
8597 if (insn & (1 << 21)) {
8598 gen_helper_udiv(tmp, tmp, tmp2);
8599 } else {
8600 gen_helper_sdiv(tmp, tmp, tmp2);
8602 tcg_temp_free_i32(tmp2);
8603 store_reg(s, rn, tmp);
8604 break;
8605 default:
8606 goto illegal_op;
8608 break;
8609 case 3:
8610 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8611 switch (op1) {
8612 case 0: /* Unsigned sum of absolute differences. */
8613 ARCH(6);
8614 tmp = load_reg(s, rm);
8615 tmp2 = load_reg(s, rs);
8616 gen_helper_usad8(tmp, tmp, tmp2);
8617 tcg_temp_free_i32(tmp2);
8618 if (rd != 15) {
8619 tmp2 = load_reg(s, rd);
8620 tcg_gen_add_i32(tmp, tmp, tmp2);
8621 tcg_temp_free_i32(tmp2);
8623 store_reg(s, rn, tmp);
8624 break;
8625 case 0x20: case 0x24: case 0x28: case 0x2c:
8626 /* Bitfield insert/clear. */
8627 ARCH(6T2);
8628 shift = (insn >> 7) & 0x1f;
8629 i = (insn >> 16) & 0x1f;
8630 i = i + 1 - shift;
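/* Here "shift" is the lsb field and "i" ends up as the field width:
 * with illustrative values lsb == 8 and msb == 15, i becomes 8, so the
 * deposit below copies Rm[7:0] into Rd[15:8]. */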
8631 if (rm == 15) {
8632 tmp = tcg_temp_new_i32();
8633 tcg_gen_movi_i32(tmp, 0);
8634 } else {
8635 tmp = load_reg(s, rm);
8637 if (i != 32) {
8638 tmp2 = load_reg(s, rd);
8639 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8640 tcg_temp_free_i32(tmp2);
8642 store_reg(s, rd, tmp);
8643 break;
8644 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8645 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8646 ARCH(6T2);
8647 tmp = load_reg(s, rm);
8648 shift = (insn >> 7) & 0x1f;
8649 i = ((insn >> 16) & 0x1f) + 1;
8650 if (shift + i > 32)
8651 goto illegal_op;
8652 if (i < 32) {
8653 if (op1 & 0x20) {
8654 gen_ubfx(tmp, shift, (1u << i) - 1);
8655 } else {
8656 gen_sbfx(tmp, shift, i);
8659 store_reg(s, rd, tmp);
8660 break;
8661 default:
8662 goto illegal_op;
8664 break;
8666 break;
8668 do_ldst:
8669 /* Check for undefined extension instructions
8670 * per the ARM Bible, i.e.:
8671 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8673 sh = (0xf << 20) | (0xf << 4);
8674 if (op1 == 0x7 && ((insn & sh) == sh))
8676 goto illegal_op;
8678 /* load/store byte/word */
8679 rn = (insn >> 16) & 0xf;
8680 rd = (insn >> 12) & 0xf;
8681 tmp2 = load_reg(s, rn);
8682 if ((insn & 0x01200000) == 0x00200000) {
8683 /* ldrt/strt */
8684 i = MMU_USER_IDX;
8685 } else {
8686 i = get_mem_index(s);
8688 if (insn & (1 << 24))
8689 gen_add_data_offset(s, insn, tmp2);
8690 if (insn & (1 << 20)) {
8691 /* load */
8692 tmp = tcg_temp_new_i32();
8693 if (insn & (1 << 22)) {
8694 gen_aa32_ld8u(tmp, tmp2, i);
8695 } else {
8696 gen_aa32_ld32u(tmp, tmp2, i);
8698 } else {
8699 /* store */
8700 tmp = load_reg(s, rd);
8701 if (insn & (1 << 22)) {
8702 gen_aa32_st8(tmp, tmp2, i);
8703 } else {
8704 gen_aa32_st32(tmp, tmp2, i);
8706 tcg_temp_free_i32(tmp);
8708 if (!(insn & (1 << 24))) {
8709 gen_add_data_offset(s, insn, tmp2);
8710 store_reg(s, rn, tmp2);
8711 } else if (insn & (1 << 21)) {
8712 store_reg(s, rn, tmp2);
8713 } else {
8714 tcg_temp_free_i32(tmp2);
8716 if (insn & (1 << 20)) {
8717 /* Complete the load. */
8718 store_reg_from_load(env, s, rd, tmp);
8720 break;
8721 case 0x08:
8722 case 0x09:
8724 int j, n, user, loaded_base;
8725 TCGv_i32 loaded_var;
8726 /* load/store multiple words */
8727 /* XXX: store correct base if write back */
8728 user = 0;
8729 if (insn & (1 << 22)) {
8730 if (IS_USER(s))
8731 goto illegal_op; /* only usable in supervisor mode */
8733 if ((insn & (1 << 15)) == 0)
8734 user = 1;
8736 rn = (insn >> 16) & 0xf;
8737 addr = load_reg(s, rn);
8739 /* compute total size */
8740 loaded_base = 0;
8741 TCGV_UNUSED_I32(loaded_var);
8742 n = 0;
8743 for(i=0;i<16;i++) {
8744 if (insn & (1 << i))
8745 n++;
8747 /* XXX: test invalid n == 0 case ? */
8748 if (insn & (1 << 23)) {
8749 if (insn & (1 << 24)) {
8750 /* pre increment */
8751 tcg_gen_addi_i32(addr, addr, 4);
8752 } else {
8753 /* post increment */
8755 } else {
8756 if (insn & (1 << 24)) {
8757 /* pre decrement */
8758 tcg_gen_addi_i32(addr, addr, -(n * 4));
8759 } else {
8760 /* post decrement */
8761 if (n != 1)
8762 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8765 j = 0;
8766 for(i=0;i<16;i++) {
8767 if (insn & (1 << i)) {
8768 if (insn & (1 << 20)) {
8769 /* load */
8770 tmp = tcg_temp_new_i32();
8771 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8772 if (user) {
8773 tmp2 = tcg_const_i32(i);
8774 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
8775 tcg_temp_free_i32(tmp2);
8776 tcg_temp_free_i32(tmp);
8777 } else if (i == rn) {
8778 loaded_var = tmp;
8779 loaded_base = 1;
8780 } else {
8781 store_reg_from_load(env, s, i, tmp);
8783 } else {
8784 /* store */
8785 if (i == 15) {
8786 /* special case: r15 = PC + 8 */
8787 val = (long)s->pc + 4;
8788 tmp = tcg_temp_new_i32();
8789 tcg_gen_movi_i32(tmp, val);
8790 } else if (user) {
8791 tmp = tcg_temp_new_i32();
8792 tmp2 = tcg_const_i32(i);
8793 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
8794 tcg_temp_free_i32(tmp2);
8795 } else {
8796 tmp = load_reg(s, i);
8798 gen_aa32_st32(tmp, addr, get_mem_index(s));
8799 tcg_temp_free_i32(tmp);
8801 j++;
8802 /* no need to add after the last transfer */
8803 if (j != n)
8804 tcg_gen_addi_i32(addr, addr, 4);
8807 if (insn & (1 << 21)) {
8808 /* write back */
8809 if (insn & (1 << 23)) {
8810 if (insn & (1 << 24)) {
8811 /* pre increment */
8812 } else {
8813 /* post increment */
8814 tcg_gen_addi_i32(addr, addr, 4);
8816 } else {
8817 if (insn & (1 << 24)) {
8818 /* pre decrement */
8819 if (n != 1)
8820 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8821 } else {
8822 /* post decrement */
8823 tcg_gen_addi_i32(addr, addr, -(n * 4));
8826 store_reg(s, rn, addr);
8827 } else {
8828 tcg_temp_free_i32(addr);
8830 if (loaded_base) {
8831 store_reg(s, rn, loaded_var);
8833 if ((insn & (1 << 22)) && !user) {
8834 /* Restore CPSR from SPSR. */
8835 tmp = load_cpu_field(spsr);
8836 gen_set_cpsr(tmp, 0xffffffff);
8837 tcg_temp_free_i32(tmp);
8838 s->is_jmp = DISAS_UPDATE;
8841 break;
8842 case 0xa:
8843 case 0xb:
8845 int32_t offset;
8847 /* branch (and link) */
8848 val = (int32_t)s->pc;
8849 if (insn & (1 << 24)) {
8850 tmp = tcg_temp_new_i32();
8851 tcg_gen_movi_i32(tmp, val);
8852 store_reg(s, 14, tmp);
8854 offset = sextract32(insn << 2, 0, 26);
8855 val += offset + 4;
8856 gen_jmp(s, val);
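/* Worked example (values chosen for illustration): for a branch at
 * address A, s->pc is already A + 4 here, so imm24 == 0x000010 makes
 * sextract32() yield 0x40 and the jump target A + 8 + 0x40, matching
 * the architectural "PC + 8 + SignExtend(imm24:'00')". */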
8858 break;
8859 case 0xc:
8860 case 0xd:
8861 case 0xe:
8862 if (((insn >> 8) & 0xe) == 10) {
8863 /* VFP. */
8864 if (disas_vfp_insn(env, s, insn)) {
8865 goto illegal_op;
8867 } else if (disas_coproc_insn(env, s, insn)) {
8868 /* Coprocessor. */
8869 goto illegal_op;
8871 break;
8872 case 0xf:
8873 /* swi */
8874 gen_set_pc_im(s, s->pc);
8875 s->svc_imm = extract32(insn, 0, 24);
8876 s->is_jmp = DISAS_SWI;
8877 break;
8878 default:
8879 illegal_op:
8880 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
8881 break;
8886 /* Return true if this is a Thumb-2 logical op. */
8887 static int
8888 thumb2_logic_op(int op)
8890 return (op < 8);
8893 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8894 then set condition code flags based on the result of the operation.
8895 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8896 to the high bit of T1.
8897 Returns zero if the opcode is valid. */
8899 static int
8900 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8901 TCGv_i32 t0, TCGv_i32 t1)
8903 int logic_cc;
8905 logic_cc = 0;
8906 switch (op) {
8907 case 0: /* and */
8908 tcg_gen_and_i32(t0, t0, t1);
8909 logic_cc = conds;
8910 break;
8911 case 1: /* bic */
8912 tcg_gen_andc_i32(t0, t0, t1);
8913 logic_cc = conds;
8914 break;
8915 case 2: /* orr */
8916 tcg_gen_or_i32(t0, t0, t1);
8917 logic_cc = conds;
8918 break;
8919 case 3: /* orn */
8920 tcg_gen_orc_i32(t0, t0, t1);
8921 logic_cc = conds;
8922 break;
8923 case 4: /* eor */
8924 tcg_gen_xor_i32(t0, t0, t1);
8925 logic_cc = conds;
8926 break;
8927 case 8: /* add */
8928 if (conds)
8929 gen_add_CC(t0, t0, t1);
8930 else
8931 tcg_gen_add_i32(t0, t0, t1);
8932 break;
8933 case 10: /* adc */
8934 if (conds)
8935 gen_adc_CC(t0, t0, t1);
8936 else
8937 gen_adc(t0, t1);
8938 break;
8939 case 11: /* sbc */
8940 if (conds) {
8941 gen_sbc_CC(t0, t0, t1);
8942 } else {
8943 gen_sub_carry(t0, t0, t1);
8945 break;
8946 case 13: /* sub */
8947 if (conds)
8948 gen_sub_CC(t0, t0, t1);
8949 else
8950 tcg_gen_sub_i32(t0, t0, t1);
8951 break;
8952 case 14: /* rsb */
8953 if (conds)
8954 gen_sub_CC(t0, t1, t0);
8955 else
8956 tcg_gen_sub_i32(t0, t1, t0);
8957 break;
8958 default: /* 5, 6, 7, 9, 12, 15. */
8959 return 1;
8961 if (logic_cc) {
8962 gen_logic_CC(t0);
8963 if (shifter_out)
8964 gen_set_CF_bit31(t1);
8966 return 0;
8969 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8970 is not legal. */
8971 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8973 uint32_t insn, imm, shift, offset;
8974 uint32_t rd, rn, rm, rs;
8975 TCGv_i32 tmp;
8976 TCGv_i32 tmp2;
8977 TCGv_i32 tmp3;
8978 TCGv_i32 addr;
8979 TCGv_i64 tmp64;
8980 int op;
8981 int shiftop;
8982 int conds;
8983 int logic_cc;
8985 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8986 || arm_feature (env, ARM_FEATURE_M))) {
8987 /* Thumb-1 cores may need to treat bl and blx as a pair of
8988 16-bit instructions to get correct prefetch abort behavior. */
8989 insn = insn_hw1;
8990 if ((insn & (1 << 12)) == 0) {
8991 ARCH(5);
8992 /* Second half of blx. */
8993 offset = ((insn & 0x7ff) << 1);
8994 tmp = load_reg(s, 14);
8995 tcg_gen_addi_i32(tmp, tmp, offset);
8996 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8998 tmp2 = tcg_temp_new_i32();
8999 tcg_gen_movi_i32(tmp2, s->pc | 1);
9000 store_reg(s, 14, tmp2);
9001 gen_bx(s, tmp);
9002 return 0;
9004 if (insn & (1 << 11)) {
9005 /* Second half of bl. */
9006 offset = ((insn & 0x7ff) << 1) | 1;
9007 tmp = load_reg(s, 14);
9008 tcg_gen_addi_i32(tmp, tmp, offset);
9010 tmp2 = tcg_temp_new_i32();
9011 tcg_gen_movi_i32(tmp2, s->pc | 1);
9012 store_reg(s, 14, tmp2);
9013 gen_bx(s, tmp);
9014 return 0;
9016 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9017 /* Instruction spans a page boundary. Implement it as two
9018 16-bit instructions in case the second half causes a
9019 prefetch abort. */
9020 offset = ((int32_t)insn << 21) >> 9;
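/* This shift pair sign-extends hw1[10:0] into bits [22:12] of the
 * offset, so (illustrative value) imm11 == 1 leaves r14 holding
 * s->pc + 2 + 0x1000: the classic "BL prefix" partial result that the
 * second halfword then completes. */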
9021 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9022 return 0;
9024 /* Fall through to 32-bit decode. */
9027 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9028 s->pc += 2;
9029 insn |= (uint32_t)insn_hw1 << 16;
9031 if ((insn & 0xf800e800) != 0xf000e800) {
9032 ARCH(6T2);
9035 rn = (insn >> 16) & 0xf;
9036 rs = (insn >> 12) & 0xf;
9037 rd = (insn >> 8) & 0xf;
9038 rm = insn & 0xf;
9039 switch ((insn >> 25) & 0xf) {
9040 case 0: case 1: case 2: case 3:
9041 /* 16-bit instructions. Should never happen. */
9042 abort();
9043 case 4:
9044 if (insn & (1 << 22)) {
9045 /* Other load/store, table branch. */
9046 if (insn & 0x01200000) {
9047 /* Load/store doubleword. */
9048 if (rn == 15) {
9049 addr = tcg_temp_new_i32();
9050 tcg_gen_movi_i32(addr, s->pc & ~3);
9051 } else {
9052 addr = load_reg(s, rn);
9054 offset = (insn & 0xff) * 4;
9055 if ((insn & (1 << 23)) == 0)
9056 offset = -offset;
9057 if (insn & (1 << 24)) {
9058 tcg_gen_addi_i32(addr, addr, offset);
9059 offset = 0;
9061 if (insn & (1 << 20)) {
9062 /* ldrd */
9063 tmp = tcg_temp_new_i32();
9064 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9065 store_reg(s, rs, tmp);
9066 tcg_gen_addi_i32(addr, addr, 4);
9067 tmp = tcg_temp_new_i32();
9068 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9069 store_reg(s, rd, tmp);
9070 } else {
9071 /* strd */
9072 tmp = load_reg(s, rs);
9073 gen_aa32_st32(tmp, addr, get_mem_index(s));
9074 tcg_temp_free_i32(tmp);
9075 tcg_gen_addi_i32(addr, addr, 4);
9076 tmp = load_reg(s, rd);
9077 gen_aa32_st32(tmp, addr, get_mem_index(s));
9078 tcg_temp_free_i32(tmp);
9080 if (insn & (1 << 21)) {
9081 /* Base writeback. */
9082 if (rn == 15)
9083 goto illegal_op;
9084 tcg_gen_addi_i32(addr, addr, offset - 4);
9085 store_reg(s, rn, addr);
9086 } else {
9087 tcg_temp_free_i32(addr);
9089 } else if ((insn & (1 << 23)) == 0) {
9090 /* Load/store exclusive word. */
9091 addr = tcg_temp_local_new_i32();
9092 load_reg_var(s, addr, rn);
9093 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9094 if (insn & (1 << 20)) {
9095 gen_load_exclusive(s, rs, 15, addr, 2);
9096 } else {
9097 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9099 tcg_temp_free_i32(addr);
9100 } else if ((insn & (7 << 5)) == 0) {
9101 /* Table Branch. */
9102 if (rn == 15) {
9103 addr = tcg_temp_new_i32();
9104 tcg_gen_movi_i32(addr, s->pc);
9105 } else {
9106 addr = load_reg(s, rn);
9108 tmp = load_reg(s, rm);
9109 tcg_gen_add_i32(addr, addr, tmp);
9110 if (insn & (1 << 4)) {
9111 /* tbh */
9112 tcg_gen_add_i32(addr, addr, tmp);
9113 tcg_temp_free_i32(tmp);
9114 tmp = tcg_temp_new_i32();
9115 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9116 } else { /* tbb */
9117 tcg_temp_free_i32(tmp);
9118 tmp = tcg_temp_new_i32();
9119 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9121 tcg_temp_free_i32(addr);
9122 tcg_gen_shli_i32(tmp, tmp, 1);
9123 tcg_gen_addi_i32(tmp, tmp, s->pc);
9124 store_reg(s, 15, tmp);
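/* Worked example (illustrative values): for TBB with a table byte of
 * 5, tmp becomes 5, the shift doubles it to 10 and the stored PC is
 * s->pc + 10; TBH takes the same path but indexes the table by 2 * Rm
 * and loads a halfword instead. */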
9125 } else {
9126 int op2 = (insn >> 6) & 0x3;
9127 op = (insn >> 4) & 0x3;
9128 switch (op2) {
9129 case 0:
9130 goto illegal_op;
9131 case 1:
9132 /* Load/store exclusive byte/halfword/doubleword */
9133 if (op == 2) {
9134 goto illegal_op;
9136 ARCH(7);
9137 break;
9138 case 2:
9139 /* Load-acquire/store-release */
9140 if (op == 3) {
9141 goto illegal_op;
9143 /* Fall through */
9144 case 3:
9145 /* Load-acquire/store-release exclusive */
9146 ARCH(8);
9147 break;
9149 addr = tcg_temp_local_new_i32();
9150 load_reg_var(s, addr, rn);
9151 if (!(op2 & 1)) {
9152 if (insn & (1 << 20)) {
9153 tmp = tcg_temp_new_i32();
9154 switch (op) {
9155 case 0: /* ldab */
9156 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9157 break;
9158 case 1: /* ldah */
9159 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9160 break;
9161 case 2: /* lda */
9162 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9163 break;
9164 default:
9165 abort();
9167 store_reg(s, rs, tmp);
9168 } else {
9169 tmp = load_reg(s, rs);
9170 switch (op) {
9171 case 0: /* stlb */
9172 gen_aa32_st8(tmp, addr, get_mem_index(s));
9173 break;
9174 case 1: /* stlh */
9175 gen_aa32_st16(tmp, addr, get_mem_index(s));
9176 break;
9177 case 2: /* stl */
9178 gen_aa32_st32(tmp, addr, get_mem_index(s));
9179 break;
9180 default:
9181 abort();
9183 tcg_temp_free_i32(tmp);
9185 } else if (insn & (1 << 20)) {
9186 gen_load_exclusive(s, rs, rd, addr, op);
9187 } else {
9188 gen_store_exclusive(s, rm, rs, rd, addr, op);
9190 tcg_temp_free_i32(addr);
9192 } else {
9193 /* Load/store multiple, RFE, SRS. */
9194 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9195 /* RFE, SRS: not available in user mode or on M profile */
9196 if (IS_USER(s) || IS_M(env)) {
9197 goto illegal_op;
9199 if (insn & (1 << 20)) {
9200 /* rfe */
9201 addr = load_reg(s, rn);
9202 if ((insn & (1 << 24)) == 0)
9203 tcg_gen_addi_i32(addr, addr, -8);
9204 /* Load PC into tmp and CPSR into tmp2. */
9205 tmp = tcg_temp_new_i32();
9206 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9207 tcg_gen_addi_i32(addr, addr, 4);
9208 tmp2 = tcg_temp_new_i32();
9209 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9210 if (insn & (1 << 21)) {
9211 /* Base writeback. */
9212 if (insn & (1 << 24)) {
9213 tcg_gen_addi_i32(addr, addr, 4);
9214 } else {
9215 tcg_gen_addi_i32(addr, addr, -4);
9217 store_reg(s, rn, addr);
9218 } else {
9219 tcg_temp_free_i32(addr);
9221 gen_rfe(s, tmp, tmp2);
9222 } else {
9223 /* srs */
9224 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9225 insn & (1 << 21));
9227 } else {
9228 int i, loaded_base = 0;
9229 TCGv_i32 loaded_var;
9230 /* Load/store multiple. */
9231 addr = load_reg(s, rn);
9232 offset = 0;
9233 for (i = 0; i < 16; i++) {
9234 if (insn & (1 << i))
9235 offset += 4;
9237 if (insn & (1 << 24)) {
9238 tcg_gen_addi_i32(addr, addr, -offset);
9241 TCGV_UNUSED_I32(loaded_var);
9242 for (i = 0; i < 16; i++) {
9243 if ((insn & (1 << i)) == 0)
9244 continue;
9245 if (insn & (1 << 20)) {
9246 /* Load. */
9247 tmp = tcg_temp_new_i32();
9248 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9249 if (i == 15) {
9250 gen_bx(s, tmp);
9251 } else if (i == rn) {
9252 loaded_var = tmp;
9253 loaded_base = 1;
9254 } else {
9255 store_reg(s, i, tmp);
9257 } else {
9258 /* Store. */
9259 tmp = load_reg(s, i);
9260 gen_aa32_st32(tmp, addr, get_mem_index(s));
9261 tcg_temp_free_i32(tmp);
9263 tcg_gen_addi_i32(addr, addr, 4);
9265 if (loaded_base) {
9266 store_reg(s, rn, loaded_var);
9268 if (insn & (1 << 21)) {
9269 /* Base register writeback. */
9270 if (insn & (1 << 24)) {
9271 tcg_gen_addi_i32(addr, addr, -offset);
9273 /* Fault if writeback register is in register list. */
9274 if (insn & (1 << rn))
9275 goto illegal_op;
9276 store_reg(s, rn, addr);
9277 } else {
9278 tcg_temp_free_i32(addr);
9282 break;
9283 case 5:
9285 op = (insn >> 21) & 0xf;
9286 if (op == 6) {
9287 /* Halfword pack. */
9288 tmp = load_reg(s, rn);
9289 tmp2 = load_reg(s, rm);
9290 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9291 if (insn & (1 << 5)) {
9292 /* pkhtb */
9293 if (shift == 0)
9294 shift = 31;
9295 tcg_gen_sari_i32(tmp2, tmp2, shift);
9296 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9297 tcg_gen_ext16u_i32(tmp2, tmp2);
9298 } else {
9299 /* pkhbt */
9300 if (shift)
9301 tcg_gen_shli_i32(tmp2, tmp2, shift);
9302 tcg_gen_ext16u_i32(tmp, tmp);
9303 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9305 tcg_gen_or_i32(tmp, tmp, tmp2);
9306 tcg_temp_free_i32(tmp2);
9307 store_reg(s, rd, tmp);
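/* Worked example (illustrative values): PKHBT with a shift of 16
 * places Rm[15:0] in the upper halfword and Rn[15:0] in the lower one,
 * while PKHTB with a shift field of 0 is handled above as an
 * arithmetic shift by 31 before the halfwords are merged. */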
9308 } else {
9309 /* Data processing register constant shift. */
9310 if (rn == 15) {
9311 tmp = tcg_temp_new_i32();
9312 tcg_gen_movi_i32(tmp, 0);
9313 } else {
9314 tmp = load_reg(s, rn);
9316 tmp2 = load_reg(s, rm);
9318 shiftop = (insn >> 4) & 3;
9319 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9320 conds = (insn & (1 << 20)) != 0;
9321 logic_cc = (conds && thumb2_logic_op(op));
9322 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9323 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9324 goto illegal_op;
9325 tcg_temp_free_i32(tmp2);
9326 if (rd != 15) {
9327 store_reg(s, rd, tmp);
9328 } else {
9329 tcg_temp_free_i32(tmp);
9332 break;
9333 case 13: /* Misc data processing. */
9334 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9335 if (op < 4 && (insn & 0xf000) != 0xf000)
9336 goto illegal_op;
9337 switch (op) {
9338 case 0: /* Register controlled shift. */
9339 tmp = load_reg(s, rn);
9340 tmp2 = load_reg(s, rm);
9341 if ((insn & 0x70) != 0)
9342 goto illegal_op;
9343 op = (insn >> 21) & 3;
9344 logic_cc = (insn & (1 << 20)) != 0;
9345 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9346 if (logic_cc)
9347 gen_logic_CC(tmp);
9348 store_reg_bx(env, s, rd, tmp);
9349 break;
9350 case 1: /* Sign/zero extend. */
9351 tmp = load_reg(s, rm);
9352 shift = (insn >> 4) & 3;
9353 /* ??? In many cases it's not necessary to do a
9354 rotate; a shift is sufficient. */
9355 if (shift != 0)
9356 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9357 op = (insn >> 20) & 7;
9358 switch (op) {
9359 case 0: gen_sxth(tmp); break;
9360 case 1: gen_uxth(tmp); break;
9361 case 2: gen_sxtb16(tmp); break;
9362 case 3: gen_uxtb16(tmp); break;
9363 case 4: gen_sxtb(tmp); break;
9364 case 5: gen_uxtb(tmp); break;
9365 default: goto illegal_op;
9367 if (rn != 15) {
9368 tmp2 = load_reg(s, rn);
9369 if ((op >> 1) == 1) {
9370 gen_add16(tmp, tmp2);
9371 } else {
9372 tcg_gen_add_i32(tmp, tmp, tmp2);
9373 tcg_temp_free_i32(tmp2);
9376 store_reg(s, rd, tmp);
9377 break;
9378 case 2: /* SIMD add/subtract. */
9379 op = (insn >> 20) & 7;
9380 shift = (insn >> 4) & 7;
9381 if ((op & 3) == 3 || (shift & 3) == 3)
9382 goto illegal_op;
9383 tmp = load_reg(s, rn);
9384 tmp2 = load_reg(s, rm);
9385 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9386 tcg_temp_free_i32(tmp2);
9387 store_reg(s, rd, tmp);
9388 break;
9389 case 3: /* Other data processing. */
9390 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9391 if (op < 4) {
9392 /* Saturating add/subtract. */
9393 tmp = load_reg(s, rn);
9394 tmp2 = load_reg(s, rm);
9395 if (op & 1)
9396 gen_helper_double_saturate(tmp, cpu_env, tmp);
9397 if (op & 2)
9398 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9399 else
9400 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9401 tcg_temp_free_i32(tmp2);
9402 } else {
9403 tmp = load_reg(s, rn);
9404 switch (op) {
9405 case 0x0a: /* rbit */
9406 gen_helper_rbit(tmp, tmp);
9407 break;
9408 case 0x08: /* rev */
9409 tcg_gen_bswap32_i32(tmp, tmp);
9410 break;
9411 case 0x09: /* rev16 */
9412 gen_rev16(tmp);
9413 break;
9414 case 0x0b: /* revsh */
9415 gen_revsh(tmp);
9416 break;
9417 case 0x10: /* sel */
9418 tmp2 = load_reg(s, rm);
9419 tmp3 = tcg_temp_new_i32();
9420 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9421 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9422 tcg_temp_free_i32(tmp3);
9423 tcg_temp_free_i32(tmp2);
9424 break;
9425 case 0x18: /* clz */
9426 gen_helper_clz(tmp, tmp);
9427 break;
9428 case 0x20:
9429 case 0x21:
9430 case 0x22:
9431 case 0x28:
9432 case 0x29:
9433 case 0x2a:
9435 /* crc32/crc32c */
9436 uint32_t sz = op & 0x3;
9437 uint32_t c = op & 0x8;
9439 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9440 goto illegal_op;
9443 tmp2 = load_reg(s, rm);
9444 if (sz == 0) {
9445 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9446 } else if (sz == 1) {
9447 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9449 tmp3 = tcg_const_i32(1 << sz);
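/* tmp3 carries the operand size in bytes (1, 2 or 4): for example
 * CRC32B masks Rm to 8 bits above and passes 1 here, while CRC32W
 * skips the masking and passes 4. */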
9450 if (c) {
9451 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9452 } else {
9453 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9455 tcg_temp_free_i32(tmp2);
9456 tcg_temp_free_i32(tmp3);
9457 break;
9459 default:
9460 goto illegal_op;
9463 store_reg(s, rd, tmp);
9464 break;
9465 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9466 op = (insn >> 4) & 0xf;
9467 tmp = load_reg(s, rn);
9468 tmp2 = load_reg(s, rm);
9469 switch ((insn >> 20) & 7) {
9470 case 0: /* 32 x 32 -> 32 */
9471 tcg_gen_mul_i32(tmp, tmp, tmp2);
9472 tcg_temp_free_i32(tmp2);
9473 if (rs != 15) {
9474 tmp2 = load_reg(s, rs);
9475 if (op)
9476 tcg_gen_sub_i32(tmp, tmp2, tmp);
9477 else
9478 tcg_gen_add_i32(tmp, tmp, tmp2);
9479 tcg_temp_free_i32(tmp2);
9481 break;
9482 case 1: /* 16 x 16 -> 32 */
9483 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9484 tcg_temp_free_i32(tmp2);
9485 if (rs != 15) {
9486 tmp2 = load_reg(s, rs);
9487 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9488 tcg_temp_free_i32(tmp2);
9490 break;
9491 case 2: /* Dual multiply add. */
9492 case 4: /* Dual multiply subtract. */
9493 if (op)
9494 gen_swap_half(tmp2);
9495 gen_smul_dual(tmp, tmp2);
9496 if (insn & (1 << 22)) {
9497 /* This subtraction cannot overflow. */
9498 tcg_gen_sub_i32(tmp, tmp, tmp2);
9499 } else {
9500 /* This addition cannot overflow 32 bits;
9501 * however it may overflow when considered as a signed
9502 * operation, in which case we must set the Q flag.
9504 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9506 tcg_temp_free_i32(tmp2);
9507 if (rs != 15)
9509 tmp2 = load_reg(s, rs);
9510 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9511 tcg_temp_free_i32(tmp2);
9513 break;
9514 case 3: /* 32 * 16 -> 32msb */
9515 if (op)
9516 tcg_gen_sari_i32(tmp2, tmp2, 16);
9517 else
9518 gen_sxth(tmp2);
9519 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9520 tcg_gen_shri_i64(tmp64, tmp64, 16);
9521 tmp = tcg_temp_new_i32();
9522 tcg_gen_trunc_i64_i32(tmp, tmp64);
9523 tcg_temp_free_i64(tmp64);
9524 if (rs != 15)
9526 tmp2 = load_reg(s, rs);
9527 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9528 tcg_temp_free_i32(tmp2);
9530 break;
9531 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9532 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9533 if (rs != 15) {
9534 tmp = load_reg(s, rs);
9535 if (insn & (1 << 20)) {
9536 tmp64 = gen_addq_msw(tmp64, tmp);
9537 } else {
9538 tmp64 = gen_subq_msw(tmp64, tmp);
9541 if (insn & (1 << 4)) {
9542 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9544 tcg_gen_shri_i64(tmp64, tmp64, 32);
9545 tmp = tcg_temp_new_i32();
9546 tcg_gen_trunc_i64_i32(tmp, tmp64);
9547 tcg_temp_free_i64(tmp64);
9548 break;
9549 case 7: /* Unsigned sum of absolute differences. */
9550 gen_helper_usad8(tmp, tmp, tmp2);
9551 tcg_temp_free_i32(tmp2);
9552 if (rs != 15) {
9553 tmp2 = load_reg(s, rs);
9554 tcg_gen_add_i32(tmp, tmp, tmp2);
9555 tcg_temp_free_i32(tmp2);
9557 break;
9559 store_reg(s, rd, tmp);
9560 break;
9561 case 6: case 7: /* 64-bit multiply, Divide. */
9562 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9563 tmp = load_reg(s, rn);
9564 tmp2 = load_reg(s, rm);
9565 if ((op & 0x50) == 0x10) {
9566 /* sdiv, udiv */
9567 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9568 goto illegal_op;
9570 if (op & 0x20)
9571 gen_helper_udiv(tmp, tmp, tmp2);
9572 else
9573 gen_helper_sdiv(tmp, tmp, tmp2);
9574 tcg_temp_free_i32(tmp2);
9575 store_reg(s, rd, tmp);
9576 } else if ((op & 0xe) == 0xc) {
9577 /* Dual multiply accumulate long. */
9578 if (op & 1)
9579 gen_swap_half(tmp2);
9580 gen_smul_dual(tmp, tmp2);
9581 if (op & 0x10) {
9582 tcg_gen_sub_i32(tmp, tmp, tmp2);
9583 } else {
9584 tcg_gen_add_i32(tmp, tmp, tmp2);
9586 tcg_temp_free_i32(tmp2);
9587 /* BUGFIX */
9588 tmp64 = tcg_temp_new_i64();
9589 tcg_gen_ext_i32_i64(tmp64, tmp);
9590 tcg_temp_free_i32(tmp);
9591 gen_addq(s, tmp64, rs, rd);
9592 gen_storeq_reg(s, rs, rd, tmp64);
9593 tcg_temp_free_i64(tmp64);
9594 } else {
9595 if (op & 0x20) {
9596 /* Unsigned 64-bit multiply */
9597 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9598 } else {
9599 if (op & 8) {
9600 /* smlalxy */
9601 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9602 tcg_temp_free_i32(tmp2);
9603 tmp64 = tcg_temp_new_i64();
9604 tcg_gen_ext_i32_i64(tmp64, tmp);
9605 tcg_temp_free_i32(tmp);
9606 } else {
9607 /* Signed 64-bit multiply */
9608 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9611 if (op & 4) {
9612 /* umaal */
9613 gen_addq_lo(s, tmp64, rs);
9614 gen_addq_lo(s, tmp64, rd);
9615 } else if (op & 0x40) {
9616 /* 64-bit accumulate. */
9617 gen_addq(s, tmp64, rs, rd);
9619 gen_storeq_reg(s, rs, rd, tmp64);
9620 tcg_temp_free_i64(tmp64);
9622 break;
9624 break;
9625 case 6: case 7: case 14: case 15:
9626 /* Coprocessor. */
9627 if (((insn >> 24) & 3) == 3) {
9628 /* Translate into the equivalent ARM encoding. */
9629 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9630 if (disas_neon_data_insn(env, s, insn))
9631 goto illegal_op;
9632 } else if (((insn >> 8) & 0xe) == 10) {
9633 if (disas_vfp_insn(env, s, insn)) {
9634 goto illegal_op;
9636 } else {
9637 if (insn & (1 << 28))
9638 goto illegal_op;
9639 if (disas_coproc_insn (env, s, insn))
9640 goto illegal_op;
9642 break;
9643 case 8: case 9: case 10: case 11:
9644 if (insn & (1 << 15)) {
9645 /* Branches, misc control. */
9646 if (insn & 0x5000) {
9647 /* Unconditional branch. */
9648 /* signextend(hw1[10:0]) -> offset[:12]. */
9649 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9650 /* hw2[10:0] -> offset[11:1]. */
9651 offset |= (insn & 0x7ff) << 1;
9652 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9653 offset[24:22] already have the same value because of the
9654 sign extension above. */
9655 offset ^= ((~insn) & (1 << 13)) << 10;
9656 offset ^= ((~insn) & (1 << 11)) << 11;
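/* Worked example (illustrative encoding): S == 0 with J1 == J2 == 1
 * leaves bits [23:22] clear, so hw1 imm10 == 0 and hw2 imm11 == 4
 * give offset == 8 and a BL target of s->pc + 8, i.e. the current
 * instruction address + 12. */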
9658 if (insn & (1 << 14)) {
9659 /* Branch and link. */
9660 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9663 offset += s->pc;
9664 if (insn & (1 << 12)) {
9665 /* b/bl */
9666 gen_jmp(s, offset);
9667 } else {
9668 /* blx */
9669 offset &= ~(uint32_t)2;
9670 /* thumb2 bx, no need to check */
9671 gen_bx_im(s, offset);
9673 } else if (((insn >> 23) & 7) == 7) {
9674 /* Misc control */
9675 if (insn & (1 << 13))
9676 goto illegal_op;
9678 if (insn & (1 << 26)) {
9679 /* Secure monitor call (v6Z) */
9680 qemu_log_mask(LOG_UNIMP,
9681 "arm: unimplemented secure monitor call\n");
9682 goto illegal_op; /* not implemented. */
9683 } else {
9684 op = (insn >> 20) & 7;
9685 switch (op) {
9686 case 0: /* msr cpsr. */
9687 if (IS_M(env)) {
9688 tmp = load_reg(s, rn);
9689 addr = tcg_const_i32(insn & 0xff);
9690 gen_helper_v7m_msr(cpu_env, addr, tmp);
9691 tcg_temp_free_i32(addr);
9692 tcg_temp_free_i32(tmp);
9693 gen_lookup_tb(s);
9694 break;
9696 /* fall through */
9697 case 1: /* msr spsr. */
9698 if (IS_M(env))
9699 goto illegal_op;
9700 tmp = load_reg(s, rn);
9701 if (gen_set_psr(s,
9702 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
9703 op == 1, tmp))
9704 goto illegal_op;
9705 break;
9706 case 2: /* cps, nop-hint. */
9707 if (((insn >> 8) & 7) == 0) {
9708 gen_nop_hint(s, insn & 0xff);
9710 /* Implemented as NOP in user mode. */
9711 if (IS_USER(s))
9712 break;
9713 offset = 0;
9714 imm = 0;
9715 if (insn & (1 << 10)) {
9716 if (insn & (1 << 7))
9717 offset |= CPSR_A;
9718 if (insn & (1 << 6))
9719 offset |= CPSR_I;
9720 if (insn & (1 << 5))
9721 offset |= CPSR_F;
9722 if (insn & (1 << 9))
9723 imm = CPSR_A | CPSR_I | CPSR_F;
9725 if (insn & (1 << 8)) {
9726 offset |= 0x1f;
9727 imm |= (insn & 0x1f);
9729 if (offset) {
9730 gen_set_psr_im(s, offset, 0, imm);
9732 break;
9733 case 3: /* Special control operations. */
9734 ARCH(7);
9735 op = (insn >> 4) & 0xf;
9736 switch (op) {
9737 case 2: /* clrex */
9738 gen_clrex(s);
9739 break;
9740 case 4: /* dsb */
9741 case 5: /* dmb */
9742 case 6: /* isb */
9743 /* These execute as NOPs. */
9744 break;
9745 default:
9746 goto illegal_op;
9748 break;
9749 case 4: /* bxj */
9750 /* Trivial implementation equivalent to bx. */
9751 tmp = load_reg(s, rn);
9752 gen_bx(s, tmp);
9753 break;
9754 case 5: /* Exception return. */
9755 if (IS_USER(s)) {
9756 goto illegal_op;
9758 if (rn != 14 || rd != 15) {
9759 goto illegal_op;
9761 tmp = load_reg(s, rn);
9762 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9763 gen_exception_return(s, tmp);
9764 break;
9765 case 6: /* mrs cpsr. */
9766 tmp = tcg_temp_new_i32();
9767 if (IS_M(env)) {
9768 addr = tcg_const_i32(insn & 0xff);
9769 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9770 tcg_temp_free_i32(addr);
9771 } else {
9772 gen_helper_cpsr_read(tmp, cpu_env);
9774 store_reg(s, rd, tmp);
9775 break;
9776 case 7: /* mrs spsr. */
9777 /* Not accessible in user mode. */
9778 if (IS_USER(s) || IS_M(env))
9779 goto illegal_op;
9780 tmp = load_cpu_field(spsr);
9781 store_reg(s, rd, tmp);
9782 break;
9785 } else {
9786 /* Conditional branch. */
9787 op = (insn >> 22) & 0xf;
9788 /* Generate a conditional jump to next instruction. */
9789 s->condlabel = gen_new_label();
9790 arm_gen_test_cc(op ^ 1, s->condlabel);
9791 s->condjmp = 1;
9793 /* offset[11:1] = insn[10:0] */
9794 offset = (insn & 0x7ff) << 1;
9795 /* offset[17:12] = insn[21:16]. */
9796 offset |= (insn & 0x003f0000) >> 4;
9797 /* offset[31:20] = insn[26]. */
9798 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9799 /* offset[18] = insn[13]. */
9800 offset |= (insn & (1 << 13)) << 5;
9801 /* offset[19] = insn[11]. */
9802 offset |= (insn & (1 << 11)) << 8;
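/* Worked example (illustrative values): with only imm11 == 2 set, the
 * lines above yield offset == 4, so the conditional branch below
 * targets s->pc + 4, i.e. the instruction address + 8. */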
9804 /* jump to the offset */
9805 gen_jmp(s, s->pc + offset);
9807 } else {
9808 /* Data processing immediate. */
9809 if (insn & (1 << 25)) {
9810 if (insn & (1 << 24)) {
9811 if (insn & (1 << 20))
9812 goto illegal_op;
9813 /* Bitfield/Saturate. */
9814 op = (insn >> 21) & 7;
9815 imm = insn & 0x1f;
9816 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9817 if (rn == 15) {
9818 tmp = tcg_temp_new_i32();
9819 tcg_gen_movi_i32(tmp, 0);
9820 } else {
9821 tmp = load_reg(s, rn);
9823 switch (op) {
9824 case 2: /* Signed bitfield extract. */
9825 imm++;
9826 if (shift + imm > 32)
9827 goto illegal_op;
9828 if (imm < 32)
9829 gen_sbfx(tmp, shift, imm);
9830 break;
9831 case 6: /* Unsigned bitfield extract. */
9832 imm++;
9833 if (shift + imm > 32)
9834 goto illegal_op;
9835 if (imm < 32)
9836 gen_ubfx(tmp, shift, (1u << imm) - 1);
9837 break;
9838 case 3: /* Bitfield insert/clear. */
9839 if (imm < shift)
9840 goto illegal_op;
9841 imm = imm + 1 - shift;
9842 if (imm != 32) {
9843 tmp2 = load_reg(s, rd);
9844 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
9845 tcg_temp_free_i32(tmp2);
9847 break;
9848 case 7:
9849 goto illegal_op;
9850 default: /* Saturate. */
9851 if (shift) {
9852 if (op & 1)
9853 tcg_gen_sari_i32(tmp, tmp, shift);
9854 else
9855 tcg_gen_shli_i32(tmp, tmp, shift);
9857 tmp2 = tcg_const_i32(imm);
9858 if (op & 4) {
9859 /* Unsigned. */
9860 if ((op & 1) && shift == 0)
9861 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9862 else
9863 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9864 } else {
9865 /* Signed. */
9866 if ((op & 1) && shift == 0)
9867 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9868 else
9869 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9871 tcg_temp_free_i32(tmp2);
9872 break;
9874 store_reg(s, rd, tmp);
9875 } else {
9876 imm = ((insn & 0x04000000) >> 15)
9877 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9878 if (insn & (1 << 22)) {
9879 /* 16-bit immediate. */
9880 imm |= (insn >> 4) & 0xf000;
9881 if (insn & (1 << 23)) {
9882 /* movt */
9883 tmp = load_reg(s, rd);
9884 tcg_gen_ext16u_i32(tmp, tmp);
9885 tcg_gen_ori_i32(tmp, tmp, imm << 16);
9886 } else {
9887 /* movw */
9888 tmp = tcg_temp_new_i32();
9889 tcg_gen_movi_i32(tmp, imm);
9891 } else {
9892 /* Add/sub 12-bit immediate. */
9893 if (rn == 15) {
9894 offset = s->pc & ~(uint32_t)3;
9895 if (insn & (1 << 23))
9896 offset -= imm;
9897 else
9898 offset += imm;
9899 tmp = tcg_temp_new_i32();
9900 tcg_gen_movi_i32(tmp, offset);
9901 } else {
9902 tmp = load_reg(s, rn);
9903 if (insn & (1 << 23))
9904 tcg_gen_subi_i32(tmp, tmp, imm);
9905 else
9906 tcg_gen_addi_i32(tmp, tmp, imm);
9909 store_reg(s, rd, tmp);
9911 } else {
9912 int shifter_out = 0;
9913 /* modified 12-bit immediate. */
9914 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9915 imm = (insn & 0xff);
9916 switch (shift) {
9917 case 0: /* XY */
9918 /* Nothing to do. */
9919 break;
9920 case 1: /* 00XY00XY */
9921 imm |= imm << 16;
9922 break;
9923 case 2: /* XY00XY00 */
9924 imm |= imm << 16;
9925 imm <<= 8;
9926 break;
9927 case 3: /* XYXYXYXY */
9928 imm |= imm << 16;
9929 imm |= imm << 8;
9930 break;
9931 default: /* Rotated constant. */
9932 shift = (shift << 1) | (imm >> 7);
9933 imm |= 0x80;
9934 imm = imm << (32 - shift);
9935 shifter_out = 1;
9936 break;
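/* Worked examples (illustrative values): imm8 == 0x55 with a shift
 * field of 1 produces 0x00550055; a shift field of 4 with imm8 == 0x00
 * takes the rotated-constant path, giving (0x80 | 0) rotated right by
 * 8, i.e. 0x80000000, with shifter_out set. */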
9938 tmp2 = tcg_temp_new_i32();
9939 tcg_gen_movi_i32(tmp2, imm);
9940 rn = (insn >> 16) & 0xf;
9941 if (rn == 15) {
9942 tmp = tcg_temp_new_i32();
9943 tcg_gen_movi_i32(tmp, 0);
9944 } else {
9945 tmp = load_reg(s, rn);
9947 op = (insn >> 21) & 0xf;
9948 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
9949 shifter_out, tmp, tmp2))
9950 goto illegal_op;
9951 tcg_temp_free_i32(tmp2);
9952 rd = (insn >> 8) & 0xf;
9953 if (rd != 15) {
9954 store_reg(s, rd, tmp);
9955 } else {
9956 tcg_temp_free_i32(tmp);
9960 break;
9961 case 12: /* Load/store single data item. */
9963 int postinc = 0;
9964 int writeback = 0;
9965 int memidx;
9966 if ((insn & 0x01100000) == 0x01000000) {
9967 if (disas_neon_ls_insn(env, s, insn))
9968 goto illegal_op;
9969 break;
9971 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9972 if (rs == 15) {
9973 if (!(insn & (1 << 20))) {
9974 goto illegal_op;
9976 if (op != 2) {
9977 /* Byte or halfword load space with dest == r15: memory hints.
9978 * Catch them early so we don't emit pointless addressing code.
9979 * This space is a mix of:
9980 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9981 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9982 * cores)
9983 * unallocated hints, which must be treated as NOPs
9984 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9985 * which is easiest for the decoding logic
9986 * Some space which must UNDEF
9988 int op1 = (insn >> 23) & 3;
9989 int op2 = (insn >> 6) & 0x3f;
9990 if (op & 2) {
9991 goto illegal_op;
9993 if (rn == 15) {
9994 /* UNPREDICTABLE, unallocated hint or
9995 * PLD/PLDW/PLI (literal)
9997 return 0;
9999 if (op1 & 1) {
10000 return 0; /* PLD/PLDW/PLI or unallocated hint */
10002 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10003 return 0; /* PLD/PLDW/PLI or unallocated hint */
10005 /* UNDEF space, or an UNPREDICTABLE */
10006 return 1;
10009 memidx = get_mem_index(s);
10010 if (rn == 15) {
10011 addr = tcg_temp_new_i32();
10012 /* PC relative. */
10013 /* s->pc has already been incremented by 4. */
10014 imm = s->pc & 0xfffffffc;
10015 if (insn & (1 << 23))
10016 imm += insn & 0xfff;
10017 else
10018 imm -= insn & 0xfff;
10019 tcg_gen_movi_i32(addr, imm);
10020 } else {
10021 addr = load_reg(s, rn);
10022 if (insn & (1 << 23)) {
10023 /* Positive offset. */
10024 imm = insn & 0xfff;
10025 tcg_gen_addi_i32(addr, addr, imm);
10026 } else {
10027 imm = insn & 0xff;
10028 switch ((insn >> 8) & 0xf) {
10029 case 0x0: /* Shifted Register. */
10030 shift = (insn >> 4) & 0xf;
10031 if (shift > 3) {
10032 tcg_temp_free_i32(addr);
10033 goto illegal_op;
10035 tmp = load_reg(s, rm);
10036 if (shift)
10037 tcg_gen_shli_i32(tmp, tmp, shift);
10038 tcg_gen_add_i32(addr, addr, tmp);
10039 tcg_temp_free_i32(tmp);
10040 break;
10041 case 0xc: /* Negative offset. */
10042 tcg_gen_addi_i32(addr, addr, -imm);
10043 break;
10044 case 0xe: /* User privilege. */
10045 tcg_gen_addi_i32(addr, addr, imm);
10046 memidx = MMU_USER_IDX;
10047 break;
10048 case 0x9: /* Post-decrement. */
10049 imm = -imm;
10050 /* Fall through. */
10051 case 0xb: /* Post-increment. */
10052 postinc = 1;
10053 writeback = 1;
10054 break;
10055 case 0xd: /* Pre-decrement. */
10056 imm = -imm;
10057 /* Fall through. */
10058 case 0xf: /* Pre-increment. */
10059 tcg_gen_addi_i32(addr, addr, imm);
10060 writeback = 1;
10061 break;
10062 default:
10063 tcg_temp_free_i32(addr);
10064 goto illegal_op;
10068 if (insn & (1 << 20)) {
10069 /* Load. */
10070 tmp = tcg_temp_new_i32();
10071 switch (op) {
10072 case 0:
10073 gen_aa32_ld8u(tmp, addr, memidx);
10074 break;
10075 case 4:
10076 gen_aa32_ld8s(tmp, addr, memidx);
10077 break;
10078 case 1:
10079 gen_aa32_ld16u(tmp, addr, memidx);
10080 break;
10081 case 5:
10082 gen_aa32_ld16s(tmp, addr, memidx);
10083 break;
10084 case 2:
10085 gen_aa32_ld32u(tmp, addr, memidx);
10086 break;
10087 default:
10088 tcg_temp_free_i32(tmp);
10089 tcg_temp_free_i32(addr);
10090 goto illegal_op;
10092 if (rs == 15) {
10093 gen_bx(s, tmp);
10094 } else {
10095 store_reg(s, rs, tmp);
10097 } else {
10098 /* Store. */
10099 tmp = load_reg(s, rs);
10100 switch (op) {
10101 case 0:
10102 gen_aa32_st8(tmp, addr, memidx);
10103 break;
10104 case 1:
10105 gen_aa32_st16(tmp, addr, memidx);
10106 break;
10107 case 2:
10108 gen_aa32_st32(tmp, addr, memidx);
10109 break;
10110 default:
10111 tcg_temp_free_i32(tmp);
10112 tcg_temp_free_i32(addr);
10113 goto illegal_op;
10115 tcg_temp_free_i32(tmp);
10117 if (postinc)
10118 tcg_gen_addi_i32(addr, addr, imm);
10119 if (writeback) {
10120 store_reg(s, rn, addr);
10121 } else {
10122 tcg_temp_free_i32(addr);
10125 break;
10126 default:
10127 goto illegal_op;
10129 return 0;
10130 illegal_op:
10131 return 1;
10134 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10136 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10137 int32_t offset;
10138 int i;
10139 TCGv_i32 tmp;
10140 TCGv_i32 tmp2;
10141 TCGv_i32 addr;
10143 if (s->condexec_mask) {
10144 cond = s->condexec_cond;
10145 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10146 s->condlabel = gen_new_label();
10147 arm_gen_test_cc(cond ^ 1, s->condlabel);
10148 s->condjmp = 1;
10152 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10153 s->pc += 2;
10155 switch (insn >> 12) {
10156 case 0: case 1:
10158 rd = insn & 7;
10159 op = (insn >> 11) & 3;
10160 if (op == 3) {
10161 /* add/subtract */
10162 rn = (insn >> 3) & 7;
10163 tmp = load_reg(s, rn);
10164 if (insn & (1 << 10)) {
10165 /* immediate */
10166 tmp2 = tcg_temp_new_i32();
10167 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10168 } else {
10169 /* reg */
10170 rm = (insn >> 6) & 7;
10171 tmp2 = load_reg(s, rm);
10173 if (insn & (1 << 9)) {
10174 if (s->condexec_mask)
10175 tcg_gen_sub_i32(tmp, tmp, tmp2);
10176 else
10177 gen_sub_CC(tmp, tmp, tmp2);
10178 } else {
10179 if (s->condexec_mask)
10180 tcg_gen_add_i32(tmp, tmp, tmp2);
10181 else
10182 gen_add_CC(tmp, tmp, tmp2);
10184 tcg_temp_free_i32(tmp2);
10185 store_reg(s, rd, tmp);
10186 } else {
10187 /* shift immediate */
10188 rm = (insn >> 3) & 7;
10189 shift = (insn >> 6) & 0x1f;
10190 tmp = load_reg(s, rm);
10191 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10192 if (!s->condexec_mask)
10193 gen_logic_CC(tmp);
10194 store_reg(s, rd, tmp);
10196 break;
10197 case 2: case 3:
10198 /* arithmetic large immediate */
10199 op = (insn >> 11) & 3;
10200 rd = (insn >> 8) & 0x7;
10201 if (op == 0) { /* mov */
10202 tmp = tcg_temp_new_i32();
10203 tcg_gen_movi_i32(tmp, insn & 0xff);
10204 if (!s->condexec_mask)
10205 gen_logic_CC(tmp);
10206 store_reg(s, rd, tmp);
10207 } else {
10208 tmp = load_reg(s, rd);
10209 tmp2 = tcg_temp_new_i32();
10210 tcg_gen_movi_i32(tmp2, insn & 0xff);
10211 switch (op) {
10212 case 1: /* cmp */
10213 gen_sub_CC(tmp, tmp, tmp2);
10214 tcg_temp_free_i32(tmp);
10215 tcg_temp_free_i32(tmp2);
10216 break;
10217 case 2: /* add */
10218 if (s->condexec_mask)
10219 tcg_gen_add_i32(tmp, tmp, tmp2);
10220 else
10221 gen_add_CC(tmp, tmp, tmp2);
10222 tcg_temp_free_i32(tmp2);
10223 store_reg(s, rd, tmp);
10224 break;
10225 case 3: /* sub */
10226 if (s->condexec_mask)
10227 tcg_gen_sub_i32(tmp, tmp, tmp2);
10228 else
10229 gen_sub_CC(tmp, tmp, tmp2);
10230 tcg_temp_free_i32(tmp2);
10231 store_reg(s, rd, tmp);
10232 break;
10235 break;
10236 case 4:
10237 if (insn & (1 << 11)) {
10238 rd = (insn >> 8) & 7;
10239 /* load pc-relative. Bit 1 of PC is ignored. */
10240 val = s->pc + 2 + ((insn & 0xff) * 4);
10241 val &= ~(uint32_t)2;
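/* Worked example (illustrative value): with imm8 == 1 this loads from
 * Align(instruction address + 4, 4) + 4, since s->pc is the
 * instruction address + 2 at this point and the mask clears bit 1. */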
10242 addr = tcg_temp_new_i32();
10243 tcg_gen_movi_i32(addr, val);
10244 tmp = tcg_temp_new_i32();
10245 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10246 tcg_temp_free_i32(addr);
10247 store_reg(s, rd, tmp);
10248 break;
10250 if (insn & (1 << 10)) {
10251 /* data processing extended or blx */
10252 rd = (insn & 7) | ((insn >> 4) & 8);
10253 rm = (insn >> 3) & 0xf;
10254 op = (insn >> 8) & 3;
10255 switch (op) {
10256 case 0: /* add */
10257 tmp = load_reg(s, rd);
10258 tmp2 = load_reg(s, rm);
10259 tcg_gen_add_i32(tmp, tmp, tmp2);
10260 tcg_temp_free_i32(tmp2);
10261 store_reg(s, rd, tmp);
10262 break;
10263 case 1: /* cmp */
10264 tmp = load_reg(s, rd);
10265 tmp2 = load_reg(s, rm);
10266 gen_sub_CC(tmp, tmp, tmp2);
10267 tcg_temp_free_i32(tmp2);
10268 tcg_temp_free_i32(tmp);
10269 break;
10270 case 2: /* mov/cpy */
10271 tmp = load_reg(s, rm);
10272 store_reg(s, rd, tmp);
10273 break;
10274 case 3:/* branch [and link] exchange thumb register */
10275 tmp = load_reg(s, rm);
10276 if (insn & (1 << 7)) {
10277 ARCH(5);
10278 val = (uint32_t)s->pc | 1;
10279 tmp2 = tcg_temp_new_i32();
10280 tcg_gen_movi_i32(tmp2, val);
10281 store_reg(s, 14, tmp2);
10283 /* already thumb, no need to check */
10284 gen_bx(s, tmp);
10285 break;
10287 break;
10290 /* data processing register */
10291 rd = insn & 7;
10292 rm = (insn >> 3) & 7;
10293 op = (insn >> 6) & 0xf;
10294 if (op == 2 || op == 3 || op == 4 || op == 7) {
10295 /* the shift/rotate ops want the operands backwards */
10296 val = rm;
10297 rm = rd;
10298 rd = val;
10299 val = 1;
10300 } else {
10301 val = 0;
10304 if (op == 9) { /* neg */
10305 tmp = tcg_temp_new_i32();
10306 tcg_gen_movi_i32(tmp, 0);
10307 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10308 tmp = load_reg(s, rd);
10309 } else {
10310 TCGV_UNUSED_I32(tmp);
10313 tmp2 = load_reg(s, rm);
10314 switch (op) {
10315 case 0x0: /* and */
10316 tcg_gen_and_i32(tmp, tmp, tmp2);
10317 if (!s->condexec_mask)
10318 gen_logic_CC(tmp);
10319 break;
10320 case 0x1: /* eor */
10321 tcg_gen_xor_i32(tmp, tmp, tmp2);
10322 if (!s->condexec_mask)
10323 gen_logic_CC(tmp);
10324 break;
10325 case 0x2: /* lsl */
10326 if (s->condexec_mask) {
10327 gen_shl(tmp2, tmp2, tmp);
10328 } else {
10329 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10330 gen_logic_CC(tmp2);
10332 break;
10333 case 0x3: /* lsr */
10334 if (s->condexec_mask) {
10335 gen_shr(tmp2, tmp2, tmp);
10336 } else {
10337 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10338 gen_logic_CC(tmp2);
10340 break;
10341 case 0x4: /* asr */
10342 if (s->condexec_mask) {
10343 gen_sar(tmp2, tmp2, tmp);
10344 } else {
10345 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10346 gen_logic_CC(tmp2);
10348 break;
10349 case 0x5: /* adc */
10350 if (s->condexec_mask) {
10351 gen_adc(tmp, tmp2);
10352 } else {
10353 gen_adc_CC(tmp, tmp, tmp2);
10355 break;
10356 case 0x6: /* sbc */
10357 if (s->condexec_mask) {
10358 gen_sub_carry(tmp, tmp, tmp2);
10359 } else {
10360 gen_sbc_CC(tmp, tmp, tmp2);
10362 break;
10363 case 0x7: /* ror */
10364 if (s->condexec_mask) {
10365 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10366 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10367 } else {
10368 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10369 gen_logic_CC(tmp2);
10371 break;
10372 case 0x8: /* tst */
10373 tcg_gen_and_i32(tmp, tmp, tmp2);
10374 gen_logic_CC(tmp);
10375 rd = 16;
10376 break;
10377 case 0x9: /* neg */
10378 if (s->condexec_mask)
10379 tcg_gen_neg_i32(tmp, tmp2);
10380 else
10381 gen_sub_CC(tmp, tmp, tmp2);
10382 break;
10383 case 0xa: /* cmp */
10384 gen_sub_CC(tmp, tmp, tmp2);
10385 rd = 16;
10386 break;
10387 case 0xb: /* cmn */
10388 gen_add_CC(tmp, tmp, tmp2);
10389 rd = 16;
10390 break;
10391 case 0xc: /* orr */
10392 tcg_gen_or_i32(tmp, tmp, tmp2);
10393 if (!s->condexec_mask)
10394 gen_logic_CC(tmp);
10395 break;
10396 case 0xd: /* mul */
10397 tcg_gen_mul_i32(tmp, tmp, tmp2);
10398 if (!s->condexec_mask)
10399 gen_logic_CC(tmp);
10400 break;
10401 case 0xe: /* bic */
10402 tcg_gen_andc_i32(tmp, tmp, tmp2);
10403 if (!s->condexec_mask)
10404 gen_logic_CC(tmp);
10405 break;
10406 case 0xf: /* mvn */
10407 tcg_gen_not_i32(tmp2, tmp2);
10408 if (!s->condexec_mask)
10409 gen_logic_CC(tmp2);
10410 val = 1;
10411 rm = rd;
10412 break;
10414 if (rd != 16) {
10415 if (val) {
10416 store_reg(s, rm, tmp2);
10417 if (op != 0xf)
10418 tcg_temp_free_i32(tmp);
10419 } else {
10420 store_reg(s, rd, tmp);
10421 tcg_temp_free_i32(tmp2);
10423 } else {
10424 tcg_temp_free_i32(tmp);
10425 tcg_temp_free_i32(tmp2);
10427 break;
10429 case 5:
10430 /* load/store register offset. */
10431 rd = insn & 7;
10432 rn = (insn >> 3) & 7;
10433 rm = (insn >> 6) & 7;
10434 op = (insn >> 9) & 7;
10435 addr = load_reg(s, rn);
10436 tmp = load_reg(s, rm);
10437 tcg_gen_add_i32(addr, addr, tmp);
10438 tcg_temp_free_i32(tmp);
10440 if (op < 3) { /* store */
10441 tmp = load_reg(s, rd);
10442 } else {
10443 tmp = tcg_temp_new_i32();
10446 switch (op) {
10447 case 0: /* str */
10448 gen_aa32_st32(tmp, addr, get_mem_index(s));
10449 break;
10450 case 1: /* strh */
10451 gen_aa32_st16(tmp, addr, get_mem_index(s));
10452 break;
10453 case 2: /* strb */
10454 gen_aa32_st8(tmp, addr, get_mem_index(s));
10455 break;
10456 case 3: /* ldrsb */
10457 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10458 break;
10459 case 4: /* ldr */
10460 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10461 break;
10462 case 5: /* ldrh */
10463 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10464 break;
10465 case 6: /* ldrb */
10466 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10467 break;
10468 case 7: /* ldrsh */
10469 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10470 break;
10472 if (op >= 3) { /* load */
10473 store_reg(s, rd, tmp);
10474 } else {
10475 tcg_temp_free_i32(tmp);
10477 tcg_temp_free_i32(addr);
10478 break;
10480 case 6:
10481 /* load/store word immediate offset */
10482 rd = insn & 7;
10483 rn = (insn >> 3) & 7;
10484 addr = load_reg(s, rn);
10485 val = (insn >> 4) & 0x7c;
10486 tcg_gen_addi_i32(addr, addr, val);
10488 if (insn & (1 << 11)) {
10489 /* load */
10490 tmp = tcg_temp_new_i32();
10491 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10492 store_reg(s, rd, tmp);
10493 } else {
10494 /* store */
10495 tmp = load_reg(s, rd);
10496 gen_aa32_st32(tmp, addr, get_mem_index(s));
10497 tcg_temp_free_i32(tmp);
10499 tcg_temp_free_i32(addr);
10500 break;
10502 case 7:
10503 /* load/store byte immediate offset */
10504 rd = insn & 7;
10505 rn = (insn >> 3) & 7;
10506 addr = load_reg(s, rn);
10507 val = (insn >> 6) & 0x1f;
10508 tcg_gen_addi_i32(addr, addr, val);
10510 if (insn & (1 << 11)) {
10511 /* load */
10512 tmp = tcg_temp_new_i32();
10513 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10514 store_reg(s, rd, tmp);
10515 } else {
10516 /* store */
10517 tmp = load_reg(s, rd);
10518 gen_aa32_st8(tmp, addr, get_mem_index(s));
10519 tcg_temp_free_i32(tmp);
10521 tcg_temp_free_i32(addr);
10522 break;
10524 case 8:
10525 /* load/store halfword immediate offset */
10526 rd = insn & 7;
10527 rn = (insn >> 3) & 7;
10528 addr = load_reg(s, rn);
10529 val = (insn >> 5) & 0x3e;
10530 tcg_gen_addi_i32(addr, addr, val);
10532 if (insn & (1 << 11)) {
10533 /* load */
10534 tmp = tcg_temp_new_i32();
10535 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10536 store_reg(s, rd, tmp);
10537 } else {
10538 /* store */
10539 tmp = load_reg(s, rd);
10540 gen_aa32_st16(tmp, addr, get_mem_index(s));
10541 tcg_temp_free_i32(tmp);
10543 tcg_temp_free_i32(addr);
10544 break;
10546 case 9:
10547 /* load/store from stack */
10548 rd = (insn >> 8) & 7;
10549 addr = load_reg(s, 13);
10550 val = (insn & 0xff) * 4;
10551 tcg_gen_addi_i32(addr, addr, val);
10553 if (insn & (1 << 11)) {
10554 /* load */
10555 tmp = tcg_temp_new_i32();
10556 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10557 store_reg(s, rd, tmp);
10558 } else {
10559 /* store */
10560 tmp = load_reg(s, rd);
10561 gen_aa32_st32(tmp, addr, get_mem_index(s));
10562 tcg_temp_free_i32(tmp);
10564 tcg_temp_free_i32(addr);
10565 break;
10567 case 10:
10568 /* add to high reg */
10569 rd = (insn >> 8) & 7;
10570 if (insn & (1 << 11)) {
10571 /* SP */
10572 tmp = load_reg(s, 13);
10573 } else {
10574 /* PC. bit 1 is ignored. */
10575 tmp = tcg_temp_new_i32();
10576 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10578 val = (insn & 0xff) * 4;
10579 tcg_gen_addi_i32(tmp, tmp, val);
10580 store_reg(s, rd, tmp);
10581 break;
10583 case 11:
10584 /* misc */
10585 op = (insn >> 8) & 0xf;
10586 switch (op) {
10587 case 0:
10588 /* adjust stack pointer */
10589 tmp = load_reg(s, 13);
10590 val = (insn & 0x7f) * 4;
10591 if (insn & (1 << 7))
10592 val = -(int32_t)val;
10593 tcg_gen_addi_i32(tmp, tmp, val);
10594 store_reg(s, 13, tmp);
10595 break;
10597 case 2: /* sign/zero extend. */
10598 ARCH(6);
10599 rd = insn & 7;
10600 rm = (insn >> 3) & 7;
10601 tmp = load_reg(s, rm);
10602 switch ((insn >> 6) & 3) {
10603 case 0: gen_sxth(tmp); break;
10604 case 1: gen_sxtb(tmp); break;
10605 case 2: gen_uxth(tmp); break;
10606 case 3: gen_uxtb(tmp); break;
10608 store_reg(s, rd, tmp);
10609 break;
10610 case 4: case 5: case 0xc: case 0xd:
10611 /* push/pop */
10612 addr = load_reg(s, 13);
10613 if (insn & (1 << 8))
10614 offset = 4;
10615 else
10616 offset = 0;
10617 for (i = 0; i < 8; i++) {
10618 if (insn & (1 << i))
10619 offset += 4;
10621 if ((insn & (1 << 11)) == 0) {
10622 tcg_gen_addi_i32(addr, addr, -offset);
10624 for (i = 0; i < 8; i++) {
10625 if (insn & (1 << i)) {
10626 if (insn & (1 << 11)) {
10627 /* pop */
10628 tmp = tcg_temp_new_i32();
10629 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10630 store_reg(s, i, tmp);
10631 } else {
10632 /* push */
10633 tmp = load_reg(s, i);
10634 gen_aa32_st32(tmp, addr, get_mem_index(s));
10635 tcg_temp_free_i32(tmp);
10637 /* advance to the next address. */
10638 tcg_gen_addi_i32(addr, addr, 4);
10641 TCGV_UNUSED_I32(tmp);
10642 if (insn & (1 << 8)) {
10643 if (insn & (1 << 11)) {
10644 /* pop pc */
10645 tmp = tcg_temp_new_i32();
10646 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10647 /* don't set the pc until the rest of the instruction
10648 has completed */
10649 } else {
10650 /* push lr */
10651 tmp = load_reg(s, 14);
10652 gen_aa32_st32(tmp, addr, get_mem_index(s));
10653 tcg_temp_free_i32(tmp);
10655 tcg_gen_addi_i32(addr, addr, 4);
10657 if ((insn & (1 << 11)) == 0) {
10658 tcg_gen_addi_i32(addr, addr, -offset);
10660 /* write back the new stack pointer */
10661 store_reg(s, 13, addr);
10662 /* set the new PC value */
10663 if ((insn & 0x0900) == 0x0900) {
10664 store_reg_from_load(env, s, 15, tmp);
10666 break;
10668 case 1: case 3: case 9: case 11: /* cbz/cbnz */
10669 rm = insn & 7;
10670 tmp = load_reg(s, rm);
10671 s->condlabel = gen_new_label();
10672 s->condjmp = 1;
10673 if (insn & (1 << 11))
10674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
10675 else
10676 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
10677 tcg_temp_free_i32(tmp);
10678 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10679 val = (uint32_t)s->pc + 2;
10680 val += offset;
10681 gen_jmp(s, val);
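/* Worked example (illustrative values): imm5 == 3 with i == 0 gives
 * offset == 6, so the taken branch targets the instruction address
 * + 4 + 6; bit 11 selects CBNZ (branch if non-zero) versus CBZ. */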
10682 break;
10684 case 15: /* IT, nop-hint. */
10685 if ((insn & 0xf) == 0) {
10686 gen_nop_hint(s, (insn >> 4) & 0xf);
10687 break;
10689 /* If Then. */
10690 s->condexec_cond = (insn >> 4) & 0xe;
10691 s->condexec_mask = insn & 0x1f;
10692 /* No actual code generated for this insn, just setup state. */
10693 break;
10695 case 0xe: /* bkpt */
10697 int imm8 = extract32(insn, 0, 8);
10698 ARCH(5);
10699 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
10700 break;
10703 case 0xa: /* rev */
10704 ARCH(6);
10705 rn = (insn >> 3) & 0x7;
10706 rd = insn & 0x7;
10707 tmp = load_reg(s, rn);
10708 switch ((insn >> 6) & 3) {
10709 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
10710 case 1: gen_rev16(tmp); break;
10711 case 3: gen_revsh(tmp); break;
10712 default: goto illegal_op;
10714 store_reg(s, rd, tmp);
10715 break;
10717 case 6:
10718 switch ((insn >> 5) & 7) {
10719 case 2:
10720 /* setend */
10721 ARCH(6);
10722 if (((insn >> 3) & 1) != s->bswap_code) {
10723 /* Dynamic endianness switching not implemented. */
10724 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
10725 goto illegal_op;
10727 break;
10728 case 3:
10729 /* cps */
10730 ARCH(6);
10731 if (IS_USER(s)) {
10732 break;
10734 if (IS_M(env)) {
10735 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10736 /* FAULTMASK */
10737 if (insn & 1) {
10738 addr = tcg_const_i32(19);
10739 gen_helper_v7m_msr(cpu_env, addr, tmp);
10740 tcg_temp_free_i32(addr);
10742 /* PRIMASK */
10743 if (insn & 2) {
10744 addr = tcg_const_i32(16);
10745 gen_helper_v7m_msr(cpu_env, addr, tmp);
10746 tcg_temp_free_i32(addr);
10748 tcg_temp_free_i32(tmp);
10749 gen_lookup_tb(s);
10750 } else {
10751 if (insn & (1 << 4)) {
10752 shift = CPSR_A | CPSR_I | CPSR_F;
10753 } else {
10754 shift = 0;
10756 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
10758 break;
10759 default:
10760 goto undef;
10762 break;
10764 default:
10765 goto undef;
10767 break;
10769 case 12:
10771 /* load/store multiple */
10772 TCGv_i32 loaded_var;
10773 TCGV_UNUSED_I32(loaded_var);
10774 rn = (insn >> 8) & 0x7;
10775 addr = load_reg(s, rn);
10776 for (i = 0; i < 8; i++) {
10777 if (insn & (1 << i)) {
10778 if (insn & (1 << 11)) {
10779 /* load */
10780 tmp = tcg_temp_new_i32();
10781 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10782 if (i == rn) {
10783 loaded_var = tmp;
10784 } else {
10785 store_reg(s, i, tmp);
10787 } else {
10788 /* store */
10789 tmp = load_reg(s, i);
10790 gen_aa32_st32(tmp, addr, get_mem_index(s));
10791 tcg_temp_free_i32(tmp);
10793 /* advance to the next address */
10794 tcg_gen_addi_i32(addr, addr, 4);
10797 if ((insn & (1 << rn)) == 0) {
10798 /* base reg not in list: base register writeback */
10799 store_reg(s, rn, addr);
10800 } else {
10801 /* base reg in list: if load, complete it now */
10802 if (insn & (1 << 11)) {
10803 store_reg(s, rn, loaded_var);
10805 tcg_temp_free_i32(addr);
10807 break;
10809 case 13:
10810 /* conditional branch or swi */
10811 cond = (insn >> 8) & 0xf;
10812 if (cond == 0xe)
10813 goto undef;
10815 if (cond == 0xf) {
10816 /* swi */
10817 gen_set_pc_im(s, s->pc);
10818 s->svc_imm = extract32(insn, 0, 8);
10819 s->is_jmp = DISAS_SWI;
10820 break;
10822 /* generate a conditional jump to next instruction */
10823 s->condlabel = gen_new_label();
10824 arm_gen_test_cc(cond ^ 1, s->condlabel);
10825 s->condjmp = 1;
10827 /* jump to the offset */
10828 val = (uint32_t)s->pc + 2;
10829 offset = ((int32_t)insn << 24) >> 24;
10830 val += offset << 1;
10831 gen_jmp(s, val);
10832 break;
10834 case 14:
10835 if (insn & (1 << 11)) {
10836 if (disas_thumb2_insn(env, s, insn))
10837 goto undef32;
10838 break;
10840 /* unconditional branch */
10841 val = (uint32_t)s->pc;
10842 offset = ((int32_t)insn << 21) >> 21;
10843 val += (offset << 1) + 2;
10844 gen_jmp(s, val);
10845 break;
10847 case 15:
10848 if (disas_thumb2_insn(env, s, insn))
10849 goto undef32;
10850 break;
10852 return;
10853 undef32:
10854 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
10855 return;
10856 illegal_op:
10857 undef:
10858 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
10861 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10862 basic block 'tb'. If search_pc is TRUE, also generate PC
10863 information for each intermediate instruction. */
10864 static inline void gen_intermediate_code_internal(ARMCPU *cpu,
10865 TranslationBlock *tb,
10866 bool search_pc)
10868 CPUState *cs = CPU(cpu);
10869 CPUARMState *env = &cpu->env;
10870 DisasContext dc1, *dc = &dc1;
10871 CPUBreakpoint *bp;
10872 uint16_t *gen_opc_end;
10873 int j, lj;
10874 target_ulong pc_start;
10875 target_ulong next_page_start;
10876 int num_insns;
10877 int max_insns;
10879 /* generate intermediate code */
10881 /* The A64 decoder has its own top level loop, because it doesn't need
10882 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10883 */
10884 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10885 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10886 return;
10887 }
10889 pc_start = tb->pc;
10891 dc->tb = tb;
10893 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
10895 dc->is_jmp = DISAS_NEXT;
10896 dc->pc = pc_start;
10897 dc->singlestep_enabled = cs->singlestep_enabled;
10898 dc->condjmp = 0;
10900 dc->aarch64 = 0;
10901 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10902 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10903 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10904 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
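/* The CONDEXEC TB flag field describes any in-progress IT block: its high
 * four bits are the condition for the next instruction and its low four
 * bits the remaining mask, which the translator keeps shifted left by one
 * (see the long note below). */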
10905 #if !defined(CONFIG_USER_ONLY)
10906 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
10907 #endif
10908 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
10909 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10910 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10911 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
10912 dc->cp_regs = cpu->cp_regs;
10913 dc->current_pl = arm_current_pl(env);
10914 dc->features = env->features;
10916 cpu_F0s = tcg_temp_new_i32();
10917 cpu_F1s = tcg_temp_new_i32();
10918 cpu_F0d = tcg_temp_new_i64();
10919 cpu_F1d = tcg_temp_new_i64();
10920 cpu_V0 = cpu_F0d;
10921 cpu_V1 = cpu_F1d;
10922 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10923 cpu_M0 = tcg_temp_new_i64();
10924 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
10925 lj = -1;
10926 num_insns = 0;
10927 max_insns = tb->cflags & CF_COUNT_MASK;
10928 if (max_insns == 0)
10929 max_insns = CF_COUNT_MASK;
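/* cflags' CF_COUNT_MASK field is the per-TB instruction budget used by
 * icount; zero appears to mean "no explicit limit", so fall back to the
 * maximum. */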
10931 gen_tb_start();
10933 tcg_clear_temp_count();
10935 /* A note on handling of the condexec (IT) bits:
10937 * We want to avoid the overhead of having to write the updated condexec
10938 * bits back to the CPUARMState for every instruction in an IT block. So:
10939 * (1) if the condexec bits are not already zero then we write
10940 * zero back into the CPUARMState now. This avoids complications trying
10941 * to do it at the end of the block. (For example if we don't do this
10942 * it's hard to identify whether we can safely skip writing condexec
10943 * at the end of the TB, which we definitely want to do for the case
10944 * where a TB doesn't do anything with the IT state at all.)
10945 * (2) if we are going to leave the TB then we call gen_set_condexec()
10946 * which will write the correct value into CPUARMState if zero is wrong.
10947 * This is done both for leaving the TB at the end, and for leaving
10948 * it because of an exception we know will happen, which is done in
10949 * gen_exception_insn(). The latter is necessary because we need to
10950 * leave the TB with the PC/IT state just prior to execution of the
10951 * instruction which caused the exception.
10952 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
10953 * then the CPUARMState will be wrong and we need to reset it.
10954 * This is handled in the same way as restoration of the
10955 * PC in these situations: we will be called again with search_pc=1
10956 * and generate a mapping of the condexec bits for each PC in
10957 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10958 * this to restore the condexec bits.
10960 * Note that there are no instructions which can read the condexec
10961 * bits, and none which can write non-static values to them, so
10962 * we don't need to care about whether CPUARMState is correct in the
10963 * middle of a TB.
10964 */
10966 /* Reset the conditional execution bits immediately. This avoids
10967 complications trying to do it at the end of the block. */
10968 if (dc->condexec_mask || dc->condexec_cond)
10969 {
10970 TCGv_i32 tmp = tcg_temp_new_i32();
10971 tcg_gen_movi_i32(tmp, 0);
10972 store_cpu_field(tmp, condexec_bits);
10973 }
10974 do {
10975 #ifdef CONFIG_USER_ONLY
10976 /* Intercept jump to the magic kernel page. */
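/* 0xffff0000 is the start of the Linux vector page (exception vectors and
 * kuser helpers). User-mode emulation cannot execute real code there, so
 * a branch into that page is converted to EXCP_KERNEL_TRAP and emulated
 * by the user-mode exception handler. */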
10977 if (dc->pc >= 0xffff0000) {
10978 /* We always get here via a jump, so know we are not in a
10979 conditional execution block. */
10980 gen_exception_internal(EXCP_KERNEL_TRAP);
10981 dc->is_jmp = DISAS_UPDATE;
10982 break;
10983 }
10984 #else
10985 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10986 /* We always get here via a jump, so know we are not in a
10987 conditional execution block. */
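/* For M profile, addresses at 0xfffffff0 and above are EXC_RETURN magic
 * values: branching to one requests an exception return, modelled here as
 * EXCP_EXCEPTION_EXIT. */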
10988 gen_exception_internal(EXCP_EXCEPTION_EXIT);
10989 dc->is_jmp = DISAS_UPDATE;
10990 break;
10991 }
10992 #endif
10994 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
10995 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
10996 if (bp->pc == dc->pc) {
10997 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
10998 /* Advance PC so that clearing the breakpoint will
10999 invalidate this TB. */
11000 dc->pc += 2;
11001 goto done_generating;
11002 }
11003 }
11004 }
11005 if (search_pc) {
11006 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
11007 if (lj < j) {
11008 lj++;
11009 while (lj < j)
11010 tcg_ctx.gen_opc_instr_start[lj++] = 0;
11011 }
11012 tcg_ctx.gen_opc_pc[lj] = dc->pc;
11013 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
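/* Pack the IT state back into unshifted form (condition in bits [7:4],
 * mask in bits [3:0]) so restore_state_to_opc() below can load it straight
 * into env->condexec_bits. */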
11014 tcg_ctx.gen_opc_instr_start[lj] = 1;
11015 tcg_ctx.gen_opc_icount[lj] = num_insns;
11016 }
11018 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11019 gen_io_start();
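/* With icount, a TB whose last instruction may perform I/O is bracketed by
 * gen_io_start()/gen_io_end() so that the access happens with an exact
 * instruction count. */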
11021 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
11022 tcg_gen_debug_insn_start(dc->pc);
11023 }
11025 if (dc->thumb) {
11026 disas_thumb_insn(env, dc);
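/* Advance the IT state after each Thumb insn: the next condition's low bit
 * comes from the top bit of the (shifted) mask, and the mask moves up one.
 * E.g. in an "ITE EQ" block (cond 0x0, mask 0b11000 in this representation)
 * the first insn executes as EQ, the advance then yields cond 0x1 (NE) for
 * the second insn, and after that the mask becomes zero and the block ends. */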
11027 if (dc->condexec_mask) {
11028 dc->condexec_cond = (dc->condexec_cond & 0xe)
11029 | ((dc->condexec_mask >> 4) & 1);
11030 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11031 if (dc->condexec_mask == 0) {
11032 dc->condexec_cond = 0;
11033 }
11034 }
11035 } else {
11036 disas_arm_insn(env, dc);
11037 }
11039 if (dc->condjmp && !dc->is_jmp) {
11040 gen_set_label(dc->condlabel);
11041 dc->condjmp = 0;
11042 }
11044 if (tcg_check_temp_count()) {
11045 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11046 dc->pc);
11047 }
11049 /* Translation stops when a conditional branch is encountered.
11050 * Otherwise the subsequent code could get translated several times.
11051 * Also stop translation when a page boundary is reached. This
11052 * ensures prefetch aborts occur at the right place. */
11053 num_insns ++;
11054 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
11055 !cs->singlestep_enabled &&
11056 !singlestep &&
11057 dc->pc < next_page_start &&
11058 num_insns < max_insns);
11060 if (tb->cflags & CF_LAST_IO) {
11061 if (dc->condjmp) {
11062 /* FIXME: This can theoretically happen with self-modifying
11063 code. */
11064 cpu_abort(cs, "IO on conditional branch instruction");
11065 }
11066 gen_io_end();
11067 }
11069 /* At this stage dc->condjmp will only be set when the skipped
11070 instruction was a conditional branch or trap, and the PC has
11071 already been written. */
11072 if (unlikely(cs->singlestep_enabled)) {
11073 /* Make sure the pc is updated, and raise a debug exception. */
11074 if (dc->condjmp) {
11075 gen_set_condexec(dc);
11076 if (dc->is_jmp == DISAS_SWI) {
11077 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11078 } else {
11079 gen_exception_internal(EXCP_DEBUG);
11080 }
11081 gen_set_label(dc->condlabel);
11082 }
11083 if (dc->condjmp || !dc->is_jmp) {
11084 gen_set_pc_im(dc, dc->pc);
11085 dc->condjmp = 0;
11086 }
11087 gen_set_condexec(dc);
11088 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
11089 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11090 } else {
11091 /* FIXME: Single stepping a WFI insn will not halt
11092 the CPU. */
11093 gen_exception_internal(EXCP_DEBUG);
11094 }
11095 } else {
11096 /* While branches must always occur at the end of an IT block,
11097 there are a few other things that can cause us to terminate
11098 the TB in the middle of an IT block:
11099 - Exception generating instructions (bkpt, swi, undefined).
11100 - Page boundaries.
11101 - Hardware watchpoints.
11102 Hardware breakpoints have already been handled and skip this code.
11103 */
11104 gen_set_condexec(dc);
11105 switch(dc->is_jmp) {
11106 case DISAS_NEXT:
11107 gen_goto_tb(dc, 1, dc->pc);
11108 break;
11109 default:
11110 case DISAS_JUMP:
11111 case DISAS_UPDATE:
11112 /* indicate that the hash table must be used to find the next TB */
11113 tcg_gen_exit_tb(0);
11114 break;
11115 case DISAS_TB_JUMP:
11116 /* nothing more to generate */
11117 break;
11118 case DISAS_WFI:
11119 gen_helper_wfi(cpu_env);
11120 break;
11121 case DISAS_WFE:
11122 gen_helper_wfe(cpu_env);
11123 break;
11124 case DISAS_SWI:
11125 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
11126 break;
11127 }
11128 if (dc->condjmp) {
11129 gen_set_label(dc->condlabel);
11130 gen_set_condexec(dc);
11131 gen_goto_tb(dc, 1, dc->pc);
11132 dc->condjmp = 0;
11133 }
11134 }
11136 done_generating:
11137 gen_tb_end(tb, num_insns);
11138 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
11140 #ifdef DEBUG_DISAS
11141 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
11142 qemu_log("----------------\n");
11143 qemu_log("IN: %s\n", lookup_symbol(pc_start));
11144 log_target_disas(env, pc_start, dc->pc - pc_start,
11145 dc->thumb | (dc->bswap_code << 1));
11146 qemu_log("\n");
11147 }
11148 #endif
11149 if (search_pc) {
11150 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
11151 lj++;
11152 while (lj <= j)
11153 tcg_ctx.gen_opc_instr_start[lj++] = 0;
11154 } else {
11155 tb->size = dc->pc - pc_start;
11156 tb->icount = num_insns;
11157 }
11158 }
11160 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11161 {
11162 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
11163 }
11165 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
11166 {
11167 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
11168 }
11170 static const char *cpu_mode_names[16] = {
11171 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11172 "???", "???", "hyp", "und", "???", "???", "???", "sys"
11173 };
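/* Indexed by CPSR[3:0]: usr=0x10, fiq=0x11, irq=0x12, svc=0x13, mon=0x16,
 * abt=0x17, hyp=0x1a, und=0x1b, sys=0x1f; slots with no architected mode
 * print "???". */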
11175 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11176 int flags)
11177 {
11178 ARMCPU *cpu = ARM_CPU(cs);
11179 CPUARMState *env = &cpu->env;
11180 int i;
11181 uint32_t psr;
11183 if (is_a64(env)) {
11184 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11185 return;
11186 }
11188 for(i=0;i<16;i++) {
11189 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
11190 if ((i % 4) == 3)
11191 cpu_fprintf(f, "\n");
11192 else
11193 cpu_fprintf(f, " ");
11194 }
11195 psr = cpsr_read(env);
11196 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11197 psr,
11198 psr & (1 << 31) ? 'N' : '-',
11199 psr & (1 << 30) ? 'Z' : '-',
11200 psr & (1 << 29) ? 'C' : '-',
11201 psr & (1 << 28) ? 'V' : '-',
11202 psr & CPSR_T ? 'T' : 'A',
11203 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
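/* PSR bit 4 distinguishes the 32-bit modes from the legacy 26-bit ones,
 * hence the trailing 32/26; the fifth column is T for Thumb state, A for
 * ARM state. */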
11205 if (flags & CPU_DUMP_FPU) {
11206 int numvfpregs = 0;
11207 if (arm_feature(env, ARM_FEATURE_VFP)) {
11208 numvfpregs += 16;
11209 }
11210 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11211 numvfpregs += 16;
11212 }
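/* Base VFP has d0-d15; the VFP3 feature adds d16-d31, hence the two blocks
 * of 16. Each 64-bit d register is printed together with its two 32-bit
 * halves, labelled as s registers. */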
11213 for (i = 0; i < numvfpregs; i++) {
11214 uint64_t v = float64_val(env->vfp.regs[i]);
11215 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11216 i * 2, (uint32_t)v,
11217 i * 2 + 1, (uint32_t)(v >> 32),
11218 i, v);
11219 }
11220 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
11221 }
11222 }
11224 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
11225 {
11226 if (is_a64(env)) {
11227 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
11228 env->condexec_bits = 0;
11229 } else {
11230 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
11231 env->condexec_bits = gen_opc_condexec_bits[pc_pos];