target-ppc: Fix invalid SPR read/write warnings
[qemu/agraf.git] / target-arm / translate.c
bloba1b7b8c1a8fc07d68b4413d9076839c6d8fc7765
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30 #include "qemu/log.h"
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 int bswap_code;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
89 static TCGv_i32 cpu_exclusive_addr;
90 static TCGv_i32 cpu_exclusive_val;
91 static TCGv_i32 cpu_exclusive_high;
92 #ifdef CONFIG_USER_ONLY
93 static TCGv_i32 cpu_exclusive_test;
94 static TCGv_i32 cpu_exclusive_info;
95 #endif
97 /* FIXME: These should be removed. */
98 static TCGv cpu_F0s, cpu_F1s;
99 static TCGv_i64 cpu_F0d, cpu_F1d;
101 #include "exec/gen-icount.h"
103 static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
107 /* initialize TCG globals. */
108 void arm_translate_init(void)
110 int i;
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUARMState, regs[i]),
117 regnames[i]);
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
130 #ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
135 #endif
137 #define GEN_HELPER 2
138 #include "helper.h"
141 static inline TCGv load_cpu_offset(int offset)
143 TCGv tmp = tcg_temp_new_i32();
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
148 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
150 static inline void store_cpu_offset(TCGv var, int offset)
152 tcg_gen_st_i32(var, cpu_env, offset);
153 tcg_temp_free_i32(var);
156 #define store_cpu_field(var, name) \
157 store_cpu_offset(var, offsetof(CPUARMState, name))
159 /* Set a variable to the value of a CPU register. */
160 static void load_reg_var(DisasContext *s, TCGv var, int reg)
162 if (reg == 15) {
163 uint32_t addr;
164 /* normally, since we updated PC, we need only to add one insn */
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
171 tcg_gen_mov_i32(var, cpu_R[reg]);
175 /* Create a new temporary and set it to the value of a CPU register. */
176 static inline TCGv load_reg(DisasContext *s, int reg)
178 TCGv tmp = tcg_temp_new_i32();
179 load_reg_var(s, tmp, reg);
180 return tmp;
183 /* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185 static void store_reg(DisasContext *s, int reg, TCGv var)
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
191 tcg_gen_mov_i32(cpu_R[reg], var);
192 tcg_temp_free_i32(var);
195 /* Value extensions. */
196 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
198 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
205 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
207 TCGv tmp_mask = tcg_const_i32(mask);
208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
209 tcg_temp_free_i32(tmp_mask);
211 /* Set NZCV flags from the high 4 bits of var. */
212 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214 static void gen_exception(int excp)
216 TCGv tmp = tcg_temp_new_i32();
217 tcg_gen_movi_i32(tmp, excp);
218 gen_helper_exception(cpu_env, tmp);
219 tcg_temp_free_i32(tmp);
222 static void gen_smul_dual(TCGv a, TCGv b)
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
229 tcg_temp_free_i32(tmp2);
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
234 tcg_temp_free_i32(tmp1);
237 /* Byteswap each halfword. */
238 static void gen_rev16(TCGv var)
240 TCGv tmp = tcg_temp_new_i32();
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
246 tcg_temp_free_i32(tmp);
249 /* Byteswap low halfword and sign extend. */
250 static void gen_revsh(TCGv var)
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
257 /* Unsigned bitfield extract. */
258 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
265 /* Signed bitfield extract. */
266 static void gen_sbfx(TCGv var, int shift, int width)
268 uint32_t signbit;
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
280 /* Return (b << 32) + a. Mark inputs as dead */
281 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
285 tcg_gen_extu_i32_i64(tmp64, b);
286 tcg_temp_free_i32(b);
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
290 tcg_temp_free_i64(tmp64);
291 return a;
294 /* Return (b << 32) - a. Mark inputs as dead. */
295 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
299 tcg_gen_extu_i32_i64(tmp64, b);
300 tcg_temp_free_i32(b);
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
304 tcg_temp_free_i64(tmp64);
305 return a;
308 /* 32x32->64 multiply. Marks inputs as dead. */
309 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
311 TCGv lo = tcg_temp_new_i32();
312 TCGv hi = tcg_temp_new_i32();
313 TCGv_i64 ret;
315 tcg_gen_mulu2_i32(lo, hi, a, b);
316 tcg_temp_free_i32(a);
317 tcg_temp_free_i32(b);
319 ret = tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret, lo, hi);
321 tcg_temp_free(lo);
322 tcg_temp_free(hi);
324 return ret;
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
329 TCGv lo = tcg_temp_new_i32();
330 TCGv hi = tcg_temp_new_i32();
331 TCGv_i64 ret;
333 tcg_gen_muls2_i32(lo, hi, a, b);
334 tcg_temp_free_i32(a);
335 tcg_temp_free_i32(b);
337 ret = tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret, lo, hi);
339 tcg_temp_free(lo);
340 tcg_temp_free(hi);
342 return ret;
345 /* Swap low and high halfwords. */
346 static void gen_swap_half(TCGv var)
348 TCGv tmp = tcg_temp_new_i32();
349 tcg_gen_shri_i32(tmp, var, 16);
350 tcg_gen_shli_i32(var, var, 16);
351 tcg_gen_or_i32(var, var, tmp);
352 tcg_temp_free_i32(tmp);
355 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
357 t0 &= ~0x8000;
358 t1 &= ~0x8000;
359 t0 = (t0 + t1) ^ tmp;
362 static void gen_add16(TCGv t0, TCGv t1)
364 TCGv tmp = tcg_temp_new_i32();
365 tcg_gen_xor_i32(tmp, t0, t1);
366 tcg_gen_andi_i32(tmp, tmp, 0x8000);
367 tcg_gen_andi_i32(t0, t0, ~0x8000);
368 tcg_gen_andi_i32(t1, t1, ~0x8000);
369 tcg_gen_add_i32(t0, t0, t1);
370 tcg_gen_xor_i32(t0, t0, tmp);
371 tcg_temp_free_i32(tmp);
372 tcg_temp_free_i32(t1);
375 /* Set CF to the top bit of var. */
376 static void gen_set_CF_bit31(TCGv var)
378 tcg_gen_shri_i32(cpu_CF, var, 31);
381 /* Set N and Z flags from var. */
382 static inline void gen_logic_CC(TCGv var)
384 tcg_gen_mov_i32(cpu_NF, var);
385 tcg_gen_mov_i32(cpu_ZF, var);
388 /* T0 += T1 + CF. */
389 static void gen_adc(TCGv t0, TCGv t1)
391 tcg_gen_add_i32(t0, t0, t1);
392 tcg_gen_add_i32(t0, t0, cpu_CF);
395 /* dest = T0 + T1 + CF. */
396 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
398 tcg_gen_add_i32(dest, t0, t1);
399 tcg_gen_add_i32(dest, dest, cpu_CF);
402 /* dest = T0 - T1 + CF - 1. */
403 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
405 tcg_gen_sub_i32(dest, t0, t1);
406 tcg_gen_add_i32(dest, dest, cpu_CF);
407 tcg_gen_subi_i32(dest, dest, 1);
410 /* dest = T0 + T1. Compute C, N, V and Z flags */
411 static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
413 TCGv tmp = tcg_temp_new_i32();
414 tcg_gen_movi_i32(tmp, 0);
415 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
424 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
425 static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1)
427 TCGv tmp = tcg_temp_new_i32();
428 if (TCG_TARGET_HAS_add2_i32) {
429 tcg_gen_movi_i32(tmp, 0);
430 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
432 } else {
433 TCGv_i64 q0 = tcg_temp_new_i64();
434 TCGv_i64 q1 = tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0, t0);
436 tcg_gen_extu_i32_i64(q1, t1);
437 tcg_gen_add_i64(q0, q0, q1);
438 tcg_gen_extu_i32_i64(q1, cpu_CF);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
441 tcg_temp_free_i64(q0);
442 tcg_temp_free_i64(q1);
444 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tcg_gen_xor_i32(tmp, t0, t1);
447 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
448 tcg_temp_free_i32(tmp);
449 tcg_gen_mov_i32(dest, cpu_NF);
452 /* dest = T0 - T1. Compute C, N, V and Z flags */
453 static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
455 TCGv tmp;
456 tcg_gen_sub_i32(cpu_NF, t0, t1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
459 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
460 tmp = tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
467 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
468 static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
470 TCGv tmp = tcg_temp_new_i32();
471 tcg_gen_not_i32(tmp, t1);
472 gen_adc_CC(dest, t0, tmp);
473 tcg_temp_free(tmp);
476 #define GEN_SHIFT(name) \
477 static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
479 TCGv tmp1, tmp2, tmp3; \
480 tmp1 = tcg_temp_new_i32(); \
481 tcg_gen_andi_i32(tmp1, t1, 0xff); \
482 tmp2 = tcg_const_i32(0); \
483 tmp3 = tcg_const_i32(0x1f); \
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
485 tcg_temp_free_i32(tmp3); \
486 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
487 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
488 tcg_temp_free_i32(tmp2); \
489 tcg_temp_free_i32(tmp1); \
491 GEN_SHIFT(shl)
492 GEN_SHIFT(shr)
493 #undef GEN_SHIFT
495 static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
497 TCGv tmp1, tmp2;
498 tmp1 = tcg_temp_new_i32();
499 tcg_gen_andi_i32(tmp1, t1, 0xff);
500 tmp2 = tcg_const_i32(0x1f);
501 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
502 tcg_temp_free_i32(tmp2);
503 tcg_gen_sar_i32(dest, t0, tmp1);
504 tcg_temp_free_i32(tmp1);
507 static void tcg_gen_abs_i32(TCGv dest, TCGv src)
509 TCGv c0 = tcg_const_i32(0);
510 TCGv tmp = tcg_temp_new_i32();
511 tcg_gen_neg_i32(tmp, src);
512 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
513 tcg_temp_free_i32(c0);
514 tcg_temp_free_i32(tmp);
517 static void shifter_out_im(TCGv var, int shift)
519 if (shift == 0) {
520 tcg_gen_andi_i32(cpu_CF, var, 1);
521 } else {
522 tcg_gen_shri_i32(cpu_CF, var, shift);
523 if (shift != 31) {
524 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
529 /* Shift by immediate. Includes special handling for shift == 0. */
530 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
532 switch (shiftop) {
533 case 0: /* LSL */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, 32 - shift);
537 tcg_gen_shli_i32(var, var, shift);
539 break;
540 case 1: /* LSR */
541 if (shift == 0) {
542 if (flags) {
543 tcg_gen_shri_i32(cpu_CF, var, 31);
545 tcg_gen_movi_i32(var, 0);
546 } else {
547 if (flags)
548 shifter_out_im(var, shift - 1);
549 tcg_gen_shri_i32(var, var, shift);
551 break;
552 case 2: /* ASR */
553 if (shift == 0)
554 shift = 32;
555 if (flags)
556 shifter_out_im(var, shift - 1);
557 if (shift == 32)
558 shift = 31;
559 tcg_gen_sari_i32(var, var, shift);
560 break;
561 case 3: /* ROR/RRX */
562 if (shift != 0) {
563 if (flags)
564 shifter_out_im(var, shift - 1);
565 tcg_gen_rotri_i32(var, var, shift); break;
566 } else {
567 TCGv tmp = tcg_temp_new_i32();
568 tcg_gen_shli_i32(tmp, cpu_CF, 31);
569 if (flags)
570 shifter_out_im(var, 0);
571 tcg_gen_shri_i32(var, var, 1);
572 tcg_gen_or_i32(var, var, tmp);
573 tcg_temp_free_i32(tmp);
578 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
579 TCGv shift, int flags)
581 if (flags) {
582 switch (shiftop) {
583 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
584 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
585 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
586 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
588 } else {
589 switch (shiftop) {
590 case 0:
591 gen_shl(var, var, shift);
592 break;
593 case 1:
594 gen_shr(var, var, shift);
595 break;
596 case 2:
597 gen_sar(var, var, shift);
598 break;
599 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
600 tcg_gen_rotr_i32(var, var, shift); break;
603 tcg_temp_free_i32(shift);
606 #define PAS_OP(pfx) \
607 switch (op2) { \
608 case 0: gen_pas_helper(glue(pfx,add16)); break; \
609 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
610 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
611 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
612 case 4: gen_pas_helper(glue(pfx,add8)); break; \
613 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
615 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
617 TCGv_ptr tmp;
619 switch (op1) {
620 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
621 case 1:
622 tmp = tcg_temp_new_ptr();
623 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
624 PAS_OP(s)
625 tcg_temp_free_ptr(tmp);
626 break;
627 case 5:
628 tmp = tcg_temp_new_ptr();
629 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
630 PAS_OP(u)
631 tcg_temp_free_ptr(tmp);
632 break;
633 #undef gen_pas_helper
634 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
635 case 2:
636 PAS_OP(q);
637 break;
638 case 3:
639 PAS_OP(sh);
640 break;
641 case 6:
642 PAS_OP(uq);
643 break;
644 case 7:
645 PAS_OP(uh);
646 break;
647 #undef gen_pas_helper
650 #undef PAS_OP
652 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
653 #define PAS_OP(pfx) \
654 switch (op1) { \
655 case 0: gen_pas_helper(glue(pfx,add8)); break; \
656 case 1: gen_pas_helper(glue(pfx,add16)); break; \
657 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
658 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
659 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
660 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
662 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
664 TCGv_ptr tmp;
666 switch (op2) {
667 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
668 case 0:
669 tmp = tcg_temp_new_ptr();
670 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
671 PAS_OP(s)
672 tcg_temp_free_ptr(tmp);
673 break;
674 case 4:
675 tmp = tcg_temp_new_ptr();
676 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
677 PAS_OP(u)
678 tcg_temp_free_ptr(tmp);
679 break;
680 #undef gen_pas_helper
681 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
682 case 1:
683 PAS_OP(q);
684 break;
685 case 2:
686 PAS_OP(sh);
687 break;
688 case 5:
689 PAS_OP(uq);
690 break;
691 case 6:
692 PAS_OP(uh);
693 break;
694 #undef gen_pas_helper
697 #undef PAS_OP
699 static void gen_test_cc(int cc, int label)
701 TCGv tmp;
702 int inv;
704 switch (cc) {
705 case 0: /* eq: Z */
706 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
707 break;
708 case 1: /* ne: !Z */
709 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
710 break;
711 case 2: /* cs: C */
712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
713 break;
714 case 3: /* cc: !C */
715 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
716 break;
717 case 4: /* mi: N */
718 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
719 break;
720 case 5: /* pl: !N */
721 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
722 break;
723 case 6: /* vs: V */
724 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
725 break;
726 case 7: /* vc: !V */
727 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
728 break;
729 case 8: /* hi: C && !Z */
730 inv = gen_new_label();
731 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
732 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
733 gen_set_label(inv);
734 break;
735 case 9: /* ls: !C || Z */
736 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
737 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
738 break;
739 case 10: /* ge: N == V -> N ^ V == 0 */
740 tmp = tcg_temp_new_i32();
741 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
742 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
743 tcg_temp_free_i32(tmp);
744 break;
745 case 11: /* lt: N != V -> N ^ V != 0 */
746 tmp = tcg_temp_new_i32();
747 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
748 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
749 tcg_temp_free_i32(tmp);
750 break;
751 case 12: /* gt: !Z && N == V */
752 inv = gen_new_label();
753 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
754 tmp = tcg_temp_new_i32();
755 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
756 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
757 tcg_temp_free_i32(tmp);
758 gen_set_label(inv);
759 break;
760 case 13: /* le: Z || N != V */
761 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
762 tmp = tcg_temp_new_i32();
763 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
764 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
765 tcg_temp_free_i32(tmp);
766 break;
767 default:
768 fprintf(stderr, "Bad condition code 0x%x\n", cc);
769 abort();
773 static const uint8_t table_logic_cc[16] = {
774 1, /* and */
775 1, /* xor */
776 0, /* sub */
777 0, /* rsb */
778 0, /* add */
779 0, /* adc */
780 0, /* sbc */
781 0, /* rsc */
782 1, /* andl */
783 1, /* xorl */
784 0, /* cmp */
785 0, /* cmn */
786 1, /* orr */
787 1, /* mov */
788 1, /* bic */
789 1, /* mvn */
792 /* Set PC and Thumb state from an immediate address. */
793 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
795 TCGv tmp;
797 s->is_jmp = DISAS_UPDATE;
798 if (s->thumb != (addr & 1)) {
799 tmp = tcg_temp_new_i32();
800 tcg_gen_movi_i32(tmp, addr & 1);
801 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
802 tcg_temp_free_i32(tmp);
804 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
807 /* Set PC and Thumb state from var. var is marked as dead. */
808 static inline void gen_bx(DisasContext *s, TCGv var)
810 s->is_jmp = DISAS_UPDATE;
811 tcg_gen_andi_i32(cpu_R[15], var, ~1);
812 tcg_gen_andi_i32(var, var, 1);
813 store_cpu_field(var, thumb);
816 /* Variant of store_reg which uses branch&exchange logic when storing
817 to r15 in ARM architecture v7 and above. The source must be a temporary
818 and will be marked as dead. */
819 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
820 int reg, TCGv var)
822 if (reg == 15 && ENABLE_ARCH_7) {
823 gen_bx(s, var);
824 } else {
825 store_reg(s, reg, var);
829 /* Variant of store_reg which uses branch&exchange logic when storing
830 * to r15 in ARM architecture v5T and above. This is used for storing
831 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
832 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
833 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
834 int reg, TCGv var)
836 if (reg == 15 && ENABLE_ARCH_5) {
837 gen_bx(s, var);
838 } else {
839 store_reg(s, reg, var);
843 static inline TCGv gen_ld8s(TCGv addr, int index)
845 TCGv tmp = tcg_temp_new_i32();
846 tcg_gen_qemu_ld8s(tmp, addr, index);
847 return tmp;
849 static inline TCGv gen_ld8u(TCGv addr, int index)
851 TCGv tmp = tcg_temp_new_i32();
852 tcg_gen_qemu_ld8u(tmp, addr, index);
853 return tmp;
855 static inline TCGv gen_ld16s(TCGv addr, int index)
857 TCGv tmp = tcg_temp_new_i32();
858 tcg_gen_qemu_ld16s(tmp, addr, index);
859 return tmp;
861 static inline TCGv gen_ld16u(TCGv addr, int index)
863 TCGv tmp = tcg_temp_new_i32();
864 tcg_gen_qemu_ld16u(tmp, addr, index);
865 return tmp;
867 static inline TCGv gen_ld32(TCGv addr, int index)
869 TCGv tmp = tcg_temp_new_i32();
870 tcg_gen_qemu_ld32u(tmp, addr, index);
871 return tmp;
873 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
875 TCGv_i64 tmp = tcg_temp_new_i64();
876 tcg_gen_qemu_ld64(tmp, addr, index);
877 return tmp;
879 static inline void gen_st8(TCGv val, TCGv addr, int index)
881 tcg_gen_qemu_st8(val, addr, index);
882 tcg_temp_free_i32(val);
884 static inline void gen_st16(TCGv val, TCGv addr, int index)
886 tcg_gen_qemu_st16(val, addr, index);
887 tcg_temp_free_i32(val);
889 static inline void gen_st32(TCGv val, TCGv addr, int index)
891 tcg_gen_qemu_st32(val, addr, index);
892 tcg_temp_free_i32(val);
894 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
896 tcg_gen_qemu_st64(val, addr, index);
897 tcg_temp_free_i64(val);
900 static inline void gen_set_pc_im(uint32_t val)
902 tcg_gen_movi_i32(cpu_R[15], val);
905 /* Force a TB lookup after an instruction that changes the CPU state. */
906 static inline void gen_lookup_tb(DisasContext *s)
908 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
909 s->is_jmp = DISAS_UPDATE;
912 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
913 TCGv var)
915 int val, rm, shift, shiftop;
916 TCGv offset;
918 if (!(insn & (1 << 25))) {
919 /* immediate */
920 val = insn & 0xfff;
921 if (!(insn & (1 << 23)))
922 val = -val;
923 if (val != 0)
924 tcg_gen_addi_i32(var, var, val);
925 } else {
926 /* shift/register */
927 rm = (insn) & 0xf;
928 shift = (insn >> 7) & 0x1f;
929 shiftop = (insn >> 5) & 3;
930 offset = load_reg(s, rm);
931 gen_arm_shift_im(offset, shiftop, shift, 0);
932 if (!(insn & (1 << 23)))
933 tcg_gen_sub_i32(var, var, offset);
934 else
935 tcg_gen_add_i32(var, var, offset);
936 tcg_temp_free_i32(offset);
940 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
941 int extra, TCGv var)
943 int val, rm;
944 TCGv offset;
946 if (insn & (1 << 22)) {
947 /* immediate */
948 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
949 if (!(insn & (1 << 23)))
950 val = -val;
951 val += extra;
952 if (val != 0)
953 tcg_gen_addi_i32(var, var, val);
954 } else {
955 /* register */
956 if (extra)
957 tcg_gen_addi_i32(var, var, extra);
958 rm = (insn) & 0xf;
959 offset = load_reg(s, rm);
960 if (!(insn & (1 << 23)))
961 tcg_gen_sub_i32(var, var, offset);
962 else
963 tcg_gen_add_i32(var, var, offset);
964 tcg_temp_free_i32(offset);
968 static TCGv_ptr get_fpstatus_ptr(int neon)
970 TCGv_ptr statusptr = tcg_temp_new_ptr();
971 int offset;
972 if (neon) {
973 offset = offsetof(CPUARMState, vfp.standard_fp_status);
974 } else {
975 offset = offsetof(CPUARMState, vfp.fp_status);
977 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
978 return statusptr;
981 #define VFP_OP2(name) \
982 static inline void gen_vfp_##name(int dp) \
984 TCGv_ptr fpst = get_fpstatus_ptr(0); \
985 if (dp) { \
986 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
987 } else { \
988 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
990 tcg_temp_free_ptr(fpst); \
993 VFP_OP2(add)
994 VFP_OP2(sub)
995 VFP_OP2(mul)
996 VFP_OP2(div)
998 #undef VFP_OP2
1000 static inline void gen_vfp_F1_mul(int dp)
1002 /* Like gen_vfp_mul() but put result in F1 */
1003 TCGv_ptr fpst = get_fpstatus_ptr(0);
1004 if (dp) {
1005 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1006 } else {
1007 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1009 tcg_temp_free_ptr(fpst);
1012 static inline void gen_vfp_F1_neg(int dp)
1014 /* Like gen_vfp_neg() but put result in F1 */
1015 if (dp) {
1016 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1017 } else {
1018 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1022 static inline void gen_vfp_abs(int dp)
1024 if (dp)
1025 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1026 else
1027 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1030 static inline void gen_vfp_neg(int dp)
1032 if (dp)
1033 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1034 else
1035 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1038 static inline void gen_vfp_sqrt(int dp)
1040 if (dp)
1041 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1042 else
1043 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1046 static inline void gen_vfp_cmp(int dp)
1048 if (dp)
1049 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1050 else
1051 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1054 static inline void gen_vfp_cmpe(int dp)
1056 if (dp)
1057 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1058 else
1059 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1062 static inline void gen_vfp_F1_ld0(int dp)
1064 if (dp)
1065 tcg_gen_movi_i64(cpu_F1d, 0);
1066 else
1067 tcg_gen_movi_i32(cpu_F1s, 0);
1070 #define VFP_GEN_ITOF(name) \
1071 static inline void gen_vfp_##name(int dp, int neon) \
1073 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1074 if (dp) { \
1075 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1076 } else { \
1077 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1079 tcg_temp_free_ptr(statusptr); \
1082 VFP_GEN_ITOF(uito)
1083 VFP_GEN_ITOF(sito)
1084 #undef VFP_GEN_ITOF
1086 #define VFP_GEN_FTOI(name) \
1087 static inline void gen_vfp_##name(int dp, int neon) \
1089 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1090 if (dp) { \
1091 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1092 } else { \
1093 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1095 tcg_temp_free_ptr(statusptr); \
1098 VFP_GEN_FTOI(toui)
1099 VFP_GEN_FTOI(touiz)
1100 VFP_GEN_FTOI(tosi)
1101 VFP_GEN_FTOI(tosiz)
1102 #undef VFP_GEN_FTOI
1104 #define VFP_GEN_FIX(name) \
1105 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1107 TCGv tmp_shift = tcg_const_i32(shift); \
1108 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1109 if (dp) { \
1110 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1111 } else { \
1112 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1114 tcg_temp_free_i32(tmp_shift); \
1115 tcg_temp_free_ptr(statusptr); \
1117 VFP_GEN_FIX(tosh)
1118 VFP_GEN_FIX(tosl)
1119 VFP_GEN_FIX(touh)
1120 VFP_GEN_FIX(toul)
1121 VFP_GEN_FIX(shto)
1122 VFP_GEN_FIX(slto)
1123 VFP_GEN_FIX(uhto)
1124 VFP_GEN_FIX(ulto)
1125 #undef VFP_GEN_FIX
1127 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1129 if (dp)
1130 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1131 else
1132 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1135 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1137 if (dp)
1138 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1139 else
1140 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1143 static inline long
1144 vfp_reg_offset (int dp, int reg)
1146 if (dp)
1147 return offsetof(CPUARMState, vfp.regs[reg]);
1148 else if (reg & 1) {
1149 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1150 + offsetof(CPU_DoubleU, l.upper);
1151 } else {
1152 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1153 + offsetof(CPU_DoubleU, l.lower);
1157 /* Return the offset of a 32-bit piece of a NEON register.
1158 zero is the least significant end of the register. */
1159 static inline long
1160 neon_reg_offset (int reg, int n)
1162 int sreg;
1163 sreg = reg * 2 + n;
1164 return vfp_reg_offset(0, sreg);
1167 static TCGv neon_load_reg(int reg, int pass)
1169 TCGv tmp = tcg_temp_new_i32();
1170 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1171 return tmp;
1174 static void neon_store_reg(int reg, int pass, TCGv var)
1176 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1177 tcg_temp_free_i32(var);
1180 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1182 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1185 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1187 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1190 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1191 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1192 #define tcg_gen_st_f32 tcg_gen_st_i32
1193 #define tcg_gen_st_f64 tcg_gen_st_i64
/* Copy VFP register reg into the F0 scratch (dp selects 64-bit).  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register reg into the F1 scratch.  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy the F0 scratch back into VFP register reg.  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
/* Coprocessor load/store direction bit (L bit) of the insn word.  */
#define ARM_CP_RW_BIT	(1 << 20)

/* Access the 64-bit iwMMXt data registers wR0..wR15 in CPUARMState.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load a 32-bit iwMMXt control register into a fresh temp; the caller
 * owns (and must free) the returned temp.  */
static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store var into an iwMMXt control register; consumes (frees) var.  */
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
/* Move between the M0 accumulator temp and an iwMMXt data register.  */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical ops of the form M0 op= wRn (wRn staged via cpu_V1).  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Generate gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn).  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env (the op reads or
 * writes state in CPUARMState, e.g. flags/saturation).  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env op in one go.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env op acting on M0 in place: M0 = helper(env, M0).  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
/* Two-operand ops: multiply-accumulate, sum-of-absolute-differences,
 * and packed multiplies.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Interleaved unpack of M0 with wRn (byte/word/long variants).  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-operand extending unpacks: unsigned (u*) and signed (s*)
 * widening of the low/high half of M0.  */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Packed compares.  */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Packed min/max, signed and unsigned.  */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Packed add/subtract: no saturation (n), unsigned sat (u), signed sat (s).  */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Packed averages (round bit 0/1 selects the rounding variant).  */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with unsigned/signed saturation.  */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
/* Set bit 1 ("MUP": an iwMMXt data register was updated) in wCon.  */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 ("CUP": an iwMMXt control register was updated) in wCon.  */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
/* Recompute the N/Z condition flags in wCASF from the value in M0.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn.  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Compute the effective address for an iwMMXt load/store into dest.
 * Handles pre-indexed (P=1, optional writeback W=1) and post-indexed
 * (P=0, W=1) addressing.  Returns 1 for undefined encodings
 * (P=0, W=0, U=0), 0 otherwise.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when insn bit 8 is set
     * ((insn >> 7) & 2 yields a shift of 0 or 2).  */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    /* NOTE(review): on the P=0, W=0 paths reaching here, tmp is never
     * freed and, when U=1, dest is never written -- looks like a latent
     * temp leak / unset-output case; confirm against callers before
     * changing.  */
    return 0;
}
1418 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1420 int rd = (insn >> 0) & 0xf;
1421 TCGv tmp;
1423 if (insn & (1 << 8)) {
1424 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1425 return 1;
1426 } else {
1427 tmp = iwmmxt_load_creg(rd);
1429 } else {
1430 tmp = tcg_temp_new_i32();
1431 iwmmxt_load_reg(cpu_V0, rd);
1432 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1434 tcg_gen_andi_i32(tmp, tmp, mask);
1435 tcg_gen_mov_i32(dest, tmp);
1436 tcg_temp_free_i32(tmp);
1437 return 0;
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    /* Coprocessor register transfer / load-store group.  */
    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        /* WLDR/WSTR: load/store a wRn or wCx register.  */
        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {			/* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                /* i flags whether a 32-bit temp still needs widening
                 * into M0 (everything except the 64-bit WLDRD).  */
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {				/* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {		/* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else {				/* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {			/* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {				/* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {		/* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else {				/* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    /* Data-processing group, keyed on opcode1 (bits 23:20, shifted up)
     * and opcode2/CRm (bits 11:4).  */
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:						/* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:						/* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            /* Read-only registers: write is ignored.  */
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            /* Write-one-to-clear semantics: creg &= ~rd.  */
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:						/* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:						/* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:						/* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:						/* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:				/* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:	/* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:	/* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:	/* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        /* Z bit clear: accumulate into the destination.  */
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:	/* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:	/* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:	/* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:	/* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:	/* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* Alignment count comes from wCGR0..wCGR3, low 3 bits.  */
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:	/* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        /* tmp2 = lane mask, tmp3 = lane shift, per element size.  */
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            /* Unreachable (size 3 rejected above); silence warnings.  */
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:	/* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:	/* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:	/* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:	/* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        /* AND the per-field flag nibbles of wCASF together.  */
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:	/* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:	/* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        /* OR the per-field flag nibbles of wCASF together.  */
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:	/* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:	/* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:	/* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:	/* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:	/* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:	/* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:	/* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:	/* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        /* Rotate count is masked per element size.  */
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:	/* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:	/* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:	/* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:	/* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* Bits 23:20 select element size and saturation mode.  */
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:	/* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:	/* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* Bits 23:20 select element size and saturation mode.  */
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:	/* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:	/* TMIA group */
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:					/* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:		/* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).

   Handles the two XScale DSP coprocessor formats:
   - Multiply with Internal Accumulate (MIA/MIAPH/MIAxy): acc0 += products
     of halves of Rm and Rs.
   - Internal Accumulator Access (MAR/MRA): move the 40-bit acc0 to/from
     a core register pair.  */
static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 exists on XScale.  */
        if (acc != 0)
            return 1;

        /* NOTE(review): cpu_M0 is not reloaded from acc before the
         * multiply-accumulate below, unlike the TMIA path in
         * disas_iwmmxt_insn which does gen_op_iwmmxt_movq_M0_wRn(wrd)
         * first — confirm cpu_M0 is guaranteed to hold acc0 here.  */
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            /* Bits 16/17 select top (shift right 16) or bottom halfword
             * of each operand.  */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only accumulator 0 exists on XScale.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            /* The accumulator is only 40 bits wide: keep bits [39:32]
             * in rdhi.  */
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
/* Shift right that tolerates a negative count (shifts left instead);
 * needed because VFP_SREG uses bigbit - 1, which is -1 when bigbit == 0.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: 4-bit field at `bigbit`
 * supplies the high bits, the bit at `smallbit` the lowest bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into `reg`.  On VFP3 the
 * bit at `smallbit` is the top bit of a 5-bit register number; earlier
 * cores have only 16 D registers, so a set `smallbit` is UNDEF.
 * NB: expands to `return 1` on UNDEF and reads `env` from the caller.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D/N/M operand positions within a VFP instruction encoding.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2534 /* Move between integer and VFP cores. */
2535 static TCGv gen_vfp_mrs(void)
2537 TCGv tmp = tcg_temp_new_i32();
2538 tcg_gen_mov_i32(tmp, cpu_F0s);
2539 return tmp;
2542 static void gen_vfp_msr(TCGv tmp)
2544 tcg_gen_mov_i32(cpu_F0s, tmp);
2545 tcg_temp_free_i32(tmp);
2548 static void gen_neon_dup_u8(TCGv var, int shift)
2550 TCGv tmp = tcg_temp_new_i32();
2551 if (shift)
2552 tcg_gen_shri_i32(var, var, shift);
2553 tcg_gen_ext8u_i32(var, var);
2554 tcg_gen_shli_i32(tmp, var, 8);
2555 tcg_gen_or_i32(var, var, tmp);
2556 tcg_gen_shli_i32(tmp, var, 16);
2557 tcg_gen_or_i32(var, var, tmp);
2558 tcg_temp_free_i32(tmp);
2561 static void gen_neon_dup_low16(TCGv var)
2563 TCGv tmp = tcg_temp_new_i32();
2564 tcg_gen_ext16u_i32(var, var);
2565 tcg_gen_shli_i32(tmp, var, 16);
2566 tcg_gen_or_i32(var, var, tmp);
2567 tcg_temp_free_i32(tmp);
2570 static void gen_neon_dup_high16(TCGv var)
2572 TCGv tmp = tcg_temp_new_i32();
2573 tcg_gen_andi_i32(var, var, 0xffff0000);
2574 tcg_gen_shri_i32(tmp, var, 16);
2575 tcg_gen_or_i32(var, var, tmp);
2576 tcg_temp_free_i32(tmp);
/* Load a single Neon element of the given size (0 = byte, 1 = halfword,
 * 2 = word) from *addr and replicate it across a 32-bit TCG reg, which
 * is returned (caller owns the temp).  */
static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv tmp;
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        /* A full word already fills the register; no duplication needed.  */
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).

   Covers: single register transfers (VMOV core<->VFP, VMRS/VMSR, VDUP),
   the VFP data-processing group (including short-vector iteration via
   vec_len/vec_stride), two-register transfers, and VFP load/store
   (single and multiple).  */
static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    /* Bits [11:8] == 0xb selects double precision.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                /* Decode lane size/offset for element transfers.  */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        /* Store copies to every 32-bit pass, the last one
                         * consuming tmp itself.  */
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* VMRS ..., APSR_nzcv: flags only.  */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_MVFR))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR changes vec_len/vec_stride: end the TB.  */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
                    /* VCVT from int is always from S reg regardless of dp bit.
                     * VCVT with immediate frac_bits has same format as SREG_M
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector iteration count; op 15 with rn > 3 (conversions
             * etc.) is always scalar.  */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension,
                     * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
                     */
                    if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_feature(env, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    /* Expand the 8-bit VFP immediate into a full float.  */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32 */
                        /* Merge the converted half into the low halfword of
                         * the destination, preserving the top half.  */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32 */
                        tmp = tcg_temp_new_i32();
                        gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
                    /* VCVT double to int: always integer result. */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3403 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3405 TranslationBlock *tb;
3407 tb = s->tb;
3408 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3409 tcg_gen_goto_tb(n);
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb((tcg_target_long)tb + n);
3412 } else {
3413 gen_set_pc_im(dest);
3414 tcg_gen_exit_tb(0);
3418 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3420 if (unlikely(s->singlestep_enabled)) {
3421 /* An indirect jump so that we still trigger the debug exception. */
3422 if (s->thumb)
3423 dest |= 1;
3424 gen_bx_im(s, dest);
3425 } else {
3426 gen_goto_tb(s, 0, dest);
3427 s->is_jmp = DISAS_TB_JUMP;
3431 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3433 if (x)
3434 tcg_gen_sari_i32(t0, t0, 16);
3435 else
3436 gen_sxth(t0);
3437 if (y)
3438 tcg_gen_sari_i32(t1, t1, 16);
3439 else
3440 gen_sxth(t1);
3441 tcg_gen_mul_i32(t0, t0, t1);
3444 /* Return the mask of PSR bits set by a MSR instruction. */
3445 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3446 uint32_t mask;
3448 mask = 0;
3449 if (flags & (1 << 0))
3450 mask |= 0xff;
3451 if (flags & (1 << 1))
3452 mask |= 0xff00;
3453 if (flags & (1 << 2))
3454 mask |= 0xff0000;
3455 if (flags & (1 << 3))
3456 mask |= 0xff000000;
3458 /* Mask out undefined bits. */
3459 mask &= ~CPSR_RESERVED;
3460 if (!arm_feature(env, ARM_FEATURE_V4T))
3461 mask &= ~CPSR_T;
3462 if (!arm_feature(env, ARM_FEATURE_V5))
3463 mask &= ~CPSR_Q; /* V5TE in reality*/
3464 if (!arm_feature(env, ARM_FEATURE_V6))
3465 mask &= ~(CPSR_E | CPSR_GE);
3466 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3467 mask &= ~CPSR_IT;
3468 /* Mask out execution state bits. */
3469 if (!spsr)
3470 mask &= ~CPSR_EXEC;
3471 /* Mask out privileged bits. */
3472 if (IS_USER(s))
3473 mask &= CPSR_USER;
3474 return mask;
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * If `spsr` is set, merge the masked bits of t0 into the banked SPSR
 * field; otherwise update the CPSR (which may end the TB via
 * gen_lookup_tb, since mode/interrupt state can change).  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep SPSR bits outside `mask`.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
3499 /* Returns nonzero if access to the PSR is not permitted. */
3500 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3502 TCGv tmp;
3503 tmp = tcg_temp_new_i32();
3504 tcg_gen_movi_i32(tmp, val);
3505 return gen_set_psr(s, mask, spsr, tmp);
/* Generate an old-style exception return. Marks pc as dead.
 * Writes `pc` to r15 and then restores the CPSR from the banked SPSR.
 * NOTE(review): the r15 write precedes the CPSR restore — presumably
 * the ordering matters for banked registers; confirm before reordering.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  Marks both values as dead.
 * Restores the full CPSR from `cpsr` first, then writes `pc` to r15
 * (the reverse order of gen_exception_return — do not swap).  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3528 static inline void
3529 gen_set_condexec (DisasContext *s)
3531 if (s->condexec_mask) {
3532 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3533 TCGv tmp = tcg_temp_new_i32();
3534 tcg_gen_movi_i32(tmp, val);
3535 store_cpu_field(tmp, condexec_bits);
3539 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3541 gen_set_condexec(s);
3542 gen_set_pc_im(s->pc - offset);
3543 gen_exception(excp);
3544 s->is_jmp = DISAS_JUMP;
3547 static void gen_nop_hint(DisasContext *s, int val)
3549 switch (val) {
3550 case 3: /* wfi */
3551 gen_set_pc_im(s->pc);
3552 s->is_jmp = DISAS_WFI;
3553 break;
3554 case 2: /* wfe */
3555 case 4: /* sev */
3556 /* TODO: Implement SEV and WFE. May help SMP performance. */
3557 default: /* nop */
3558 break;
/* Shorthand operand list: destination cpu_V0, sources cpu_V0 and cpu_V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3564 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3566 switch (size) {
3567 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3568 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3569 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3570 default: abort();
3574 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3576 switch (size) {
3577 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3578 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3579 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3580 default: return;
/* 32-bit pairwise ops end up the same as the elementwise versions:
 * with one 32-bit element per register there is only one pair, so
 * pairwise max/min reduce to plain max/min.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
/* Invoke the s8/u8/s16/u16/s32/u32 variant of Neon helper `name`
 * (env-taking form) selected by the enclosing scope's `size` and `u`;
 * operates on `tmp`/`tmp2` in place.  Expands to `return 1` (UNDEF)
 * for invalid size/u combinations.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
 * Selects the s8/u8/s16/u16/s32/u32 variant of `name` from the enclosing
 * scope's `size` and `u`; expands to `return 1` (UNDEF) otherwise.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3636 static TCGv neon_load_scratch(int scratch)
3638 TCGv tmp = tcg_temp_new_i32();
3639 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3640 return tmp;
3643 static void neon_store_scratch(int scratch, TCGv var)
3645 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3646 tcg_temp_free_i32(var);
/* Load a Neon scalar operand into a 32-bit temp, replicated across all
 * lanes of that temp.  For 16-bit scalars (size == 1) `reg` bit 3
 * selects the high or low halfword of the 32-bit element.  */
static inline TCGv neon_get_scalar(int size, int reg)
{
    TCGv tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        /* 32-bit scalar: the whole element, no duplication needed.  */
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3665 static int gen_neon_unzip(int rd, int rm, int size, int q)
3667 TCGv tmp, tmp2;
3668 if (!q && size == 2) {
3669 return 1;
3671 tmp = tcg_const_i32(rd);
3672 tmp2 = tcg_const_i32(rm);
3673 if (q) {
3674 switch (size) {
3675 case 0:
3676 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3677 break;
3678 case 1:
3679 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3680 break;
3681 case 2:
3682 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3683 break;
3684 default:
3685 abort();
3687 } else {
3688 switch (size) {
3689 case 0:
3690 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3691 break;
3692 case 1:
3693 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3694 break;
3695 default:
3696 abort();
3699 tcg_temp_free_i32(tmp);
3700 tcg_temp_free_i32(tmp2);
3701 return 0;
3704 static int gen_neon_zip(int rd, int rm, int size, int q)
3706 TCGv tmp, tmp2;
3707 if (!q && size == 2) {
3708 return 1;
3710 tmp = tcg_const_i32(rd);
3711 tmp2 = tcg_const_i32(rm);
3712 if (q) {
3713 switch (size) {
3714 case 0:
3715 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3716 break;
3717 case 1:
3718 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3719 break;
3720 case 2:
3721 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3722 break;
3723 default:
3724 abort();
3726 } else {
3727 switch (size) {
3728 case 0:
3729 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3730 break;
3731 case 1:
3732 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3733 break;
3734 default:
3735 abort();
3738 tcg_temp_free_i32(tmp);
3739 tcg_temp_free_i32(tmp2);
3740 return 0;
/* Byte-granularity transpose of two 32-bit vectors (VTRN.8).
 * The new t0 is staged in 'rd' first because the old t0 is still needed
 * when building the new t1; t0 is only overwritten at the end. */
3743 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3745 TCGv rd, tmp;
3747 rd = tcg_temp_new_i32();
3748 tmp = tcg_temp_new_i32();
/* rd = (t0 << 8) masked to the odd byte lanes, merged with t1's even lanes */
3750 tcg_gen_shli_i32(rd, t0, 8);
3751 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3752 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3753 tcg_gen_or_i32(rd, rd, tmp);
/* t1 = (t1 >> 8) even lanes merged with t0's odd lanes (t0 still intact) */
3755 tcg_gen_shri_i32(t1, t1, 8);
3756 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3757 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3758 tcg_gen_or_i32(t1, t1, tmp);
/* commit the staged result */
3759 tcg_gen_mov_i32(t0, rd);
3761 tcg_temp_free_i32(tmp);
3762 tcg_temp_free_i32(rd);
/* Half-word-granularity transpose of two 32-bit vectors (VTRN.16).
 * As in gen_neon_trn_u8, the new t0 is staged in 'rd' so the old t0 can
 * still be read when computing the new t1. */
3765 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3767 TCGv rd, tmp;
3769 rd = tcg_temp_new_i32();
3770 tmp = tcg_temp_new_i32();
/* rd = (t0 << 16) | low half of t1 */
3772 tcg_gen_shli_i32(rd, t0, 16);
3773 tcg_gen_andi_i32(tmp, t1, 0xffff);
3774 tcg_gen_or_i32(rd, rd, tmp);
/* t1 = (t1 >> 16) | high half of t0 (t0 not yet clobbered) */
3775 tcg_gen_shri_i32(t1, t1, 16);
3776 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3777 tcg_gen_or_i32(t1, t1, tmp);
3778 tcg_gen_mov_i32(t0, rd);
3780 tcg_temp_free_i32(tmp);
3781 tcg_temp_free_i32(rd);
/* Per-'op' layout of the VLD/VST "multiple structures" forms: number of
 * registers transferred, element interleave factor and D-register spacing.
 * Indexed by the op field (0..10) in disas_neon_ls_insn; op > 10 UNDEFs
 * before this table is consulted. */
3785 static struct {
3786 int nregs;
3787 int interleave;
3788 int spacing;
3789 } neon_ls_element_type[11] = {
3790 {4, 4, 1}, /* op == 0 */
3791 {4, 4, 2}, /* op == 1 */
3792 {4, 1, 1}, /* op == 2 */
3793 {4, 2, 1}, /* op == 3 */
3794 {3, 3, 1}, /* op == 4 */
3795 {3, 3, 2}, /* op == 5 */
3796 {3, 1, 1}, /* op == 6 */
3797 {1, 1, 1}, /* op == 7 */
3798 {2, 2, 1}, /* op == 8 */
3799 {2, 2, 2}, /* op == 9 */
3800 {2, 1, 1} /* op == 10 */
3803 /* Translate a NEON load/store element instruction. Return nonzero if the
3804 instruction is invalid. */
3805 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3807 int rd, rn, rm;
3808 int op;
3809 int nregs;
3810 int interleave;
3811 int spacing;
3812 int stride;
3813 int size;
3814 int reg;
3815 int pass;
3816 int load;
3817 int shift;
3818 int n;
3819 TCGv addr;
3820 TCGv tmp;
3821 TCGv tmp2;
3822 TCGv_i64 tmp64;
/* NEON loads/stores UNDEF when the FPU/NEON unit is disabled. */
3824 if (!s->vfp_enabled)
3825 return 1;
3826 VFP_DREG_D(rd, insn);
3827 rn = (insn >> 16) & 0xf;
3828 rm = insn & 0xf;
3829 load = (insn & (1 << 21)) != 0;
/* Bit 23 clear: "multiple structures" forms (VLD1-4 / VST1-4). */
3830 if ((insn & (1 << 23)) == 0) {
3831 /* Load store all elements. */
3832 op = (insn >> 8) & 0xf;
3833 size = (insn >> 6) & 3;
3834 if (op > 10)
3835 return 1;
3836 /* Catch UNDEF cases for bad values of align field */
3837 switch (op & 0xc) {
3838 case 4:
3839 if (((insn >> 5) & 1) == 1) {
3840 return 1;
3842 break;
3843 case 8:
3844 if (((insn >> 4) & 3) == 3) {
3845 return 1;
3847 break;
3848 default:
3849 break;
3851 nregs = neon_ls_element_type[op].nregs;
3852 interleave = neon_ls_element_type[op].interleave;
3853 spacing = neon_ls_element_type[op].spacing;
/* 64-bit elements only exist without interleaving or register spacing. */
3854 if (size == 3 && (interleave | spacing) != 1)
3855 return 1;
3856 addr = tcg_temp_new_i32();
3857 load_reg_var(s, addr, rn);
3858 stride = (1 << size) * interleave;
3859 for (reg = 0; reg < nregs; reg++) {
/* Interleaved forms restart from the base each register and offset in. */
3860 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3861 load_reg_var(s, addr, rn);
3862 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3863 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3864 load_reg_var(s, addr, rn);
3865 tcg_gen_addi_i32(addr, addr, 1 << size);
3867 if (size == 3) {
3868 if (load) {
3869 tmp64 = gen_ld64(addr, IS_USER(s));
3870 neon_store_reg64(tmp64, rd);
3871 tcg_temp_free_i64(tmp64);
3872 } else {
3873 tmp64 = tcg_temp_new_i64();
3874 neon_load_reg64(tmp64, rd);
3875 gen_st64(tmp64, addr, IS_USER(s));
3877 tcg_gen_addi_i32(addr, addr, stride);
3878 } else {
/* Sub-64-bit elements: assemble/split each 32-bit pass word. */
3879 for (pass = 0; pass < 2; pass++) {
3880 if (size == 2) {
3881 if (load) {
3882 tmp = gen_ld32(addr, IS_USER(s));
3883 neon_store_reg(rd, pass, tmp);
3884 } else {
3885 tmp = neon_load_reg(rd, pass);
3886 gen_st32(tmp, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3889 } else if (size == 1) {
3890 if (load) {
3891 tmp = gen_ld16u(addr, IS_USER(s));
3892 tcg_gen_addi_i32(addr, addr, stride);
3893 tmp2 = gen_ld16u(addr, IS_USER(s));
3894 tcg_gen_addi_i32(addr, addr, stride);
3895 tcg_gen_shli_i32(tmp2, tmp2, 16);
3896 tcg_gen_or_i32(tmp, tmp, tmp2);
3897 tcg_temp_free_i32(tmp2);
3898 neon_store_reg(rd, pass, tmp);
3899 } else {
3900 tmp = neon_load_reg(rd, pass);
3901 tmp2 = tcg_temp_new_i32();
3902 tcg_gen_shri_i32(tmp2, tmp, 16);
3903 gen_st16(tmp, addr, IS_USER(s));
3904 tcg_gen_addi_i32(addr, addr, stride);
3905 gen_st16(tmp2, addr, IS_USER(s));
3906 tcg_gen_addi_i32(addr, addr, stride);
3908 } else /* size == 0 */ {
3909 if (load) {
3910 TCGV_UNUSED(tmp2);
3911 for (n = 0; n < 4; n++) {
3912 tmp = gen_ld8u(addr, IS_USER(s));
3913 tcg_gen_addi_i32(addr, addr, stride);
3914 if (n == 0) {
3915 tmp2 = tmp;
3916 } else {
3917 tcg_gen_shli_i32(tmp, tmp, n * 8);
3918 tcg_gen_or_i32(tmp2, tmp2, tmp);
3919 tcg_temp_free_i32(tmp);
3922 neon_store_reg(rd, pass, tmp2);
3923 } else {
3924 tmp2 = neon_load_reg(rd, pass);
3925 for (n = 0; n < 4; n++) {
3926 tmp = tcg_temp_new_i32();
3927 if (n == 0) {
3928 tcg_gen_mov_i32(tmp, tmp2);
3929 } else {
3930 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3932 gen_st8(tmp, addr, IS_USER(s));
3933 tcg_gen_addi_i32(addr, addr, stride);
3935 tcg_temp_free_i32(tmp2);
3940 rd += spacing;
3942 tcg_temp_free_i32(addr);
3943 stride = nregs * 8;
/* Bit 23 set: single-structure forms (to-all-lanes or one lane). */
3944 } else {
3945 size = (insn >> 10) & 3;
3946 if (size == 3) {
3947 /* Load single element to all lanes. */
3948 int a = (insn >> 4) & 1;
3949 if (!load) {
3950 return 1;
3952 size = (insn >> 6) & 3;
3953 nregs = ((insn >> 8) & 3) + 1;
3955 if (size == 3) {
3956 if (nregs != 4 || a == 0) {
3957 return 1;
3959 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3960 size = 2;
3962 if (nregs == 1 && a == 1 && size == 0) {
3963 return 1;
3965 if (nregs == 3 && a == 1) {
3966 return 1;
3968 addr = tcg_temp_new_i32();
3969 load_reg_var(s, addr, rn);
3970 if (nregs == 1) {
3971 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3972 tmp = gen_load_and_replicate(s, addr, size);
3973 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3974 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3975 if (insn & (1 << 5)) {
3976 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3977 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3979 tcg_temp_free_i32(tmp);
3980 } else {
3981 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3982 stride = (insn & (1 << 5)) ? 2 : 1;
3983 for (reg = 0; reg < nregs; reg++) {
3984 tmp = gen_load_and_replicate(s, addr, size);
3985 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3986 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3987 tcg_temp_free_i32(tmp);
3988 tcg_gen_addi_i32(addr, addr, 1 << size);
3989 rd += stride;
3992 tcg_temp_free_i32(addr);
3993 stride = (1 << size) * nregs;
3994 } else {
3995 /* Single element. */
3996 int idx = (insn >> 4) & 0xf;
3997 pass = (insn >> 7) & 1;
3998 switch (size) {
3999 case 0:
4000 shift = ((insn >> 5) & 3) * 8;
4001 stride = 1;
4002 break;
4003 case 1:
4004 shift = ((insn >> 6) & 1) * 16;
4005 stride = (insn & (1 << 5)) ? 2 : 1;
4006 break;
4007 case 2:
4008 shift = 0;
4009 stride = (insn & (1 << 6)) ? 2 : 1;
4010 break;
4011 default:
4012 abort();
4014 nregs = ((insn >> 8) & 3) + 1;
4015 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4016 switch (nregs) {
4017 case 1:
4018 if (((idx & (1 << size)) != 0) ||
4019 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4020 return 1;
4022 break;
4023 case 3:
4024 if ((idx & 1) != 0) {
4025 return 1;
4027 /* fall through */
4028 case 2:
4029 if (size == 2 && (idx & 2) != 0) {
4030 return 1;
4032 break;
4033 case 4:
4034 if ((size == 2) && ((idx & 3) == 3)) {
4035 return 1;
4037 break;
4038 default:
4039 abort();
4041 if ((rd + stride * (nregs - 1)) > 31) {
4042 /* Attempts to write off the end of the register file
4043 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4044 * the neon_load_reg() would write off the end of the array.
4046 return 1;
4048 addr = tcg_temp_new_i32();
4049 load_reg_var(s, addr, rn);
4050 for (reg = 0; reg < nregs; reg++) {
4051 if (load) {
4052 switch (size) {
4053 case 0:
4054 tmp = gen_ld8u(addr, IS_USER(s));
4055 break;
4056 case 1:
4057 tmp = gen_ld16u(addr, IS_USER(s));
4058 break;
4059 case 2:
4060 tmp = gen_ld32(addr, IS_USER(s));
4061 break;
4062 default: /* Avoid compiler warnings. */
4063 abort();
/* Narrow loads are merged into the existing register word. */
4065 if (size != 2) {
4066 tmp2 = neon_load_reg(rd, pass);
4067 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4068 shift, size ? 16 : 8);
4069 tcg_temp_free_i32(tmp2);
4071 neon_store_reg(rd, pass, tmp);
4072 } else { /* Store */
4073 tmp = neon_load_reg(rd, pass);
4074 if (shift)
4075 tcg_gen_shri_i32(tmp, tmp, shift);
4076 switch (size) {
4077 case 0:
4078 gen_st8(tmp, addr, IS_USER(s));
4079 break;
4080 case 1:
4081 gen_st16(tmp, addr, IS_USER(s));
4082 break;
4083 case 2:
4084 gen_st32(tmp, addr, IS_USER(s));
4085 break;
4088 rd += stride;
4089 tcg_gen_addi_i32(addr, addr, 1 << size);
4091 tcg_temp_free_i32(addr);
4092 stride = nregs * (1 << size);
/* Post-indexed writeback: rm == 15 means none; rm == 13 advances the base
 * by the transfer size; any other rm adds that register to the base. */
4095 if (rm != 15) {
4096 TCGv base;
4098 base = load_reg(s, rn);
4099 if (rm == 13) {
4100 tcg_gen_addi_i32(base, base, stride);
4101 } else {
4102 TCGv index;
4103 index = load_reg(s, rm);
4104 tcg_gen_add_i32(base, base, index);
4105 tcg_temp_free_i32(index);
4107 store_reg(s, rn, base);
4109 return 0;
4112 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4113 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4115 tcg_gen_and_i32(t, t, c);
4116 tcg_gen_andc_i32(f, f, c);
4117 tcg_gen_or_i32(dest, t, f);
4120 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4122 switch (size) {
4123 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4124 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4125 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4126 default: abort();
4130 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4132 switch (size) {
4133 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4134 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4135 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4136 default: abort();
4140 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4142 switch (size) {
4143 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4144 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4145 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4146 default: abort();
4150 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4152 switch (size) {
4153 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4154 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4155 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4156 default: abort();
4160 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4161 int q, int u)
4163 if (q) {
4164 if (u) {
4165 switch (size) {
4166 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4167 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4168 default: abort();
4170 } else {
4171 switch (size) {
4172 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4173 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4174 default: abort();
4177 } else {
4178 if (u) {
4179 switch (size) {
4180 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4181 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4182 default: abort();
4184 } else {
4185 switch (size) {
4186 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4187 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4188 default: abort();
4194 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4196 if (u) {
4197 switch (size) {
4198 case 0: gen_helper_neon_widen_u8(dest, src); break;
4199 case 1: gen_helper_neon_widen_u16(dest, src); break;
4200 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4201 default: abort();
4203 } else {
4204 switch (size) {
4205 case 0: gen_helper_neon_widen_s8(dest, src); break;
4206 case 1: gen_helper_neon_widen_s16(dest, src); break;
4207 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4208 default: abort();
4211 tcg_temp_free_i32(src);
4214 static inline void gen_neon_addl(int size)
4216 switch (size) {
4217 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4218 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4219 case 2: tcg_gen_add_i64(CPU_V001); break;
4220 default: abort();
4224 static inline void gen_neon_subl(int size)
4226 switch (size) {
4227 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4228 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4229 case 2: tcg_gen_sub_i64(CPU_V001); break;
4230 default: abort();
4234 static inline void gen_neon_negl(TCGv_i64 var, int size)
4236 switch (size) {
4237 case 0: gen_helper_neon_negl_u16(var, var); break;
4238 case 1: gen_helper_neon_negl_u32(var, var); break;
4239 case 2:
4240 tcg_gen_neg_i64(var, var);
4241 break;
4242 default: abort();
4246 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4248 switch (size) {
4249 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4250 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4251 default: abort();
4255 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4257 TCGv_i64 tmp;
4259 switch ((size << 1) | u) {
4260 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4261 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4262 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4263 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4264 case 4:
4265 tmp = gen_muls_i64_i32(a, b);
4266 tcg_gen_mov_i64(dest, tmp);
4267 tcg_temp_free_i64(tmp);
4268 break;
4269 case 5:
4270 tmp = gen_mulu_i64_i32(a, b);
4271 tcg_gen_mov_i64(dest, tmp);
4272 tcg_temp_free_i64(tmp);
4273 break;
4274 default: abort();
4277 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4278 Don't forget to clean them now. */
4279 if (size < 2) {
4280 tcg_temp_free_i32(a);
4281 tcg_temp_free_i32(b);
4285 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4287 if (op) {
4288 if (u) {
4289 gen_neon_unarrow_sats(size, dest, src);
4290 } else {
4291 gen_neon_narrow(size, dest, src);
4293 } else {
4294 if (u) {
4295 gen_neon_narrow_satu(size, dest, src);
4296 } else {
4297 gen_neon_narrow_sats(size, dest, src);
4302 /* Symbolic constants for op fields for Neon 3-register same-length.
4303 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4304 * table A7-9.
4306 #define NEON_3R_VHADD 0
4307 #define NEON_3R_VQADD 1
4308 #define NEON_3R_VRHADD 2
4309 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4310 #define NEON_3R_VHSUB 4
4311 #define NEON_3R_VQSUB 5
4312 #define NEON_3R_VCGT 6
4313 #define NEON_3R_VCGE 7
4314 #define NEON_3R_VSHL 8
4315 #define NEON_3R_VQSHL 9
4316 #define NEON_3R_VRSHL 10
4317 #define NEON_3R_VQRSHL 11
4318 #define NEON_3R_VMAX 12
4319 #define NEON_3R_VMIN 13
4320 #define NEON_3R_VABD 14
4321 #define NEON_3R_VABA 15
4322 #define NEON_3R_VADD_VSUB 16
4323 #define NEON_3R_VTST_VCEQ 17
4324 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4325 #define NEON_3R_VMUL 19
4326 #define NEON_3R_VPMAX 20
4327 #define NEON_3R_VPMIN 21
4328 #define NEON_3R_VQDMULH_VQRDMULH 22
4329 #define NEON_3R_VPADD 23
/* value 24 is an unallocated encoding */
4330 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4331 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4332 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4333 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4334 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4335 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4336 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
/* Bit n of each entry is set if element-size n is allowed for that op; a
 * zero entry (unallocated op) therefore always UNDEFs.  Checked at the
 * top of the 3-reg-same path in disas_neon_data_insn. */
4338 static const uint8_t neon_3r_sizes[] = {
4339 [NEON_3R_VHADD] = 0x7,
4340 [NEON_3R_VQADD] = 0xf,
4341 [NEON_3R_VRHADD] = 0x7,
4342 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4343 [NEON_3R_VHSUB] = 0x7,
4344 [NEON_3R_VQSUB] = 0xf,
4345 [NEON_3R_VCGT] = 0x7,
4346 [NEON_3R_VCGE] = 0x7,
4347 [NEON_3R_VSHL] = 0xf,
4348 [NEON_3R_VQSHL] = 0xf,
4349 [NEON_3R_VRSHL] = 0xf,
4350 [NEON_3R_VQRSHL] = 0xf,
4351 [NEON_3R_VMAX] = 0x7,
4352 [NEON_3R_VMIN] = 0x7,
4353 [NEON_3R_VABD] = 0x7,
4354 [NEON_3R_VABA] = 0x7,
4355 [NEON_3R_VADD_VSUB] = 0xf,
4356 [NEON_3R_VTST_VCEQ] = 0x7,
4357 [NEON_3R_VML] = 0x7,
4358 [NEON_3R_VMUL] = 0x7,
4359 [NEON_3R_VPMAX] = 0x7,
4360 [NEON_3R_VPMIN] = 0x7,
4361 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4362 [NEON_3R_VPADD] = 0x7,
4363 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4364 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4365 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4366 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4367 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4368 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4369 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4372 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4373 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4374 * table A7-13.
/* Values missing from this list are unallocated encodings; since
 * neon_2rm_sizes[] is zero for them, they UNDEF automatically. */
4376 #define NEON_2RM_VREV64 0
4377 #define NEON_2RM_VREV32 1
4378 #define NEON_2RM_VREV16 2
4379 #define NEON_2RM_VPADDL 4
4380 #define NEON_2RM_VPADDL_U 5
4381 #define NEON_2RM_VCLS 8
4382 #define NEON_2RM_VCLZ 9
4383 #define NEON_2RM_VCNT 10
4384 #define NEON_2RM_VMVN 11
4385 #define NEON_2RM_VPADAL 12
4386 #define NEON_2RM_VPADAL_U 13
4387 #define NEON_2RM_VQABS 14
4388 #define NEON_2RM_VQNEG 15
4389 #define NEON_2RM_VCGT0 16
4390 #define NEON_2RM_VCGE0 17
4391 #define NEON_2RM_VCEQ0 18
4392 #define NEON_2RM_VCLE0 19
4393 #define NEON_2RM_VCLT0 20
4394 #define NEON_2RM_VABS 22
4395 #define NEON_2RM_VNEG 23
4396 #define NEON_2RM_VCGT0_F 24
4397 #define NEON_2RM_VCGE0_F 25
4398 #define NEON_2RM_VCEQ0_F 26
4399 #define NEON_2RM_VCLE0_F 27
4400 #define NEON_2RM_VCLT0_F 28
4401 #define NEON_2RM_VABS_F 30
4402 #define NEON_2RM_VNEG_F 31
4403 #define NEON_2RM_VSWP 32
4404 #define NEON_2RM_VTRN 33
4405 #define NEON_2RM_VUZP 34
4406 #define NEON_2RM_VZIP 35
4407 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4408 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4409 #define NEON_2RM_VSHLL 38
4410 #define NEON_2RM_VCVT_F16_F32 44
4411 #define NEON_2RM_VCVT_F32_F16 46
4412 #define NEON_2RM_VRECPE 56
4413 #define NEON_2RM_VRSQRTE 57
4414 #define NEON_2RM_VRECPE_F 58
4415 #define NEON_2RM_VRSQRTE_F 59
4416 #define NEON_2RM_VCVT_FS 60
4417 #define NEON_2RM_VCVT_FU 61
4418 #define NEON_2RM_VCVT_SF 62
4419 #define NEON_2RM_VCVT_UF 63
4421 static int neon_2rm_is_float_op(int op)
4423 /* Return true if this neon 2reg-misc op is float-to-float */
4424 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4425 op >= NEON_2RM_VRECPE_F);
4428 /* Each entry in this array has bit n set if the insn allows
4429 * size value n (otherwise it will UNDEF). Since unallocated
4430 * op values will have no bits set they always UNDEF.
/* Indexed by the NEON_2RM_* op number defined above. */
4432 static const uint8_t neon_2rm_sizes[] = {
4433 [NEON_2RM_VREV64] = 0x7,
4434 [NEON_2RM_VREV32] = 0x3,
4435 [NEON_2RM_VREV16] = 0x1,
4436 [NEON_2RM_VPADDL] = 0x7,
4437 [NEON_2RM_VPADDL_U] = 0x7,
4438 [NEON_2RM_VCLS] = 0x7,
4439 [NEON_2RM_VCLZ] = 0x7,
4440 [NEON_2RM_VCNT] = 0x1,
4441 [NEON_2RM_VMVN] = 0x1,
4442 [NEON_2RM_VPADAL] = 0x7,
4443 [NEON_2RM_VPADAL_U] = 0x7,
4444 [NEON_2RM_VQABS] = 0x7,
4445 [NEON_2RM_VQNEG] = 0x7,
4446 [NEON_2RM_VCGT0] = 0x7,
4447 [NEON_2RM_VCGE0] = 0x7,
4448 [NEON_2RM_VCEQ0] = 0x7,
4449 [NEON_2RM_VCLE0] = 0x7,
4450 [NEON_2RM_VCLT0] = 0x7,
4451 [NEON_2RM_VABS] = 0x7,
4452 [NEON_2RM_VNEG] = 0x7,
4453 [NEON_2RM_VCGT0_F] = 0x4,
4454 [NEON_2RM_VCGE0_F] = 0x4,
4455 [NEON_2RM_VCEQ0_F] = 0x4,
4456 [NEON_2RM_VCLE0_F] = 0x4,
4457 [NEON_2RM_VCLT0_F] = 0x4,
4458 [NEON_2RM_VABS_F] = 0x4,
4459 [NEON_2RM_VNEG_F] = 0x4,
4460 [NEON_2RM_VSWP] = 0x1,
4461 [NEON_2RM_VTRN] = 0x7,
4462 [NEON_2RM_VUZP] = 0x7,
4463 [NEON_2RM_VZIP] = 0x7,
4464 [NEON_2RM_VMOVN] = 0x7,
4465 [NEON_2RM_VQMOVN] = 0x7,
4466 [NEON_2RM_VSHLL] = 0x7,
4467 [NEON_2RM_VCVT_F16_F32] = 0x2,
4468 [NEON_2RM_VCVT_F32_F16] = 0x2,
4469 [NEON_2RM_VRECPE] = 0x4,
4470 [NEON_2RM_VRSQRTE] = 0x4,
4471 [NEON_2RM_VRECPE_F] = 0x4,
4472 [NEON_2RM_VRSQRTE_F] = 0x4,
4473 [NEON_2RM_VCVT_FS] = 0x4,
4474 [NEON_2RM_VCVT_FU] = 0x4,
4475 [NEON_2RM_VCVT_SF] = 0x4,
4476 [NEON_2RM_VCVT_UF] = 0x4,
4479 /* Translate a NEON data processing instruction. Return nonzero if the
4480 instruction is invalid.
4481 We process data in a mixture of 32-bit and 64-bit chunks.
4482 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4484 static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
4486 int op;
4487 int q;
4488 int rd, rn, rm;
4489 int size;
4490 int shift;
4491 int pass;
4492 int count;
4493 int pairwise;
4494 int u;
4495 uint32_t imm, mask;
4496 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4497 TCGv_i64 tmp64;
4499 if (!s->vfp_enabled)
4500 return 1;
4501 q = (insn & (1 << 6)) != 0;
4502 u = (insn >> 24) & 1;
4503 VFP_DREG_D(rd, insn);
4504 VFP_DREG_N(rn, insn);
4505 VFP_DREG_M(rm, insn);
4506 size = (insn >> 20) & 3;
4507 if ((insn & (1 << 23)) == 0) {
4508 /* Three register same length. */
4509 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4510 /* Catch invalid op and bad size combinations: UNDEF */
4511 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4512 return 1;
4514 /* All insns of this form UNDEF for either this condition or the
4515 * superset of cases "Q==1"; we catch the latter later.
4517 if (q && ((rd | rn | rm) & 1)) {
4518 return 1;
4520 if (size == 3 && op != NEON_3R_LOGIC) {
4521 /* 64-bit element instructions. */
4522 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4523 neon_load_reg64(cpu_V0, rn + pass);
4524 neon_load_reg64(cpu_V1, rm + pass);
4525 switch (op) {
4526 case NEON_3R_VQADD:
4527 if (u) {
4528 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4529 cpu_V0, cpu_V1);
4530 } else {
4531 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4532 cpu_V0, cpu_V1);
4534 break;
4535 case NEON_3R_VQSUB:
4536 if (u) {
4537 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4538 cpu_V0, cpu_V1);
4539 } else {
4540 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4541 cpu_V0, cpu_V1);
4543 break;
4544 case NEON_3R_VSHL:
4545 if (u) {
4546 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4547 } else {
4548 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4550 break;
4551 case NEON_3R_VQSHL:
4552 if (u) {
4553 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4554 cpu_V1, cpu_V0);
4555 } else {
4556 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4557 cpu_V1, cpu_V0);
4559 break;
4560 case NEON_3R_VRSHL:
4561 if (u) {
4562 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4563 } else {
4564 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4566 break;
4567 case NEON_3R_VQRSHL:
4568 if (u) {
4569 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4570 cpu_V1, cpu_V0);
4571 } else {
4572 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4573 cpu_V1, cpu_V0);
4575 break;
4576 case NEON_3R_VADD_VSUB:
4577 if (u) {
4578 tcg_gen_sub_i64(CPU_V001);
4579 } else {
4580 tcg_gen_add_i64(CPU_V001);
4582 break;
4583 default:
4584 abort();
4586 neon_store_reg64(cpu_V0, rd + pass);
4588 return 0;
4590 pairwise = 0;
4591 switch (op) {
4592 case NEON_3R_VSHL:
4593 case NEON_3R_VQSHL:
4594 case NEON_3R_VRSHL:
4595 case NEON_3R_VQRSHL:
4597 int rtmp;
4598 /* Shift instruction operands are reversed. */
4599 rtmp = rn;
4600 rn = rm;
4601 rm = rtmp;
4603 break;
4604 case NEON_3R_VPADD:
4605 if (u) {
4606 return 1;
4608 /* Fall through */
4609 case NEON_3R_VPMAX:
4610 case NEON_3R_VPMIN:
4611 pairwise = 1;
4612 break;
4613 case NEON_3R_FLOAT_ARITH:
4614 pairwise = (u && size < 2); /* if VPADD (float) */
4615 break;
4616 case NEON_3R_FLOAT_MINMAX:
4617 pairwise = u; /* if VPMIN/VPMAX (float) */
4618 break;
4619 case NEON_3R_FLOAT_CMP:
4620 if (!u && size) {
4621 /* no encoding for U=0 C=1x */
4622 return 1;
4624 break;
4625 case NEON_3R_FLOAT_ACMP:
4626 if (!u) {
4627 return 1;
4629 break;
4630 case NEON_3R_VRECPS_VRSQRTS:
4631 if (u) {
4632 return 1;
4634 break;
4635 case NEON_3R_VMUL:
4636 if (u && (size != 0)) {
4637 /* UNDEF on invalid size for polynomial subcase */
4638 return 1;
4640 break;
4641 case NEON_3R_VFM:
4642 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4643 return 1;
4645 break;
4646 default:
4647 break;
4650 if (pairwise && q) {
4651 /* All the pairwise insns UNDEF if Q is set */
4652 return 1;
4655 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4657 if (pairwise) {
4658 /* Pairwise. */
4659 if (pass < 1) {
4660 tmp = neon_load_reg(rn, 0);
4661 tmp2 = neon_load_reg(rn, 1);
4662 } else {
4663 tmp = neon_load_reg(rm, 0);
4664 tmp2 = neon_load_reg(rm, 1);
4666 } else {
4667 /* Elementwise. */
4668 tmp = neon_load_reg(rn, pass);
4669 tmp2 = neon_load_reg(rm, pass);
4671 switch (op) {
4672 case NEON_3R_VHADD:
4673 GEN_NEON_INTEGER_OP(hadd);
4674 break;
4675 case NEON_3R_VQADD:
4676 GEN_NEON_INTEGER_OP_ENV(qadd);
4677 break;
4678 case NEON_3R_VRHADD:
4679 GEN_NEON_INTEGER_OP(rhadd);
4680 break;
4681 case NEON_3R_LOGIC: /* Logic ops. */
4682 switch ((u << 2) | size) {
4683 case 0: /* VAND */
4684 tcg_gen_and_i32(tmp, tmp, tmp2);
4685 break;
4686 case 1: /* BIC */
4687 tcg_gen_andc_i32(tmp, tmp, tmp2);
4688 break;
4689 case 2: /* VORR */
4690 tcg_gen_or_i32(tmp, tmp, tmp2);
4691 break;
4692 case 3: /* VORN */
4693 tcg_gen_orc_i32(tmp, tmp, tmp2);
4694 break;
4695 case 4: /* VEOR */
4696 tcg_gen_xor_i32(tmp, tmp, tmp2);
4697 break;
4698 case 5: /* VBSL */
4699 tmp3 = neon_load_reg(rd, pass);
4700 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4701 tcg_temp_free_i32(tmp3);
4702 break;
4703 case 6: /* VBIT */
4704 tmp3 = neon_load_reg(rd, pass);
4705 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4706 tcg_temp_free_i32(tmp3);
4707 break;
4708 case 7: /* VBIF */
4709 tmp3 = neon_load_reg(rd, pass);
4710 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4711 tcg_temp_free_i32(tmp3);
4712 break;
4714 break;
4715 case NEON_3R_VHSUB:
4716 GEN_NEON_INTEGER_OP(hsub);
4717 break;
4718 case NEON_3R_VQSUB:
4719 GEN_NEON_INTEGER_OP_ENV(qsub);
4720 break;
4721 case NEON_3R_VCGT:
4722 GEN_NEON_INTEGER_OP(cgt);
4723 break;
4724 case NEON_3R_VCGE:
4725 GEN_NEON_INTEGER_OP(cge);
4726 break;
4727 case NEON_3R_VSHL:
4728 GEN_NEON_INTEGER_OP(shl);
4729 break;
4730 case NEON_3R_VQSHL:
4731 GEN_NEON_INTEGER_OP_ENV(qshl);
4732 break;
4733 case NEON_3R_VRSHL:
4734 GEN_NEON_INTEGER_OP(rshl);
4735 break;
4736 case NEON_3R_VQRSHL:
4737 GEN_NEON_INTEGER_OP_ENV(qrshl);
4738 break;
4739 case NEON_3R_VMAX:
4740 GEN_NEON_INTEGER_OP(max);
4741 break;
4742 case NEON_3R_VMIN:
4743 GEN_NEON_INTEGER_OP(min);
4744 break;
4745 case NEON_3R_VABD:
4746 GEN_NEON_INTEGER_OP(abd);
4747 break;
4748 case NEON_3R_VABA:
4749 GEN_NEON_INTEGER_OP(abd);
4750 tcg_temp_free_i32(tmp2);
4751 tmp2 = neon_load_reg(rd, pass);
4752 gen_neon_add(size, tmp, tmp2);
4753 break;
4754 case NEON_3R_VADD_VSUB:
4755 if (!u) { /* VADD */
4756 gen_neon_add(size, tmp, tmp2);
4757 } else { /* VSUB */
4758 switch (size) {
4759 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4760 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4761 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4762 default: abort();
4765 break;
4766 case NEON_3R_VTST_VCEQ:
4767 if (!u) { /* VTST */
4768 switch (size) {
4769 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4770 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4771 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4772 default: abort();
4774 } else { /* VCEQ */
4775 switch (size) {
4776 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4777 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4778 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4779 default: abort();
4782 break;
4783 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
4784 switch (size) {
4785 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4787 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4788 default: abort();
4790 tcg_temp_free_i32(tmp2);
4791 tmp2 = neon_load_reg(rd, pass);
4792 if (u) { /* VMLS */
4793 gen_neon_rsb(size, tmp, tmp2);
4794 } else { /* VMLA */
4795 gen_neon_add(size, tmp, tmp2);
4797 break;
4798 case NEON_3R_VMUL:
4799 if (u) { /* polynomial */
4800 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4801 } else { /* Integer */
4802 switch (size) {
4803 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4804 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4805 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4806 default: abort();
4809 break;
4810 case NEON_3R_VPMAX:
4811 GEN_NEON_INTEGER_OP(pmax);
4812 break;
4813 case NEON_3R_VPMIN:
4814 GEN_NEON_INTEGER_OP(pmin);
4815 break;
4816 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4817 if (!u) { /* VQDMULH */
4818 switch (size) {
4819 case 1:
4820 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4821 break;
4822 case 2:
4823 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4824 break;
4825 default: abort();
4827 } else { /* VQRDMULH */
4828 switch (size) {
4829 case 1:
4830 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4831 break;
4832 case 2:
4833 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4834 break;
4835 default: abort();
4838 break;
4839 case NEON_3R_VPADD:
4840 switch (size) {
4841 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4842 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4843 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4844 default: abort();
4846 break;
4847 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4849 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4850 switch ((u << 2) | size) {
4851 case 0: /* VADD */
4852 case 4: /* VPADD */
4853 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4854 break;
4855 case 2: /* VSUB */
4856 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4857 break;
4858 case 6: /* VABD */
4859 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4860 break;
4861 default:
4862 abort();
4864 tcg_temp_free_ptr(fpstatus);
4865 break;
4867 case NEON_3R_FLOAT_MULTIPLY:
4869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4870 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4871 if (!u) {
4872 tcg_temp_free_i32(tmp2);
4873 tmp2 = neon_load_reg(rd, pass);
4874 if (size == 0) {
4875 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4876 } else {
4877 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4880 tcg_temp_free_ptr(fpstatus);
4881 break;
4883 case NEON_3R_FLOAT_CMP:
4885 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4886 if (!u) {
4887 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4888 } else {
4889 if (size == 0) {
4890 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4891 } else {
4892 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4895 tcg_temp_free_ptr(fpstatus);
4896 break;
4898 case NEON_3R_FLOAT_ACMP:
4900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4901 if (size == 0) {
4902 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4903 } else {
4904 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4906 tcg_temp_free_ptr(fpstatus);
4907 break;
4909 case NEON_3R_FLOAT_MINMAX:
4911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4912 if (size == 0) {
4913 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4914 } else {
4915 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4917 tcg_temp_free_ptr(fpstatus);
4918 break;
4920 case NEON_3R_VRECPS_VRSQRTS:
4921 if (size == 0)
4922 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4923 else
4924 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4925 break;
4926 case NEON_3R_VFM:
4928 /* VFMA, VFMS: fused multiply-add */
4929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4930 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4931 if (size) {
4932 /* VFMS */
4933 gen_helper_vfp_negs(tmp, tmp);
4935 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4936 tcg_temp_free_i32(tmp3);
4937 tcg_temp_free_ptr(fpstatus);
4938 break;
4940 default:
4941 abort();
4943 tcg_temp_free_i32(tmp2);
4945 /* Save the result. For elementwise operations we can put it
4946 straight into the destination register. For pairwise operations
4947 we have to be careful to avoid clobbering the source operands. */
4948 if (pairwise && rd == rm) {
4949 neon_store_scratch(pass, tmp);
4950 } else {
4951 neon_store_reg(rd, pass, tmp);
4954 } /* for pass */
4955 if (pairwise && rd == rm) {
4956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4957 tmp = neon_load_scratch(pass);
4958 neon_store_reg(rd, pass, tmp);
4961 /* End of 3 register same size operations. */
4962 } else if (insn & (1 << 4)) {
4963 if ((insn & 0x00380080) != 0) {
4964 /* Two registers and shift. */
4965 op = (insn >> 8) & 0xf;
4966 if (insn & (1 << 7)) {
4967 /* 64-bit shift. */
4968 if (op > 7) {
4969 return 1;
4971 size = 3;
4972 } else {
4973 size = 2;
4974 while ((insn & (1 << (size + 19))) == 0)
4975 size--;
4977 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4978 /* To avoid excessive duplication of ops we implement shift
4979 by immediate using the variable shift operations. */
4980 if (op < 8) {
4981 /* Shift by immediate:
4982 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4983 if (q && ((rd | rm) & 1)) {
4984 return 1;
4986 if (!u && (op == 4 || op == 6)) {
4987 return 1;
4989 /* Right shifts are encoded as N - shift, where N is the
4990 element size in bits. */
4991 if (op <= 4)
4992 shift = shift - (1 << (size + 3));
4993 if (size == 3) {
4994 count = q + 1;
4995 } else {
4996 count = q ? 4: 2;
4998 switch (size) {
4999 case 0:
5000 imm = (uint8_t) shift;
5001 imm |= imm << 8;
5002 imm |= imm << 16;
5003 break;
5004 case 1:
5005 imm = (uint16_t) shift;
5006 imm |= imm << 16;
5007 break;
5008 case 2:
5009 case 3:
5010 imm = shift;
5011 break;
5012 default:
5013 abort();
5016 for (pass = 0; pass < count; pass++) {
5017 if (size == 3) {
5018 neon_load_reg64(cpu_V0, rm + pass);
5019 tcg_gen_movi_i64(cpu_V1, imm);
5020 switch (op) {
5021 case 0: /* VSHR */
5022 case 1: /* VSRA */
5023 if (u)
5024 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5025 else
5026 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5027 break;
5028 case 2: /* VRSHR */
5029 case 3: /* VRSRA */
5030 if (u)
5031 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5032 else
5033 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5034 break;
5035 case 4: /* VSRI */
5036 case 5: /* VSHL, VSLI */
5037 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5038 break;
5039 case 6: /* VQSHLU */
5040 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5041 cpu_V0, cpu_V1);
5042 break;
5043 case 7: /* VQSHL */
5044 if (u) {
5045 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5046 cpu_V0, cpu_V1);
5047 } else {
5048 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5049 cpu_V0, cpu_V1);
5051 break;
5053 if (op == 1 || op == 3) {
5054 /* Accumulate. */
5055 neon_load_reg64(cpu_V1, rd + pass);
5056 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5057 } else if (op == 4 || (op == 5 && u)) {
5058 /* Insert */
5059 neon_load_reg64(cpu_V1, rd + pass);
5060 uint64_t mask;
5061 if (shift < -63 || shift > 63) {
5062 mask = 0;
5063 } else {
5064 if (op == 4) {
5065 mask = 0xffffffffffffffffull >> -shift;
5066 } else {
5067 mask = 0xffffffffffffffffull << shift;
5070 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5071 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5073 neon_store_reg64(cpu_V0, rd + pass);
5074 } else { /* size < 3 */
5075 /* Operands in T0 and T1. */
5076 tmp = neon_load_reg(rm, pass);
5077 tmp2 = tcg_temp_new_i32();
5078 tcg_gen_movi_i32(tmp2, imm);
5079 switch (op) {
5080 case 0: /* VSHR */
5081 case 1: /* VSRA */
5082 GEN_NEON_INTEGER_OP(shl);
5083 break;
5084 case 2: /* VRSHR */
5085 case 3: /* VRSRA */
5086 GEN_NEON_INTEGER_OP(rshl);
5087 break;
5088 case 4: /* VSRI */
5089 case 5: /* VSHL, VSLI */
5090 switch (size) {
5091 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5092 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5093 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5094 default: abort();
5096 break;
5097 case 6: /* VQSHLU */
5098 switch (size) {
5099 case 0:
5100 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5101 tmp, tmp2);
5102 break;
5103 case 1:
5104 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5105 tmp, tmp2);
5106 break;
5107 case 2:
5108 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5109 tmp, tmp2);
5110 break;
5111 default:
5112 abort();
5114 break;
5115 case 7: /* VQSHL */
5116 GEN_NEON_INTEGER_OP_ENV(qshl);
5117 break;
5119 tcg_temp_free_i32(tmp2);
5121 if (op == 1 || op == 3) {
5122 /* Accumulate. */
5123 tmp2 = neon_load_reg(rd, pass);
5124 gen_neon_add(size, tmp, tmp2);
5125 tcg_temp_free_i32(tmp2);
5126 } else if (op == 4 || (op == 5 && u)) {
5127 /* Insert */
5128 switch (size) {
5129 case 0:
5130 if (op == 4)
5131 mask = 0xff >> -shift;
5132 else
5133 mask = (uint8_t)(0xff << shift);
5134 mask |= mask << 8;
5135 mask |= mask << 16;
5136 break;
5137 case 1:
5138 if (op == 4)
5139 mask = 0xffff >> -shift;
5140 else
5141 mask = (uint16_t)(0xffff << shift);
5142 mask |= mask << 16;
5143 break;
5144 case 2:
5145 if (shift < -31 || shift > 31) {
5146 mask = 0;
5147 } else {
5148 if (op == 4)
5149 mask = 0xffffffffu >> -shift;
5150 else
5151 mask = 0xffffffffu << shift;
5153 break;
5154 default:
5155 abort();
5157 tmp2 = neon_load_reg(rd, pass);
5158 tcg_gen_andi_i32(tmp, tmp, mask);
5159 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5160 tcg_gen_or_i32(tmp, tmp, tmp2);
5161 tcg_temp_free_i32(tmp2);
5163 neon_store_reg(rd, pass, tmp);
5165 } /* for pass */
5166 } else if (op < 10) {
5167 /* Shift by immediate and narrow:
5168 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5169 int input_unsigned = (op == 8) ? !u : u;
5170 if (rm & 1) {
5171 return 1;
5173 shift = shift - (1 << (size + 3));
5174 size++;
5175 if (size == 3) {
5176 tmp64 = tcg_const_i64(shift);
5177 neon_load_reg64(cpu_V0, rm);
5178 neon_load_reg64(cpu_V1, rm + 1);
5179 for (pass = 0; pass < 2; pass++) {
5180 TCGv_i64 in;
5181 if (pass == 0) {
5182 in = cpu_V0;
5183 } else {
5184 in = cpu_V1;
5186 if (q) {
5187 if (input_unsigned) {
5188 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5189 } else {
5190 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5192 } else {
5193 if (input_unsigned) {
5194 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5195 } else {
5196 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5199 tmp = tcg_temp_new_i32();
5200 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5201 neon_store_reg(rd, pass, tmp);
5202 } /* for pass */
5203 tcg_temp_free_i64(tmp64);
5204 } else {
5205 if (size == 1) {
5206 imm = (uint16_t)shift;
5207 imm |= imm << 16;
5208 } else {
5209 /* size == 2 */
5210 imm = (uint32_t)shift;
5212 tmp2 = tcg_const_i32(imm);
5213 tmp4 = neon_load_reg(rm + 1, 0);
5214 tmp5 = neon_load_reg(rm + 1, 1);
5215 for (pass = 0; pass < 2; pass++) {
5216 if (pass == 0) {
5217 tmp = neon_load_reg(rm, 0);
5218 } else {
5219 tmp = tmp4;
5221 gen_neon_shift_narrow(size, tmp, tmp2, q,
5222 input_unsigned);
5223 if (pass == 0) {
5224 tmp3 = neon_load_reg(rm, 1);
5225 } else {
5226 tmp3 = tmp5;
5228 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5229 input_unsigned);
5230 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5231 tcg_temp_free_i32(tmp);
5232 tcg_temp_free_i32(tmp3);
5233 tmp = tcg_temp_new_i32();
5234 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5235 neon_store_reg(rd, pass, tmp);
5236 } /* for pass */
5237 tcg_temp_free_i32(tmp2);
5239 } else if (op == 10) {
5240 /* VSHLL, VMOVL */
5241 if (q || (rd & 1)) {
5242 return 1;
5244 tmp = neon_load_reg(rm, 0);
5245 tmp2 = neon_load_reg(rm, 1);
5246 for (pass = 0; pass < 2; pass++) {
5247 if (pass == 1)
5248 tmp = tmp2;
5250 gen_neon_widen(cpu_V0, tmp, size, u);
5252 if (shift != 0) {
5253 /* The shift is less than the width of the source
5254 type, so we can just shift the whole register. */
5255 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5256 /* Widen the result of shift: we need to clear
5257 * the potential overflow bits resulting from
5258 * left bits of the narrow input appearing as
5259 * right bits of left the neighbour narrow
5260 * input. */
5261 if (size < 2 || !u) {
5262 uint64_t imm64;
5263 if (size == 0) {
5264 imm = (0xffu >> (8 - shift));
5265 imm |= imm << 16;
5266 } else if (size == 1) {
5267 imm = 0xffff >> (16 - shift);
5268 } else {
5269 /* size == 2 */
5270 imm = 0xffffffff >> (32 - shift);
5272 if (size < 2) {
5273 imm64 = imm | (((uint64_t)imm) << 32);
5274 } else {
5275 imm64 = imm;
5277 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5280 neon_store_reg64(cpu_V0, rd + pass);
5282 } else if (op >= 14) {
5283 /* VCVT fixed-point. */
5284 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5285 return 1;
5287 /* We have already masked out the must-be-1 top bit of imm6,
5288 * hence this 32-shift where the ARM ARM has 64-imm6.
5290 shift = 32 - shift;
5291 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5292 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5293 if (!(op & 1)) {
5294 if (u)
5295 gen_vfp_ulto(0, shift, 1);
5296 else
5297 gen_vfp_slto(0, shift, 1);
5298 } else {
5299 if (u)
5300 gen_vfp_toul(0, shift, 1);
5301 else
5302 gen_vfp_tosl(0, shift, 1);
5304 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5306 } else {
5307 return 1;
5309 } else { /* (insn & 0x00380080) == 0 */
5310 int invert;
5311 if (q && (rd & 1)) {
5312 return 1;
5315 op = (insn >> 8) & 0xf;
5316 /* One register and immediate. */
5317 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5318 invert = (insn & (1 << 5)) != 0;
5319 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5320 * We choose to not special-case this and will behave as if a
5321 * valid constant encoding of 0 had been given.
5323 switch (op) {
5324 case 0: case 1:
5325 /* no-op */
5326 break;
5327 case 2: case 3:
5328 imm <<= 8;
5329 break;
5330 case 4: case 5:
5331 imm <<= 16;
5332 break;
5333 case 6: case 7:
5334 imm <<= 24;
5335 break;
5336 case 8: case 9:
5337 imm |= imm << 16;
5338 break;
5339 case 10: case 11:
5340 imm = (imm << 8) | (imm << 24);
5341 break;
5342 case 12:
5343 imm = (imm << 8) | 0xff;
5344 break;
5345 case 13:
5346 imm = (imm << 16) | 0xffff;
5347 break;
5348 case 14:
5349 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5350 if (invert)
5351 imm = ~imm;
5352 break;
5353 case 15:
5354 if (invert) {
5355 return 1;
5357 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5358 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5359 break;
5361 if (invert)
5362 imm = ~imm;
5364 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5365 if (op & 1 && op < 12) {
5366 tmp = neon_load_reg(rd, pass);
5367 if (invert) {
5368 /* The immediate value has already been inverted, so
5369 BIC becomes AND. */
5370 tcg_gen_andi_i32(tmp, tmp, imm);
5371 } else {
5372 tcg_gen_ori_i32(tmp, tmp, imm);
5374 } else {
5375 /* VMOV, VMVN. */
5376 tmp = tcg_temp_new_i32();
5377 if (op == 14 && invert) {
5378 int n;
5379 uint32_t val;
5380 val = 0;
5381 for (n = 0; n < 4; n++) {
5382 if (imm & (1 << (n + (pass & 1) * 4)))
5383 val |= 0xff << (n * 8);
5385 tcg_gen_movi_i32(tmp, val);
5386 } else {
5387 tcg_gen_movi_i32(tmp, imm);
5390 neon_store_reg(rd, pass, tmp);
5393 } else { /* (insn & 0x00800010 == 0x00800000) */
5394 if (size != 3) {
5395 op = (insn >> 8) & 0xf;
5396 if ((insn & (1 << 6)) == 0) {
5397 /* Three registers of different lengths. */
5398 int src1_wide;
5399 int src2_wide;
5400 int prewiden;
5401 /* undefreq: bit 0 : UNDEF if size != 0
5402 * bit 1 : UNDEF if size == 0
5403 * bit 2 : UNDEF if U == 1
5404 * Note that [1:0] set implies 'always UNDEF'
5406 int undefreq;
5407 /* prewiden, src1_wide, src2_wide, undefreq */
5408 static const int neon_3reg_wide[16][4] = {
5409 {1, 0, 0, 0}, /* VADDL */
5410 {1, 1, 0, 0}, /* VADDW */
5411 {1, 0, 0, 0}, /* VSUBL */
5412 {1, 1, 0, 0}, /* VSUBW */
5413 {0, 1, 1, 0}, /* VADDHN */
5414 {0, 0, 0, 0}, /* VABAL */
5415 {0, 1, 1, 0}, /* VSUBHN */
5416 {0, 0, 0, 0}, /* VABDL */
5417 {0, 0, 0, 0}, /* VMLAL */
5418 {0, 0, 0, 6}, /* VQDMLAL */
5419 {0, 0, 0, 0}, /* VMLSL */
5420 {0, 0, 0, 6}, /* VQDMLSL */
5421 {0, 0, 0, 0}, /* Integer VMULL */
5422 {0, 0, 0, 2}, /* VQDMULL */
5423 {0, 0, 0, 5}, /* Polynomial VMULL */
5424 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5427 prewiden = neon_3reg_wide[op][0];
5428 src1_wide = neon_3reg_wide[op][1];
5429 src2_wide = neon_3reg_wide[op][2];
5430 undefreq = neon_3reg_wide[op][3];
5432 if (((undefreq & 1) && (size != 0)) ||
5433 ((undefreq & 2) && (size == 0)) ||
5434 ((undefreq & 4) && u)) {
5435 return 1;
5437 if ((src1_wide && (rn & 1)) ||
5438 (src2_wide && (rm & 1)) ||
5439 (!src2_wide && (rd & 1))) {
5440 return 1;
5443 /* Avoid overlapping operands. Wide source operands are
5444 always aligned so will never overlap with wide
5445 destinations in problematic ways. */
5446 if (rd == rm && !src2_wide) {
5447 tmp = neon_load_reg(rm, 1);
5448 neon_store_scratch(2, tmp);
5449 } else if (rd == rn && !src1_wide) {
5450 tmp = neon_load_reg(rn, 1);
5451 neon_store_scratch(2, tmp);
5453 TCGV_UNUSED(tmp3);
5454 for (pass = 0; pass < 2; pass++) {
5455 if (src1_wide) {
5456 neon_load_reg64(cpu_V0, rn + pass);
5457 TCGV_UNUSED(tmp);
5458 } else {
5459 if (pass == 1 && rd == rn) {
5460 tmp = neon_load_scratch(2);
5461 } else {
5462 tmp = neon_load_reg(rn, pass);
5464 if (prewiden) {
5465 gen_neon_widen(cpu_V0, tmp, size, u);
5468 if (src2_wide) {
5469 neon_load_reg64(cpu_V1, rm + pass);
5470 TCGV_UNUSED(tmp2);
5471 } else {
5472 if (pass == 1 && rd == rm) {
5473 tmp2 = neon_load_scratch(2);
5474 } else {
5475 tmp2 = neon_load_reg(rm, pass);
5477 if (prewiden) {
5478 gen_neon_widen(cpu_V1, tmp2, size, u);
5481 switch (op) {
5482 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5483 gen_neon_addl(size);
5484 break;
5485 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5486 gen_neon_subl(size);
5487 break;
5488 case 5: case 7: /* VABAL, VABDL */
5489 switch ((size << 1) | u) {
5490 case 0:
5491 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5492 break;
5493 case 1:
5494 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5495 break;
5496 case 2:
5497 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5498 break;
5499 case 3:
5500 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5501 break;
5502 case 4:
5503 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5504 break;
5505 case 5:
5506 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5507 break;
5508 default: abort();
5510 tcg_temp_free_i32(tmp2);
5511 tcg_temp_free_i32(tmp);
5512 break;
5513 case 8: case 9: case 10: case 11: case 12: case 13:
5514 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5515 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5516 break;
5517 case 14: /* Polynomial VMULL */
5518 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5519 tcg_temp_free_i32(tmp2);
5520 tcg_temp_free_i32(tmp);
5521 break;
5522 default: /* 15 is RESERVED: caught earlier */
5523 abort();
5525 if (op == 13) {
5526 /* VQDMULL */
5527 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5528 neon_store_reg64(cpu_V0, rd + pass);
5529 } else if (op == 5 || (op >= 8 && op <= 11)) {
5530 /* Accumulate. */
5531 neon_load_reg64(cpu_V1, rd + pass);
5532 switch (op) {
5533 case 10: /* VMLSL */
5534 gen_neon_negl(cpu_V0, size);
5535 /* Fall through */
5536 case 5: case 8: /* VABAL, VMLAL */
5537 gen_neon_addl(size);
5538 break;
5539 case 9: case 11: /* VQDMLAL, VQDMLSL */
5540 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5541 if (op == 11) {
5542 gen_neon_negl(cpu_V0, size);
5544 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5545 break;
5546 default:
5547 abort();
5549 neon_store_reg64(cpu_V0, rd + pass);
5550 } else if (op == 4 || op == 6) {
5551 /* Narrowing operation. */
5552 tmp = tcg_temp_new_i32();
5553 if (!u) {
5554 switch (size) {
5555 case 0:
5556 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5557 break;
5558 case 1:
5559 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5560 break;
5561 case 2:
5562 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5563 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5564 break;
5565 default: abort();
5567 } else {
5568 switch (size) {
5569 case 0:
5570 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5571 break;
5572 case 1:
5573 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5574 break;
5575 case 2:
5576 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5577 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5578 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5579 break;
5580 default: abort();
5583 if (pass == 0) {
5584 tmp3 = tmp;
5585 } else {
5586 neon_store_reg(rd, 0, tmp3);
5587 neon_store_reg(rd, 1, tmp);
5589 } else {
5590 /* Write back the result. */
5591 neon_store_reg64(cpu_V0, rd + pass);
5594 } else {
5595 /* Two registers and a scalar. NB that for ops of this form
5596 * the ARM ARM labels bit 24 as Q, but it is in our variable
5597 * 'u', not 'q'.
5599 if (size == 0) {
5600 return 1;
5602 switch (op) {
5603 case 1: /* Float VMLA scalar */
5604 case 5: /* Floating point VMLS scalar */
5605 case 9: /* Floating point VMUL scalar */
5606 if (size == 1) {
5607 return 1;
5609 /* fall through */
5610 case 0: /* Integer VMLA scalar */
5611 case 4: /* Integer VMLS scalar */
5612 case 8: /* Integer VMUL scalar */
5613 case 12: /* VQDMULH scalar */
5614 case 13: /* VQRDMULH scalar */
5615 if (u && ((rd | rn) & 1)) {
5616 return 1;
5618 tmp = neon_get_scalar(size, rm);
5619 neon_store_scratch(0, tmp);
5620 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5621 tmp = neon_load_scratch(0);
5622 tmp2 = neon_load_reg(rn, pass);
5623 if (op == 12) {
5624 if (size == 1) {
5625 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5626 } else {
5627 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5629 } else if (op == 13) {
5630 if (size == 1) {
5631 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5632 } else {
5633 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5635 } else if (op & 1) {
5636 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5637 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5638 tcg_temp_free_ptr(fpstatus);
5639 } else {
5640 switch (size) {
5641 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5642 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5643 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5644 default: abort();
5647 tcg_temp_free_i32(tmp2);
5648 if (op < 8) {
5649 /* Accumulate. */
5650 tmp2 = neon_load_reg(rd, pass);
5651 switch (op) {
5652 case 0:
5653 gen_neon_add(size, tmp, tmp2);
5654 break;
5655 case 1:
5657 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5658 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5659 tcg_temp_free_ptr(fpstatus);
5660 break;
5662 case 4:
5663 gen_neon_rsb(size, tmp, tmp2);
5664 break;
5665 case 5:
5667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5668 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5669 tcg_temp_free_ptr(fpstatus);
5670 break;
5672 default:
5673 abort();
5675 tcg_temp_free_i32(tmp2);
5677 neon_store_reg(rd, pass, tmp);
5679 break;
5680 case 3: /* VQDMLAL scalar */
5681 case 7: /* VQDMLSL scalar */
5682 case 11: /* VQDMULL scalar */
5683 if (u == 1) {
5684 return 1;
5686 /* fall through */
5687 case 2: /* VMLAL sclar */
5688 case 6: /* VMLSL scalar */
5689 case 10: /* VMULL scalar */
5690 if (rd & 1) {
5691 return 1;
5693 tmp2 = neon_get_scalar(size, rm);
5694 /* We need a copy of tmp2 because gen_neon_mull
5695 * deletes it during pass 0. */
5696 tmp4 = tcg_temp_new_i32();
5697 tcg_gen_mov_i32(tmp4, tmp2);
5698 tmp3 = neon_load_reg(rn, 1);
5700 for (pass = 0; pass < 2; pass++) {
5701 if (pass == 0) {
5702 tmp = neon_load_reg(rn, 0);
5703 } else {
5704 tmp = tmp3;
5705 tmp2 = tmp4;
5707 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5708 if (op != 11) {
5709 neon_load_reg64(cpu_V1, rd + pass);
5711 switch (op) {
5712 case 6:
5713 gen_neon_negl(cpu_V0, size);
5714 /* Fall through */
5715 case 2:
5716 gen_neon_addl(size);
5717 break;
5718 case 3: case 7:
5719 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5720 if (op == 7) {
5721 gen_neon_negl(cpu_V0, size);
5723 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5724 break;
5725 case 10:
5726 /* no-op */
5727 break;
5728 case 11:
5729 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5730 break;
5731 default:
5732 abort();
5734 neon_store_reg64(cpu_V0, rd + pass);
5738 break;
5739 default: /* 14 and 15 are RESERVED */
5740 return 1;
5743 } else { /* size == 3 */
5744 if (!u) {
5745 /* Extract. */
5746 imm = (insn >> 8) & 0xf;
5748 if (imm > 7 && !q)
5749 return 1;
5751 if (q && ((rd | rn | rm) & 1)) {
5752 return 1;
5755 if (imm == 0) {
5756 neon_load_reg64(cpu_V0, rn);
5757 if (q) {
5758 neon_load_reg64(cpu_V1, rn + 1);
5760 } else if (imm == 8) {
5761 neon_load_reg64(cpu_V0, rn + 1);
5762 if (q) {
5763 neon_load_reg64(cpu_V1, rm);
5765 } else if (q) {
5766 tmp64 = tcg_temp_new_i64();
5767 if (imm < 8) {
5768 neon_load_reg64(cpu_V0, rn);
5769 neon_load_reg64(tmp64, rn + 1);
5770 } else {
5771 neon_load_reg64(cpu_V0, rn + 1);
5772 neon_load_reg64(tmp64, rm);
5774 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5775 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5776 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5777 if (imm < 8) {
5778 neon_load_reg64(cpu_V1, rm);
5779 } else {
5780 neon_load_reg64(cpu_V1, rm + 1);
5781 imm -= 8;
5783 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5784 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5785 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5786 tcg_temp_free_i64(tmp64);
5787 } else {
5788 /* BUGFIX */
5789 neon_load_reg64(cpu_V0, rn);
5790 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5791 neon_load_reg64(cpu_V1, rm);
5792 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5793 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5795 neon_store_reg64(cpu_V0, rd);
5796 if (q) {
5797 neon_store_reg64(cpu_V1, rd + 1);
5799 } else if ((insn & (1 << 11)) == 0) {
5800 /* Two register misc. */
5801 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5802 size = (insn >> 18) & 3;
5803 /* UNDEF for unknown op values and bad op-size combinations */
5804 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5805 return 1;
5807 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5808 q && ((rm | rd) & 1)) {
5809 return 1;
5811 switch (op) {
5812 case NEON_2RM_VREV64:
5813 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5814 tmp = neon_load_reg(rm, pass * 2);
5815 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5816 switch (size) {
5817 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5818 case 1: gen_swap_half(tmp); break;
5819 case 2: /* no-op */ break;
5820 default: abort();
5822 neon_store_reg(rd, pass * 2 + 1, tmp);
5823 if (size == 2) {
5824 neon_store_reg(rd, pass * 2, tmp2);
5825 } else {
5826 switch (size) {
5827 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5828 case 1: gen_swap_half(tmp2); break;
5829 default: abort();
5831 neon_store_reg(rd, pass * 2, tmp2);
5834 break;
5835 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5836 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5837 for (pass = 0; pass < q + 1; pass++) {
5838 tmp = neon_load_reg(rm, pass * 2);
5839 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5840 tmp = neon_load_reg(rm, pass * 2 + 1);
5841 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5842 switch (size) {
5843 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5844 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5845 case 2: tcg_gen_add_i64(CPU_V001); break;
5846 default: abort();
5848 if (op >= NEON_2RM_VPADAL) {
5849 /* Accumulate. */
5850 neon_load_reg64(cpu_V1, rd + pass);
5851 gen_neon_addl(size);
5853 neon_store_reg64(cpu_V0, rd + pass);
5855 break;
5856 case NEON_2RM_VTRN:
5857 if (size == 2) {
5858 int n;
5859 for (n = 0; n < (q ? 4 : 2); n += 2) {
5860 tmp = neon_load_reg(rm, n);
5861 tmp2 = neon_load_reg(rd, n + 1);
5862 neon_store_reg(rm, n, tmp2);
5863 neon_store_reg(rd, n + 1, tmp);
5865 } else {
5866 goto elementwise;
5868 break;
5869 case NEON_2RM_VUZP:
5870 if (gen_neon_unzip(rd, rm, size, q)) {
5871 return 1;
5873 break;
5874 case NEON_2RM_VZIP:
5875 if (gen_neon_zip(rd, rm, size, q)) {
5876 return 1;
5878 break;
5879 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5880 /* also VQMOVUN; op field and mnemonics don't line up */
5881 if (rm & 1) {
5882 return 1;
5884 TCGV_UNUSED(tmp2);
5885 for (pass = 0; pass < 2; pass++) {
5886 neon_load_reg64(cpu_V0, rm + pass);
5887 tmp = tcg_temp_new_i32();
5888 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5889 tmp, cpu_V0);
5890 if (pass == 0) {
5891 tmp2 = tmp;
5892 } else {
5893 neon_store_reg(rd, 0, tmp2);
5894 neon_store_reg(rd, 1, tmp);
5897 break;
5898 case NEON_2RM_VSHLL:
5899 if (q || (rd & 1)) {
5900 return 1;
5902 tmp = neon_load_reg(rm, 0);
5903 tmp2 = neon_load_reg(rm, 1);
5904 for (pass = 0; pass < 2; pass++) {
5905 if (pass == 1)
5906 tmp = tmp2;
5907 gen_neon_widen(cpu_V0, tmp, size, 1);
5908 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5909 neon_store_reg64(cpu_V0, rd + pass);
5911 break;
5912 case NEON_2RM_VCVT_F16_F32:
5913 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5914 q || (rm & 1)) {
5915 return 1;
5917 tmp = tcg_temp_new_i32();
5918 tmp2 = tcg_temp_new_i32();
5919 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5920 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5921 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5922 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5923 tcg_gen_shli_i32(tmp2, tmp2, 16);
5924 tcg_gen_or_i32(tmp2, tmp2, tmp);
5925 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5926 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5927 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5928 neon_store_reg(rd, 0, tmp2);
5929 tmp2 = tcg_temp_new_i32();
5930 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5931 tcg_gen_shli_i32(tmp2, tmp2, 16);
5932 tcg_gen_or_i32(tmp2, tmp2, tmp);
5933 neon_store_reg(rd, 1, tmp2);
5934 tcg_temp_free_i32(tmp);
5935 break;
5936 case NEON_2RM_VCVT_F32_F16:
5937 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5938 q || (rd & 1)) {
5939 return 1;
5941 tmp3 = tcg_temp_new_i32();
5942 tmp = neon_load_reg(rm, 0);
5943 tmp2 = neon_load_reg(rm, 1);
5944 tcg_gen_ext16u_i32(tmp3, tmp);
5945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5946 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5947 tcg_gen_shri_i32(tmp3, tmp, 16);
5948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5949 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5950 tcg_temp_free_i32(tmp);
5951 tcg_gen_ext16u_i32(tmp3, tmp2);
5952 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5953 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5954 tcg_gen_shri_i32(tmp3, tmp2, 16);
5955 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5956 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5957 tcg_temp_free_i32(tmp2);
5958 tcg_temp_free_i32(tmp3);
5959 break;
5960 default:
5961 elementwise:
5962 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5963 if (neon_2rm_is_float_op(op)) {
5964 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5965 neon_reg_offset(rm, pass));
5966 TCGV_UNUSED(tmp);
5967 } else {
5968 tmp = neon_load_reg(rm, pass);
5970 switch (op) {
5971 case NEON_2RM_VREV32:
5972 switch (size) {
5973 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5974 case 1: gen_swap_half(tmp); break;
5975 default: abort();
5977 break;
5978 case NEON_2RM_VREV16:
5979 gen_rev16(tmp);
5980 break;
5981 case NEON_2RM_VCLS:
5982 switch (size) {
5983 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5984 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5985 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5986 default: abort();
5988 break;
5989 case NEON_2RM_VCLZ:
5990 switch (size) {
5991 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5992 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5993 case 2: gen_helper_clz(tmp, tmp); break;
5994 default: abort();
5996 break;
5997 case NEON_2RM_VCNT:
5998 gen_helper_neon_cnt_u8(tmp, tmp);
5999 break;
6000 case NEON_2RM_VMVN:
6001 tcg_gen_not_i32(tmp, tmp);
6002 break;
6003 case NEON_2RM_VQABS:
6004 switch (size) {
6005 case 0:
6006 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6007 break;
6008 case 1:
6009 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6010 break;
6011 case 2:
6012 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6013 break;
6014 default: abort();
6016 break;
6017 case NEON_2RM_VQNEG:
6018 switch (size) {
6019 case 0:
6020 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6021 break;
6022 case 1:
6023 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6024 break;
6025 case 2:
6026 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6027 break;
6028 default: abort();
6030 break;
6031 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6032 tmp2 = tcg_const_i32(0);
6033 switch(size) {
6034 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6035 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6036 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6037 default: abort();
6039 tcg_temp_free(tmp2);
6040 if (op == NEON_2RM_VCLE0) {
6041 tcg_gen_not_i32(tmp, tmp);
6043 break;
6044 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6045 tmp2 = tcg_const_i32(0);
6046 switch(size) {
6047 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6048 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6049 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6050 default: abort();
6052 tcg_temp_free(tmp2);
6053 if (op == NEON_2RM_VCLT0) {
6054 tcg_gen_not_i32(tmp, tmp);
6056 break;
6057 case NEON_2RM_VCEQ0:
6058 tmp2 = tcg_const_i32(0);
6059 switch(size) {
6060 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6063 default: abort();
6065 tcg_temp_free(tmp2);
6066 break;
6067 case NEON_2RM_VABS:
6068 switch(size) {
6069 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6070 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6071 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6072 default: abort();
6074 break;
6075 case NEON_2RM_VNEG:
6076 tmp2 = tcg_const_i32(0);
6077 gen_neon_rsb(size, tmp, tmp2);
6078 tcg_temp_free(tmp2);
6079 break;
6080 case NEON_2RM_VCGT0_F:
6082 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6083 tmp2 = tcg_const_i32(0);
6084 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6085 tcg_temp_free(tmp2);
6086 tcg_temp_free_ptr(fpstatus);
6087 break;
6089 case NEON_2RM_VCGE0_F:
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6092 tmp2 = tcg_const_i32(0);
6093 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6094 tcg_temp_free(tmp2);
6095 tcg_temp_free_ptr(fpstatus);
6096 break;
6098 case NEON_2RM_VCEQ0_F:
6100 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6101 tmp2 = tcg_const_i32(0);
6102 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6103 tcg_temp_free(tmp2);
6104 tcg_temp_free_ptr(fpstatus);
6105 break;
6107 case NEON_2RM_VCLE0_F:
6109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6110 tmp2 = tcg_const_i32(0);
6111 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6112 tcg_temp_free(tmp2);
6113 tcg_temp_free_ptr(fpstatus);
6114 break;
6116 case NEON_2RM_VCLT0_F:
6118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6119 tmp2 = tcg_const_i32(0);
6120 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6121 tcg_temp_free(tmp2);
6122 tcg_temp_free_ptr(fpstatus);
6123 break;
6125 case NEON_2RM_VABS_F:
6126 gen_vfp_abs(0);
6127 break;
6128 case NEON_2RM_VNEG_F:
6129 gen_vfp_neg(0);
6130 break;
6131 case NEON_2RM_VSWP:
6132 tmp2 = neon_load_reg(rd, pass);
6133 neon_store_reg(rm, pass, tmp2);
6134 break;
6135 case NEON_2RM_VTRN:
6136 tmp2 = neon_load_reg(rd, pass);
6137 switch (size) {
6138 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6139 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6140 default: abort();
6142 neon_store_reg(rm, pass, tmp2);
6143 break;
6144 case NEON_2RM_VRECPE:
6145 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6146 break;
6147 case NEON_2RM_VRSQRTE:
6148 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6149 break;
6150 case NEON_2RM_VRECPE_F:
6151 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6152 break;
6153 case NEON_2RM_VRSQRTE_F:
6154 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6155 break;
6156 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6157 gen_vfp_sito(0, 1);
6158 break;
6159 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6160 gen_vfp_uito(0, 1);
6161 break;
6162 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6163 gen_vfp_tosiz(0, 1);
6164 break;
6165 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6166 gen_vfp_touiz(0, 1);
6167 break;
6168 default:
6169 /* Reserved op values were caught by the
6170 * neon_2rm_sizes[] check earlier.
6172 abort();
6174 if (neon_2rm_is_float_op(op)) {
6175 tcg_gen_st_f32(cpu_F0s, cpu_env,
6176 neon_reg_offset(rd, pass));
6177 } else {
6178 neon_store_reg(rd, pass, tmp);
6181 break;
6183 } else if ((insn & (1 << 10)) == 0) {
6184 /* VTBL, VTBX. */
6185 int n = ((insn >> 8) & 3) + 1;
6186 if ((rn + n) > 32) {
6187 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6188 * helper function running off the end of the register file.
6190 return 1;
6192 n <<= 3;
6193 if (insn & (1 << 6)) {
6194 tmp = neon_load_reg(rd, 0);
6195 } else {
6196 tmp = tcg_temp_new_i32();
6197 tcg_gen_movi_i32(tmp, 0);
6199 tmp2 = neon_load_reg(rm, 0);
6200 tmp4 = tcg_const_i32(rn);
6201 tmp5 = tcg_const_i32(n);
6202 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6203 tcg_temp_free_i32(tmp);
6204 if (insn & (1 << 6)) {
6205 tmp = neon_load_reg(rd, 1);
6206 } else {
6207 tmp = tcg_temp_new_i32();
6208 tcg_gen_movi_i32(tmp, 0);
6210 tmp3 = neon_load_reg(rm, 1);
6211 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6212 tcg_temp_free_i32(tmp5);
6213 tcg_temp_free_i32(tmp4);
6214 neon_store_reg(rd, 0, tmp2);
6215 neon_store_reg(rd, 1, tmp3);
6216 tcg_temp_free_i32(tmp);
6217 } else if ((insn & 0x380) == 0) {
6218 /* VDUP */
6219 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6220 return 1;
6222 if (insn & (1 << 19)) {
6223 tmp = neon_load_reg(rm, 1);
6224 } else {
6225 tmp = neon_load_reg(rm, 0);
6227 if (insn & (1 << 16)) {
6228 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6229 } else if (insn & (1 << 17)) {
6230 if ((insn >> 18) & 1)
6231 gen_neon_dup_high16(tmp);
6232 else
6233 gen_neon_dup_low16(tmp);
6235 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6236 tmp2 = tcg_temp_new_i32();
6237 tcg_gen_mov_i32(tmp2, tmp);
6238 neon_store_reg(rd, pass, tmp2);
6240 tcg_temp_free_i32(tmp);
6241 } else {
6242 return 1;
6246 return 0;
/* Decode and translate a coprocessor instruction (MRC/MCR, MRRC/MCRR,
 * and the coprocessor spaces used for iwMMXt/DSP/VFP).
 * Returns 0 if the instruction was handled, nonzero to UNDEF.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    /* On XScale the CPAR register gates access per coprocessor number;
     * a clear bit means the access UNDEFs.
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
        /* cp0/cp1: iwMMXt or XScale DSP instructions, if present.  */
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        /* cp10/cp11: VFP/Neon register transfers.  */
        return disas_vfp_insn (env, s, insn);
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Decode the register-identifying fields; the 64-bit (MRRC/MCRR)
     * encoding has no crn/opc2 and carries the second GPR in rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look up the register definition for this encoding; NULL means
     * the register is unimplemented and we UNDEF below.
     */
    ri = get_arm_cp_reginfo(cpu,
            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* WFI: sync the PC and end the TB in wait-for-interrupt state. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    /* Constant register: materialize the reset value.  */
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    /* Registered read hook: call out via helper.  The PC is
                     * synced first so the hook can raise an exception.
                     */
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    /* Plain state field: load directly from CPUState.  */
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value across the rt:rt2 GPR pair.  */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                /* Assemble the 64-bit value from the rt:rt2 GPR pair.  */
                TCGv tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    /* Registered write hook: call out via helper.  */
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
                gen_lookup_tb(s);
            }
        }
        return 0;
    }

    /* Unknown register: UNDEF.  */
    return 1;
}
6426 /* Store a 64-bit value to a register pair. Clobbers val. */
6427 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6429 TCGv tmp;
6430 tmp = tcg_temp_new_i32();
6431 tcg_gen_trunc_i64_i32(tmp, val);
6432 store_reg(s, rlow, tmp);
6433 tmp = tcg_temp_new_i32();
6434 tcg_gen_shri_i64(val, val, 32);
6435 tcg_gen_trunc_i64_i32(tmp, val);
6436 store_reg(s, rhigh, tmp);
6439 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6440 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6442 TCGv_i64 tmp;
6443 TCGv tmp2;
6445 /* Load value and extend to 64 bits. */
6446 tmp = tcg_temp_new_i64();
6447 tmp2 = load_reg(s, rlow);
6448 tcg_gen_extu_i32_i64(tmp, tmp2);
6449 tcg_temp_free_i32(tmp2);
6450 tcg_gen_add_i64(val, val, tmp);
6451 tcg_temp_free_i64(tmp);
6454 /* load and add a 64-bit value from a register pair. */
6455 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6457 TCGv_i64 tmp;
6458 TCGv tmpl;
6459 TCGv tmph;
6461 /* Load 64-bit value rd:rn. */
6462 tmpl = load_reg(s, rlow);
6463 tmph = load_reg(s, rhigh);
6464 tmp = tcg_temp_new_i64();
6465 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6466 tcg_temp_free_i32(tmpl);
6467 tcg_temp_free_i32(tmph);
6468 tcg_gen_add_i64(val, val, tmp);
6469 tcg_temp_free_i64(tmp);
6472 /* Set N and Z flags from hi|lo. */
6473 static void gen_logicq_cc(TCGv lo, TCGv hi)
6475 tcg_gen_mov_i32(cpu_NF, hi);
6476 tcg_gen_or_i32(cpu_ZF, lo, hi);
6479 /* Load/Store exclusive instructions are implemented by remembering
6480 the value/address loaded, and seeing if these are the same
6481 when the store is performed. This should be sufficient to implement
6482 the architecturally mandated semantics, and avoids having to monitor
6483 regular stores.
6485 In system emulation mode only one CPU will be running at once, so
6486 this sequence is effectively atomic. In user emulation mode we
6487 throw an exception and handle the atomic operation elsewhere. */
6488 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6489 TCGv addr, int size)
6491 TCGv tmp;
6493 switch (size) {
6494 case 0:
6495 tmp = gen_ld8u(addr, IS_USER(s));
6496 break;
6497 case 1:
6498 tmp = gen_ld16u(addr, IS_USER(s));
6499 break;
6500 case 2:
6501 case 3:
6502 tmp = gen_ld32(addr, IS_USER(s));
6503 break;
6504 default:
6505 abort();
6507 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6508 store_reg(s, rt, tmp);
6509 if (size == 3) {
6510 TCGv tmp2 = tcg_temp_new_i32();
6511 tcg_gen_addi_i32(tmp2, addr, 4);
6512 tmp = gen_ld32(tmp2, IS_USER(s));
6513 tcg_temp_free_i32(tmp2);
6514 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6515 store_reg(s, rt2, tmp);
6517 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
/* CLREX: clear the exclusive monitor by setting the tracked address to
 * -1, a sentinel that no subsequent STREX address comparison will match.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6525 #ifdef CONFIG_USER_ONLY
6526 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6527 TCGv addr, int size)
6529 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6530 tcg_gen_movi_i32(cpu_exclusive_info,
6531 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6532 gen_exception_insn(s, 4, EXCP_STREX);
6534 #else
/* System-mode STREX: succeed only if the address matches the recorded
 * exclusive address and the memory still holds the recorded value(s);
 * Rd is set to 0 on success, 1 on failure, and the monitor is cleared.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address must match the one marked by the last LDREX.  */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Memory must still hold the value observed by the LDREX.  */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: also re-check the second word against
         * cpu_exclusive_high.
         */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: perform the store(s).  */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    /* Success: Rd = 0.  */
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    /* Failure: Rd = 1.  */
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now clear.  */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6602 #endif
6604 /* gen_srs:
6605 * @env: CPUARMState
6606 * @s: DisasContext
6607 * @mode: mode field from insn (which stack to store to)
6608 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6609 * @writeback: true if writeback bit set
6611 * Generate code for the SRS (Store Return State) insn.
6613 static void gen_srs(DisasContext *s,
6614 uint32_t mode, uint32_t amode, bool writeback)
6616 int32_t offset;
6617 TCGv_i32 addr = tcg_temp_new_i32();
6618 TCGv_i32 tmp = tcg_const_i32(mode);
6619 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6620 tcg_temp_free_i32(tmp);
6621 switch (amode) {
6622 case 0: /* DA */
6623 offset = -4;
6624 break;
6625 case 1: /* IA */
6626 offset = 0;
6627 break;
6628 case 2: /* DB */
6629 offset = -8;
6630 break;
6631 case 3: /* IB */
6632 offset = 4;
6633 break;
6634 default:
6635 abort();
6637 tcg_gen_addi_i32(addr, addr, offset);
6638 tmp = load_reg(s, 14);
6639 gen_st32(tmp, addr, 0);
6640 tmp = load_cpu_field(spsr);
6641 tcg_gen_addi_i32(addr, addr, 4);
6642 gen_st32(tmp, addr, 0);
6643 if (writeback) {
6644 switch (amode) {
6645 case 0:
6646 offset = -8;
6647 break;
6648 case 1:
6649 offset = 4;
6650 break;
6651 case 2:
6652 offset = -4;
6653 break;
6654 case 3:
6655 offset = 0;
6656 break;
6657 default:
6658 abort();
6660 tcg_gen_addi_i32(addr, addr, offset);
6661 tmp = tcg_const_i32(mode);
6662 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6663 tcg_temp_free_i32(tmp);
6665 tcg_temp_free_i32(addr);
6668 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6670 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6671 TCGv tmp;
6672 TCGv tmp2;
6673 TCGv tmp3;
6674 TCGv addr;
6675 TCGv_i64 tmp64;
6677 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6678 s->pc += 4;
6680 /* M variants do not implement ARM mode. */
6681 if (IS_M(env))
6682 goto illegal_op;
6683 cond = insn >> 28;
6684 if (cond == 0xf){
6685 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6686 * choose to UNDEF. In ARMv5 and above the space is used
6687 * for miscellaneous unconditional instructions.
6689 ARCH(5);
6691 /* Unconditional instructions. */
6692 if (((insn >> 25) & 7) == 1) {
6693 /* NEON Data processing. */
6694 if (!arm_feature(env, ARM_FEATURE_NEON))
6695 goto illegal_op;
6697 if (disas_neon_data_insn(env, s, insn))
6698 goto illegal_op;
6699 return;
6701 if ((insn & 0x0f100000) == 0x04000000) {
6702 /* NEON load/store. */
6703 if (!arm_feature(env, ARM_FEATURE_NEON))
6704 goto illegal_op;
6706 if (disas_neon_ls_insn(env, s, insn))
6707 goto illegal_op;
6708 return;
6710 if (((insn & 0x0f30f000) == 0x0510f000) ||
6711 ((insn & 0x0f30f010) == 0x0710f000)) {
6712 if ((insn & (1 << 22)) == 0) {
6713 /* PLDW; v7MP */
6714 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6715 goto illegal_op;
6718 /* Otherwise PLD; v5TE+ */
6719 ARCH(5TE);
6720 return;
6722 if (((insn & 0x0f70f000) == 0x0450f000) ||
6723 ((insn & 0x0f70f010) == 0x0650f000)) {
6724 ARCH(7);
6725 return; /* PLI; V7 */
6727 if (((insn & 0x0f700000) == 0x04100000) ||
6728 ((insn & 0x0f700010) == 0x06100000)) {
6729 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6730 goto illegal_op;
6732 return; /* v7MP: Unallocated memory hint: must NOP */
6735 if ((insn & 0x0ffffdff) == 0x01010000) {
6736 ARCH(6);
6737 /* setend */
6738 if (((insn >> 9) & 1) != s->bswap_code) {
6739 /* Dynamic endianness switching not implemented. */
6740 goto illegal_op;
6742 return;
6743 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6744 switch ((insn >> 4) & 0xf) {
6745 case 1: /* clrex */
6746 ARCH(6K);
6747 gen_clrex(s);
6748 return;
6749 case 4: /* dsb */
6750 case 5: /* dmb */
6751 case 6: /* isb */
6752 ARCH(7);
6753 /* We don't emulate caches so these are a no-op. */
6754 return;
6755 default:
6756 goto illegal_op;
6758 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6759 /* srs */
6760 if (IS_USER(s)) {
6761 goto illegal_op;
6763 ARCH(6);
6764 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
6765 return;
6766 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6767 /* rfe */
6768 int32_t offset;
6769 if (IS_USER(s))
6770 goto illegal_op;
6771 ARCH(6);
6772 rn = (insn >> 16) & 0xf;
6773 addr = load_reg(s, rn);
6774 i = (insn >> 23) & 3;
6775 switch (i) {
6776 case 0: offset = -4; break; /* DA */
6777 case 1: offset = 0; break; /* IA */
6778 case 2: offset = -8; break; /* DB */
6779 case 3: offset = 4; break; /* IB */
6780 default: abort();
6782 if (offset)
6783 tcg_gen_addi_i32(addr, addr, offset);
6784 /* Load PC into tmp and CPSR into tmp2. */
6785 tmp = gen_ld32(addr, 0);
6786 tcg_gen_addi_i32(addr, addr, 4);
6787 tmp2 = gen_ld32(addr, 0);
6788 if (insn & (1 << 21)) {
6789 /* Base writeback. */
6790 switch (i) {
6791 case 0: offset = -8; break;
6792 case 1: offset = 4; break;
6793 case 2: offset = -4; break;
6794 case 3: offset = 0; break;
6795 default: abort();
6797 if (offset)
6798 tcg_gen_addi_i32(addr, addr, offset);
6799 store_reg(s, rn, addr);
6800 } else {
6801 tcg_temp_free_i32(addr);
6803 gen_rfe(s, tmp, tmp2);
6804 return;
6805 } else if ((insn & 0x0e000000) == 0x0a000000) {
6806 /* branch link and change to thumb (blx <offset>) */
6807 int32_t offset;
6809 val = (uint32_t)s->pc;
6810 tmp = tcg_temp_new_i32();
6811 tcg_gen_movi_i32(tmp, val);
6812 store_reg(s, 14, tmp);
6813 /* Sign-extend the 24-bit offset */
6814 offset = (((int32_t)insn) << 8) >> 8;
6815 /* offset * 4 + bit24 * 2 + (thumb bit) */
6816 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6817 /* pipeline offset */
6818 val += 4;
6819 /* protected by ARCH(5); above, near the start of uncond block */
6820 gen_bx_im(s, val);
6821 return;
6822 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6823 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6824 /* iWMMXt register transfer. */
6825 if (env->cp15.c15_cpar & (1 << 1))
6826 if (!disas_iwmmxt_insn(env, s, insn))
6827 return;
6829 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6830 /* Coprocessor double register transfer. */
6831 ARCH(5TE);
6832 } else if ((insn & 0x0f000010) == 0x0e000010) {
6833 /* Additional coprocessor register transfer. */
6834 } else if ((insn & 0x0ff10020) == 0x01000000) {
6835 uint32_t mask;
6836 uint32_t val;
6837 /* cps (privileged) */
6838 if (IS_USER(s))
6839 return;
6840 mask = val = 0;
6841 if (insn & (1 << 19)) {
6842 if (insn & (1 << 8))
6843 mask |= CPSR_A;
6844 if (insn & (1 << 7))
6845 mask |= CPSR_I;
6846 if (insn & (1 << 6))
6847 mask |= CPSR_F;
6848 if (insn & (1 << 18))
6849 val |= mask;
6851 if (insn & (1 << 17)) {
6852 mask |= CPSR_M;
6853 val |= (insn & 0x1f);
6855 if (mask) {
6856 gen_set_psr_im(s, mask, 0, val);
6858 return;
6860 goto illegal_op;
6862 if (cond != 0xe) {
6863 /* if not always execute, we generate a conditional jump to
6864 next instruction */
6865 s->condlabel = gen_new_label();
6866 gen_test_cc(cond ^ 1, s->condlabel);
6867 s->condjmp = 1;
6869 if ((insn & 0x0f900000) == 0x03000000) {
6870 if ((insn & (1 << 21)) == 0) {
6871 ARCH(6T2);
6872 rd = (insn >> 12) & 0xf;
6873 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6874 if ((insn & (1 << 22)) == 0) {
6875 /* MOVW */
6876 tmp = tcg_temp_new_i32();
6877 tcg_gen_movi_i32(tmp, val);
6878 } else {
6879 /* MOVT */
6880 tmp = load_reg(s, rd);
6881 tcg_gen_ext16u_i32(tmp, tmp);
6882 tcg_gen_ori_i32(tmp, tmp, val << 16);
6884 store_reg(s, rd, tmp);
6885 } else {
6886 if (((insn >> 12) & 0xf) != 0xf)
6887 goto illegal_op;
6888 if (((insn >> 16) & 0xf) == 0) {
6889 gen_nop_hint(s, insn & 0xff);
6890 } else {
6891 /* CPSR = immediate */
6892 val = insn & 0xff;
6893 shift = ((insn >> 8) & 0xf) * 2;
6894 if (shift)
6895 val = (val >> shift) | (val << (32 - shift));
6896 i = ((insn & (1 << 22)) != 0);
6897 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6898 goto illegal_op;
6901 } else if ((insn & 0x0f900000) == 0x01000000
6902 && (insn & 0x00000090) != 0x00000090) {
6903 /* miscellaneous instructions */
6904 op1 = (insn >> 21) & 3;
6905 sh = (insn >> 4) & 0xf;
6906 rm = insn & 0xf;
6907 switch (sh) {
6908 case 0x0: /* move program status register */
6909 if (op1 & 1) {
6910 /* PSR = reg */
6911 tmp = load_reg(s, rm);
6912 i = ((op1 & 2) != 0);
6913 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6914 goto illegal_op;
6915 } else {
6916 /* reg = PSR */
6917 rd = (insn >> 12) & 0xf;
6918 if (op1 & 2) {
6919 if (IS_USER(s))
6920 goto illegal_op;
6921 tmp = load_cpu_field(spsr);
6922 } else {
6923 tmp = tcg_temp_new_i32();
6924 gen_helper_cpsr_read(tmp, cpu_env);
6926 store_reg(s, rd, tmp);
6928 break;
6929 case 0x1:
6930 if (op1 == 1) {
6931 /* branch/exchange thumb (bx). */
6932 ARCH(4T);
6933 tmp = load_reg(s, rm);
6934 gen_bx(s, tmp);
6935 } else if (op1 == 3) {
6936 /* clz */
6937 ARCH(5);
6938 rd = (insn >> 12) & 0xf;
6939 tmp = load_reg(s, rm);
6940 gen_helper_clz(tmp, tmp);
6941 store_reg(s, rd, tmp);
6942 } else {
6943 goto illegal_op;
6945 break;
6946 case 0x2:
6947 if (op1 == 1) {
6948 ARCH(5J); /* bxj */
6949 /* Trivial implementation equivalent to bx. */
6950 tmp = load_reg(s, rm);
6951 gen_bx(s, tmp);
6952 } else {
6953 goto illegal_op;
6955 break;
6956 case 0x3:
6957 if (op1 != 1)
6958 goto illegal_op;
6960 ARCH(5);
6961 /* branch link/exchange thumb (blx) */
6962 tmp = load_reg(s, rm);
6963 tmp2 = tcg_temp_new_i32();
6964 tcg_gen_movi_i32(tmp2, s->pc);
6965 store_reg(s, 14, tmp2);
6966 gen_bx(s, tmp);
6967 break;
6968 case 0x5: /* saturating add/subtract */
6969 ARCH(5TE);
6970 rd = (insn >> 12) & 0xf;
6971 rn = (insn >> 16) & 0xf;
6972 tmp = load_reg(s, rm);
6973 tmp2 = load_reg(s, rn);
6974 if (op1 & 2)
6975 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
6976 if (op1 & 1)
6977 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
6978 else
6979 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
6980 tcg_temp_free_i32(tmp2);
6981 store_reg(s, rd, tmp);
6982 break;
6983 case 7:
6984 /* SMC instruction (op1 == 3)
6985 and undefined instructions (op1 == 0 || op1 == 2)
6986 will trap */
6987 if (op1 != 1) {
6988 goto illegal_op;
6990 /* bkpt */
6991 ARCH(5);
6992 gen_exception_insn(s, 4, EXCP_BKPT);
6993 break;
6994 case 0x8: /* signed multiply */
6995 case 0xa:
6996 case 0xc:
6997 case 0xe:
6998 ARCH(5TE);
6999 rs = (insn >> 8) & 0xf;
7000 rn = (insn >> 12) & 0xf;
7001 rd = (insn >> 16) & 0xf;
7002 if (op1 == 1) {
7003 /* (32 * 16) >> 16 */
7004 tmp = load_reg(s, rm);
7005 tmp2 = load_reg(s, rs);
7006 if (sh & 4)
7007 tcg_gen_sari_i32(tmp2, tmp2, 16);
7008 else
7009 gen_sxth(tmp2);
7010 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7011 tcg_gen_shri_i64(tmp64, tmp64, 16);
7012 tmp = tcg_temp_new_i32();
7013 tcg_gen_trunc_i64_i32(tmp, tmp64);
7014 tcg_temp_free_i64(tmp64);
7015 if ((sh & 2) == 0) {
7016 tmp2 = load_reg(s, rn);
7017 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7018 tcg_temp_free_i32(tmp2);
7020 store_reg(s, rd, tmp);
7021 } else {
7022 /* 16 * 16 */
7023 tmp = load_reg(s, rm);
7024 tmp2 = load_reg(s, rs);
7025 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7026 tcg_temp_free_i32(tmp2);
7027 if (op1 == 2) {
7028 tmp64 = tcg_temp_new_i64();
7029 tcg_gen_ext_i32_i64(tmp64, tmp);
7030 tcg_temp_free_i32(tmp);
7031 gen_addq(s, tmp64, rn, rd);
7032 gen_storeq_reg(s, rn, rd, tmp64);
7033 tcg_temp_free_i64(tmp64);
7034 } else {
7035 if (op1 == 0) {
7036 tmp2 = load_reg(s, rn);
7037 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7038 tcg_temp_free_i32(tmp2);
7040 store_reg(s, rd, tmp);
7043 break;
7044 default:
7045 goto illegal_op;
7047 } else if (((insn & 0x0e000000) == 0 &&
7048 (insn & 0x00000090) != 0x90) ||
7049 ((insn & 0x0e000000) == (1 << 25))) {
7050 int set_cc, logic_cc, shiftop;
7052 op1 = (insn >> 21) & 0xf;
7053 set_cc = (insn >> 20) & 1;
7054 logic_cc = table_logic_cc[op1] & set_cc;
7056 /* data processing instruction */
7057 if (insn & (1 << 25)) {
7058 /* immediate operand */
7059 val = insn & 0xff;
7060 shift = ((insn >> 8) & 0xf) * 2;
7061 if (shift) {
7062 val = (val >> shift) | (val << (32 - shift));
7064 tmp2 = tcg_temp_new_i32();
7065 tcg_gen_movi_i32(tmp2, val);
7066 if (logic_cc && shift) {
7067 gen_set_CF_bit31(tmp2);
7069 } else {
7070 /* register */
7071 rm = (insn) & 0xf;
7072 tmp2 = load_reg(s, rm);
7073 shiftop = (insn >> 5) & 3;
7074 if (!(insn & (1 << 4))) {
7075 shift = (insn >> 7) & 0x1f;
7076 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7077 } else {
7078 rs = (insn >> 8) & 0xf;
7079 tmp = load_reg(s, rs);
7080 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7083 if (op1 != 0x0f && op1 != 0x0d) {
7084 rn = (insn >> 16) & 0xf;
7085 tmp = load_reg(s, rn);
7086 } else {
7087 TCGV_UNUSED(tmp);
7089 rd = (insn >> 12) & 0xf;
7090 switch(op1) {
7091 case 0x00:
7092 tcg_gen_and_i32(tmp, tmp, tmp2);
7093 if (logic_cc) {
7094 gen_logic_CC(tmp);
7096 store_reg_bx(env, s, rd, tmp);
7097 break;
7098 case 0x01:
7099 tcg_gen_xor_i32(tmp, tmp, tmp2);
7100 if (logic_cc) {
7101 gen_logic_CC(tmp);
7103 store_reg_bx(env, s, rd, tmp);
7104 break;
7105 case 0x02:
7106 if (set_cc && rd == 15) {
7107 /* SUBS r15, ... is used for exception return. */
7108 if (IS_USER(s)) {
7109 goto illegal_op;
7111 gen_sub_CC(tmp, tmp, tmp2);
7112 gen_exception_return(s, tmp);
7113 } else {
7114 if (set_cc) {
7115 gen_sub_CC(tmp, tmp, tmp2);
7116 } else {
7117 tcg_gen_sub_i32(tmp, tmp, tmp2);
7119 store_reg_bx(env, s, rd, tmp);
7121 break;
7122 case 0x03:
7123 if (set_cc) {
7124 gen_sub_CC(tmp, tmp2, tmp);
7125 } else {
7126 tcg_gen_sub_i32(tmp, tmp2, tmp);
7128 store_reg_bx(env, s, rd, tmp);
7129 break;
7130 case 0x04:
7131 if (set_cc) {
7132 gen_add_CC(tmp, tmp, tmp2);
7133 } else {
7134 tcg_gen_add_i32(tmp, tmp, tmp2);
7136 store_reg_bx(env, s, rd, tmp);
7137 break;
7138 case 0x05:
7139 if (set_cc) {
7140 gen_adc_CC(tmp, tmp, tmp2);
7141 } else {
7142 gen_add_carry(tmp, tmp, tmp2);
7144 store_reg_bx(env, s, rd, tmp);
7145 break;
7146 case 0x06:
7147 if (set_cc) {
7148 gen_sbc_CC(tmp, tmp, tmp2);
7149 } else {
7150 gen_sub_carry(tmp, tmp, tmp2);
7152 store_reg_bx(env, s, rd, tmp);
7153 break;
7154 case 0x07:
7155 if (set_cc) {
7156 gen_sbc_CC(tmp, tmp2, tmp);
7157 } else {
7158 gen_sub_carry(tmp, tmp2, tmp);
7160 store_reg_bx(env, s, rd, tmp);
7161 break;
7162 case 0x08:
7163 if (set_cc) {
7164 tcg_gen_and_i32(tmp, tmp, tmp2);
7165 gen_logic_CC(tmp);
7167 tcg_temp_free_i32(tmp);
7168 break;
7169 case 0x09:
7170 if (set_cc) {
7171 tcg_gen_xor_i32(tmp, tmp, tmp2);
7172 gen_logic_CC(tmp);
7174 tcg_temp_free_i32(tmp);
7175 break;
7176 case 0x0a:
7177 if (set_cc) {
7178 gen_sub_CC(tmp, tmp, tmp2);
7180 tcg_temp_free_i32(tmp);
7181 break;
7182 case 0x0b:
7183 if (set_cc) {
7184 gen_add_CC(tmp, tmp, tmp2);
7186 tcg_temp_free_i32(tmp);
7187 break;
7188 case 0x0c:
7189 tcg_gen_or_i32(tmp, tmp, tmp2);
7190 if (logic_cc) {
7191 gen_logic_CC(tmp);
7193 store_reg_bx(env, s, rd, tmp);
7194 break;
7195 case 0x0d:
7196 if (logic_cc && rd == 15) {
7197 /* MOVS r15, ... is used for exception return. */
7198 if (IS_USER(s)) {
7199 goto illegal_op;
7201 gen_exception_return(s, tmp2);
7202 } else {
7203 if (logic_cc) {
7204 gen_logic_CC(tmp2);
7206 store_reg_bx(env, s, rd, tmp2);
7208 break;
7209 case 0x0e:
7210 tcg_gen_andc_i32(tmp, tmp, tmp2);
7211 if (logic_cc) {
7212 gen_logic_CC(tmp);
7214 store_reg_bx(env, s, rd, tmp);
7215 break;
7216 default:
7217 case 0x0f:
7218 tcg_gen_not_i32(tmp2, tmp2);
7219 if (logic_cc) {
7220 gen_logic_CC(tmp2);
7222 store_reg_bx(env, s, rd, tmp2);
7223 break;
7225 if (op1 != 0x0f && op1 != 0x0d) {
7226 tcg_temp_free_i32(tmp2);
7228 } else {
7229 /* other instructions */
7230 op1 = (insn >> 24) & 0xf;
7231 switch(op1) {
7232 case 0x0:
7233 case 0x1:
7234 /* multiplies, extra load/stores */
7235 sh = (insn >> 5) & 3;
7236 if (sh == 0) {
7237 if (op1 == 0x0) {
7238 rd = (insn >> 16) & 0xf;
7239 rn = (insn >> 12) & 0xf;
7240 rs = (insn >> 8) & 0xf;
7241 rm = (insn) & 0xf;
7242 op1 = (insn >> 20) & 0xf;
7243 switch (op1) {
7244 case 0: case 1: case 2: case 3: case 6:
7245 /* 32 bit mul */
7246 tmp = load_reg(s, rs);
7247 tmp2 = load_reg(s, rm);
7248 tcg_gen_mul_i32(tmp, tmp, tmp2);
7249 tcg_temp_free_i32(tmp2);
7250 if (insn & (1 << 22)) {
7251 /* Subtract (mls) */
7252 ARCH(6T2);
7253 tmp2 = load_reg(s, rn);
7254 tcg_gen_sub_i32(tmp, tmp2, tmp);
7255 tcg_temp_free_i32(tmp2);
7256 } else if (insn & (1 << 21)) {
7257 /* Add */
7258 tmp2 = load_reg(s, rn);
7259 tcg_gen_add_i32(tmp, tmp, tmp2);
7260 tcg_temp_free_i32(tmp2);
7262 if (insn & (1 << 20))
7263 gen_logic_CC(tmp);
7264 store_reg(s, rd, tmp);
7265 break;
7266 case 4:
7267 /* 64 bit mul double accumulate (UMAAL) */
7268 ARCH(6);
7269 tmp = load_reg(s, rs);
7270 tmp2 = load_reg(s, rm);
7271 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7272 gen_addq_lo(s, tmp64, rn);
7273 gen_addq_lo(s, tmp64, rd);
7274 gen_storeq_reg(s, rn, rd, tmp64);
7275 tcg_temp_free_i64(tmp64);
7276 break;
7277 case 8: case 9: case 10: case 11:
7278 case 12: case 13: case 14: case 15:
7279 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7280 tmp = load_reg(s, rs);
7281 tmp2 = load_reg(s, rm);
7282 if (insn & (1 << 22)) {
7283 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
7284 } else {
7285 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
7287 if (insn & (1 << 21)) { /* mult accumulate */
7288 TCGv al = load_reg(s, rn);
7289 TCGv ah = load_reg(s, rd);
7290 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7291 tcg_temp_free(al);
7292 tcg_temp_free(ah);
7294 if (insn & (1 << 20)) {
7295 gen_logicq_cc(tmp, tmp2);
7297 store_reg(s, rn, tmp);
7298 store_reg(s, rd, tmp2);
7299 break;
7300 default:
7301 goto illegal_op;
7303 } else {
7304 rn = (insn >> 16) & 0xf;
7305 rd = (insn >> 12) & 0xf;
7306 if (insn & (1 << 23)) {
7307 /* load/store exclusive */
7308 op1 = (insn >> 21) & 0x3;
7309 if (op1)
7310 ARCH(6K);
7311 else
7312 ARCH(6);
7313 addr = tcg_temp_local_new_i32();
7314 load_reg_var(s, addr, rn);
7315 if (insn & (1 << 20)) {
7316 switch (op1) {
7317 case 0: /* ldrex */
7318 gen_load_exclusive(s, rd, 15, addr, 2);
7319 break;
7320 case 1: /* ldrexd */
7321 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7322 break;
7323 case 2: /* ldrexb */
7324 gen_load_exclusive(s, rd, 15, addr, 0);
7325 break;
7326 case 3: /* ldrexh */
7327 gen_load_exclusive(s, rd, 15, addr, 1);
7328 break;
7329 default:
7330 abort();
7332 } else {
7333 rm = insn & 0xf;
7334 switch (op1) {
7335 case 0: /* strex */
7336 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7337 break;
7338 case 1: /* strexd */
7339 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7340 break;
7341 case 2: /* strexb */
7342 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7343 break;
7344 case 3: /* strexh */
7345 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7346 break;
7347 default:
7348 abort();
7351 tcg_temp_free(addr);
7352 } else {
7353 /* SWP instruction */
7354 rm = (insn) & 0xf;
7356 /* ??? This is not really atomic. However we know
7357 we never have multiple CPUs running in parallel,
7358 so it is good enough. */
7359 addr = load_reg(s, rn);
7360 tmp = load_reg(s, rm);
7361 if (insn & (1 << 22)) {
7362 tmp2 = gen_ld8u(addr, IS_USER(s));
7363 gen_st8(tmp, addr, IS_USER(s));
7364 } else {
7365 tmp2 = gen_ld32(addr, IS_USER(s));
7366 gen_st32(tmp, addr, IS_USER(s));
7368 tcg_temp_free_i32(addr);
7369 store_reg(s, rd, tmp2);
7372 } else {
7373 int address_offset;
7374 int load;
7375 /* Misc load/store */
7376 rn = (insn >> 16) & 0xf;
7377 rd = (insn >> 12) & 0xf;
7378 addr = load_reg(s, rn);
7379 if (insn & (1 << 24))
7380 gen_add_datah_offset(s, insn, 0, addr);
7381 address_offset = 0;
7382 if (insn & (1 << 20)) {
7383 /* load */
7384 switch(sh) {
7385 case 1:
7386 tmp = gen_ld16u(addr, IS_USER(s));
7387 break;
7388 case 2:
7389 tmp = gen_ld8s(addr, IS_USER(s));
7390 break;
7391 default:
7392 case 3:
7393 tmp = gen_ld16s(addr, IS_USER(s));
7394 break;
7396 load = 1;
7397 } else if (sh & 2) {
7398 ARCH(5TE);
7399 /* doubleword */
7400 if (sh & 1) {
7401 /* store */
7402 tmp = load_reg(s, rd);
7403 gen_st32(tmp, addr, IS_USER(s));
7404 tcg_gen_addi_i32(addr, addr, 4);
7405 tmp = load_reg(s, rd + 1);
7406 gen_st32(tmp, addr, IS_USER(s));
7407 load = 0;
7408 } else {
7409 /* load */
7410 tmp = gen_ld32(addr, IS_USER(s));
7411 store_reg(s, rd, tmp);
7412 tcg_gen_addi_i32(addr, addr, 4);
7413 tmp = gen_ld32(addr, IS_USER(s));
7414 rd++;
7415 load = 1;
7417 address_offset = -4;
7418 } else {
7419 /* store */
7420 tmp = load_reg(s, rd);
7421 gen_st16(tmp, addr, IS_USER(s));
7422 load = 0;
7424 /* Perform base writeback before the loaded value to
7425 ensure correct behavior with overlapping index registers.
7426                         ldrd with base writeback is undefined if the
7427 destination and index registers overlap. */
7428 if (!(insn & (1 << 24))) {
7429 gen_add_datah_offset(s, insn, address_offset, addr);
7430 store_reg(s, rn, addr);
7431 } else if (insn & (1 << 21)) {
7432 if (address_offset)
7433 tcg_gen_addi_i32(addr, addr, address_offset);
7434 store_reg(s, rn, addr);
7435 } else {
7436 tcg_temp_free_i32(addr);
7438 if (load) {
7439 /* Complete the load. */
7440 store_reg(s, rd, tmp);
7443 break;
7444 case 0x4:
7445 case 0x5:
7446 goto do_ldst;
7447 case 0x6:
7448 case 0x7:
7449 if (insn & (1 << 4)) {
7450 ARCH(6);
7451 /* Armv6 Media instructions. */
7452 rm = insn & 0xf;
7453 rn = (insn >> 16) & 0xf;
7454 rd = (insn >> 12) & 0xf;
7455 rs = (insn >> 8) & 0xf;
7456 switch ((insn >> 23) & 3) {
7457 case 0: /* Parallel add/subtract. */
7458 op1 = (insn >> 20) & 7;
7459 tmp = load_reg(s, rn);
7460 tmp2 = load_reg(s, rm);
7461 sh = (insn >> 5) & 7;
7462 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7463 goto illegal_op;
7464 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7465 tcg_temp_free_i32(tmp2);
7466 store_reg(s, rd, tmp);
7467 break;
7468 case 1:
7469 if ((insn & 0x00700020) == 0) {
7470 /* Halfword pack. */
7471 tmp = load_reg(s, rn);
7472 tmp2 = load_reg(s, rm);
7473 shift = (insn >> 7) & 0x1f;
7474 if (insn & (1 << 6)) {
7475 /* pkhtb */
7476 if (shift == 0)
7477 shift = 31;
7478 tcg_gen_sari_i32(tmp2, tmp2, shift);
7479 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7480 tcg_gen_ext16u_i32(tmp2, tmp2);
7481 } else {
7482 /* pkhbt */
7483 if (shift)
7484 tcg_gen_shli_i32(tmp2, tmp2, shift);
7485 tcg_gen_ext16u_i32(tmp, tmp);
7486 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7488 tcg_gen_or_i32(tmp, tmp, tmp2);
7489 tcg_temp_free_i32(tmp2);
7490 store_reg(s, rd, tmp);
7491 } else if ((insn & 0x00200020) == 0x00200000) {
7492 /* [us]sat */
7493 tmp = load_reg(s, rm);
7494 shift = (insn >> 7) & 0x1f;
7495 if (insn & (1 << 6)) {
7496 if (shift == 0)
7497 shift = 31;
7498 tcg_gen_sari_i32(tmp, tmp, shift);
7499 } else {
7500 tcg_gen_shli_i32(tmp, tmp, shift);
7502 sh = (insn >> 16) & 0x1f;
7503 tmp2 = tcg_const_i32(sh);
7504 if (insn & (1 << 22))
7505 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7506 else
7507 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7508 tcg_temp_free_i32(tmp2);
7509 store_reg(s, rd, tmp);
7510 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7511 /* [us]sat16 */
7512 tmp = load_reg(s, rm);
7513 sh = (insn >> 16) & 0x1f;
7514 tmp2 = tcg_const_i32(sh);
7515 if (insn & (1 << 22))
7516 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7517 else
7518 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7519 tcg_temp_free_i32(tmp2);
7520 store_reg(s, rd, tmp);
7521 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7522 /* Select bytes. */
7523 tmp = load_reg(s, rn);
7524 tmp2 = load_reg(s, rm);
7525 tmp3 = tcg_temp_new_i32();
7526 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7527 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7528 tcg_temp_free_i32(tmp3);
7529 tcg_temp_free_i32(tmp2);
7530 store_reg(s, rd, tmp);
7531 } else if ((insn & 0x000003e0) == 0x00000060) {
7532 tmp = load_reg(s, rm);
7533 shift = (insn >> 10) & 3;
7534 /* ??? In many cases it's not necessary to do a
7535 rotate, a shift is sufficient. */
7536 if (shift != 0)
7537 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7538 op1 = (insn >> 20) & 7;
7539 switch (op1) {
7540 case 0: gen_sxtb16(tmp); break;
7541 case 2: gen_sxtb(tmp); break;
7542 case 3: gen_sxth(tmp); break;
7543 case 4: gen_uxtb16(tmp); break;
7544 case 6: gen_uxtb(tmp); break;
7545 case 7: gen_uxth(tmp); break;
7546 default: goto illegal_op;
7548 if (rn != 15) {
7549 tmp2 = load_reg(s, rn);
7550 if ((op1 & 3) == 0) {
7551 gen_add16(tmp, tmp2);
7552 } else {
7553 tcg_gen_add_i32(tmp, tmp, tmp2);
7554 tcg_temp_free_i32(tmp2);
7557 store_reg(s, rd, tmp);
7558 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7559 /* rev */
7560 tmp = load_reg(s, rm);
7561 if (insn & (1 << 22)) {
7562 if (insn & (1 << 7)) {
7563 gen_revsh(tmp);
7564 } else {
7565 ARCH(6T2);
7566 gen_helper_rbit(tmp, tmp);
7568 } else {
7569 if (insn & (1 << 7))
7570 gen_rev16(tmp);
7571 else
7572 tcg_gen_bswap32_i32(tmp, tmp);
7574 store_reg(s, rd, tmp);
7575 } else {
7576 goto illegal_op;
7578 break;
7579 case 2: /* Multiplies (Type 3). */
7580 switch ((insn >> 20) & 0x7) {
7581 case 5:
7582 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7583 /* op2 not 00x or 11x : UNDEF */
7584 goto illegal_op;
7586 /* Signed multiply most significant [accumulate].
7587 (SMMUL, SMMLA, SMMLS) */
7588 tmp = load_reg(s, rm);
7589 tmp2 = load_reg(s, rs);
7590 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7592 if (rd != 15) {
7593 tmp = load_reg(s, rd);
7594 if (insn & (1 << 6)) {
7595 tmp64 = gen_subq_msw(tmp64, tmp);
7596 } else {
7597 tmp64 = gen_addq_msw(tmp64, tmp);
7600 if (insn & (1 << 5)) {
7601 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7603 tcg_gen_shri_i64(tmp64, tmp64, 32);
7604 tmp = tcg_temp_new_i32();
7605 tcg_gen_trunc_i64_i32(tmp, tmp64);
7606 tcg_temp_free_i64(tmp64);
7607 store_reg(s, rn, tmp);
7608 break;
7609 case 0:
7610 case 4:
7611 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7612 if (insn & (1 << 7)) {
7613 goto illegal_op;
7615 tmp = load_reg(s, rm);
7616 tmp2 = load_reg(s, rs);
7617 if (insn & (1 << 5))
7618 gen_swap_half(tmp2);
7619 gen_smul_dual(tmp, tmp2);
7620 if (insn & (1 << 6)) {
7621 /* This subtraction cannot overflow. */
7622 tcg_gen_sub_i32(tmp, tmp, tmp2);
7623 } else {
7624 /* This addition cannot overflow 32 bits;
7625 * however it may overflow considered as a signed
7626 * operation, in which case we must set the Q flag.
7628 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7630 tcg_temp_free_i32(tmp2);
7631 if (insn & (1 << 22)) {
7632 /* smlald, smlsld */
7633 tmp64 = tcg_temp_new_i64();
7634 tcg_gen_ext_i32_i64(tmp64, tmp);
7635 tcg_temp_free_i32(tmp);
7636 gen_addq(s, tmp64, rd, rn);
7637 gen_storeq_reg(s, rd, rn, tmp64);
7638 tcg_temp_free_i64(tmp64);
7639 } else {
7640 /* smuad, smusd, smlad, smlsd */
7641 if (rd != 15)
7643 tmp2 = load_reg(s, rd);
7644 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7645 tcg_temp_free_i32(tmp2);
7647 store_reg(s, rn, tmp);
7649 break;
7650 case 1:
7651 case 3:
7652 /* SDIV, UDIV */
7653 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7654 goto illegal_op;
7656 if (((insn >> 5) & 7) || (rd != 15)) {
7657 goto illegal_op;
7659 tmp = load_reg(s, rm);
7660 tmp2 = load_reg(s, rs);
7661 if (insn & (1 << 21)) {
7662 gen_helper_udiv(tmp, tmp, tmp2);
7663 } else {
7664 gen_helper_sdiv(tmp, tmp, tmp2);
7666 tcg_temp_free_i32(tmp2);
7667 store_reg(s, rn, tmp);
7668 break;
7669 default:
7670 goto illegal_op;
7672 break;
7673 case 3:
7674 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7675 switch (op1) {
7676 case 0: /* Unsigned sum of absolute differences. */
7677 ARCH(6);
7678 tmp = load_reg(s, rm);
7679 tmp2 = load_reg(s, rs);
7680 gen_helper_usad8(tmp, tmp, tmp2);
7681 tcg_temp_free_i32(tmp2);
7682 if (rd != 15) {
7683 tmp2 = load_reg(s, rd);
7684 tcg_gen_add_i32(tmp, tmp, tmp2);
7685 tcg_temp_free_i32(tmp2);
7687 store_reg(s, rn, tmp);
7688 break;
7689 case 0x20: case 0x24: case 0x28: case 0x2c:
7690 /* Bitfield insert/clear. */
7691 ARCH(6T2);
7692 shift = (insn >> 7) & 0x1f;
7693 i = (insn >> 16) & 0x1f;
7694 i = i + 1 - shift;
7695 if (rm == 15) {
7696 tmp = tcg_temp_new_i32();
7697 tcg_gen_movi_i32(tmp, 0);
7698 } else {
7699 tmp = load_reg(s, rm);
7701 if (i != 32) {
7702 tmp2 = load_reg(s, rd);
7703 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7704 tcg_temp_free_i32(tmp2);
7706 store_reg(s, rd, tmp);
7707 break;
7708 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7709 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7710 ARCH(6T2);
7711 tmp = load_reg(s, rm);
7712 shift = (insn >> 7) & 0x1f;
7713 i = ((insn >> 16) & 0x1f) + 1;
7714 if (shift + i > 32)
7715 goto illegal_op;
7716 if (i < 32) {
7717 if (op1 & 0x20) {
7718 gen_ubfx(tmp, shift, (1u << i) - 1);
7719 } else {
7720 gen_sbfx(tmp, shift, i);
7723 store_reg(s, rd, tmp);
7724 break;
7725 default:
7726 goto illegal_op;
7728 break;
7730 break;
7732 do_ldst:
7733 /* Check for undefined extension instructions
7734 * per the ARM Bible IE:
7735 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7737 sh = (0xf << 20) | (0xf << 4);
7738 if (op1 == 0x7 && ((insn & sh) == sh))
7740 goto illegal_op;
7742 /* load/store byte/word */
7743 rn = (insn >> 16) & 0xf;
7744 rd = (insn >> 12) & 0xf;
7745 tmp2 = load_reg(s, rn);
7746 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7747 if (insn & (1 << 24))
7748 gen_add_data_offset(s, insn, tmp2);
7749 if (insn & (1 << 20)) {
7750 /* load */
7751 if (insn & (1 << 22)) {
7752 tmp = gen_ld8u(tmp2, i);
7753 } else {
7754 tmp = gen_ld32(tmp2, i);
7756 } else {
7757 /* store */
7758 tmp = load_reg(s, rd);
7759 if (insn & (1 << 22))
7760 gen_st8(tmp, tmp2, i);
7761 else
7762 gen_st32(tmp, tmp2, i);
7764 if (!(insn & (1 << 24))) {
7765 gen_add_data_offset(s, insn, tmp2);
7766 store_reg(s, rn, tmp2);
7767 } else if (insn & (1 << 21)) {
7768 store_reg(s, rn, tmp2);
7769 } else {
7770 tcg_temp_free_i32(tmp2);
7772 if (insn & (1 << 20)) {
7773 /* Complete the load. */
7774 store_reg_from_load(env, s, rd, tmp);
7776 break;
7777 case 0x08:
7778 case 0x09:
7780 int j, n, user, loaded_base;
7781 TCGv loaded_var;
7782 /* load/store multiple words */
7783 /* XXX: store correct base if write back */
7784 user = 0;
7785 if (insn & (1 << 22)) {
7786 if (IS_USER(s))
7787 goto illegal_op; /* only usable in supervisor mode */
7789 if ((insn & (1 << 15)) == 0)
7790 user = 1;
7792 rn = (insn >> 16) & 0xf;
7793 addr = load_reg(s, rn);
7795 /* compute total size */
7796 loaded_base = 0;
7797 TCGV_UNUSED(loaded_var);
7798 n = 0;
7799 for(i=0;i<16;i++) {
7800 if (insn & (1 << i))
7801 n++;
7803 /* XXX: test invalid n == 0 case ? */
7804 if (insn & (1 << 23)) {
7805 if (insn & (1 << 24)) {
7806 /* pre increment */
7807 tcg_gen_addi_i32(addr, addr, 4);
7808 } else {
7809 /* post increment */
7811 } else {
7812 if (insn & (1 << 24)) {
7813 /* pre decrement */
7814 tcg_gen_addi_i32(addr, addr, -(n * 4));
7815 } else {
7816 /* post decrement */
7817 if (n != 1)
7818 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7821 j = 0;
7822 for(i=0;i<16;i++) {
7823 if (insn & (1 << i)) {
7824 if (insn & (1 << 20)) {
7825 /* load */
7826 tmp = gen_ld32(addr, IS_USER(s));
7827 if (user) {
7828 tmp2 = tcg_const_i32(i);
7829 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7830 tcg_temp_free_i32(tmp2);
7831 tcg_temp_free_i32(tmp);
7832 } else if (i == rn) {
7833 loaded_var = tmp;
7834 loaded_base = 1;
7835 } else {
7836 store_reg_from_load(env, s, i, tmp);
7838 } else {
7839 /* store */
7840 if (i == 15) {
7841 /* special case: r15 = PC + 8 */
7842 val = (long)s->pc + 4;
7843 tmp = tcg_temp_new_i32();
7844 tcg_gen_movi_i32(tmp, val);
7845 } else if (user) {
7846 tmp = tcg_temp_new_i32();
7847 tmp2 = tcg_const_i32(i);
7848 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7849 tcg_temp_free_i32(tmp2);
7850 } else {
7851 tmp = load_reg(s, i);
7853 gen_st32(tmp, addr, IS_USER(s));
7855 j++;
7856 /* no need to add after the last transfer */
7857 if (j != n)
7858 tcg_gen_addi_i32(addr, addr, 4);
7861 if (insn & (1 << 21)) {
7862 /* write back */
7863 if (insn & (1 << 23)) {
7864 if (insn & (1 << 24)) {
7865 /* pre increment */
7866 } else {
7867 /* post increment */
7868 tcg_gen_addi_i32(addr, addr, 4);
7870 } else {
7871 if (insn & (1 << 24)) {
7872 /* pre decrement */
7873 if (n != 1)
7874 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7875 } else {
7876 /* post decrement */
7877 tcg_gen_addi_i32(addr, addr, -(n * 4));
7880 store_reg(s, rn, addr);
7881 } else {
7882 tcg_temp_free_i32(addr);
7884 if (loaded_base) {
7885 store_reg(s, rn, loaded_var);
7887 if ((insn & (1 << 22)) && !user) {
7888 /* Restore CPSR from SPSR. */
7889 tmp = load_cpu_field(spsr);
7890 gen_set_cpsr(tmp, 0xffffffff);
7891 tcg_temp_free_i32(tmp);
7892 s->is_jmp = DISAS_UPDATE;
7895 break;
7896 case 0xa:
7897 case 0xb:
7899 int32_t offset;
7901 /* branch (and link) */
7902 val = (int32_t)s->pc;
7903 if (insn & (1 << 24)) {
7904 tmp = tcg_temp_new_i32();
7905 tcg_gen_movi_i32(tmp, val);
7906 store_reg(s, 14, tmp);
7908 offset = (((int32_t)insn << 8) >> 8);
7909 val += (offset << 2) + 4;
7910 gen_jmp(s, val);
7912 break;
7913 case 0xc:
7914 case 0xd:
7915 case 0xe:
7916 /* Coprocessor. */
7917 if (disas_coproc_insn(env, s, insn))
7918 goto illegal_op;
7919 break;
7920 case 0xf:
7921 /* swi */
7922 gen_set_pc_im(s->pc);
7923 s->is_jmp = DISAS_SWI;
7924 break;
7925 default:
7926 illegal_op:
7927 gen_exception_insn(s, 4, EXCP_UDEF);
7928 break;
/* Return true (nonzero) if OP is one of the Thumb-2 logical
   data-processing opcodes.  Opcodes 0..7 (and/bic/orr/orn/eor/...) are
   logical; 8 and above (add/adc/sbc/sub/rsb) are arithmetic. */
static int
thumb2_logic_op(int op)
{
    if (op >= 8) {
        return 0;
    }
    return 1;
}
7940 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7941 then set condition code flags based on the result of the operation.
7942 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7943 to the high bit of T1.
7944 Returns zero if the opcode is valid. */
7946 static int
7947 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7949 int logic_cc;
7951 logic_cc = 0;
7952 switch (op) {
7953 case 0: /* and */
7954 tcg_gen_and_i32(t0, t0, t1);
7955 logic_cc = conds;
7956 break;
7957 case 1: /* bic */
7958 tcg_gen_andc_i32(t0, t0, t1);
7959 logic_cc = conds;
7960 break;
7961 case 2: /* orr */
7962 tcg_gen_or_i32(t0, t0, t1);
7963 logic_cc = conds;
7964 break;
7965 case 3: /* orn */
7966 tcg_gen_orc_i32(t0, t0, t1);
7967 logic_cc = conds;
7968 break;
7969 case 4: /* eor */
7970 tcg_gen_xor_i32(t0, t0, t1);
7971 logic_cc = conds;
7972 break;
7973 case 8: /* add */
7974 if (conds)
7975 gen_add_CC(t0, t0, t1);
7976 else
7977 tcg_gen_add_i32(t0, t0, t1);
7978 break;
7979 case 10: /* adc */
7980 if (conds)
7981 gen_adc_CC(t0, t0, t1);
7982 else
7983 gen_adc(t0, t1);
7984 break;
7985 case 11: /* sbc */
7986 if (conds) {
7987 gen_sbc_CC(t0, t0, t1);
7988 } else {
7989 gen_sub_carry(t0, t0, t1);
7991 break;
7992 case 13: /* sub */
7993 if (conds)
7994 gen_sub_CC(t0, t0, t1);
7995 else
7996 tcg_gen_sub_i32(t0, t0, t1);
7997 break;
7998 case 14: /* rsb */
7999 if (conds)
8000 gen_sub_CC(t0, t1, t0);
8001 else
8002 tcg_gen_sub_i32(t0, t1, t0);
8003 break;
8004 default: /* 5, 6, 7, 9, 12, 15. */
8005 return 1;
8007 if (logic_cc) {
8008 gen_logic_CC(t0);
8009 if (shifter_out)
8010 gen_set_CF_bit31(t1);
8012 return 0;
8015 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8016 is not legal. */
8017 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8019 uint32_t insn, imm, shift, offset;
8020 uint32_t rd, rn, rm, rs;
8021 TCGv tmp;
8022 TCGv tmp2;
8023 TCGv tmp3;
8024 TCGv addr;
8025 TCGv_i64 tmp64;
8026 int op;
8027 int shiftop;
8028 int conds;
8029 int logic_cc;
8031 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8032 || arm_feature (env, ARM_FEATURE_M))) {
8033 /* Thumb-1 cores may need to treat bl and blx as a pair of
8034 16-bit instructions to get correct prefetch abort behavior. */
8035 insn = insn_hw1;
8036 if ((insn & (1 << 12)) == 0) {
8037 ARCH(5);
8038 /* Second half of blx. */
8039 offset = ((insn & 0x7ff) << 1);
8040 tmp = load_reg(s, 14);
8041 tcg_gen_addi_i32(tmp, tmp, offset);
8042 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8044 tmp2 = tcg_temp_new_i32();
8045 tcg_gen_movi_i32(tmp2, s->pc | 1);
8046 store_reg(s, 14, tmp2);
8047 gen_bx(s, tmp);
8048 return 0;
8050 if (insn & (1 << 11)) {
8051 /* Second half of bl. */
8052 offset = ((insn & 0x7ff) << 1) | 1;
8053 tmp = load_reg(s, 14);
8054 tcg_gen_addi_i32(tmp, tmp, offset);
8056 tmp2 = tcg_temp_new_i32();
8057 tcg_gen_movi_i32(tmp2, s->pc | 1);
8058 store_reg(s, 14, tmp2);
8059 gen_bx(s, tmp);
8060 return 0;
8062 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8063 /* Instruction spans a page boundary. Implement it as two
8064 16-bit instructions in case the second half causes an
8065 prefetch abort. */
8066 offset = ((int32_t)insn << 21) >> 9;
8067 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8068 return 0;
8070 /* Fall through to 32-bit decode. */
8073 insn = arm_lduw_code(env, s->pc, s->bswap_code);
8074 s->pc += 2;
8075 insn |= (uint32_t)insn_hw1 << 16;
8077 if ((insn & 0xf800e800) != 0xf000e800) {
8078 ARCH(6T2);
8081 rn = (insn >> 16) & 0xf;
8082 rs = (insn >> 12) & 0xf;
8083 rd = (insn >> 8) & 0xf;
8084 rm = insn & 0xf;
8085 switch ((insn >> 25) & 0xf) {
8086 case 0: case 1: case 2: case 3:
8087 /* 16-bit instructions. Should never happen. */
8088 abort();
8089 case 4:
8090 if (insn & (1 << 22)) {
8091 /* Other load/store, table branch. */
8092 if (insn & 0x01200000) {
8093 /* Load/store doubleword. */
8094 if (rn == 15) {
8095 addr = tcg_temp_new_i32();
8096 tcg_gen_movi_i32(addr, s->pc & ~3);
8097 } else {
8098 addr = load_reg(s, rn);
8100 offset = (insn & 0xff) * 4;
8101 if ((insn & (1 << 23)) == 0)
8102 offset = -offset;
8103 if (insn & (1 << 24)) {
8104 tcg_gen_addi_i32(addr, addr, offset);
8105 offset = 0;
8107 if (insn & (1 << 20)) {
8108 /* ldrd */
8109 tmp = gen_ld32(addr, IS_USER(s));
8110 store_reg(s, rs, tmp);
8111 tcg_gen_addi_i32(addr, addr, 4);
8112 tmp = gen_ld32(addr, IS_USER(s));
8113 store_reg(s, rd, tmp);
8114 } else {
8115 /* strd */
8116 tmp = load_reg(s, rs);
8117 gen_st32(tmp, addr, IS_USER(s));
8118 tcg_gen_addi_i32(addr, addr, 4);
8119 tmp = load_reg(s, rd);
8120 gen_st32(tmp, addr, IS_USER(s));
8122 if (insn & (1 << 21)) {
8123 /* Base writeback. */
8124 if (rn == 15)
8125 goto illegal_op;
8126 tcg_gen_addi_i32(addr, addr, offset - 4);
8127 store_reg(s, rn, addr);
8128 } else {
8129 tcg_temp_free_i32(addr);
8131 } else if ((insn & (1 << 23)) == 0) {
8132 /* Load/store exclusive word. */
8133 addr = tcg_temp_local_new();
8134 load_reg_var(s, addr, rn);
8135 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8136 if (insn & (1 << 20)) {
8137 gen_load_exclusive(s, rs, 15, addr, 2);
8138 } else {
8139 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8141 tcg_temp_free(addr);
8142 } else if ((insn & (1 << 6)) == 0) {
8143 /* Table Branch. */
8144 if (rn == 15) {
8145 addr = tcg_temp_new_i32();
8146 tcg_gen_movi_i32(addr, s->pc);
8147 } else {
8148 addr = load_reg(s, rn);
8150 tmp = load_reg(s, rm);
8151 tcg_gen_add_i32(addr, addr, tmp);
8152 if (insn & (1 << 4)) {
8153 /* tbh */
8154 tcg_gen_add_i32(addr, addr, tmp);
8155 tcg_temp_free_i32(tmp);
8156 tmp = gen_ld16u(addr, IS_USER(s));
8157 } else { /* tbb */
8158 tcg_temp_free_i32(tmp);
8159 tmp = gen_ld8u(addr, IS_USER(s));
8161 tcg_temp_free_i32(addr);
8162 tcg_gen_shli_i32(tmp, tmp, 1);
8163 tcg_gen_addi_i32(tmp, tmp, s->pc);
8164 store_reg(s, 15, tmp);
8165 } else {
8166 /* Load/store exclusive byte/halfword/doubleword. */
8167 ARCH(7);
8168 op = (insn >> 4) & 0x3;
8169 if (op == 2) {
8170 goto illegal_op;
8172 addr = tcg_temp_local_new();
8173 load_reg_var(s, addr, rn);
8174 if (insn & (1 << 20)) {
8175 gen_load_exclusive(s, rs, rd, addr, op);
8176 } else {
8177 gen_store_exclusive(s, rm, rs, rd, addr, op);
8179 tcg_temp_free(addr);
8181 } else {
8182 /* Load/store multiple, RFE, SRS. */
8183 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8184 /* RFE, SRS: not available in user mode or on M profile */
8185 if (IS_USER(s) || IS_M(env)) {
8186 goto illegal_op;
8188 if (insn & (1 << 20)) {
8189 /* rfe */
8190 addr = load_reg(s, rn);
8191 if ((insn & (1 << 24)) == 0)
8192 tcg_gen_addi_i32(addr, addr, -8);
8193 /* Load PC into tmp and CPSR into tmp2. */
8194 tmp = gen_ld32(addr, 0);
8195 tcg_gen_addi_i32(addr, addr, 4);
8196 tmp2 = gen_ld32(addr, 0);
8197 if (insn & (1 << 21)) {
8198 /* Base writeback. */
8199 if (insn & (1 << 24)) {
8200 tcg_gen_addi_i32(addr, addr, 4);
8201 } else {
8202 tcg_gen_addi_i32(addr, addr, -4);
8204 store_reg(s, rn, addr);
8205 } else {
8206 tcg_temp_free_i32(addr);
8208 gen_rfe(s, tmp, tmp2);
8209 } else {
8210 /* srs */
8211 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8212 insn & (1 << 21));
8214 } else {
8215 int i, loaded_base = 0;
8216 TCGv loaded_var;
8217 /* Load/store multiple. */
8218 addr = load_reg(s, rn);
8219 offset = 0;
8220 for (i = 0; i < 16; i++) {
8221 if (insn & (1 << i))
8222 offset += 4;
8224 if (insn & (1 << 24)) {
8225 tcg_gen_addi_i32(addr, addr, -offset);
8228 TCGV_UNUSED(loaded_var);
8229 for (i = 0; i < 16; i++) {
8230 if ((insn & (1 << i)) == 0)
8231 continue;
8232 if (insn & (1 << 20)) {
8233 /* Load. */
8234 tmp = gen_ld32(addr, IS_USER(s));
8235 if (i == 15) {
8236 gen_bx(s, tmp);
8237 } else if (i == rn) {
8238 loaded_var = tmp;
8239 loaded_base = 1;
8240 } else {
8241 store_reg(s, i, tmp);
8243 } else {
8244 /* Store. */
8245 tmp = load_reg(s, i);
8246 gen_st32(tmp, addr, IS_USER(s));
8248 tcg_gen_addi_i32(addr, addr, 4);
8250 if (loaded_base) {
8251 store_reg(s, rn, loaded_var);
8253 if (insn & (1 << 21)) {
8254 /* Base register writeback. */
8255 if (insn & (1 << 24)) {
8256 tcg_gen_addi_i32(addr, addr, -offset);
8258 /* Fault if writeback register is in register list. */
8259 if (insn & (1 << rn))
8260 goto illegal_op;
8261 store_reg(s, rn, addr);
8262 } else {
8263 tcg_temp_free_i32(addr);
8267 break;
8268 case 5:
8270 op = (insn >> 21) & 0xf;
8271 if (op == 6) {
8272 /* Halfword pack. */
8273 tmp = load_reg(s, rn);
8274 tmp2 = load_reg(s, rm);
8275 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8276 if (insn & (1 << 5)) {
8277 /* pkhtb */
8278 if (shift == 0)
8279 shift = 31;
8280 tcg_gen_sari_i32(tmp2, tmp2, shift);
8281 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8282 tcg_gen_ext16u_i32(tmp2, tmp2);
8283 } else {
8284 /* pkhbt */
8285 if (shift)
8286 tcg_gen_shli_i32(tmp2, tmp2, shift);
8287 tcg_gen_ext16u_i32(tmp, tmp);
8288 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8290 tcg_gen_or_i32(tmp, tmp, tmp2);
8291 tcg_temp_free_i32(tmp2);
8292 store_reg(s, rd, tmp);
8293 } else {
8294 /* Data processing register constant shift. */
8295 if (rn == 15) {
8296 tmp = tcg_temp_new_i32();
8297 tcg_gen_movi_i32(tmp, 0);
8298 } else {
8299 tmp = load_reg(s, rn);
8301 tmp2 = load_reg(s, rm);
8303 shiftop = (insn >> 4) & 3;
8304 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8305 conds = (insn & (1 << 20)) != 0;
8306 logic_cc = (conds && thumb2_logic_op(op));
8307 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8308 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8309 goto illegal_op;
8310 tcg_temp_free_i32(tmp2);
8311 if (rd != 15) {
8312 store_reg(s, rd, tmp);
8313 } else {
8314 tcg_temp_free_i32(tmp);
8317 break;
8318 case 13: /* Misc data processing. */
8319 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8320 if (op < 4 && (insn & 0xf000) != 0xf000)
8321 goto illegal_op;
8322 switch (op) {
8323 case 0: /* Register controlled shift. */
8324 tmp = load_reg(s, rn);
8325 tmp2 = load_reg(s, rm);
8326 if ((insn & 0x70) != 0)
8327 goto illegal_op;
8328 op = (insn >> 21) & 3;
8329 logic_cc = (insn & (1 << 20)) != 0;
8330 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8331 if (logic_cc)
8332 gen_logic_CC(tmp);
8333 store_reg_bx(env, s, rd, tmp);
8334 break;
8335 case 1: /* Sign/zero extend. */
8336 tmp = load_reg(s, rm);
8337 shift = (insn >> 4) & 3;
8338 /* ??? In many cases it's not necessary to do a
8339 rotate, a shift is sufficient. */
8340 if (shift != 0)
8341 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8342 op = (insn >> 20) & 7;
8343 switch (op) {
8344 case 0: gen_sxth(tmp); break;
8345 case 1: gen_uxth(tmp); break;
8346 case 2: gen_sxtb16(tmp); break;
8347 case 3: gen_uxtb16(tmp); break;
8348 case 4: gen_sxtb(tmp); break;
8349 case 5: gen_uxtb(tmp); break;
8350 default: goto illegal_op;
8352 if (rn != 15) {
8353 tmp2 = load_reg(s, rn);
8354 if ((op >> 1) == 1) {
8355 gen_add16(tmp, tmp2);
8356 } else {
8357 tcg_gen_add_i32(tmp, tmp, tmp2);
8358 tcg_temp_free_i32(tmp2);
8361 store_reg(s, rd, tmp);
8362 break;
8363 case 2: /* SIMD add/subtract. */
8364 op = (insn >> 20) & 7;
8365 shift = (insn >> 4) & 7;
8366 if ((op & 3) == 3 || (shift & 3) == 3)
8367 goto illegal_op;
8368 tmp = load_reg(s, rn);
8369 tmp2 = load_reg(s, rm);
8370 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8371 tcg_temp_free_i32(tmp2);
8372 store_reg(s, rd, tmp);
8373 break;
8374 case 3: /* Other data processing. */
8375 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8376 if (op < 4) {
8377 /* Saturating add/subtract. */
8378 tmp = load_reg(s, rn);
8379 tmp2 = load_reg(s, rm);
8380 if (op & 1)
8381 gen_helper_double_saturate(tmp, cpu_env, tmp);
8382 if (op & 2)
8383 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8384 else
8385 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8386 tcg_temp_free_i32(tmp2);
8387 } else {
8388 tmp = load_reg(s, rn);
8389 switch (op) {
8390 case 0x0a: /* rbit */
8391 gen_helper_rbit(tmp, tmp);
8392 break;
8393 case 0x08: /* rev */
8394 tcg_gen_bswap32_i32(tmp, tmp);
8395 break;
8396 case 0x09: /* rev16 */
8397 gen_rev16(tmp);
8398 break;
8399 case 0x0b: /* revsh */
8400 gen_revsh(tmp);
8401 break;
8402 case 0x10: /* sel */
8403 tmp2 = load_reg(s, rm);
8404 tmp3 = tcg_temp_new_i32();
8405 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8406 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8407 tcg_temp_free_i32(tmp3);
8408 tcg_temp_free_i32(tmp2);
8409 break;
8410 case 0x18: /* clz */
8411 gen_helper_clz(tmp, tmp);
8412 break;
8413 default:
8414 goto illegal_op;
8417 store_reg(s, rd, tmp);
8418 break;
8419 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8420 op = (insn >> 4) & 0xf;
8421 tmp = load_reg(s, rn);
8422 tmp2 = load_reg(s, rm);
8423 switch ((insn >> 20) & 7) {
8424 case 0: /* 32 x 32 -> 32 */
8425 tcg_gen_mul_i32(tmp, tmp, tmp2);
8426 tcg_temp_free_i32(tmp2);
8427 if (rs != 15) {
8428 tmp2 = load_reg(s, rs);
8429 if (op)
8430 tcg_gen_sub_i32(tmp, tmp2, tmp);
8431 else
8432 tcg_gen_add_i32(tmp, tmp, tmp2);
8433 tcg_temp_free_i32(tmp2);
8435 break;
8436 case 1: /* 16 x 16 -> 32 */
8437 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8438 tcg_temp_free_i32(tmp2);
8439 if (rs != 15) {
8440 tmp2 = load_reg(s, rs);
8441 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8442 tcg_temp_free_i32(tmp2);
8444 break;
8445 case 2: /* Dual multiply add. */
8446 case 4: /* Dual multiply subtract. */
8447 if (op)
8448 gen_swap_half(tmp2);
8449 gen_smul_dual(tmp, tmp2);
8450 if (insn & (1 << 22)) {
8451 /* This subtraction cannot overflow. */
8452 tcg_gen_sub_i32(tmp, tmp, tmp2);
8453 } else {
8454 /* This addition cannot overflow 32 bits;
8455 * however it may overflow considered as a signed
8456 * operation, in which case we must set the Q flag.
8458 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8460 tcg_temp_free_i32(tmp2);
8461 if (rs != 15)
8463 tmp2 = load_reg(s, rs);
8464 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8465 tcg_temp_free_i32(tmp2);
8467 break;
8468 case 3: /* 32 * 16 -> 32msb */
8469 if (op)
8470 tcg_gen_sari_i32(tmp2, tmp2, 16);
8471 else
8472 gen_sxth(tmp2);
8473 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8474 tcg_gen_shri_i64(tmp64, tmp64, 16);
8475 tmp = tcg_temp_new_i32();
8476 tcg_gen_trunc_i64_i32(tmp, tmp64);
8477 tcg_temp_free_i64(tmp64);
8478 if (rs != 15)
8480 tmp2 = load_reg(s, rs);
8481 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8482 tcg_temp_free_i32(tmp2);
8484 break;
8485 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8486 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8487 if (rs != 15) {
8488 tmp = load_reg(s, rs);
8489 if (insn & (1 << 20)) {
8490 tmp64 = gen_addq_msw(tmp64, tmp);
8491 } else {
8492 tmp64 = gen_subq_msw(tmp64, tmp);
8495 if (insn & (1 << 4)) {
8496 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8498 tcg_gen_shri_i64(tmp64, tmp64, 32);
8499 tmp = tcg_temp_new_i32();
8500 tcg_gen_trunc_i64_i32(tmp, tmp64);
8501 tcg_temp_free_i64(tmp64);
8502 break;
8503 case 7: /* Unsigned sum of absolute differences. */
8504 gen_helper_usad8(tmp, tmp, tmp2);
8505 tcg_temp_free_i32(tmp2);
8506 if (rs != 15) {
8507 tmp2 = load_reg(s, rs);
8508 tcg_gen_add_i32(tmp, tmp, tmp2);
8509 tcg_temp_free_i32(tmp2);
8511 break;
8513 store_reg(s, rd, tmp);
8514 break;
8515 case 6: case 7: /* 64-bit multiply, Divide. */
8516 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8517 tmp = load_reg(s, rn);
8518 tmp2 = load_reg(s, rm);
8519 if ((op & 0x50) == 0x10) {
8520 /* sdiv, udiv */
8521 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8522 goto illegal_op;
8524 if (op & 0x20)
8525 gen_helper_udiv(tmp, tmp, tmp2);
8526 else
8527 gen_helper_sdiv(tmp, tmp, tmp2);
8528 tcg_temp_free_i32(tmp2);
8529 store_reg(s, rd, tmp);
8530 } else if ((op & 0xe) == 0xc) {
8531 /* Dual multiply accumulate long. */
8532 if (op & 1)
8533 gen_swap_half(tmp2);
8534 gen_smul_dual(tmp, tmp2);
8535 if (op & 0x10) {
8536 tcg_gen_sub_i32(tmp, tmp, tmp2);
8537 } else {
8538 tcg_gen_add_i32(tmp, tmp, tmp2);
8540 tcg_temp_free_i32(tmp2);
8541 /* BUGFIX */
8542 tmp64 = tcg_temp_new_i64();
8543 tcg_gen_ext_i32_i64(tmp64, tmp);
8544 tcg_temp_free_i32(tmp);
8545 gen_addq(s, tmp64, rs, rd);
8546 gen_storeq_reg(s, rs, rd, tmp64);
8547 tcg_temp_free_i64(tmp64);
8548 } else {
8549 if (op & 0x20) {
8550 /* Unsigned 64-bit multiply */
8551 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8552 } else {
8553 if (op & 8) {
8554 /* smlalxy */
8555 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8556 tcg_temp_free_i32(tmp2);
8557 tmp64 = tcg_temp_new_i64();
8558 tcg_gen_ext_i32_i64(tmp64, tmp);
8559 tcg_temp_free_i32(tmp);
8560 } else {
8561 /* Signed 64-bit multiply */
8562 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8565 if (op & 4) {
8566 /* umaal */
8567 gen_addq_lo(s, tmp64, rs);
8568 gen_addq_lo(s, tmp64, rd);
8569 } else if (op & 0x40) {
8570 /* 64-bit accumulate. */
8571 gen_addq(s, tmp64, rs, rd);
8573 gen_storeq_reg(s, rs, rd, tmp64);
8574 tcg_temp_free_i64(tmp64);
8576 break;
8578 break;
8579 case 6: case 7: case 14: case 15:
8580 /* Coprocessor. */
8581 if (((insn >> 24) & 3) == 3) {
8582 /* Translate into the equivalent ARM encoding. */
8583 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8584 if (disas_neon_data_insn(env, s, insn))
8585 goto illegal_op;
8586 } else {
8587 if (insn & (1 << 28))
8588 goto illegal_op;
8589 if (disas_coproc_insn (env, s, insn))
8590 goto illegal_op;
8592 break;
8593 case 8: case 9: case 10: case 11:
8594 if (insn & (1 << 15)) {
8595 /* Branches, misc control. */
8596 if (insn & 0x5000) {
8597 /* Unconditional branch. */
8598 /* signextend(hw1[10:0]) -> offset[:12]. */
8599 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8600 /* hw1[10:0] -> offset[11:1]. */
8601 offset |= (insn & 0x7ff) << 1;
8602 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8603 offset[24:22] already have the same value because of the
8604 sign extension above. */
8605 offset ^= ((~insn) & (1 << 13)) << 10;
8606 offset ^= ((~insn) & (1 << 11)) << 11;
8608 if (insn & (1 << 14)) {
8609 /* Branch and link. */
8610 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8613 offset += s->pc;
8614 if (insn & (1 << 12)) {
8615 /* b/bl */
8616 gen_jmp(s, offset);
8617 } else {
8618 /* blx */
8619 offset &= ~(uint32_t)2;
8620 /* thumb2 bx, no need to check */
8621 gen_bx_im(s, offset);
8623 } else if (((insn >> 23) & 7) == 7) {
8624 /* Misc control */
8625 if (insn & (1 << 13))
8626 goto illegal_op;
8628 if (insn & (1 << 26)) {
8629 /* Secure monitor call (v6Z) */
8630 goto illegal_op; /* not implemented. */
8631 } else {
8632 op = (insn >> 20) & 7;
8633 switch (op) {
8634 case 0: /* msr cpsr. */
8635 if (IS_M(env)) {
8636 tmp = load_reg(s, rn);
8637 addr = tcg_const_i32(insn & 0xff);
8638 gen_helper_v7m_msr(cpu_env, addr, tmp);
8639 tcg_temp_free_i32(addr);
8640 tcg_temp_free_i32(tmp);
8641 gen_lookup_tb(s);
8642 break;
8644 /* fall through */
8645 case 1: /* msr spsr. */
8646 if (IS_M(env))
8647 goto illegal_op;
8648 tmp = load_reg(s, rn);
8649 if (gen_set_psr(s,
8650 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8651 op == 1, tmp))
8652 goto illegal_op;
8653 break;
8654 case 2: /* cps, nop-hint. */
8655 if (((insn >> 8) & 7) == 0) {
8656 gen_nop_hint(s, insn & 0xff);
8658 /* Implemented as NOP in user mode. */
8659 if (IS_USER(s))
8660 break;
8661 offset = 0;
8662 imm = 0;
8663 if (insn & (1 << 10)) {
8664 if (insn & (1 << 7))
8665 offset |= CPSR_A;
8666 if (insn & (1 << 6))
8667 offset |= CPSR_I;
8668 if (insn & (1 << 5))
8669 offset |= CPSR_F;
8670 if (insn & (1 << 9))
8671 imm = CPSR_A | CPSR_I | CPSR_F;
8673 if (insn & (1 << 8)) {
8674 offset |= 0x1f;
8675 imm |= (insn & 0x1f);
8677 if (offset) {
8678 gen_set_psr_im(s, offset, 0, imm);
8680 break;
8681 case 3: /* Special control operations. */
8682 ARCH(7);
8683 op = (insn >> 4) & 0xf;
8684 switch (op) {
8685 case 2: /* clrex */
8686 gen_clrex(s);
8687 break;
8688 case 4: /* dsb */
8689 case 5: /* dmb */
8690 case 6: /* isb */
8691 /* These execute as NOPs. */
8692 break;
8693 default:
8694 goto illegal_op;
8696 break;
8697 case 4: /* bxj */
8698 /* Trivial implementation equivalent to bx. */
8699 tmp = load_reg(s, rn);
8700 gen_bx(s, tmp);
8701 break;
8702 case 5: /* Exception return. */
8703 if (IS_USER(s)) {
8704 goto illegal_op;
8706 if (rn != 14 || rd != 15) {
8707 goto illegal_op;
8709 tmp = load_reg(s, rn);
8710 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8711 gen_exception_return(s, tmp);
8712 break;
8713 case 6: /* mrs cpsr. */
8714 tmp = tcg_temp_new_i32();
8715 if (IS_M(env)) {
8716 addr = tcg_const_i32(insn & 0xff);
8717 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8718 tcg_temp_free_i32(addr);
8719 } else {
8720 gen_helper_cpsr_read(tmp, cpu_env);
8722 store_reg(s, rd, tmp);
8723 break;
8724 case 7: /* mrs spsr. */
8725 /* Not accessible in user mode. */
8726 if (IS_USER(s) || IS_M(env))
8727 goto illegal_op;
8728 tmp = load_cpu_field(spsr);
8729 store_reg(s, rd, tmp);
8730 break;
8733 } else {
8734 /* Conditional branch. */
8735 op = (insn >> 22) & 0xf;
8736 /* Generate a conditional jump to next instruction. */
8737 s->condlabel = gen_new_label();
8738 gen_test_cc(op ^ 1, s->condlabel);
8739 s->condjmp = 1;
8741 /* offset[11:1] = insn[10:0] */
8742 offset = (insn & 0x7ff) << 1;
8743 /* offset[17:12] = insn[21:16]. */
8744 offset |= (insn & 0x003f0000) >> 4;
8745 /* offset[31:20] = insn[26]. */
8746 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8747 /* offset[18] = insn[13]. */
8748 offset |= (insn & (1 << 13)) << 5;
8749 /* offset[19] = insn[11]. */
8750 offset |= (insn & (1 << 11)) << 8;
8752 /* jump to the offset */
8753 gen_jmp(s, s->pc + offset);
8755 } else {
8756 /* Data processing immediate. */
8757 if (insn & (1 << 25)) {
8758 if (insn & (1 << 24)) {
8759 if (insn & (1 << 20))
8760 goto illegal_op;
8761 /* Bitfield/Saturate. */
8762 op = (insn >> 21) & 7;
8763 imm = insn & 0x1f;
8764 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8765 if (rn == 15) {
8766 tmp = tcg_temp_new_i32();
8767 tcg_gen_movi_i32(tmp, 0);
8768 } else {
8769 tmp = load_reg(s, rn);
8771 switch (op) {
8772 case 2: /* Signed bitfield extract. */
8773 imm++;
8774 if (shift + imm > 32)
8775 goto illegal_op;
8776 if (imm < 32)
8777 gen_sbfx(tmp, shift, imm);
8778 break;
8779 case 6: /* Unsigned bitfield extract. */
8780 imm++;
8781 if (shift + imm > 32)
8782 goto illegal_op;
8783 if (imm < 32)
8784 gen_ubfx(tmp, shift, (1u << imm) - 1);
8785 break;
8786 case 3: /* Bitfield insert/clear. */
8787 if (imm < shift)
8788 goto illegal_op;
8789 imm = imm + 1 - shift;
8790 if (imm != 32) {
8791 tmp2 = load_reg(s, rd);
8792 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8793 tcg_temp_free_i32(tmp2);
8795 break;
8796 case 7:
8797 goto illegal_op;
8798 default: /* Saturate. */
8799 if (shift) {
8800 if (op & 1)
8801 tcg_gen_sari_i32(tmp, tmp, shift);
8802 else
8803 tcg_gen_shli_i32(tmp, tmp, shift);
8805 tmp2 = tcg_const_i32(imm);
8806 if (op & 4) {
8807 /* Unsigned. */
8808 if ((op & 1) && shift == 0)
8809 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8810 else
8811 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8812 } else {
8813 /* Signed. */
8814 if ((op & 1) && shift == 0)
8815 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8816 else
8817 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8819 tcg_temp_free_i32(tmp2);
8820 break;
8822 store_reg(s, rd, tmp);
8823 } else {
8824 imm = ((insn & 0x04000000) >> 15)
8825 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8826 if (insn & (1 << 22)) {
8827 /* 16-bit immediate. */
8828 imm |= (insn >> 4) & 0xf000;
8829 if (insn & (1 << 23)) {
8830 /* movt */
8831 tmp = load_reg(s, rd);
8832 tcg_gen_ext16u_i32(tmp, tmp);
8833 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8834 } else {
8835 /* movw */
8836 tmp = tcg_temp_new_i32();
8837 tcg_gen_movi_i32(tmp, imm);
8839 } else {
8840 /* Add/sub 12-bit immediate. */
8841 if (rn == 15) {
8842 offset = s->pc & ~(uint32_t)3;
8843 if (insn & (1 << 23))
8844 offset -= imm;
8845 else
8846 offset += imm;
8847 tmp = tcg_temp_new_i32();
8848 tcg_gen_movi_i32(tmp, offset);
8849 } else {
8850 tmp = load_reg(s, rn);
8851 if (insn & (1 << 23))
8852 tcg_gen_subi_i32(tmp, tmp, imm);
8853 else
8854 tcg_gen_addi_i32(tmp, tmp, imm);
8857 store_reg(s, rd, tmp);
8859 } else {
8860 int shifter_out = 0;
8861 /* modified 12-bit immediate. */
8862 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8863 imm = (insn & 0xff);
8864 switch (shift) {
8865 case 0: /* XY */
8866 /* Nothing to do. */
8867 break;
8868 case 1: /* 00XY00XY */
8869 imm |= imm << 16;
8870 break;
8871 case 2: /* XY00XY00 */
8872 imm |= imm << 16;
8873 imm <<= 8;
8874 break;
8875 case 3: /* XYXYXYXY */
8876 imm |= imm << 16;
8877 imm |= imm << 8;
8878 break;
8879 default: /* Rotated constant. */
8880 shift = (shift << 1) | (imm >> 7);
8881 imm |= 0x80;
8882 imm = imm << (32 - shift);
8883 shifter_out = 1;
8884 break;
8886 tmp2 = tcg_temp_new_i32();
8887 tcg_gen_movi_i32(tmp2, imm);
8888 rn = (insn >> 16) & 0xf;
8889 if (rn == 15) {
8890 tmp = tcg_temp_new_i32();
8891 tcg_gen_movi_i32(tmp, 0);
8892 } else {
8893 tmp = load_reg(s, rn);
8895 op = (insn >> 21) & 0xf;
8896 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8897 shifter_out, tmp, tmp2))
8898 goto illegal_op;
8899 tcg_temp_free_i32(tmp2);
8900 rd = (insn >> 8) & 0xf;
8901 if (rd != 15) {
8902 store_reg(s, rd, tmp);
8903 } else {
8904 tcg_temp_free_i32(tmp);
8908 break;
8909 case 12: /* Load/store single data item. */
8911 int postinc = 0;
8912 int writeback = 0;
8913 int user;
8914 if ((insn & 0x01100000) == 0x01000000) {
8915 if (disas_neon_ls_insn(env, s, insn))
8916 goto illegal_op;
8917 break;
8919 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8920 if (rs == 15) {
8921 if (!(insn & (1 << 20))) {
8922 goto illegal_op;
8924 if (op != 2) {
8925 /* Byte or halfword load space with dest == r15 : memory hints.
8926 * Catch them early so we don't emit pointless addressing code.
8927 * This space is a mix of:
8928 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8929 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8930 * cores)
8931 * unallocated hints, which must be treated as NOPs
8932 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8933 * which is easiest for the decoding logic
8934 * Some space which must UNDEF
8936 int op1 = (insn >> 23) & 3;
8937 int op2 = (insn >> 6) & 0x3f;
8938 if (op & 2) {
8939 goto illegal_op;
8941 if (rn == 15) {
8942 /* UNPREDICTABLE, unallocated hint or
8943 * PLD/PLDW/PLI (literal)
8945 return 0;
8947 if (op1 & 1) {
8948 return 0; /* PLD/PLDW/PLI or unallocated hint */
8950 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8951 return 0; /* PLD/PLDW/PLI or unallocated hint */
8953 /* UNDEF space, or an UNPREDICTABLE */
8954 return 1;
8957 user = IS_USER(s);
8958 if (rn == 15) {
8959 addr = tcg_temp_new_i32();
8960 /* PC relative. */
8961 /* s->pc has already been incremented by 4. */
8962 imm = s->pc & 0xfffffffc;
8963 if (insn & (1 << 23))
8964 imm += insn & 0xfff;
8965 else
8966 imm -= insn & 0xfff;
8967 tcg_gen_movi_i32(addr, imm);
8968 } else {
8969 addr = load_reg(s, rn);
8970 if (insn & (1 << 23)) {
8971 /* Positive offset. */
8972 imm = insn & 0xfff;
8973 tcg_gen_addi_i32(addr, addr, imm);
8974 } else {
8975 imm = insn & 0xff;
8976 switch ((insn >> 8) & 0xf) {
8977 case 0x0: /* Shifted Register. */
8978 shift = (insn >> 4) & 0xf;
8979 if (shift > 3) {
8980 tcg_temp_free_i32(addr);
8981 goto illegal_op;
8983 tmp = load_reg(s, rm);
8984 if (shift)
8985 tcg_gen_shli_i32(tmp, tmp, shift);
8986 tcg_gen_add_i32(addr, addr, tmp);
8987 tcg_temp_free_i32(tmp);
8988 break;
8989 case 0xc: /* Negative offset. */
8990 tcg_gen_addi_i32(addr, addr, -imm);
8991 break;
8992 case 0xe: /* User privilege. */
8993 tcg_gen_addi_i32(addr, addr, imm);
8994 user = 1;
8995 break;
8996 case 0x9: /* Post-decrement. */
8997 imm = -imm;
8998 /* Fall through. */
8999 case 0xb: /* Post-increment. */
9000 postinc = 1;
9001 writeback = 1;
9002 break;
9003 case 0xd: /* Pre-decrement. */
9004 imm = -imm;
9005 /* Fall through. */
9006 case 0xf: /* Pre-increment. */
9007 tcg_gen_addi_i32(addr, addr, imm);
9008 writeback = 1;
9009 break;
9010 default:
9011 tcg_temp_free_i32(addr);
9012 goto illegal_op;
9016 if (insn & (1 << 20)) {
9017 /* Load. */
9018 switch (op) {
9019 case 0: tmp = gen_ld8u(addr, user); break;
9020 case 4: tmp = gen_ld8s(addr, user); break;
9021 case 1: tmp = gen_ld16u(addr, user); break;
9022 case 5: tmp = gen_ld16s(addr, user); break;
9023 case 2: tmp = gen_ld32(addr, user); break;
9024 default:
9025 tcg_temp_free_i32(addr);
9026 goto illegal_op;
9028 if (rs == 15) {
9029 gen_bx(s, tmp);
9030 } else {
9031 store_reg(s, rs, tmp);
9033 } else {
9034 /* Store. */
9035 tmp = load_reg(s, rs);
9036 switch (op) {
9037 case 0: gen_st8(tmp, addr, user); break;
9038 case 1: gen_st16(tmp, addr, user); break;
9039 case 2: gen_st32(tmp, addr, user); break;
9040 default:
9041 tcg_temp_free_i32(addr);
9042 goto illegal_op;
9045 if (postinc)
9046 tcg_gen_addi_i32(addr, addr, imm);
9047 if (writeback) {
9048 store_reg(s, rn, addr);
9049 } else {
9050 tcg_temp_free_i32(addr);
9053 break;
9054 default:
9055 goto illegal_op;
9057 return 0;
9058 illegal_op:
9059 return 1;
/* Decode and translate a single 16-bit Thumb instruction, emitting the
 * equivalent TCG ops; 32-bit Thumb-2 encodings (cases 14/15 with the
 * relevant bits set) are handed off to disas_thumb2_insn().  Advances
 * s->pc past the instruction.  Undefined encodings fall through to the
 * undef/undef32 labels, which raise EXCP_UDEF.
 * NOTE(review): this text is a lossy extraction -- a number of original
 * lines (mostly closing braces) are missing, and the leading numbers on
 * each line are artifacts of the extraction, not part of the code.
 */
9062 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9064 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9065 int32_t offset;
9066 int i;
9067 TCGv tmp;
9068 TCGv tmp2;
9069 TCGv addr;
/* If we are inside an IT (If-Then) block, emit a conditional branch that
 * skips this instruction when its condition fails -- unless the condition
 * is AL (0xe), which always executes. */
9071 if (s->condexec_mask) {
9072 cond = s->condexec_cond;
9073 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9074 s->condlabel = gen_new_label();
9075 gen_test_cc(cond ^ 1, s->condlabel);
9076 s->condjmp = 1;
/* Fetch the 16-bit halfword and advance the PC. */
9080 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9081 s->pc += 2;
/* Primary decode on insn[15:12]. */
9083 switch (insn >> 12) {
9084 case 0: case 1:
/* shift by immediate, or 3-register/3-bit-immediate add/subtract */
9086 rd = insn & 7;
9087 op = (insn >> 11) & 3;
9088 if (op == 3) {
9089 /* add/subtract */
9090 rn = (insn >> 3) & 7;
9091 tmp = load_reg(s, rn);
9092 if (insn & (1 << 10)) {
9093 /* immediate */
9094 tmp2 = tcg_temp_new_i32();
9095 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9096 } else {
9097 /* reg */
9098 rm = (insn >> 6) & 7;
9099 tmp2 = load_reg(s, rm);
/* Inside an IT block the flag-setting variants are suppressed. */
9101 if (insn & (1 << 9)) {
9102 if (s->condexec_mask)
9103 tcg_gen_sub_i32(tmp, tmp, tmp2);
9104 else
9105 gen_sub_CC(tmp, tmp, tmp2);
9106 } else {
9107 if (s->condexec_mask)
9108 tcg_gen_add_i32(tmp, tmp, tmp2);
9109 else
9110 gen_add_CC(tmp, tmp, tmp2);
9112 tcg_temp_free_i32(tmp2);
9113 store_reg(s, rd, tmp);
9114 } else {
9115 /* shift immediate */
9116 rm = (insn >> 3) & 7;
9117 shift = (insn >> 6) & 0x1f;
9118 tmp = load_reg(s, rm);
9119 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9120 if (!s->condexec_mask)
9121 gen_logic_CC(tmp);
9122 store_reg(s, rd, tmp);
9124 break;
9125 case 2: case 3:
9126 /* arithmetic large immediate */
9127 op = (insn >> 11) & 3;
9128 rd = (insn >> 8) & 0x7;
9129 if (op == 0) { /* mov */
9130 tmp = tcg_temp_new_i32();
9131 tcg_gen_movi_i32(tmp, insn & 0xff);
9132 if (!s->condexec_mask)
9133 gen_logic_CC(tmp);
9134 store_reg(s, rd, tmp);
9135 } else {
9136 tmp = load_reg(s, rd);
9137 tmp2 = tcg_temp_new_i32();
9138 tcg_gen_movi_i32(tmp2, insn & 0xff);
9139 switch (op) {
9140 case 1: /* cmp */
9141 gen_sub_CC(tmp, tmp, tmp2);
9142 tcg_temp_free_i32(tmp);
9143 tcg_temp_free_i32(tmp2);
9144 break;
9145 case 2: /* add */
9146 if (s->condexec_mask)
9147 tcg_gen_add_i32(tmp, tmp, tmp2);
9148 else
9149 gen_add_CC(tmp, tmp, tmp2);
9150 tcg_temp_free_i32(tmp2);
9151 store_reg(s, rd, tmp);
9152 break;
9153 case 3: /* sub */
9154 if (s->condexec_mask)
9155 tcg_gen_sub_i32(tmp, tmp, tmp2);
9156 else
9157 gen_sub_CC(tmp, tmp, tmp2);
9158 tcg_temp_free_i32(tmp2);
9159 store_reg(s, rd, tmp);
9160 break;
9163 break;
/* case 4: PC-relative load, hi-register ops / BX-BLX, or
 * data-processing (register) formats. */
9164 case 4:
9165 if (insn & (1 << 11)) {
9166 rd = (insn >> 8) & 7;
9167 /* load pc-relative. Bit 1 of PC is ignored. */
9168 val = s->pc + 2 + ((insn & 0xff) * 4);
9169 val &= ~(uint32_t)2;
9170 addr = tcg_temp_new_i32();
9171 tcg_gen_movi_i32(addr, val);
9172 tmp = gen_ld32(addr, IS_USER(s));
9173 tcg_temp_free_i32(addr);
9174 store_reg(s, rd, tmp);
9175 break;
9177 if (insn & (1 << 10)) {
9178 /* data processing extended or blx */
9179 rd = (insn & 7) | ((insn >> 4) & 8);
9180 rm = (insn >> 3) & 0xf;
9181 op = (insn >> 8) & 3;
9182 switch (op) {
9183 case 0: /* add */
9184 tmp = load_reg(s, rd);
9185 tmp2 = load_reg(s, rm);
9186 tcg_gen_add_i32(tmp, tmp, tmp2);
9187 tcg_temp_free_i32(tmp2);
9188 store_reg(s, rd, tmp);
9189 break;
9190 case 1: /* cmp */
9191 tmp = load_reg(s, rd);
9192 tmp2 = load_reg(s, rm);
9193 gen_sub_CC(tmp, tmp, tmp2);
9194 tcg_temp_free_i32(tmp2);
9195 tcg_temp_free_i32(tmp);
9196 break;
9197 case 2: /* mov/cpy */
9198 tmp = load_reg(s, rm);
9199 store_reg(s, rd, tmp);
9200 break;
9201 case 3:/* branch [and link] exchange thumb register */
9202 tmp = load_reg(s, rm);
9203 if (insn & (1 << 7)) {
9204 ARCH(5);
/* BLX: write the return address (current pc | Thumb bit) to LR. */
9205 val = (uint32_t)s->pc | 1;
9206 tmp2 = tcg_temp_new_i32();
9207 tcg_gen_movi_i32(tmp2, val);
9208 store_reg(s, 14, tmp2);
9210 /* already thumb, no need to check */
9211 gen_bx(s, tmp);
9212 break;
9214 break;
9217 /* data processing register */
9218 rd = insn & 7;
9219 rm = (insn >> 3) & 7;
9220 op = (insn >> 6) & 0xf;
9221 if (op == 2 || op == 3 || op == 4 || op == 7) {
9222 /* the shift/rotate ops want the operands backwards */
9223 val = rm;
9224 rm = rd;
9225 rd = val;
9226 val = 1;
9227 } else {
9228 val = 0;
9231 if (op == 9) { /* neg */
9232 tmp = tcg_temp_new_i32();
9233 tcg_gen_movi_i32(tmp, 0);
9234 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9235 tmp = load_reg(s, rd);
9236 } else {
9237 TCGV_UNUSED(tmp);
9240 tmp2 = load_reg(s, rm);
9241 switch (op) {
9242 case 0x0: /* and */
9243 tcg_gen_and_i32(tmp, tmp, tmp2);
9244 if (!s->condexec_mask)
9245 gen_logic_CC(tmp);
9246 break;
9247 case 0x1: /* eor */
9248 tcg_gen_xor_i32(tmp, tmp, tmp2);
9249 if (!s->condexec_mask)
9250 gen_logic_CC(tmp);
9251 break;
9252 case 0x2: /* lsl */
9253 if (s->condexec_mask) {
9254 gen_shl(tmp2, tmp2, tmp);
9255 } else {
9256 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9257 gen_logic_CC(tmp2);
9259 break;
9260 case 0x3: /* lsr */
9261 if (s->condexec_mask) {
9262 gen_shr(tmp2, tmp2, tmp);
9263 } else {
9264 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9265 gen_logic_CC(tmp2);
9267 break;
9268 case 0x4: /* asr */
9269 if (s->condexec_mask) {
9270 gen_sar(tmp2, tmp2, tmp);
9271 } else {
9272 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9273 gen_logic_CC(tmp2);
9275 break;
9276 case 0x5: /* adc */
9277 if (s->condexec_mask) {
9278 gen_adc(tmp, tmp2);
9279 } else {
9280 gen_adc_CC(tmp, tmp, tmp2);
9282 break;
9283 case 0x6: /* sbc */
9284 if (s->condexec_mask) {
9285 gen_sub_carry(tmp, tmp, tmp2);
9286 } else {
9287 gen_sbc_CC(tmp, tmp, tmp2);
9289 break;
9290 case 0x7: /* ror */
9291 if (s->condexec_mask) {
9292 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9293 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9294 } else {
9295 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9296 gen_logic_CC(tmp2);
9298 break;
9299 case 0x8: /* tst */
9300 tcg_gen_and_i32(tmp, tmp, tmp2);
9301 gen_logic_CC(tmp);
9302 rd = 16;
9303 break;
9304 case 0x9: /* neg */
9305 if (s->condexec_mask)
9306 tcg_gen_neg_i32(tmp, tmp2);
9307 else
9308 gen_sub_CC(tmp, tmp, tmp2);
9309 break;
9310 case 0xa: /* cmp */
9311 gen_sub_CC(tmp, tmp, tmp2);
9312 rd = 16;
9313 break;
9314 case 0xb: /* cmn */
9315 gen_add_CC(tmp, tmp, tmp2);
9316 rd = 16;
9317 break;
9318 case 0xc: /* orr */
9319 tcg_gen_or_i32(tmp, tmp, tmp2);
9320 if (!s->condexec_mask)
9321 gen_logic_CC(tmp);
9322 break;
9323 case 0xd: /* mul */
9324 tcg_gen_mul_i32(tmp, tmp, tmp2);
9325 if (!s->condexec_mask)
9326 gen_logic_CC(tmp);
9327 break;
9328 case 0xe: /* bic */
9329 tcg_gen_andc_i32(tmp, tmp, tmp2);
9330 if (!s->condexec_mask)
9331 gen_logic_CC(tmp);
9332 break;
9333 case 0xf: /* mvn */
9334 tcg_gen_not_i32(tmp2, tmp2);
9335 if (!s->condexec_mask)
9336 gen_logic_CC(tmp2);
9337 val = 1;
9338 rm = rd;
9339 break;
/* Writeback: rd == 16 marks compare-style ops that only set flags;
 * val != 0 means the result lives in tmp2 (and the destination is rm). */
9341 if (rd != 16) {
9342 if (val) {
9343 store_reg(s, rm, tmp2);
9344 if (op != 0xf)
9345 tcg_temp_free_i32(tmp);
9346 } else {
9347 store_reg(s, rd, tmp);
9348 tcg_temp_free_i32(tmp2);
9350 } else {
9351 tcg_temp_free_i32(tmp);
9352 tcg_temp_free_i32(tmp2);
9354 break;
9356 case 5:
9357 /* load/store register offset. */
9358 rd = insn & 7;
9359 rn = (insn >> 3) & 7;
9360 rm = (insn >> 6) & 7;
9361 op = (insn >> 9) & 7;
9362 addr = load_reg(s, rn);
9363 tmp = load_reg(s, rm);
9364 tcg_gen_add_i32(addr, addr, tmp);
9365 tcg_temp_free_i32(tmp);
9367 if (op < 3) /* store */
9368 tmp = load_reg(s, rd);
9370 switch (op) {
9371 case 0: /* str */
9372 gen_st32(tmp, addr, IS_USER(s));
9373 break;
9374 case 1: /* strh */
9375 gen_st16(tmp, addr, IS_USER(s));
9376 break;
9377 case 2: /* strb */
9378 gen_st8(tmp, addr, IS_USER(s));
9379 break;
9380 case 3: /* ldrsb */
9381 tmp = gen_ld8s(addr, IS_USER(s));
9382 break;
9383 case 4: /* ldr */
9384 tmp = gen_ld32(addr, IS_USER(s));
9385 break;
9386 case 5: /* ldrh */
9387 tmp = gen_ld16u(addr, IS_USER(s));
9388 break;
9389 case 6: /* ldrb */
9390 tmp = gen_ld8u(addr, IS_USER(s));
9391 break;
9392 case 7: /* ldrsh */
9393 tmp = gen_ld16s(addr, IS_USER(s));
9394 break;
9396 if (op >= 3) /* load */
9397 store_reg(s, rd, tmp);
9398 tcg_temp_free_i32(addr);
9399 break;
9401 case 6:
9402 /* load/store word immediate offset */
9403 rd = insn & 7;
9404 rn = (insn >> 3) & 7;
9405 addr = load_reg(s, rn);
9406 val = (insn >> 4) & 0x7c;
9407 tcg_gen_addi_i32(addr, addr, val);
9409 if (insn & (1 << 11)) {
9410 /* load */
9411 tmp = gen_ld32(addr, IS_USER(s));
9412 store_reg(s, rd, tmp);
9413 } else {
9414 /* store */
9415 tmp = load_reg(s, rd);
9416 gen_st32(tmp, addr, IS_USER(s));
9418 tcg_temp_free_i32(addr);
9419 break;
9421 case 7:
9422 /* load/store byte immediate offset */
9423 rd = insn & 7;
9424 rn = (insn >> 3) & 7;
9425 addr = load_reg(s, rn);
9426 val = (insn >> 6) & 0x1f;
9427 tcg_gen_addi_i32(addr, addr, val);
9429 if (insn & (1 << 11)) {
9430 /* load */
9431 tmp = gen_ld8u(addr, IS_USER(s));
9432 store_reg(s, rd, tmp);
9433 } else {
9434 /* store */
9435 tmp = load_reg(s, rd);
9436 gen_st8(tmp, addr, IS_USER(s));
9438 tcg_temp_free_i32(addr);
9439 break;
9441 case 8:
9442 /* load/store halfword immediate offset */
9443 rd = insn & 7;
9444 rn = (insn >> 3) & 7;
9445 addr = load_reg(s, rn);
9446 val = (insn >> 5) & 0x3e;
9447 tcg_gen_addi_i32(addr, addr, val);
9449 if (insn & (1 << 11)) {
9450 /* load */
9451 tmp = gen_ld16u(addr, IS_USER(s));
9452 store_reg(s, rd, tmp);
9453 } else {
9454 /* store */
9455 tmp = load_reg(s, rd);
9456 gen_st16(tmp, addr, IS_USER(s));
9458 tcg_temp_free_i32(addr);
9459 break;
9461 case 9:
9462 /* load/store from stack */
9463 rd = (insn >> 8) & 7;
9464 addr = load_reg(s, 13);
9465 val = (insn & 0xff) * 4;
9466 tcg_gen_addi_i32(addr, addr, val);
9468 if (insn & (1 << 11)) {
9469 /* load */
9470 tmp = gen_ld32(addr, IS_USER(s));
9471 store_reg(s, rd, tmp);
9472 } else {
9473 /* store */
9474 tmp = load_reg(s, rd);
9475 gen_st32(tmp, addr, IS_USER(s));
9477 tcg_temp_free_i32(addr);
9478 break;
9480 case 10:
9481 /* add to high reg */
9482 rd = (insn >> 8) & 7;
9483 if (insn & (1 << 11)) {
9484 /* SP */
9485 tmp = load_reg(s, 13);
9486 } else {
9487 /* PC. bit 1 is ignored. */
9488 tmp = tcg_temp_new_i32();
9489 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9491 val = (insn & 0xff) * 4;
9492 tcg_gen_addi_i32(tmp, tmp, val);
9493 store_reg(s, rd, tmp);
9494 break;
9496 case 11:
9497 /* misc */
9498 op = (insn >> 8) & 0xf;
9499 switch (op) {
9500 case 0:
9501 /* adjust stack pointer */
9502 tmp = load_reg(s, 13);
9503 val = (insn & 0x7f) * 4;
9504 if (insn & (1 << 7))
9505 val = -(int32_t)val;
9506 tcg_gen_addi_i32(tmp, tmp, val);
9507 store_reg(s, 13, tmp);
9508 break;
9510 case 2: /* sign/zero extend. */
9511 ARCH(6);
9512 rd = insn & 7;
9513 rm = (insn >> 3) & 7;
9514 tmp = load_reg(s, rm);
9515 switch ((insn >> 6) & 3) {
9516 case 0: gen_sxth(tmp); break;
9517 case 1: gen_sxtb(tmp); break;
9518 case 2: gen_uxth(tmp); break;
9519 case 3: gen_uxtb(tmp); break;
9521 store_reg(s, rd, tmp);
9522 break;
9523 case 4: case 5: case 0xc: case 0xd:
9524 /* push/pop */
/* First compute the total transfer size so the base can be
 * pre-decremented for push. */
9525 addr = load_reg(s, 13);
9526 if (insn & (1 << 8))
9527 offset = 4;
9528 else
9529 offset = 0;
9530 for (i = 0; i < 8; i++) {
9531 if (insn & (1 << i))
9532 offset += 4;
9534 if ((insn & (1 << 11)) == 0) {
9535 tcg_gen_addi_i32(addr, addr, -offset);
9537 for (i = 0; i < 8; i++) {
9538 if (insn & (1 << i)) {
9539 if (insn & (1 << 11)) {
9540 /* pop */
9541 tmp = gen_ld32(addr, IS_USER(s));
9542 store_reg(s, i, tmp);
9543 } else {
9544 /* push */
9545 tmp = load_reg(s, i);
9546 gen_st32(tmp, addr, IS_USER(s));
9548 /* advance to the next address. */
9549 tcg_gen_addi_i32(addr, addr, 4);
9552 TCGV_UNUSED(tmp);
9553 if (insn & (1 << 8)) {
9554 if (insn & (1 << 11)) {
9555 /* pop pc */
9556 tmp = gen_ld32(addr, IS_USER(s));
9557 /* don't set the pc until the rest of the instruction
9558 has completed */
9559 } else {
9560 /* push lr */
9561 tmp = load_reg(s, 14);
9562 gen_st32(tmp, addr, IS_USER(s));
9564 tcg_gen_addi_i32(addr, addr, 4);
9566 if ((insn & (1 << 11)) == 0) {
9567 tcg_gen_addi_i32(addr, addr, -offset);
9569 /* write back the new stack pointer */
9570 store_reg(s, 13, addr);
9571 /* set the new PC value */
9572 if ((insn & 0x0900) == 0x0900) {
9573 store_reg_from_load(env, s, 15, tmp);
9575 break;
9577 case 1: case 3: case 9: case 11: /* czb */
/* Compare-and-branch on (non-)zero; unconditional even inside
 * the surrounding decode, hence its own condlabel. */
9578 rm = insn & 7;
9579 tmp = load_reg(s, rm);
9580 s->condlabel = gen_new_label();
9581 s->condjmp = 1;
9582 if (insn & (1 << 11))
9583 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9584 else
9585 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9586 tcg_temp_free_i32(tmp);
9587 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9588 val = (uint32_t)s->pc + 2;
9589 val += offset;
9590 gen_jmp(s, val);
9591 break;
9593 case 15: /* IT, nop-hint. */
9594 if ((insn & 0xf) == 0) {
9595 gen_nop_hint(s, (insn >> 4) & 0xf);
9596 break;
9598 /* If Then. */
9599 s->condexec_cond = (insn >> 4) & 0xe;
9600 s->condexec_mask = insn & 0x1f;
9601 /* No actual code generated for this insn, just setup state. */
9602 break;
9604 case 0xe: /* bkpt */
9605 ARCH(5);
9606 gen_exception_insn(s, 2, EXCP_BKPT);
9607 break;
9609 case 0xa: /* rev */
9610 ARCH(6);
9611 rn = (insn >> 3) & 0x7;
9612 rd = insn & 0x7;
9613 tmp = load_reg(s, rn);
9614 switch ((insn >> 6) & 3) {
9615 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9616 case 1: gen_rev16(tmp); break;
9617 case 3: gen_revsh(tmp); break;
9618 default: goto illegal_op;
9620 store_reg(s, rd, tmp);
9621 break;
9623 case 6:
9624 switch ((insn >> 5) & 7) {
9625 case 2:
9626 /* setend */
9627 ARCH(6);
9628 if (((insn >> 3) & 1) != s->bswap_code) {
9629 /* Dynamic endianness switching not implemented. */
9630 goto illegal_op;
9632 break;
9633 case 3:
9634 /* cps */
9635 ARCH(6);
9636 if (IS_USER(s)) {
9637 break;
9639 if (IS_M(env)) {
9640 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9641 /* FAULTMASK */
9642 if (insn & 1) {
9643 addr = tcg_const_i32(19);
9644 gen_helper_v7m_msr(cpu_env, addr, tmp);
9645 tcg_temp_free_i32(addr);
9647 /* PRIMASK */
9648 if (insn & 2) {
9649 addr = tcg_const_i32(16);
9650 gen_helper_v7m_msr(cpu_env, addr, tmp);
9651 tcg_temp_free_i32(addr);
9653 tcg_temp_free_i32(tmp);
9654 gen_lookup_tb(s);
9655 } else {
9656 if (insn & (1 << 4)) {
9657 shift = CPSR_A | CPSR_I | CPSR_F;
9658 } else {
9659 shift = 0;
9661 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9663 break;
9664 default:
9665 goto undef;
9667 break;
9669 default:
9670 goto undef;
9672 break;
9674 case 12:
9676 /* load/store multiple */
/* loaded_var defers the base-register load so writeback logic
 * below can decide where it finally lands. */
9677 TCGv loaded_var;
9678 TCGV_UNUSED(loaded_var);
9679 rn = (insn >> 8) & 0x7;
9680 addr = load_reg(s, rn);
9681 for (i = 0; i < 8; i++) {
9682 if (insn & (1 << i)) {
9683 if (insn & (1 << 11)) {
9684 /* load */
9685 tmp = gen_ld32(addr, IS_USER(s));
9686 if (i == rn) {
9687 loaded_var = tmp;
9688 } else {
9689 store_reg(s, i, tmp);
9691 } else {
9692 /* store */
9693 tmp = load_reg(s, i);
9694 gen_st32(tmp, addr, IS_USER(s));
9696 /* advance to the next address */
9697 tcg_gen_addi_i32(addr, addr, 4);
9700 if ((insn & (1 << rn)) == 0) {
9701 /* base reg not in list: base register writeback */
9702 store_reg(s, rn, addr);
9703 } else {
9704 /* base reg in list: if load, complete it now */
9705 if (insn & (1 << 11)) {
9706 store_reg(s, rn, loaded_var);
9708 tcg_temp_free_i32(addr);
9710 break;
9712 case 13:
9713 /* conditional branch or swi */
9714 cond = (insn >> 8) & 0xf;
9715 if (cond == 0xe)
9716 goto undef;
9718 if (cond == 0xf) {
9719 /* swi */
9720 gen_set_pc_im(s->pc);
9721 s->is_jmp = DISAS_SWI;
9722 break;
9724 /* generate a conditional jump to next instruction */
9725 s->condlabel = gen_new_label();
9726 gen_test_cc(cond ^ 1, s->condlabel);
9727 s->condjmp = 1;
9729 /* jump to the offset */
9730 val = (uint32_t)s->pc + 2;
9731 offset = ((int32_t)insn << 24) >> 24;
9732 val += offset << 1;
9733 gen_jmp(s, val);
9734 break;
9736 case 14:
9737 if (insn & (1 << 11)) {
/* 32-bit Thumb-2 encoding (second half follows): defer. */
9738 if (disas_thumb2_insn(env, s, insn))
9739 goto undef32;
9740 break;
9742 /* unconditional branch */
9743 val = (uint32_t)s->pc;
9744 offset = ((int32_t)insn << 21) >> 21;
9745 val += (offset << 1) + 2;
9746 gen_jmp(s, val);
9747 break;
9749 case 15:
9750 if (disas_thumb2_insn(env, s, insn))
9751 goto undef32;
9752 break;
9754 return;
/* 32-bit encoding failed to decode: report a 4-byte UNDEF. */
9755 undef32:
9756 gen_exception_insn(s, 4, EXCP_UDEF);
9757 return;
/* 16-bit encoding failed to decode: report a 2-byte UNDEF. */
9758 illegal_op:
9759 undef:
9760 gen_exception_insn(s, 2, EXCP_UDEF);
9763 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9764 basic block 'tb'. If search_pc is TRUE, also generate PC
9765 information for each intermediate instruction. */
9766 static inline void gen_intermediate_code_internal(CPUARMState *env,
9767 TranslationBlock *tb,
9768 int search_pc)
9770 DisasContext dc1, *dc = &dc1;
9771 CPUBreakpoint *bp;
9772 uint16_t *gen_opc_end;
9773 int j, lj;
9774 target_ulong pc_start;
9775 uint32_t next_page_start;
9776 int num_insns;
9777 int max_insns;
9779 /* generate intermediate code */
9780 pc_start = tb->pc;
9782 dc->tb = tb;
9784 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
9786 dc->is_jmp = DISAS_NEXT;
9787 dc->pc = pc_start;
9788 dc->singlestep_enabled = env->singlestep_enabled;
9789 dc->condjmp = 0;
9790 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9791 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9792 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9793 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9794 #if !defined(CONFIG_USER_ONLY)
9795 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9796 #endif
9797 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9798 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9799 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9800 cpu_F0s = tcg_temp_new_i32();
9801 cpu_F1s = tcg_temp_new_i32();
9802 cpu_F0d = tcg_temp_new_i64();
9803 cpu_F1d = tcg_temp_new_i64();
9804 cpu_V0 = cpu_F0d;
9805 cpu_V1 = cpu_F1d;
9806 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9807 cpu_M0 = tcg_temp_new_i64();
9808 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9809 lj = -1;
9810 num_insns = 0;
9811 max_insns = tb->cflags & CF_COUNT_MASK;
9812 if (max_insns == 0)
9813 max_insns = CF_COUNT_MASK;
9815 gen_tb_start();
9817 tcg_clear_temp_count();
9819 /* A note on handling of the condexec (IT) bits:
9821 * We want to avoid the overhead of having to write the updated condexec
9822 * bits back to the CPUARMState for every instruction in an IT block. So:
9823 * (1) if the condexec bits are not already zero then we write
9824 * zero back into the CPUARMState now. This avoids complications trying
9825 * to do it at the end of the block. (For example if we don't do this
9826 * it's hard to identify whether we can safely skip writing condexec
9827 * at the end of the TB, which we definitely want to do for the case
9828 * where a TB doesn't do anything with the IT state at all.)
9829 * (2) if we are going to leave the TB then we call gen_set_condexec()
9830 * which will write the correct value into CPUARMState if zero is wrong.
9831 * This is done both for leaving the TB at the end, and for leaving
9832 * it because of an exception we know will happen, which is done in
9833 * gen_exception_insn(). The latter is necessary because we need to
9834 * leave the TB with the PC/IT state just prior to execution of the
9835 * instruction which caused the exception.
9836 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9837 * then the CPUARMState will be wrong and we need to reset it.
9838 * This is handled in the same way as restoration of the
9839 * PC in these situations: we will be called again with search_pc=1
9840 * and generate a mapping of the condexec bits for each PC in
9841 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9842 * this to restore the condexec bits.
9844 * Note that there are no instructions which can read the condexec
9845 * bits, and none which can write non-static values to them, so
9846 * we don't need to care about whether CPUARMState is correct in the
9847 * middle of a TB.
9850 /* Reset the conditional execution bits immediately. This avoids
9851 complications trying to do it at the end of the block. */
9852 if (dc->condexec_mask || dc->condexec_cond)
9854 TCGv tmp = tcg_temp_new_i32();
9855 tcg_gen_movi_i32(tmp, 0);
9856 store_cpu_field(tmp, condexec_bits);
9858 do {
9859 #ifdef CONFIG_USER_ONLY
9860 /* Intercept jump to the magic kernel page. */
9861 if (dc->pc >= 0xffff0000) {
9862 /* We always get here via a jump, so know we are not in a
9863 conditional execution block. */
9864 gen_exception(EXCP_KERNEL_TRAP);
9865 dc->is_jmp = DISAS_UPDATE;
9866 break;
9868 #else
9869 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9870 /* We always get here via a jump, so know we are not in a
9871 conditional execution block. */
9872 gen_exception(EXCP_EXCEPTION_EXIT);
9873 dc->is_jmp = DISAS_UPDATE;
9874 break;
9876 #endif
9878 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9879 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9880 if (bp->pc == dc->pc) {
9881 gen_exception_insn(dc, 0, EXCP_DEBUG);
9882 /* Advance PC so that clearing the breakpoint will
9883 invalidate this TB. */
9884 dc->pc += 2;
9885 goto done_generating;
9886 break;
9890 if (search_pc) {
9891 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
9892 if (lj < j) {
9893 lj++;
9894 while (lj < j)
9895 tcg_ctx.gen_opc_instr_start[lj++] = 0;
9897 tcg_ctx.gen_opc_pc[lj] = dc->pc;
9898 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9899 tcg_ctx.gen_opc_instr_start[lj] = 1;
9900 tcg_ctx.gen_opc_icount[lj] = num_insns;
9903 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9904 gen_io_start();
9906 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
9907 tcg_gen_debug_insn_start(dc->pc);
9910 if (dc->thumb) {
9911 disas_thumb_insn(env, dc);
9912 if (dc->condexec_mask) {
9913 dc->condexec_cond = (dc->condexec_cond & 0xe)
9914 | ((dc->condexec_mask >> 4) & 1);
9915 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9916 if (dc->condexec_mask == 0) {
9917 dc->condexec_cond = 0;
9920 } else {
9921 disas_arm_insn(env, dc);
9924 if (dc->condjmp && !dc->is_jmp) {
9925 gen_set_label(dc->condlabel);
9926 dc->condjmp = 0;
9929 if (tcg_check_temp_count()) {
9930 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9933 /* Translation stops when a conditional branch is encountered.
9934 * Otherwise the subsequent code could get translated several times.
9935 * Also stop translation when a page boundary is reached. This
9936 * ensures prefetch aborts occur at the right place. */
9937 num_insns ++;
9938 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
9939 !env->singlestep_enabled &&
9940 !singlestep &&
9941 dc->pc < next_page_start &&
9942 num_insns < max_insns);
9944 if (tb->cflags & CF_LAST_IO) {
9945 if (dc->condjmp) {
9946 /* FIXME: This can theoretically happen with self-modifying
9947 code. */
9948 cpu_abort(env, "IO on conditional branch instruction");
9950 gen_io_end();
9953 /* At this stage dc->condjmp will only be set when the skipped
9954 instruction was a conditional branch or trap, and the PC has
9955 already been written. */
9956 if (unlikely(env->singlestep_enabled)) {
9957 /* Make sure the pc is updated, and raise a debug exception. */
9958 if (dc->condjmp) {
9959 gen_set_condexec(dc);
9960 if (dc->is_jmp == DISAS_SWI) {
9961 gen_exception(EXCP_SWI);
9962 } else {
9963 gen_exception(EXCP_DEBUG);
9965 gen_set_label(dc->condlabel);
9967 if (dc->condjmp || !dc->is_jmp) {
9968 gen_set_pc_im(dc->pc);
9969 dc->condjmp = 0;
9971 gen_set_condexec(dc);
9972 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9973 gen_exception(EXCP_SWI);
9974 } else {
9975 /* FIXME: Single stepping a WFI insn will not halt
9976 the CPU. */
9977 gen_exception(EXCP_DEBUG);
9979 } else {
9980 /* While branches must always occur at the end of an IT block,
9981 there are a few other things that can cause us to terminate
9982 the TB in the middle of an IT block:
9983 - Exception generating instructions (bkpt, swi, undefined).
9984 - Page boundaries.
9985 - Hardware watchpoints.
9986 Hardware breakpoints have already been handled and skip this code.
9988 gen_set_condexec(dc);
9989 switch(dc->is_jmp) {
9990 case DISAS_NEXT:
9991 gen_goto_tb(dc, 1, dc->pc);
9992 break;
9993 default:
9994 case DISAS_JUMP:
9995 case DISAS_UPDATE:
9996 /* indicate that the hash table must be used to find the next TB */
9997 tcg_gen_exit_tb(0);
9998 break;
9999 case DISAS_TB_JUMP:
10000 /* nothing more to generate */
10001 break;
10002 case DISAS_WFI:
10003 gen_helper_wfi(cpu_env);
10004 break;
10005 case DISAS_SWI:
10006 gen_exception(EXCP_SWI);
10007 break;
10009 if (dc->condjmp) {
10010 gen_set_label(dc->condlabel);
10011 gen_set_condexec(dc);
10012 gen_goto_tb(dc, 1, dc->pc);
10013 dc->condjmp = 0;
10017 done_generating:
10018 gen_tb_end(tb, num_insns);
10019 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
10021 #ifdef DEBUG_DISAS
10022 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10023 qemu_log("----------------\n");
10024 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10025 log_target_disas(env, pc_start, dc->pc - pc_start,
10026 dc->thumb | (dc->bswap_code << 1));
10027 qemu_log("\n");
10029 #endif
10030 if (search_pc) {
10031 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10032 lj++;
10033 while (lj <= j)
10034 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10035 } else {
10036 tb->size = dc->pc - pc_start;
10037 tb->icount = num_insns;
/* Generate TCG intermediate code for TB in normal-translation mode.
 * Thin wrapper around gen_intermediate_code_internal() with
 * search_pc = 0, i.e. without recording the per-instruction PC /
 * condexec mapping used for state restoration. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Re-generate intermediate code for TB with search_pc = 1, so that the
 * gen_opc_pc[] / gen_opc_condexec_bits[] arrays are filled in for use by
 * restore_state_to_opc() when recovering CPU state after a TB exit. */
void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
/* Printable names for the 16 possible encodings of the PSR mode field;
 * indexed by (psr & 0xf) in cpu_dump_state().  "???" marks encodings
 * that do not correspond to an architected mode. */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
10056 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
10057 int flags)
10059 int i;
10060 uint32_t psr;
10062 for(i=0;i<16;i++) {
10063 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10064 if ((i % 4) == 3)
10065 cpu_fprintf(f, "\n");
10066 else
10067 cpu_fprintf(f, " ");
10069 psr = cpsr_read(env);
10070 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10071 psr,
10072 psr & (1 << 31) ? 'N' : '-',
10073 psr & (1 << 30) ? 'Z' : '-',
10074 psr & (1 << 29) ? 'C' : '-',
10075 psr & (1 << 28) ? 'V' : '-',
10076 psr & CPSR_T ? 'T' : 'A',
10077 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10079 if (flags & CPU_DUMP_FPU) {
10080 int numvfpregs = 0;
10081 if (arm_feature(env, ARM_FEATURE_VFP)) {
10082 numvfpregs += 16;
10084 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10085 numvfpregs += 16;
10087 for (i = 0; i < numvfpregs; i++) {
10088 uint64_t v = float64_val(env->vfp.regs[i]);
10089 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10090 i * 2, (uint32_t)v,
10091 i * 2 + 1, (uint32_t)(v >> 32),
10092 i, v);
10094 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
/* Restore CPU state from the per-opc-index arrays recorded during a
 * search_pc=1 translation pass: set the PC (r15) and the Thumb IT
 * (condexec) bits to the values they had just before the instruction
 * at pc_pos.  Used when a TB is exited unexpectedly (e.g. a data abort
 * on a load) and CPUARMState must be resynchronized. */
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}