target-arm/translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "disas.h"
29 #include "tcg-op.h"
30 #include "qemu-log.h"
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
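/* Illustrative use: a decode path for a feature-gated instruction starts with
   e.g. ARCH(6T2); when the feature is absent this expands to a branch to the
   local "illegal_op" label, which the enclosing decoder must provide. */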
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 conditional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 int bswap_code;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
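/* IS_USER(s) is the MMU index passed to the qemu_ld/st ops below: user-only
   builds always use the user index, system builds take it from the context. */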
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
94 #endif
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
100 #include "gen-icount.h"
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
109 int i;
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUARMState, regs[i]),
116 regnames[i]);
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUARMState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUARMState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUARMState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUARMState, exclusive_info), "exclusive_info");
129 #endif
131 #define GEN_HELPER 2
132 #include "helper.h"
135 static inline TCGv load_cpu_offset(int offset)
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
144 static inline void store_cpu_offset(TCGv var, int offset)
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUARMState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
156 if (reg == 15) {
157 uint32_t addr;
158                /* normally, since we have already updated PC, we need only add one insn length */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv var)
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
208 static void gen_exception(int excp)
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(cpu_env, tmp);
213 tcg_temp_free_i32(tmp);
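/* Dual signed 16x16->32 multiply: on return, a holds the product of the two
   low halfwords and b the product of the two high halfwords, both treated
   as signed. */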
216 static void gen_smul_dual(TCGv a, TCGv b)
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
262 uint32_t signbit;
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
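/* For example, inserting a 4-bit field at bit 8 uses shift = 8, mask = 0xf,
   giving dest = (base & ~0xf00) | ((val & 0xf) << 8). */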
283 /* Return (b << 32) + a. Mark inputs as dead. */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
293 tcg_temp_free_i64(tmp64);
294 return a;
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
307 tcg_temp_free_i64(tmp64);
308 return a;
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
325 return tmp1;
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
339 return tmp1;
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
352 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
359 static void gen_add16(TCGv t0, TCGv t1)
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
379 gen_set_CF(tmp);
380 tcg_temp_free_i32(tmp);
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
390 /* T0 += T1 + CF. */
391 static void gen_adc(TCGv t0, TCGv t1)
393 TCGv tmp;
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
413 TCGv tmp;
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
424 static void shifter_out_im(TCGv var, int shift)
426 TCGv tmp = tcg_temp_new_i32();
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
429 } else {
430 tcg_gen_shri_i32(tmp, var, shift);
431 if (shift != 31)
432 tcg_gen_andi_i32(tmp, tmp, 1);
434 gen_set_CF(tmp);
435 tcg_temp_free_i32(tmp);
438 /* Shift by immediate. Includes special handling for shift == 0. */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
476 } else {
477 TCGv tmp = load_cpu_field(CF);
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
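/* Shift by a register amount. shiftop selects LSL/LSR/ASR/ROR (0-3); the
   *_cc helper variants also update the carry flag when 'flags' is set. The
   shift temporary is consumed and freed. */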
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
494 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
495 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
496 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, cpu_env, var, shift); break;
501 case 1: gen_helper_shr(var, cpu_env, var, shift); break;
502 case 2: gen_helper_sar(var, cpu_env, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
507 tcg_temp_free_i32(shift);
510 #define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
521 TCGv_ptr tmp;
523 switch (op1) {
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
528 PAS_OP(s)
529 tcg_temp_free_ptr(tmp);
530 break;
531 case 5:
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
534 PAS_OP(u)
535 tcg_temp_free_ptr(tmp);
536 break;
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551 #undef gen_pas_helper
554 #undef PAS_OP
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
558 switch (op1) { \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
568 TCGv_ptr tmp;
570 switch (op2) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 4:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
601 #undef PAS_OP
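/* Emit a branch to 'label' that is taken when ARM condition code 'cc'
   (0..13, EQ..LE) holds; values 14/15 fall through to the default case
   and abort. */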
603 static void gen_test_cc(int cc, int label)
605 TCGv tmp;
606 TCGv tmp2;
607 int inv;
609 switch (cc) {
610 case 0: /* eq: Z */
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
613 break;
614 case 1: /* ne: !Z */
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 4: /* mi: N */
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
629 break;
630 case 5: /* pl: !N */
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
698 tcg_temp_free_i32(tmp);
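/* Nonzero for the data-processing opcodes whose flag-setting forms are
   purely logical, i.e. only N and Z are updated from the result; the
   arithmetic opcodes (zero entries) also affect carry and overflow. */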
701 static const uint8_t table_logic_cc[16] = {
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
723 TCGv tmp;
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
730 tcg_temp_free_i32(tmp);
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
748 int reg, TCGv var)
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
762 int reg, TCGv var)
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
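/* Memory access helpers.  Each gen_ldXX returns a newly allocated temporary
   holding the (zero- or sign-extended) loaded value; each gen_stXX stores
   and then frees 'val'.  'index' is the MMU index, normally IS_USER(s). */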
771 static inline TCGv gen_ld8s(TCGv addr, int index)
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
777 static inline TCGv gen_ld8u(TCGv addr, int index)
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
783 static inline TCGv gen_ld16s(TCGv addr, int index)
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
789 static inline TCGv gen_ld16u(TCGv addr, int index)
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
795 static inline TCGv gen_ld32(TCGv addr, int index)
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
828 static inline void gen_set_pc_im(uint32_t val)
830 tcg_gen_movi_i32(cpu_R[15], val);
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
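/* Apply the addressing-mode offset of an ARM single data transfer to 'var':
   bit 25 selects immediate versus shifted-register offset and bit 23 selects
   whether the offset is added or subtracted. */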
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
843 int val, rm, shift, shiftop;
844 TCGv offset;
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
851 if (val != 0)
852 tcg_gen_addi_i32(var, var, val);
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
862 else
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
869 int extra, TCGv var)
871 int val, rm;
872 TCGv offset;
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
879 val += extra;
880 if (val != 0)
881 tcg_gen_addi_i32(var, var, val);
882 } else {
883 /* register */
884 if (extra)
885 tcg_gen_addi_i32(var, var, extra);
886 rm = (insn) & 0xf;
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
890 else
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
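/* Return a pointer temporary addressing the float_status to use: the NEON
   "standard FP" status when 'neon' is set, otherwise the normal VFP
   fp_status.  The caller must free the returned temporary. */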
896 static TCGv_ptr get_fpstatus_ptr(int neon)
898 TCGv_ptr statusptr = tcg_temp_new_ptr();
899 int offset;
900 if (neon) {
901 offset = offsetof(CPUARMState, vfp.standard_fp_status);
902 } else {
903 offset = offsetof(CPUARMState, vfp.fp_status);
905 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
906 return statusptr;
909 #define VFP_OP2(name) \
910 static inline void gen_vfp_##name(int dp) \
912 TCGv_ptr fpst = get_fpstatus_ptr(0); \
913 if (dp) { \
914 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
915 } else { \
916 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
918 tcg_temp_free_ptr(fpst); \
921 VFP_OP2(add)
922 VFP_OP2(sub)
923 VFP_OP2(mul)
924 VFP_OP2(div)
926 #undef VFP_OP2
928 static inline void gen_vfp_F1_mul(int dp)
930 /* Like gen_vfp_mul() but put result in F1 */
931 TCGv_ptr fpst = get_fpstatus_ptr(0);
932 if (dp) {
933 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
934 } else {
935 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
937 tcg_temp_free_ptr(fpst);
940 static inline void gen_vfp_F1_neg(int dp)
942 /* Like gen_vfp_neg() but put result in F1 */
943 if (dp) {
944 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
945 } else {
946 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
950 static inline void gen_vfp_abs(int dp)
952 if (dp)
953 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
954 else
955 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
958 static inline void gen_vfp_neg(int dp)
960 if (dp)
961 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
962 else
963 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
966 static inline void gen_vfp_sqrt(int dp)
968 if (dp)
969 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
970 else
971 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
974 static inline void gen_vfp_cmp(int dp)
976 if (dp)
977 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
978 else
979 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
982 static inline void gen_vfp_cmpe(int dp)
984 if (dp)
985 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
986 else
987 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
990 static inline void gen_vfp_F1_ld0(int dp)
992 if (dp)
993 tcg_gen_movi_i64(cpu_F1d, 0);
994 else
995 tcg_gen_movi_i32(cpu_F1s, 0);
998 #define VFP_GEN_ITOF(name) \
999 static inline void gen_vfp_##name(int dp, int neon) \
1001 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1002 if (dp) { \
1003 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1004 } else { \
1005 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1007 tcg_temp_free_ptr(statusptr); \
1010 VFP_GEN_ITOF(uito)
1011 VFP_GEN_ITOF(sito)
1012 #undef VFP_GEN_ITOF
1014 #define VFP_GEN_FTOI(name) \
1015 static inline void gen_vfp_##name(int dp, int neon) \
1017 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1018 if (dp) { \
1019 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1020 } else { \
1021 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1023 tcg_temp_free_ptr(statusptr); \
1026 VFP_GEN_FTOI(toui)
1027 VFP_GEN_FTOI(touiz)
1028 VFP_GEN_FTOI(tosi)
1029 VFP_GEN_FTOI(tosiz)
1030 #undef VFP_GEN_FTOI
1032 #define VFP_GEN_FIX(name) \
1033 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1035 TCGv tmp_shift = tcg_const_i32(shift); \
1036 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1037 if (dp) { \
1038 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1039 } else { \
1040 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1042 tcg_temp_free_i32(tmp_shift); \
1043 tcg_temp_free_ptr(statusptr); \
1045 VFP_GEN_FIX(tosh)
1046 VFP_GEN_FIX(tosl)
1047 VFP_GEN_FIX(touh)
1048 VFP_GEN_FIX(toul)
1049 VFP_GEN_FIX(shto)
1050 VFP_GEN_FIX(slto)
1051 VFP_GEN_FIX(uhto)
1052 VFP_GEN_FIX(ulto)
1053 #undef VFP_GEN_FIX
1055 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1057 if (dp)
1058 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1059 else
1060 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1063 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1065 if (dp)
1066 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1067 else
1068 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1071 static inline long
1072 vfp_reg_offset (int dp, int reg)
1074 if (dp)
1075 return offsetof(CPUARMState, vfp.regs[reg]);
1076 else if (reg & 1) {
1077 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1078 + offsetof(CPU_DoubleU, l.upper);
1079 } else {
1080 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1081 + offsetof(CPU_DoubleU, l.lower);
1085 /* Return the offset of a 32-bit piece of a NEON register.
1086    Piece zero is the least significant end of the register. */
1087 static inline long
1088 neon_reg_offset (int reg, int n)
1090 int sreg;
1091 sreg = reg * 2 + n;
1092 return vfp_reg_offset(0, sreg);
1095 static TCGv neon_load_reg(int reg, int pass)
1097 TCGv tmp = tcg_temp_new_i32();
1098 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1099 return tmp;
1102 static void neon_store_reg(int reg, int pass, TCGv var)
1104 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1105 tcg_temp_free_i32(var);
1108 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1110 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1113 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1115 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1118 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1119 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1120 #define tcg_gen_st_f32 tcg_gen_st_i32
1121 #define tcg_gen_st_f64 tcg_gen_st_i64
1123 static inline void gen_mov_F0_vreg(int dp, int reg)
1125 if (dp)
1126 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1127 else
1128 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1131 static inline void gen_mov_F1_vreg(int dp, int reg)
1133 if (dp)
1134 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1135 else
1136 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1139 static inline void gen_mov_vreg_F0(int dp, int reg)
1141 if (dp)
1142 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1143 else
1144 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1147 #define ARM_CP_RW_BIT (1 << 20)
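/* Bit 20 ("L") of a coprocessor instruction: set for TMRRC and the WLDR
   loads, clear for TMCRR and the WSTR stores. */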
1149 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1151 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1154 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1156 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1159 static inline TCGv iwmmxt_load_creg(int reg)
1161 TCGv var = tcg_temp_new_i32();
1162 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1163 return var;
1166 static inline void iwmmxt_store_creg(int reg, TCGv var)
1168 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1169 tcg_temp_free_i32(var);
1172 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1174 iwmmxt_store_reg(cpu_M0, rn);
1177 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1179 iwmmxt_load_reg(cpu_M0, rn);
1182 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1184 iwmmxt_load_reg(cpu_V1, rn);
1185 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1188 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1190 iwmmxt_load_reg(cpu_V1, rn);
1191 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1194 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1196 iwmmxt_load_reg(cpu_V1, rn);
1197 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1200 #define IWMMXT_OP(name) \
1201 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1203 iwmmxt_load_reg(cpu_V1, rn); \
1204 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1207 #define IWMMXT_OP_ENV(name) \
1208 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1210 iwmmxt_load_reg(cpu_V1, rn); \
1211 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1214 #define IWMMXT_OP_ENV_SIZE(name) \
1215 IWMMXT_OP_ENV(name##b) \
1216 IWMMXT_OP_ENV(name##w) \
1217 IWMMXT_OP_ENV(name##l)
1219 #define IWMMXT_OP_ENV1(name) \
1220 static inline void gen_op_iwmmxt_##name##_M0(void) \
1222 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1225 IWMMXT_OP(maddsq)
1226 IWMMXT_OP(madduq)
1227 IWMMXT_OP(sadb)
1228 IWMMXT_OP(sadw)
1229 IWMMXT_OP(mulslw)
1230 IWMMXT_OP(mulshw)
1231 IWMMXT_OP(mululw)
1232 IWMMXT_OP(muluhw)
1233 IWMMXT_OP(macsw)
1234 IWMMXT_OP(macuw)
1236 IWMMXT_OP_ENV_SIZE(unpackl)
1237 IWMMXT_OP_ENV_SIZE(unpackh)
1239 IWMMXT_OP_ENV1(unpacklub)
1240 IWMMXT_OP_ENV1(unpackluw)
1241 IWMMXT_OP_ENV1(unpacklul)
1242 IWMMXT_OP_ENV1(unpackhub)
1243 IWMMXT_OP_ENV1(unpackhuw)
1244 IWMMXT_OP_ENV1(unpackhul)
1245 IWMMXT_OP_ENV1(unpacklsb)
1246 IWMMXT_OP_ENV1(unpacklsw)
1247 IWMMXT_OP_ENV1(unpacklsl)
1248 IWMMXT_OP_ENV1(unpackhsb)
1249 IWMMXT_OP_ENV1(unpackhsw)
1250 IWMMXT_OP_ENV1(unpackhsl)
1252 IWMMXT_OP_ENV_SIZE(cmpeq)
1253 IWMMXT_OP_ENV_SIZE(cmpgtu)
1254 IWMMXT_OP_ENV_SIZE(cmpgts)
1256 IWMMXT_OP_ENV_SIZE(mins)
1257 IWMMXT_OP_ENV_SIZE(minu)
1258 IWMMXT_OP_ENV_SIZE(maxs)
1259 IWMMXT_OP_ENV_SIZE(maxu)
1261 IWMMXT_OP_ENV_SIZE(subn)
1262 IWMMXT_OP_ENV_SIZE(addn)
1263 IWMMXT_OP_ENV_SIZE(subu)
1264 IWMMXT_OP_ENV_SIZE(addu)
1265 IWMMXT_OP_ENV_SIZE(subs)
1266 IWMMXT_OP_ENV_SIZE(adds)
1268 IWMMXT_OP_ENV(avgb0)
1269 IWMMXT_OP_ENV(avgb1)
1270 IWMMXT_OP_ENV(avgw0)
1271 IWMMXT_OP_ENV(avgw1)
1273 IWMMXT_OP(msadb)
1275 IWMMXT_OP_ENV(packuw)
1276 IWMMXT_OP_ENV(packul)
1277 IWMMXT_OP_ENV(packuq)
1278 IWMMXT_OP_ENV(packsw)
1279 IWMMXT_OP_ENV(packsl)
1280 IWMMXT_OP_ENV(packsq)
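/* All of the IWMMXT_OP* expansions above operate on the implicit 64-bit
   accumulator cpu_M0, with the second operand (when present) loaded from
   wRn into cpu_V1. */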
1282 static void gen_op_iwmmxt_set_mup(void)
1284 TCGv tmp;
1285 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1286 tcg_gen_ori_i32(tmp, tmp, 2);
1287 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1290 static void gen_op_iwmmxt_set_cup(void)
1292 TCGv tmp;
1293 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1294 tcg_gen_ori_i32(tmp, tmp, 1);
1295 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1298 static void gen_op_iwmmxt_setpsr_nz(void)
1300 TCGv tmp = tcg_temp_new_i32();
1301 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1302 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1305 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1307 iwmmxt_load_reg(cpu_V1, rn);
1308 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1309 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
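/* Compute the effective address of an iwMMXt load/store into 'dest',
   handling the pre- and post-indexed forms with optional base writeback
   (bit 21).  Returns nonzero for an invalid addressing mode. */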
1312 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1314 int rd;
1315 uint32_t offset;
1316 TCGv tmp;
1318 rd = (insn >> 16) & 0xf;
1319 tmp = load_reg(s, rd);
1321 offset = (insn & 0xff) << ((insn >> 7) & 2);
1322 if (insn & (1 << 24)) {
1323 /* Pre indexed */
1324 if (insn & (1 << 23))
1325 tcg_gen_addi_i32(tmp, tmp, offset);
1326 else
1327 tcg_gen_addi_i32(tmp, tmp, -offset);
1328 tcg_gen_mov_i32(dest, tmp);
1329 if (insn & (1 << 21))
1330 store_reg(s, rd, tmp);
1331 else
1332 tcg_temp_free_i32(tmp);
1333 } else if (insn & (1 << 21)) {
1334 /* Post indexed */
1335 tcg_gen_mov_i32(dest, tmp);
1336 if (insn & (1 << 23))
1337 tcg_gen_addi_i32(tmp, tmp, offset);
1338 else
1339 tcg_gen_addi_i32(tmp, tmp, -offset);
1340 store_reg(s, rd, tmp);
1341 } else if (!(insn & (1 << 23)))
1342 return 1;
1343 return 0;
1346 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1348 int rd = (insn >> 0) & 0xf;
1349 TCGv tmp;
1351 if (insn & (1 << 8)) {
1352 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1353 return 1;
1354 } else {
1355 tmp = iwmmxt_load_creg(rd);
1357 } else {
1358 tmp = tcg_temp_new_i32();
1359 iwmmxt_load_reg(cpu_V0, rd);
1360 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1362 tcg_gen_andi_i32(tmp, tmp, mask);
1363 tcg_gen_mov_i32(dest, tmp);
1364 tcg_temp_free_i32(tmp);
1365 return 0;
1368 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1369    (i.e. an undefined instruction). */
1370 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1372 int rd, wrd;
1373 int rdhi, rdlo, rd0, rd1, i;
1374 TCGv addr;
1375 TCGv tmp, tmp2, tmp3;
1377 if ((insn & 0x0e000e00) == 0x0c000000) {
1378 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1379 wrd = insn & 0xf;
1380 rdlo = (insn >> 12) & 0xf;
1381 rdhi = (insn >> 16) & 0xf;
1382 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1383 iwmmxt_load_reg(cpu_V0, wrd);
1384 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1385 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1386 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1387 } else { /* TMCRR */
1388 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1389 iwmmxt_store_reg(cpu_V0, wrd);
1390 gen_op_iwmmxt_set_mup();
1392 return 0;
1395 wrd = (insn >> 12) & 0xf;
1396 addr = tcg_temp_new_i32();
1397 if (gen_iwmmxt_address(s, insn, addr)) {
1398 tcg_temp_free_i32(addr);
1399 return 1;
1401 if (insn & ARM_CP_RW_BIT) {
1402 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1403 tmp = tcg_temp_new_i32();
1404 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1405 iwmmxt_store_creg(wrd, tmp);
1406 } else {
1407 i = 1;
1408 if (insn & (1 << 8)) {
1409 if (insn & (1 << 22)) { /* WLDRD */
1410 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1411 i = 0;
1412 } else { /* WLDRW wRd */
1413 tmp = gen_ld32(addr, IS_USER(s));
1415 } else {
1416 if (insn & (1 << 22)) { /* WLDRH */
1417 tmp = gen_ld16u(addr, IS_USER(s));
1418 } else { /* WLDRB */
1419 tmp = gen_ld8u(addr, IS_USER(s));
1422 if (i) {
1423 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1424 tcg_temp_free_i32(tmp);
1426 gen_op_iwmmxt_movq_wRn_M0(wrd);
1428 } else {
1429 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1430 tmp = iwmmxt_load_creg(wrd);
1431 gen_st32(tmp, addr, IS_USER(s));
1432 } else {
1433 gen_op_iwmmxt_movq_M0_wRn(wrd);
1434 tmp = tcg_temp_new_i32();
1435 if (insn & (1 << 8)) {
1436 if (insn & (1 << 22)) { /* WSTRD */
1437 tcg_temp_free_i32(tmp);
1438 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1439 } else { /* WSTRW wRd */
1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1441 gen_st32(tmp, addr, IS_USER(s));
1443 } else {
1444 if (insn & (1 << 22)) { /* WSTRH */
1445 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1446 gen_st16(tmp, addr, IS_USER(s));
1447 } else { /* WSTRB */
1448 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1449 gen_st8(tmp, addr, IS_USER(s));
1454 tcg_temp_free_i32(addr);
1455 return 0;
1458 if ((insn & 0x0f000000) != 0x0e000000)
1459 return 1;
1461 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1462 case 0x000: /* WOR */
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 0) & 0xf;
1465 rd1 = (insn >> 16) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 gen_op_iwmmxt_orq_M0_wRn(rd1);
1468 gen_op_iwmmxt_setpsr_nz();
1469 gen_op_iwmmxt_movq_wRn_M0(wrd);
1470 gen_op_iwmmxt_set_mup();
1471 gen_op_iwmmxt_set_cup();
1472 break;
1473 case 0x011: /* TMCR */
1474 if (insn & 0xf)
1475 return 1;
1476 rd = (insn >> 12) & 0xf;
1477 wrd = (insn >> 16) & 0xf;
1478 switch (wrd) {
1479 case ARM_IWMMXT_wCID:
1480 case ARM_IWMMXT_wCASF:
1481 break;
1482 case ARM_IWMMXT_wCon:
1483 gen_op_iwmmxt_set_cup();
1484 /* Fall through. */
1485 case ARM_IWMMXT_wCSSF:
1486 tmp = iwmmxt_load_creg(wrd);
1487 tmp2 = load_reg(s, rd);
1488 tcg_gen_andc_i32(tmp, tmp, tmp2);
1489 tcg_temp_free_i32(tmp2);
1490 iwmmxt_store_creg(wrd, tmp);
1491 break;
1492 case ARM_IWMMXT_wCGR0:
1493 case ARM_IWMMXT_wCGR1:
1494 case ARM_IWMMXT_wCGR2:
1495 case ARM_IWMMXT_wCGR3:
1496 gen_op_iwmmxt_set_cup();
1497 tmp = load_reg(s, rd);
1498 iwmmxt_store_creg(wrd, tmp);
1499 break;
1500 default:
1501 return 1;
1503 break;
1504 case 0x100: /* WXOR */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 0) & 0xf;
1507 rd1 = (insn >> 16) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1510 gen_op_iwmmxt_setpsr_nz();
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 gen_op_iwmmxt_set_mup();
1513 gen_op_iwmmxt_set_cup();
1514 break;
1515 case 0x111: /* TMRC */
1516 if (insn & 0xf)
1517 return 1;
1518 rd = (insn >> 12) & 0xf;
1519 wrd = (insn >> 16) & 0xf;
1520 tmp = iwmmxt_load_creg(wrd);
1521 store_reg(s, rd, tmp);
1522 break;
1523 case 0x300: /* WANDN */
1524 wrd = (insn >> 12) & 0xf;
1525 rd0 = (insn >> 0) & 0xf;
1526 rd1 = (insn >> 16) & 0xf;
1527 gen_op_iwmmxt_movq_M0_wRn(rd0);
1528 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1529 gen_op_iwmmxt_andq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x200: /* WAND */
1536 wrd = (insn >> 12) & 0xf;
1537 rd0 = (insn >> 0) & 0xf;
1538 rd1 = (insn >> 16) & 0xf;
1539 gen_op_iwmmxt_movq_M0_wRn(rd0);
1540 gen_op_iwmmxt_andq_M0_wRn(rd1);
1541 gen_op_iwmmxt_setpsr_nz();
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x810: case 0xa10: /* WMADD */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 0) & 0xf;
1549 rd1 = (insn >> 16) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 if (insn & (1 << 21))
1552 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1553 else
1554 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1555 gen_op_iwmmxt_movq_wRn_M0(wrd);
1556 gen_op_iwmmxt_set_mup();
1557 break;
1558 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1559 wrd = (insn >> 12) & 0xf;
1560 rd0 = (insn >> 16) & 0xf;
1561 rd1 = (insn >> 0) & 0xf;
1562 gen_op_iwmmxt_movq_M0_wRn(rd0);
1563 switch ((insn >> 22) & 3) {
1564 case 0:
1565 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1566 break;
1567 case 1:
1568 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1569 break;
1570 case 2:
1571 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1572 break;
1573 case 3:
1574 return 1;
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1579 break;
1580 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1581 wrd = (insn >> 12) & 0xf;
1582 rd0 = (insn >> 16) & 0xf;
1583 rd1 = (insn >> 0) & 0xf;
1584 gen_op_iwmmxt_movq_M0_wRn(rd0);
1585 switch ((insn >> 22) & 3) {
1586 case 0:
1587 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1588 break;
1589 case 1:
1590 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1591 break;
1592 case 2:
1593 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1594 break;
1595 case 3:
1596 return 1;
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 gen_op_iwmmxt_set_cup();
1601 break;
1602 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1603 wrd = (insn >> 12) & 0xf;
1604 rd0 = (insn >> 16) & 0xf;
1605 rd1 = (insn >> 0) & 0xf;
1606 gen_op_iwmmxt_movq_M0_wRn(rd0);
1607 if (insn & (1 << 22))
1608 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1609 else
1610 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1611 if (!(insn & (1 << 20)))
1612 gen_op_iwmmxt_addl_M0_wRn(wrd);
1613 gen_op_iwmmxt_movq_wRn_M0(wrd);
1614 gen_op_iwmmxt_set_mup();
1615 break;
1616 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 16) & 0xf;
1619 rd1 = (insn >> 0) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 if (insn & (1 << 21)) {
1622 if (insn & (1 << 20))
1623 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1624 else
1625 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1626 } else {
1627 if (insn & (1 << 20))
1628 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1634 break;
1635 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 if (insn & (1 << 21))
1641 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1642 else
1643 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1644 if (!(insn & (1 << 20))) {
1645 iwmmxt_load_reg(cpu_V1, wrd);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 break;
1651 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 rd1 = (insn >> 0) & 0xf;
1655 gen_op_iwmmxt_movq_M0_wRn(rd0);
1656 switch ((insn >> 22) & 3) {
1657 case 0:
1658 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1659 break;
1660 case 1:
1661 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1662 break;
1663 case 2:
1664 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1665 break;
1666 case 3:
1667 return 1;
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 gen_op_iwmmxt_set_cup();
1672 break;
1673 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1674 wrd = (insn >> 12) & 0xf;
1675 rd0 = (insn >> 16) & 0xf;
1676 rd1 = (insn >> 0) & 0xf;
1677 gen_op_iwmmxt_movq_M0_wRn(rd0);
1678 if (insn & (1 << 22)) {
1679 if (insn & (1 << 20))
1680 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1683 } else {
1684 if (insn & (1 << 20))
1685 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1686 else
1687 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1699 tcg_gen_andi_i32(tmp, tmp, 7);
1700 iwmmxt_load_reg(cpu_V1, rd1);
1701 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1702 tcg_temp_free_i32(tmp);
1703 gen_op_iwmmxt_movq_wRn_M0(wrd);
1704 gen_op_iwmmxt_set_mup();
1705 break;
1706 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1707 if (((insn >> 6) & 3) == 3)
1708 return 1;
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 tmp = load_reg(s, rd);
1712 gen_op_iwmmxt_movq_M0_wRn(wrd);
1713 switch ((insn >> 6) & 3) {
1714 case 0:
1715 tmp2 = tcg_const_i32(0xff);
1716 tmp3 = tcg_const_i32((insn & 7) << 3);
1717 break;
1718 case 1:
1719 tmp2 = tcg_const_i32(0xffff);
1720 tmp3 = tcg_const_i32((insn & 3) << 4);
1721 break;
1722 case 2:
1723 tmp2 = tcg_const_i32(0xffffffff);
1724 tmp3 = tcg_const_i32((insn & 1) << 5);
1725 break;
1726 default:
1727 TCGV_UNUSED(tmp2);
1728 TCGV_UNUSED(tmp3);
1730 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1731 tcg_temp_free(tmp3);
1732 tcg_temp_free(tmp2);
1733 tcg_temp_free_i32(tmp);
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1736 break;
1737 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1738 rd = (insn >> 12) & 0xf;
1739 wrd = (insn >> 16) & 0xf;
1740 if (rd == 15 || ((insn >> 22) & 3) == 3)
1741 return 1;
1742 gen_op_iwmmxt_movq_M0_wRn(wrd);
1743 tmp = tcg_temp_new_i32();
1744 switch ((insn >> 22) & 3) {
1745 case 0:
1746 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1747 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1748 if (insn & 8) {
1749 tcg_gen_ext8s_i32(tmp, tmp);
1750 } else {
1751 tcg_gen_andi_i32(tmp, tmp, 0xff);
1753 break;
1754 case 1:
1755 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1756 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1757 if (insn & 8) {
1758 tcg_gen_ext16s_i32(tmp, tmp);
1759 } else {
1760 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1762 break;
1763 case 2:
1764 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1765 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1766 break;
1768 store_reg(s, rd, tmp);
1769 break;
1770 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1771 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1772 return 1;
1773 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1774 switch ((insn >> 22) & 3) {
1775 case 0:
1776 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1777 break;
1778 case 1:
1779 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1780 break;
1781 case 2:
1782 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1783 break;
1785 tcg_gen_shli_i32(tmp, tmp, 28);
1786 gen_set_nzcv(tmp);
1787 tcg_temp_free_i32(tmp);
1788 break;
1789 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1790 if (((insn >> 6) & 3) == 3)
1791 return 1;
1792 rd = (insn >> 12) & 0xf;
1793 wrd = (insn >> 16) & 0xf;
1794 tmp = load_reg(s, rd);
1795 switch ((insn >> 6) & 3) {
1796 case 0:
1797 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1798 break;
1799 case 1:
1800 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1801 break;
1802 case 2:
1803 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1804 break;
1806 tcg_temp_free_i32(tmp);
1807 gen_op_iwmmxt_movq_wRn_M0(wrd);
1808 gen_op_iwmmxt_set_mup();
1809 break;
1810 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1811 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1812 return 1;
1813 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1814 tmp2 = tcg_temp_new_i32();
1815 tcg_gen_mov_i32(tmp2, tmp);
1816 switch ((insn >> 22) & 3) {
1817 case 0:
1818 for (i = 0; i < 7; i ++) {
1819 tcg_gen_shli_i32(tmp2, tmp2, 4);
1820 tcg_gen_and_i32(tmp, tmp, tmp2);
1822 break;
1823 case 1:
1824 for (i = 0; i < 3; i ++) {
1825 tcg_gen_shli_i32(tmp2, tmp2, 8);
1826 tcg_gen_and_i32(tmp, tmp, tmp2);
1828 break;
1829 case 2:
1830 tcg_gen_shli_i32(tmp2, tmp2, 16);
1831 tcg_gen_and_i32(tmp, tmp, tmp2);
1832 break;
1834 gen_set_nzcv(tmp);
1835 tcg_temp_free_i32(tmp2);
1836 tcg_temp_free_i32(tmp);
1837 break;
1838 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1839 wrd = (insn >> 12) & 0xf;
1840 rd0 = (insn >> 16) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 switch ((insn >> 22) & 3) {
1843 case 0:
1844 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1845 break;
1846 case 1:
1847 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1848 break;
1849 case 2:
1850 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1851 break;
1852 case 3:
1853 return 1;
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1857 break;
1858 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1859 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1860 return 1;
1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1862 tmp2 = tcg_temp_new_i32();
1863 tcg_gen_mov_i32(tmp2, tmp);
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 for (i = 0; i < 7; i ++) {
1867 tcg_gen_shli_i32(tmp2, tmp2, 4);
1868 tcg_gen_or_i32(tmp, tmp, tmp2);
1870 break;
1871 case 1:
1872 for (i = 0; i < 3; i ++) {
1873 tcg_gen_shli_i32(tmp2, tmp2, 8);
1874 tcg_gen_or_i32(tmp, tmp, tmp2);
1876 break;
1877 case 2:
1878 tcg_gen_shli_i32(tmp2, tmp2, 16);
1879 tcg_gen_or_i32(tmp, tmp, tmp2);
1880 break;
1882 gen_set_nzcv(tmp);
1883 tcg_temp_free_i32(tmp2);
1884 tcg_temp_free_i32(tmp);
1885 break;
1886 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1887 rd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 16) & 0xf;
1889 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1890 return 1;
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
1892 tmp = tcg_temp_new_i32();
1893 switch ((insn >> 22) & 3) {
1894 case 0:
1895 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1896 break;
1897 case 1:
1898 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1899 break;
1900 case 2:
1901 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1902 break;
1904 store_reg(s, rd, tmp);
1905 break;
1906 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1907 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 if (insn & (1 << 21))
1915 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1916 else
1917 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1918 break;
1919 case 1:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1922 else
1923 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1924 break;
1925 case 2:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1928 else
1929 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1930 break;
1931 case 3:
1932 return 1;
1934 gen_op_iwmmxt_movq_wRn_M0(wrd);
1935 gen_op_iwmmxt_set_mup();
1936 gen_op_iwmmxt_set_cup();
1937 break;
1938 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1939 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 16) & 0xf;
1942 gen_op_iwmmxt_movq_M0_wRn(rd0);
1943 switch ((insn >> 22) & 3) {
1944 case 0:
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_unpacklsb_M0();
1947 else
1948 gen_op_iwmmxt_unpacklub_M0();
1949 break;
1950 case 1:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_unpacklsw_M0();
1953 else
1954 gen_op_iwmmxt_unpackluw_M0();
1955 break;
1956 case 2:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_unpacklsl_M0();
1959 else
1960 gen_op_iwmmxt_unpacklul_M0();
1961 break;
1962 case 3:
1963 return 1;
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 gen_op_iwmmxt_set_cup();
1968 break;
1969 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1970 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1971 wrd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 gen_op_iwmmxt_movq_M0_wRn(rd0);
1974 switch ((insn >> 22) & 3) {
1975 case 0:
1976 if (insn & (1 << 21))
1977 gen_op_iwmmxt_unpackhsb_M0();
1978 else
1979 gen_op_iwmmxt_unpackhub_M0();
1980 break;
1981 case 1:
1982 if (insn & (1 << 21))
1983 gen_op_iwmmxt_unpackhsw_M0();
1984 else
1985 gen_op_iwmmxt_unpackhuw_M0();
1986 break;
1987 case 2:
1988 if (insn & (1 << 21))
1989 gen_op_iwmmxt_unpackhsl_M0();
1990 else
1991 gen_op_iwmmxt_unpackhul_M0();
1992 break;
1993 case 3:
1994 return 1;
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2001 case 0x214: case 0x614: case 0xa14: case 0xe14:
2002 if (((insn >> 22) & 3) == 0)
2003 return 1;
2004 wrd = (insn >> 12) & 0xf;
2005 rd0 = (insn >> 16) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 tmp = tcg_temp_new_i32();
2008 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2009 tcg_temp_free_i32(tmp);
2010 return 1;
2012 switch ((insn >> 22) & 3) {
2013 case 1:
2014 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2015 break;
2016 case 2:
2017 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2018 break;
2019 case 3:
2020 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2021 break;
2023 tcg_temp_free_i32(tmp);
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2029 case 0x014: case 0x414: case 0x814: case 0xc14:
2030 if (((insn >> 22) & 3) == 0)
2031 return 1;
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 tmp = tcg_temp_new_i32();
2036 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2037 tcg_temp_free_i32(tmp);
2038 return 1;
2040 switch ((insn >> 22) & 3) {
2041 case 1:
2042 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2043 break;
2044 case 2:
2045 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2046 break;
2047 case 3:
2048 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2049 break;
2051 tcg_temp_free_i32(tmp);
2052 gen_op_iwmmxt_movq_wRn_M0(wrd);
2053 gen_op_iwmmxt_set_mup();
2054 gen_op_iwmmxt_set_cup();
2055 break;
2056 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2057 case 0x114: case 0x514: case 0x914: case 0xd14:
2058 if (((insn >> 22) & 3) == 0)
2059 return 1;
2060 wrd = (insn >> 12) & 0xf;
2061 rd0 = (insn >> 16) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2065 tcg_temp_free_i32(tmp);
2066 return 1;
2068 switch ((insn >> 22) & 3) {
2069 case 1:
2070 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2071 break;
2072 case 2:
2073 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2074 break;
2075 case 3:
2076 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2079 tcg_temp_free_i32(tmp);
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2085 case 0x314: case 0x714: case 0xb14: case 0xf14:
2086 if (((insn >> 22) & 3) == 0)
2087 return 1;
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 tmp = tcg_temp_new_i32();
2092 switch ((insn >> 22) & 3) {
2093 case 1:
2094 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2095 tcg_temp_free_i32(tmp);
2096 return 1;
2098 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2099 break;
2100 case 2:
2101 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2102 tcg_temp_free_i32(tmp);
2103 return 1;
2105 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2106 break;
2107 case 3:
2108 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2109 tcg_temp_free_i32(tmp);
2110 return 1;
2112 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2113 break;
2115 tcg_temp_free_i32(tmp);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2121 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2122 wrd = (insn >> 12) & 0xf;
2123 rd0 = (insn >> 16) & 0xf;
2124 rd1 = (insn >> 0) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_minub_M0_wRn(rd1);
2132 break;
2133 case 1:
2134 if (insn & (1 << 21))
2135 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2136 else
2137 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2138 break;
2139 case 2:
2140 if (insn & (1 << 21))
2141 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2142 else
2143 gen_op_iwmmxt_minul_M0_wRn(rd1);
2144 break;
2145 case 3:
2146 return 1;
2148 gen_op_iwmmxt_movq_wRn_M0(wrd);
2149 gen_op_iwmmxt_set_mup();
2150 break;
2151 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2152 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 rd1 = (insn >> 0) & 0xf;
2156 gen_op_iwmmxt_movq_M0_wRn(rd0);
2157 switch ((insn >> 22) & 3) {
2158 case 0:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2163 break;
2164 case 1:
2165 if (insn & (1 << 21))
2166 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2167 else
2168 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2169 break;
2170 case 2:
2171 if (insn & (1 << 21))
2172 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2173 else
2174 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2175 break;
2176 case 3:
2177 return 1;
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2183 case 0x402: case 0x502: case 0x602: case 0x702:
2184 wrd = (insn >> 12) & 0xf;
2185 rd0 = (insn >> 16) & 0xf;
2186 rd1 = (insn >> 0) & 0xf;
2187 gen_op_iwmmxt_movq_M0_wRn(rd0);
2188 tmp = tcg_const_i32((insn >> 20) & 3);
2189 iwmmxt_load_reg(cpu_V1, rd1);
2190 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2191 tcg_temp_free(tmp);
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 break;
2195 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2196 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2197 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2198 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 20) & 0xf) {
2204 case 0x0:
2205 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2206 break;
2207 case 0x1:
2208 gen_op_iwmmxt_subub_M0_wRn(rd1);
2209 break;
2210 case 0x3:
2211 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2212 break;
2213 case 0x4:
2214 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2215 break;
2216 case 0x5:
2217 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2218 break;
2219 case 0x7:
2220 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2221 break;
2222 case 0x8:
2223 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2224 break;
2225 case 0x9:
2226 gen_op_iwmmxt_subul_M0_wRn(rd1);
2227 break;
2228 case 0xb:
2229 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2230 break;
2231 default:
2232 return 1;
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2239 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2240 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2241 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2246 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2247 tcg_temp_free(tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2253 case 0x418: case 0x518: case 0x618: case 0x718:
2254 case 0x818: case 0x918: case 0xa18: case 0xb18:
2255 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
2260 switch ((insn >> 20) & 0xf) {
2261 case 0x0:
2262 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2263 break;
2264 case 0x1:
2265 gen_op_iwmmxt_addub_M0_wRn(rd1);
2266 break;
2267 case 0x3:
2268 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2269 break;
2270 case 0x4:
2271 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2272 break;
2273 case 0x5:
2274 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2275 break;
2276 case 0x7:
2277 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2278 break;
2279 case 0x8:
2280 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2281 break;
2282 case 0x9:
2283 gen_op_iwmmxt_addul_M0_wRn(rd1);
2284 break;
2285 case 0xb:
2286 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2287 break;
2288 default:
2289 return 1;
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2296 case 0x408: case 0x508: case 0x608: case 0x708:
2297 case 0x808: case 0x908: case 0xa08: case 0xb08:
2298 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2299 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2300 return 1;
2301 wrd = (insn >> 12) & 0xf;
2302 rd0 = (insn >> 16) & 0xf;
2303 rd1 = (insn >> 0) & 0xf;
2304 gen_op_iwmmxt_movq_M0_wRn(rd0);
2305 switch ((insn >> 22) & 3) {
2306 case 1:
2307 if (insn & (1 << 21))
2308 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2309 else
2310 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2311 break;
2312 case 2:
2313 if (insn & (1 << 21))
2314 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2315 else
2316 gen_op_iwmmxt_packul_M0_wRn(rd1);
2317 break;
2318 case 3:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2323 break;
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x201: case 0x203: case 0x205: case 0x207:
2330 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2331 case 0x211: case 0x213: case 0x215: case 0x217:
2332 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2333 wrd = (insn >> 5) & 0xf;
2334 rd0 = (insn >> 12) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 if (rd0 == 0xf || rd1 == 0xf)
2337 return 1;
2338 gen_op_iwmmxt_movq_M0_wRn(wrd);
2339 tmp = load_reg(s, rd0);
2340 tmp2 = load_reg(s, rd1);
2341 switch ((insn >> 16) & 0xf) {
2342 case 0x0: /* TMIA */
2343 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2344 break;
2345 case 0x8: /* TMIAPH */
2346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2347 break;
2348 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2349 if (insn & (1 << 16))
2350 tcg_gen_shri_i32(tmp, tmp, 16);
2351 if (insn & (1 << 17))
2352 tcg_gen_shri_i32(tmp2, tmp2, 16);
2353 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2354 break;
2355 default:
2356 tcg_temp_free_i32(tmp2);
2357 tcg_temp_free_i32(tmp);
2358 return 1;
2360 tcg_temp_free_i32(tmp2);
2361 tcg_temp_free_i32(tmp);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd);
2363 gen_op_iwmmxt_set_mup();
2364 break;
2365 default:
2366 return 1;
2369 return 0;
2372 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2373    (i.e. an undefined instruction). */
2374 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2376 int acc, rd0, rd1, rdhi, rdlo;
2377 TCGv tmp, tmp2;
2379 if ((insn & 0x0ff00f10) == 0x0e200010) {
2380 /* Multiply with Internal Accumulate Format */
2381 rd0 = (insn >> 12) & 0xf;
2382 rd1 = insn & 0xf;
2383 acc = (insn >> 5) & 7;
2385 if (acc != 0)
2386 return 1;
2388 tmp = load_reg(s, rd0);
2389 tmp2 = load_reg(s, rd1);
2390 switch ((insn >> 16) & 0xf) {
2391 case 0x0: /* MIA */
2392 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2393 break;
2394 case 0x8: /* MIAPH */
2395 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2396 break;
2397 case 0xc: /* MIABB */
2398 case 0xd: /* MIABT */
2399 case 0xe: /* MIATB */
2400 case 0xf: /* MIATT */
2401 if (insn & (1 << 16))
2402 tcg_gen_shri_i32(tmp, tmp, 16);
2403 if (insn & (1 << 17))
2404 tcg_gen_shri_i32(tmp2, tmp2, 16);
2405 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2406 break;
2407 default:
2408 return 1;
2410 tcg_temp_free_i32(tmp2);
2411 tcg_temp_free_i32(tmp);
2413 gen_op_iwmmxt_movq_wRn_M0(acc);
2414 return 0;
2417 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2418 /* Internal Accumulator Access Format */
2419 rdhi = (insn >> 16) & 0xf;
2420 rdlo = (insn >> 12) & 0xf;
2421 acc = insn & 7;
2423 if (acc != 0)
2424 return 1;
2426 if (insn & ARM_CP_RW_BIT) { /* MRA */
2427 iwmmxt_load_reg(cpu_V0, acc);
2428 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2429 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2430 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
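/* Note: the XScale DSP accumulator acc0 is 40 bits wide, so only
   bits [39:32] are significant in the high word;
   (1 << (40 - 32)) - 1 == 0xff keeps exactly those eight bits. */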
2431 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2432 } else { /* MAR */
2433 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2434 iwmmxt_store_reg(cpu_V0, acc);
2436 return 0;
2439 return 1;
2442 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2443 #define VFP_SREG(insn, bigbit, smallbit) \
2444 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2445 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2446 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2447 reg = (((insn) >> (bigbit)) & 0x0f) \
2448 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2449 } else { \
2450 if (insn & (1 << (smallbit))) \
2451 return 1; \
2452 reg = ((insn) >> (bigbit)) & 0x0f; \
2453 }} while (0)
2455 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2456 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2457 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2458 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2459 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2460 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
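/* Worked example of the register-number macros above: VFP_DREG_D
 * combines insn bits [15:12] with bit 22 as bit 4 on VFP3-capable
 * cores, so Vd = 0x3 with D = 1 selects d19; on pre-VFP3 cores a set
 * bit 22 UNDEFs because only d0-d15 exist.  VFP_SREG_D is the
 * single-precision equivalent and yields 2 * Vd + D, e.g. s7 for
 * Vd = 3, D = 1.
 */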
2462 /* Move between integer and VFP cores. */
2463 static TCGv gen_vfp_mrs(void)
2465 TCGv tmp = tcg_temp_new_i32();
2466 tcg_gen_mov_i32(tmp, cpu_F0s);
2467 return tmp;
2470 static void gen_vfp_msr(TCGv tmp)
2472 tcg_gen_mov_i32(cpu_F0s, tmp);
2473 tcg_temp_free_i32(tmp);
2476 static void gen_neon_dup_u8(TCGv var, int shift)
2478 TCGv tmp = tcg_temp_new_i32();
2479 if (shift)
2480 tcg_gen_shri_i32(var, var, shift);
2481 tcg_gen_ext8u_i32(var, var);
2482 tcg_gen_shli_i32(tmp, var, 8);
2483 tcg_gen_or_i32(var, var, tmp);
2484 tcg_gen_shli_i32(tmp, var, 16);
2485 tcg_gen_or_i32(var, var, tmp);
2486 tcg_temp_free_i32(tmp);
2489 static void gen_neon_dup_low16(TCGv var)
2491 TCGv tmp = tcg_temp_new_i32();
2492 tcg_gen_ext16u_i32(var, var);
2493 tcg_gen_shli_i32(tmp, var, 16);
2494 tcg_gen_or_i32(var, var, tmp);
2495 tcg_temp_free_i32(tmp);
2498 static void gen_neon_dup_high16(TCGv var)
2500 TCGv tmp = tcg_temp_new_i32();
2501 tcg_gen_andi_i32(var, var, 0xffff0000);
2502 tcg_gen_shri_i32(tmp, var, 16);
2503 tcg_gen_or_i32(var, var, tmp);
2504 tcg_temp_free_i32(tmp);
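/* Illustrative effect of the three duplication helpers above:
 *   gen_neon_dup_u8(var, 0)  : 0x000000ab -> 0xabababab
 *   gen_neon_dup_low16(var)  : 0x0000abcd -> 0xabcdabcd
 *   gen_neon_dup_high16(var) : 0xabcd0000 -> 0xabcdabcd
 */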
2507 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2509     /* Load a single Neon element and replicate into a 32-bit TCG reg */
2510 TCGv tmp;
2511 switch (size) {
2512 case 0:
2513 tmp = gen_ld8u(addr, IS_USER(s));
2514 gen_neon_dup_u8(tmp, 0);
2515 break;
2516 case 1:
2517 tmp = gen_ld16u(addr, IS_USER(s));
2518 gen_neon_dup_low16(tmp);
2519 break;
2520 case 2:
2521 tmp = gen_ld32(addr, IS_USER(s));
2522 break;
2523 default: /* Avoid compiler warnings. */
2524 abort();
2526 return tmp;
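/* gen_load_and_replicate() backs the VLDn "to all lanes" forms further
 * down: e.g. for size 0 a single byte 0xab is loaded and widened to
 * 0xabababab so the same value can be written to both halves of a
 * D register.
 */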
2529 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2530    (i.e. an undefined instruction). */
2531 static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2533 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2534 int dp, veclen;
2535 TCGv addr;
2536 TCGv tmp;
2537 TCGv tmp2;
2539 if (!arm_feature(env, ARM_FEATURE_VFP))
2540 return 1;
2542 if (!s->vfp_enabled) {
2543 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2544 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2545 return 1;
2546 rn = (insn >> 16) & 0xf;
2547 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2548 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2549 return 1;
2551 dp = ((insn & 0xf00) == 0xb00);
2552 switch ((insn >> 24) & 0xf) {
2553 case 0xe:
2554 if (insn & (1 << 4)) {
2555 /* single register transfer */
2556 rd = (insn >> 12) & 0xf;
2557 if (dp) {
2558 int size;
2559 int pass;
2561 VFP_DREG_N(rn, insn);
2562 if (insn & 0xf)
2563 return 1;
2564 if (insn & 0x00c00060
2565 && !arm_feature(env, ARM_FEATURE_NEON))
2566 return 1;
2568 pass = (insn >> 21) & 1;
2569 if (insn & (1 << 22)) {
2570 size = 0;
2571 offset = ((insn >> 5) & 3) * 8;
2572 } else if (insn & (1 << 5)) {
2573 size = 1;
2574 offset = (insn & (1 << 6)) ? 16 : 0;
2575 } else {
2576 size = 2;
2577 offset = 0;
2579 if (insn & ARM_CP_RW_BIT) {
2580 /* vfp->arm */
2581 tmp = neon_load_reg(rn, pass);
2582 switch (size) {
2583 case 0:
2584 if (offset)
2585 tcg_gen_shri_i32(tmp, tmp, offset);
2586 if (insn & (1 << 23))
2587 gen_uxtb(tmp);
2588 else
2589 gen_sxtb(tmp);
2590 break;
2591 case 1:
2592 if (insn & (1 << 23)) {
2593 if (offset) {
2594 tcg_gen_shri_i32(tmp, tmp, 16);
2595 } else {
2596 gen_uxth(tmp);
2598 } else {
2599 if (offset) {
2600 tcg_gen_sari_i32(tmp, tmp, 16);
2601 } else {
2602 gen_sxth(tmp);
2605 break;
2606 case 2:
2607 break;
2609 store_reg(s, rd, tmp);
2610 } else {
2611 /* arm->vfp */
2612 tmp = load_reg(s, rd);
2613 if (insn & (1 << 23)) {
2614 /* VDUP */
2615 if (size == 0) {
2616 gen_neon_dup_u8(tmp, 0);
2617 } else if (size == 1) {
2618 gen_neon_dup_low16(tmp);
2620 for (n = 0; n <= pass * 2; n++) {
2621 tmp2 = tcg_temp_new_i32();
2622 tcg_gen_mov_i32(tmp2, tmp);
2623 neon_store_reg(rn, n, tmp2);
2625 neon_store_reg(rn, n, tmp);
2626 } else {
2627 /* VMOV */
2628 switch (size) {
2629 case 0:
2630 tmp2 = neon_load_reg(rn, pass);
2631 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2632 tcg_temp_free_i32(tmp2);
2633 break;
2634 case 1:
2635 tmp2 = neon_load_reg(rn, pass);
2636 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2637 tcg_temp_free_i32(tmp2);
2638 break;
2639 case 2:
2640 break;
2642 neon_store_reg(rn, pass, tmp);
2645 } else { /* !dp */
2646 if ((insn & 0x6f) != 0x00)
2647 return 1;
2648 rn = VFP_SREG_N(insn);
2649 if (insn & ARM_CP_RW_BIT) {
2650 /* vfp->arm */
2651 if (insn & (1 << 21)) {
2652 /* system register */
2653 rn >>= 1;
2655 switch (rn) {
2656 case ARM_VFP_FPSID:
2657                     /* VFP2 allows access to FPSID from userspace.
2658 VFP3 restricts all id registers to privileged
2659 accesses. */
2660 if (IS_USER(s)
2661 && arm_feature(env, ARM_FEATURE_VFP3))
2662 return 1;
2663 tmp = load_cpu_field(vfp.xregs[rn]);
2664 break;
2665 case ARM_VFP_FPEXC:
2666 if (IS_USER(s))
2667 return 1;
2668 tmp = load_cpu_field(vfp.xregs[rn]);
2669 break;
2670 case ARM_VFP_FPINST:
2671 case ARM_VFP_FPINST2:
2672 /* Not present in VFP3. */
2673 if (IS_USER(s)
2674 || arm_feature(env, ARM_FEATURE_VFP3))
2675 return 1;
2676 tmp = load_cpu_field(vfp.xregs[rn]);
2677 break;
2678 case ARM_VFP_FPSCR:
2679 if (rd == 15) {
2680 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2681 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2682 } else {
2683 tmp = tcg_temp_new_i32();
2684 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2686 break;
2687 case ARM_VFP_MVFR0:
2688 case ARM_VFP_MVFR1:
2689 if (IS_USER(s)
2690 || !arm_feature(env, ARM_FEATURE_MVFR))
2691 return 1;
2692 tmp = load_cpu_field(vfp.xregs[rn]);
2693 break;
2694 default:
2695 return 1;
2697 } else {
2698 gen_mov_F0_vreg(0, rn);
2699 tmp = gen_vfp_mrs();
2701 if (rd == 15) {
2702 /* Set the 4 flag bits in the CPSR. */
2703 gen_set_nzcv(tmp);
2704 tcg_temp_free_i32(tmp);
2705 } else {
2706 store_reg(s, rd, tmp);
2708 } else {
2709 /* arm->vfp */
2710 tmp = load_reg(s, rd);
2711 if (insn & (1 << 21)) {
2712 rn >>= 1;
2713 /* system register */
2714 switch (rn) {
2715 case ARM_VFP_FPSID:
2716 case ARM_VFP_MVFR0:
2717 case ARM_VFP_MVFR1:
2718 /* Writes are ignored. */
2719 break;
2720 case ARM_VFP_FPSCR:
2721 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2722 tcg_temp_free_i32(tmp);
2723 gen_lookup_tb(s);
2724 break;
2725 case ARM_VFP_FPEXC:
2726 if (IS_USER(s))
2727 return 1;
2728 /* TODO: VFP subarchitecture support.
2729 * For now, keep the EN bit only */
2730 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2731 store_cpu_field(tmp, vfp.xregs[rn]);
2732 gen_lookup_tb(s);
2733 break;
2734 case ARM_VFP_FPINST:
2735 case ARM_VFP_FPINST2:
2736 store_cpu_field(tmp, vfp.xregs[rn]);
2737 break;
2738 default:
2739 return 1;
2741 } else {
2742 gen_vfp_msr(tmp);
2743 gen_mov_vreg_F0(0, rn);
2747 } else {
2748 /* data processing */
2749 /* The opcode is in bits 23, 21, 20 and 6. */
2750 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2751 if (dp) {
2752 if (op == 15) {
2753 /* rn is opcode */
2754 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2755 } else {
2756 /* rn is register number */
2757 VFP_DREG_N(rn, insn);
2760 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2761 /* Integer or single precision destination. */
2762 rd = VFP_SREG_D(insn);
2763 } else {
2764 VFP_DREG_D(rd, insn);
2766 if (op == 15 &&
2767 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2768 /* VCVT from int is always from S reg regardless of dp bit.
2769 * VCVT with immediate frac_bits has same format as SREG_M
2771 rm = VFP_SREG_M(insn);
2772 } else {
2773 VFP_DREG_M(rm, insn);
2775 } else {
2776 rn = VFP_SREG_N(insn);
2777 if (op == 15 && rn == 15) {
2778 /* Double precision destination. */
2779 VFP_DREG_D(rd, insn);
2780 } else {
2781 rd = VFP_SREG_D(insn);
2783 /* NB that we implicitly rely on the encoding for the frac_bits
2784 * in VCVT of fixed to float being the same as that of an SREG_M
2786 rm = VFP_SREG_M(insn);
2789 veclen = s->vec_len;
2790 if (op == 15 && rn > 3)
2791 veclen = 0;
2793 /* Shut up compiler warnings. */
2794 delta_m = 0;
2795 delta_d = 0;
2796 bank_mask = 0;
2798 if (veclen > 0) {
2799 if (dp)
2800 bank_mask = 0xc;
2801 else
2802 bank_mask = 0x18;
2804 /* Figure out what type of vector operation this is. */
2805 if ((rd & bank_mask) == 0) {
2806 /* scalar */
2807 veclen = 0;
2808 } else {
2809 if (dp)
2810 delta_d = (s->vec_stride >> 1) + 1;
2811 else
2812 delta_d = s->vec_stride + 1;
2814 if ((rm & bank_mask) == 0) {
2815 /* mixed scalar/vector */
2816 delta_m = 0;
2817 } else {
2818 /* vector */
2819 delta_m = delta_d;
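/* Short-vector (VFP vector mode) summary for the code above: a
 * destination in the first register bank (s0-s7, or d0-d3 for
 * doubles) is always scalar, so veclen is forced to 0.  Otherwise
 * each iteration of the main loop below advances rd (and rm, when it
 * is also a vector operand) by the configured stride within its bank.
 * Example: with FPSCR LEN = 4 and STRIDE = 1, a single-precision op
 * whose destination is s10 is also applied to s11, s12 and s13.
 */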
2824 /* Load the initial operands. */
2825 if (op == 15) {
2826 switch (rn) {
2827 case 16:
2828 case 17:
2829 /* Integer source */
2830 gen_mov_F0_vreg(0, rm);
2831 break;
2832 case 8:
2833 case 9:
2834 /* Compare */
2835 gen_mov_F0_vreg(dp, rd);
2836 gen_mov_F1_vreg(dp, rm);
2837 break;
2838 case 10:
2839 case 11:
2840 /* Compare with zero */
2841 gen_mov_F0_vreg(dp, rd);
2842 gen_vfp_F1_ld0(dp);
2843 break;
2844 case 20:
2845 case 21:
2846 case 22:
2847 case 23:
2848 case 28:
2849 case 29:
2850 case 30:
2851 case 31:
2852 /* Source and destination the same. */
2853 gen_mov_F0_vreg(dp, rd);
2854 break;
2855 case 4:
2856 case 5:
2857 case 6:
2858 case 7:
2859                 /* VCVTB, VCVTT: only present with the half-precision extension,
2860 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2862 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2863 return 1;
2865 /* Otherwise fall through */
2866 default:
2867 /* One source operand. */
2868 gen_mov_F0_vreg(dp, rm);
2869 break;
2871 } else {
2872 /* Two source operands. */
2873 gen_mov_F0_vreg(dp, rn);
2874 gen_mov_F1_vreg(dp, rm);
2877 for (;;) {
2878 /* Perform the calculation. */
2879 switch (op) {
2880 case 0: /* VMLA: fd + (fn * fm) */
2881 /* Note that order of inputs to the add matters for NaNs */
2882 gen_vfp_F1_mul(dp);
2883 gen_mov_F0_vreg(dp, rd);
2884 gen_vfp_add(dp);
2885 break;
2886 case 1: /* VMLS: fd + -(fn * fm) */
2887 gen_vfp_mul(dp);
2888 gen_vfp_F1_neg(dp);
2889 gen_mov_F0_vreg(dp, rd);
2890 gen_vfp_add(dp);
2891 break;
2892 case 2: /* VNMLS: -fd + (fn * fm) */
2893 /* Note that it isn't valid to replace (-A + B) with (B - A)
2894 * or similar plausible looking simplifications
2895 * because this will give wrong results for NaNs.
2897 gen_vfp_F1_mul(dp);
2898 gen_mov_F0_vreg(dp, rd);
2899 gen_vfp_neg(dp);
2900 gen_vfp_add(dp);
2901 break;
2902 case 3: /* VNMLA: -fd + -(fn * fm) */
2903 gen_vfp_mul(dp);
2904 gen_vfp_F1_neg(dp);
2905 gen_mov_F0_vreg(dp, rd);
2906 gen_vfp_neg(dp);
2907 gen_vfp_add(dp);
2908 break;
2909 case 4: /* mul: fn * fm */
2910 gen_vfp_mul(dp);
2911 break;
2912 case 5: /* nmul: -(fn * fm) */
2913 gen_vfp_mul(dp);
2914 gen_vfp_neg(dp);
2915 break;
2916 case 6: /* add: fn + fm */
2917 gen_vfp_add(dp);
2918 break;
2919 case 7: /* sub: fn - fm */
2920 gen_vfp_sub(dp);
2921 break;
2922 case 8: /* div: fn / fm */
2923 gen_vfp_div(dp);
2924 break;
2925 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2926 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2927 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2928 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2929 /* These are fused multiply-add, and must be done as one
2930 * floating point operation with no rounding between the
2931 * multiplication and addition steps.
2932 * NB that doing the negations here as separate steps is
2933 * correct : an input NaN should come out with its sign bit
2934 * flipped if it is a negated-input.
2936 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2937 return 1;
2939 if (dp) {
2940 TCGv_ptr fpst;
2941 TCGv_i64 frd;
2942 if (op & 1) {
2943 /* VFNMS, VFMS */
2944 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2946 frd = tcg_temp_new_i64();
2947 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2948 if (op & 2) {
2949 /* VFNMA, VFNMS */
2950 gen_helper_vfp_negd(frd, frd);
2952 fpst = get_fpstatus_ptr(0);
2953 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2954 cpu_F1d, frd, fpst);
2955 tcg_temp_free_ptr(fpst);
2956 tcg_temp_free_i64(frd);
2957 } else {
2958 TCGv_ptr fpst;
2959 TCGv_i32 frd;
2960 if (op & 1) {
2961 /* VFNMS, VFMS */
2962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2964 frd = tcg_temp_new_i32();
2965 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2966 if (op & 2) {
2967 gen_helper_vfp_negs(frd, frd);
2969 fpst = get_fpstatus_ptr(0);
2970 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
2971 cpu_F1s, frd, fpst);
2972 tcg_temp_free_ptr(fpst);
2973 tcg_temp_free_i32(frd);
2975 break;
2976 case 14: /* fconst */
2977 if (!arm_feature(env, ARM_FEATURE_VFP3))
2978 return 1;
2980 n = (insn << 12) & 0x80000000;
2981 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2982 if (dp) {
2983 if (i & 0x40)
2984 i |= 0x3f80;
2985 else
2986 i |= 0x4000;
2987 n |= i << 16;
2988 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
2989 } else {
2990 if (i & 0x40)
2991 i |= 0x780;
2992 else
2993 i |= 0x800;
2994 n |= i << 19;
2995 tcg_gen_movi_i32(cpu_F0s, n);
2997 break;
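/* Worked example of the immediate expansion above: imm8 = 0x70 gives
 * i = 0x7f0 and n = 0x3f800000 in the single-precision path, i.e.
 * 1.0f; the double-precision path produces 0x3ff0000000000000,
 * i.e. 1.0.
 */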
2998 case 15: /* extension space */
2999 switch (rn) {
3000 case 0: /* cpy */
3001 /* no-op */
3002 break;
3003 case 1: /* abs */
3004 gen_vfp_abs(dp);
3005 break;
3006 case 2: /* neg */
3007 gen_vfp_neg(dp);
3008 break;
3009 case 3: /* sqrt */
3010 gen_vfp_sqrt(dp);
3011 break;
3012 case 4: /* vcvtb.f32.f16 */
3013 tmp = gen_vfp_mrs();
3014 tcg_gen_ext16u_i32(tmp, tmp);
3015 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3016 tcg_temp_free_i32(tmp);
3017 break;
3018 case 5: /* vcvtt.f32.f16 */
3019 tmp = gen_vfp_mrs();
3020 tcg_gen_shri_i32(tmp, tmp, 16);
3021 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3022 tcg_temp_free_i32(tmp);
3023 break;
3024 case 6: /* vcvtb.f16.f32 */
3025 tmp = tcg_temp_new_i32();
3026 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3027 gen_mov_F0_vreg(0, rd);
3028 tmp2 = gen_vfp_mrs();
3029 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3030 tcg_gen_or_i32(tmp, tmp, tmp2);
3031 tcg_temp_free_i32(tmp2);
3032 gen_vfp_msr(tmp);
3033 break;
3034 case 7: /* vcvtt.f16.f32 */
3035 tmp = tcg_temp_new_i32();
3036 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3037 tcg_gen_shli_i32(tmp, tmp, 16);
3038 gen_mov_F0_vreg(0, rd);
3039 tmp2 = gen_vfp_mrs();
3040 tcg_gen_ext16u_i32(tmp2, tmp2);
3041 tcg_gen_or_i32(tmp, tmp, tmp2);
3042 tcg_temp_free_i32(tmp2);
3043 gen_vfp_msr(tmp);
3044 break;
3045 case 8: /* cmp */
3046 gen_vfp_cmp(dp);
3047 break;
3048 case 9: /* cmpe */
3049 gen_vfp_cmpe(dp);
3050 break;
3051 case 10: /* cmpz */
3052 gen_vfp_cmp(dp);
3053 break;
3054 case 11: /* cmpez */
3055 gen_vfp_F1_ld0(dp);
3056 gen_vfp_cmpe(dp);
3057 break;
3058 case 15: /* single<->double conversion */
3059 if (dp)
3060 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3061 else
3062 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3063 break;
3064 case 16: /* fuito */
3065 gen_vfp_uito(dp, 0);
3066 break;
3067 case 17: /* fsito */
3068 gen_vfp_sito(dp, 0);
3069 break;
3070 case 20: /* fshto */
3071 if (!arm_feature(env, ARM_FEATURE_VFP3))
3072 return 1;
3073 gen_vfp_shto(dp, 16 - rm, 0);
3074 break;
3075 case 21: /* fslto */
3076 if (!arm_feature(env, ARM_FEATURE_VFP3))
3077 return 1;
3078 gen_vfp_slto(dp, 32 - rm, 0);
3079 break;
3080 case 22: /* fuhto */
3081 if (!arm_feature(env, ARM_FEATURE_VFP3))
3082 return 1;
3083 gen_vfp_uhto(dp, 16 - rm, 0);
3084 break;
3085 case 23: /* fulto */
3086 if (!arm_feature(env, ARM_FEATURE_VFP3))
3087 return 1;
3088 gen_vfp_ulto(dp, 32 - rm, 0);
3089 break;
3090 case 24: /* ftoui */
3091 gen_vfp_toui(dp, 0);
3092 break;
3093 case 25: /* ftouiz */
3094 gen_vfp_touiz(dp, 0);
3095 break;
3096 case 26: /* ftosi */
3097 gen_vfp_tosi(dp, 0);
3098 break;
3099 case 27: /* ftosiz */
3100 gen_vfp_tosiz(dp, 0);
3101 break;
3102 case 28: /* ftosh */
3103 if (!arm_feature(env, ARM_FEATURE_VFP3))
3104 return 1;
3105 gen_vfp_tosh(dp, 16 - rm, 0);
3106 break;
3107 case 29: /* ftosl */
3108 if (!arm_feature(env, ARM_FEATURE_VFP3))
3109 return 1;
3110 gen_vfp_tosl(dp, 32 - rm, 0);
3111 break;
3112 case 30: /* ftouh */
3113 if (!arm_feature(env, ARM_FEATURE_VFP3))
3114 return 1;
3115 gen_vfp_touh(dp, 16 - rm, 0);
3116 break;
3117 case 31: /* ftoul */
3118 if (!arm_feature(env, ARM_FEATURE_VFP3))
3119 return 1;
3120 gen_vfp_toul(dp, 32 - rm, 0);
3121 break;
3122 default: /* undefined */
3123 return 1;
3125 break;
3126 default: /* undefined */
3127 return 1;
3130 /* Write back the result. */
3131 if (op == 15 && (rn >= 8 && rn <= 11))
3132 ; /* Comparison, do nothing. */
3133 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3134 /* VCVT double to int: always integer result. */
3135 gen_mov_vreg_F0(0, rd);
3136 else if (op == 15 && rn == 15)
3137 /* conversion */
3138 gen_mov_vreg_F0(!dp, rd);
3139 else
3140 gen_mov_vreg_F0(dp, rd);
3142 /* break out of the loop if we have finished */
3143 if (veclen == 0)
3144 break;
3146 if (op == 15 && delta_m == 0) {
3147 /* single source one-many */
3148 while (veclen--) {
3149 rd = ((rd + delta_d) & (bank_mask - 1))
3150 | (rd & bank_mask);
3151 gen_mov_vreg_F0(dp, rd);
3153 break;
3155             /* Set up the next operands. */
3156 veclen--;
3157 rd = ((rd + delta_d) & (bank_mask - 1))
3158 | (rd & bank_mask);
3160 if (op == 15) {
3161 /* One source operand. */
3162 rm = ((rm + delta_m) & (bank_mask - 1))
3163 | (rm & bank_mask);
3164 gen_mov_F0_vreg(dp, rm);
3165 } else {
3166 /* Two source operands. */
3167 rn = ((rn + delta_d) & (bank_mask - 1))
3168 | (rn & bank_mask);
3169 gen_mov_F0_vreg(dp, rn);
3170 if (delta_m) {
3171 rm = ((rm + delta_m) & (bank_mask - 1))
3172 | (rm & bank_mask);
3173 gen_mov_F1_vreg(dp, rm);
3178 break;
3179 case 0xc:
3180 case 0xd:
3181 if ((insn & 0x03e00000) == 0x00400000) {
3182 /* two-register transfer */
3183 rn = (insn >> 16) & 0xf;
3184 rd = (insn >> 12) & 0xf;
3185 if (dp) {
3186 VFP_DREG_M(rm, insn);
3187 } else {
3188 rm = VFP_SREG_M(insn);
3191 if (insn & ARM_CP_RW_BIT) {
3192 /* vfp->arm */
3193 if (dp) {
3194 gen_mov_F0_vreg(0, rm * 2);
3195 tmp = gen_vfp_mrs();
3196 store_reg(s, rd, tmp);
3197 gen_mov_F0_vreg(0, rm * 2 + 1);
3198 tmp = gen_vfp_mrs();
3199 store_reg(s, rn, tmp);
3200 } else {
3201 gen_mov_F0_vreg(0, rm);
3202 tmp = gen_vfp_mrs();
3203 store_reg(s, rd, tmp);
3204 gen_mov_F0_vreg(0, rm + 1);
3205 tmp = gen_vfp_mrs();
3206 store_reg(s, rn, tmp);
3208 } else {
3209 /* arm->vfp */
3210 if (dp) {
3211 tmp = load_reg(s, rd);
3212 gen_vfp_msr(tmp);
3213 gen_mov_vreg_F0(0, rm * 2);
3214 tmp = load_reg(s, rn);
3215 gen_vfp_msr(tmp);
3216 gen_mov_vreg_F0(0, rm * 2 + 1);
3217 } else {
3218 tmp = load_reg(s, rd);
3219 gen_vfp_msr(tmp);
3220 gen_mov_vreg_F0(0, rm);
3221 tmp = load_reg(s, rn);
3222 gen_vfp_msr(tmp);
3223 gen_mov_vreg_F0(0, rm + 1);
3226 } else {
3227 /* Load/store */
3228 rn = (insn >> 16) & 0xf;
3229 if (dp)
3230 VFP_DREG_D(rd, insn);
3231 else
3232 rd = VFP_SREG_D(insn);
3233 if ((insn & 0x01200000) == 0x01000000) {
3234 /* Single load/store */
3235 offset = (insn & 0xff) << 2;
3236 if ((insn & (1 << 23)) == 0)
3237 offset = -offset;
3238 if (s->thumb && rn == 15) {
3239 /* This is actually UNPREDICTABLE */
3240 addr = tcg_temp_new_i32();
3241 tcg_gen_movi_i32(addr, s->pc & ~2);
3242 } else {
3243 addr = load_reg(s, rn);
3245 tcg_gen_addi_i32(addr, addr, offset);
3246 if (insn & (1 << 20)) {
3247 gen_vfp_ld(s, dp, addr);
3248 gen_mov_vreg_F0(dp, rd);
3249 } else {
3250 gen_mov_F0_vreg(dp, rd);
3251 gen_vfp_st(s, dp, addr);
3253 tcg_temp_free_i32(addr);
3254 } else {
3255 /* load/store multiple */
3256 int w = insn & (1 << 21);
3257 if (dp)
3258 n = (insn >> 1) & 0x7f;
3259 else
3260 n = insn & 0xff;
3262 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3263 /* P == U , W == 1 => UNDEF */
3264 return 1;
3266 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3267 /* UNPREDICTABLE cases for bad immediates: we choose to
3268 * UNDEF to avoid generating huge numbers of TCG ops
3270 return 1;
3272 if (rn == 15 && w) {
3273 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3274 return 1;
3277 if (s->thumb && rn == 15) {
3278 /* This is actually UNPREDICTABLE */
3279 addr = tcg_temp_new_i32();
3280 tcg_gen_movi_i32(addr, s->pc & ~2);
3281 } else {
3282 addr = load_reg(s, rn);
3284 if (insn & (1 << 24)) /* pre-decrement */
3285 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3287 if (dp)
3288 offset = 8;
3289 else
3290 offset = 4;
3291 for (i = 0; i < n; i++) {
3292 if (insn & ARM_CP_RW_BIT) {
3293 /* load */
3294 gen_vfp_ld(s, dp, addr);
3295 gen_mov_vreg_F0(dp, rd + i);
3296 } else {
3297 /* store */
3298 gen_mov_F0_vreg(dp, rd + i);
3299 gen_vfp_st(s, dp, addr);
3301 tcg_gen_addi_i32(addr, addr, offset);
3303 if (w) {
3304 /* writeback */
3305 if (insn & (1 << 24))
3306 offset = -offset * n;
3307 else if (dp && (insn & 1))
3308 offset = 4;
3309 else
3310 offset = 0;
3312 if (offset != 0)
3313 tcg_gen_addi_i32(addr, addr, offset);
3314 store_reg(s, rn, addr);
3315 } else {
3316 tcg_temp_free_i32(addr);
3320 break;
3321 default:
3322 /* Should never happen. */
3323 return 1;
3325 return 0;
3328 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3330 TranslationBlock *tb;
3332 tb = s->tb;
3333 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3334 tcg_gen_goto_tb(n);
3335 gen_set_pc_im(dest);
3336 tcg_gen_exit_tb((tcg_target_long)tb + n);
3337 } else {
3338 gen_set_pc_im(dest);
3339 tcg_gen_exit_tb(0);
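/* This follows the usual QEMU rule that a TB may only be chained
 * directly (goto_tb) to a destination on the same guest page, so that
 * invalidating the page also invalidates the link; for cross-page
 * destinations the PC is set and control returns to the main loop to
 * look up the next TB.
 */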
3343 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3345 if (unlikely(s->singlestep_enabled)) {
3346 /* An indirect jump so that we still trigger the debug exception. */
3347 if (s->thumb)
3348 dest |= 1;
3349 gen_bx_im(s, dest);
3350 } else {
3351 gen_goto_tb(s, 0, dest);
3352 s->is_jmp = DISAS_TB_JUMP;
3356 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3358 if (x)
3359 tcg_gen_sari_i32(t0, t0, 16);
3360 else
3361 gen_sxth(t0);
3362 if (y)
3363 tcg_gen_sari_i32(t1, t1, 16);
3364 else
3365 gen_sxth(t1);
3366 tcg_gen_mul_i32(t0, t0, t1);
3369 /* Return the mask of PSR bits set by a MSR instruction. */
3370 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3371 uint32_t mask;
3373 mask = 0;
3374 if (flags & (1 << 0))
3375 mask |= 0xff;
3376 if (flags & (1 << 1))
3377 mask |= 0xff00;
3378 if (flags & (1 << 2))
3379 mask |= 0xff0000;
3380 if (flags & (1 << 3))
3381 mask |= 0xff000000;
3383 /* Mask out undefined bits. */
3384 mask &= ~CPSR_RESERVED;
3385 if (!arm_feature(env, ARM_FEATURE_V4T))
3386 mask &= ~CPSR_T;
3387 if (!arm_feature(env, ARM_FEATURE_V5))
3388         mask &= ~CPSR_Q; /* V5TE in reality */
3389 if (!arm_feature(env, ARM_FEATURE_V6))
3390 mask &= ~(CPSR_E | CPSR_GE);
3391 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3392 mask &= ~CPSR_IT;
3393 /* Mask out execution state bits. */
3394 if (!spsr)
3395 mask &= ~CPSR_EXEC;
3396 /* Mask out privileged bits. */
3397 if (IS_USER(s))
3398 mask &= CPSR_USER;
3399 return mask;
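/* Example: an MSR that selects the control and flags fields (flag
 * bits 0 and 3) starts from a mask of 0xff0000ff; reserved bits,
 * execution state bits (for CPSR writes) and, in user mode, anything
 * outside the user-writable CPSR_USER bits are then removed.
 */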
3402 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3403 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3405 TCGv tmp;
3406 if (spsr) {
3407 /* ??? This is also undefined in system mode. */
3408 if (IS_USER(s))
3409 return 1;
3411 tmp = load_cpu_field(spsr);
3412 tcg_gen_andi_i32(tmp, tmp, ~mask);
3413 tcg_gen_andi_i32(t0, t0, mask);
3414 tcg_gen_or_i32(tmp, tmp, t0);
3415 store_cpu_field(tmp, spsr);
3416 } else {
3417 gen_set_cpsr(t0, mask);
3419 tcg_temp_free_i32(t0);
3420 gen_lookup_tb(s);
3421 return 0;
3424 /* Returns nonzero if access to the PSR is not permitted. */
3425 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3427 TCGv tmp;
3428 tmp = tcg_temp_new_i32();
3429 tcg_gen_movi_i32(tmp, val);
3430 return gen_set_psr(s, mask, spsr, tmp);
3433 /* Generate an old-style exception return. Marks pc as dead. */
3434 static void gen_exception_return(DisasContext *s, TCGv pc)
3436 TCGv tmp;
3437 store_reg(s, 15, pc);
3438 tmp = load_cpu_field(spsr);
3439 gen_set_cpsr(tmp, 0xffffffff);
3440 tcg_temp_free_i32(tmp);
3441 s->is_jmp = DISAS_UPDATE;
3444 /* Generate a v6 exception return. Marks both values as dead. */
3445 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3447 gen_set_cpsr(cpsr, 0xffffffff);
3448 tcg_temp_free_i32(cpsr);
3449 store_reg(s, 15, pc);
3450 s->is_jmp = DISAS_UPDATE;
3453 static inline void
3454 gen_set_condexec (DisasContext *s)
3456 if (s->condexec_mask) {
3457 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3458 TCGv tmp = tcg_temp_new_i32();
3459 tcg_gen_movi_i32(tmp, val);
3460 store_cpu_field(tmp, condexec_bits);
3464 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3466 gen_set_condexec(s);
3467 gen_set_pc_im(s->pc - offset);
3468 gen_exception(excp);
3469 s->is_jmp = DISAS_JUMP;
3472 static void gen_nop_hint(DisasContext *s, int val)
3474 switch (val) {
3475 case 3: /* wfi */
3476 gen_set_pc_im(s->pc);
3477 s->is_jmp = DISAS_WFI;
3478 break;
3479 case 2: /* wfe */
3480 case 4: /* sev */
3481 /* TODO: Implement SEV and WFE. May help SMP performance. */
3482 default: /* nop */
3483 break;
3487 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3489 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3491 switch (size) {
3492 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3493 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3494 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3495 default: abort();
3499 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3501 switch (size) {
3502 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3503 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3504 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3505 default: return;
3509 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3510 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3511 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3512 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3513 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3515 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3516 switch ((size << 1) | u) { \
3517 case 0: \
3518 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3519 break; \
3520 case 1: \
3521 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3522 break; \
3523 case 2: \
3524 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3525 break; \
3526 case 3: \
3527 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3528 break; \
3529 case 4: \
3530 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3531 break; \
3532 case 5: \
3533 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3534 break; \
3535 default: return 1; \
3536 }} while (0)
3538 #define GEN_NEON_INTEGER_OP(name) do { \
3539 switch ((size << 1) | u) { \
3540 case 0: \
3541 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3542 break; \
3543 case 1: \
3544 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3545 break; \
3546 case 2: \
3547 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3548 break; \
3549 case 3: \
3550 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3551 break; \
3552 case 4: \
3553 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3554 break; \
3555 case 5: \
3556 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3557 break; \
3558 default: return 1; \
3559 }} while (0)
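/* Both macros above dispatch on ((size << 1) | u): even index values
 * select the signed helper and odd values the unsigned one, e.g.
 * size = 1, u = 1 picks the _u16 helper.  size == 3 hits the default
 * case and makes the caller return 1 (UNDEF).
 */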
3561 static TCGv neon_load_scratch(int scratch)
3563 TCGv tmp = tcg_temp_new_i32();
3564 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3565 return tmp;
3568 static void neon_store_scratch(int scratch, TCGv var)
3570 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3571 tcg_temp_free_i32(var);
3574 static inline TCGv neon_get_scalar(int size, int reg)
3576 TCGv tmp;
3577 if (size == 1) {
3578 tmp = neon_load_reg(reg & 7, reg >> 4);
3579 if (reg & 8) {
3580 gen_neon_dup_high16(tmp);
3581 } else {
3582 gen_neon_dup_low16(tmp);
3584 } else {
3585 tmp = neon_load_reg(reg & 15, reg >> 4);
3587 return tmp;
3590 static int gen_neon_unzip(int rd, int rm, int size, int q)
3592 TCGv tmp, tmp2;
3593 if (!q && size == 2) {
3594 return 1;
3596 tmp = tcg_const_i32(rd);
3597 tmp2 = tcg_const_i32(rm);
3598 if (q) {
3599 switch (size) {
3600 case 0:
3601 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3602 break;
3603 case 1:
3604 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3605 break;
3606 case 2:
3607 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3608 break;
3609 default:
3610 abort();
3612 } else {
3613 switch (size) {
3614 case 0:
3615 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3616 break;
3617 case 1:
3618 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3619 break;
3620 default:
3621 abort();
3624 tcg_temp_free_i32(tmp);
3625 tcg_temp_free_i32(tmp2);
3626 return 0;
3629 static int gen_neon_zip(int rd, int rm, int size, int q)
3631 TCGv tmp, tmp2;
3632 if (!q && size == 2) {
3633 return 1;
3635 tmp = tcg_const_i32(rd);
3636 tmp2 = tcg_const_i32(rm);
3637 if (q) {
3638 switch (size) {
3639 case 0:
3640 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3641 break;
3642 case 1:
3643 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3644 break;
3645 case 2:
3646 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3647 break;
3648 default:
3649 abort();
3651 } else {
3652 switch (size) {
3653 case 0:
3654 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3655 break;
3656 case 1:
3657 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3658 break;
3659 default:
3660 abort();
3663 tcg_temp_free_i32(tmp);
3664 tcg_temp_free_i32(tmp2);
3665 return 0;
3668 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3670 TCGv rd, tmp;
3672 rd = tcg_temp_new_i32();
3673 tmp = tcg_temp_new_i32();
3675 tcg_gen_shli_i32(rd, t0, 8);
3676 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3677 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3678 tcg_gen_or_i32(rd, rd, tmp);
3680 tcg_gen_shri_i32(t1, t1, 8);
3681 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3682 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3683 tcg_gen_or_i32(t1, t1, tmp);
3684 tcg_gen_mov_i32(t0, rd);
3686 tcg_temp_free_i32(tmp);
3687 tcg_temp_free_i32(rd);
3690 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3692 TCGv rd, tmp;
3694 rd = tcg_temp_new_i32();
3695 tmp = tcg_temp_new_i32();
3697 tcg_gen_shli_i32(rd, t0, 16);
3698 tcg_gen_andi_i32(tmp, t1, 0xffff);
3699 tcg_gen_or_i32(rd, rd, tmp);
3700 tcg_gen_shri_i32(t1, t1, 16);
3701 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3702 tcg_gen_or_i32(t1, t1, tmp);
3703 tcg_gen_mov_i32(t0, rd);
3705 tcg_temp_free_i32(tmp);
3706 tcg_temp_free_i32(rd);
3710 static struct {
3711 int nregs;
3712 int interleave;
3713 int spacing;
3714 } neon_ls_element_type[11] = {
3715 {4, 4, 1},
3716 {4, 4, 2},
3717 {4, 1, 1},
3718 {4, 2, 1},
3719 {3, 3, 1},
3720 {3, 3, 2},
3721 {3, 1, 1},
3722 {1, 1, 1},
3723 {2, 2, 1},
3724 {2, 2, 2},
3725 {2, 1, 1}
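/* The table above is indexed by the "type" field (insn bits [11:8]) of
 * the VLDn/VSTn "multiple structures" forms: e.g. entry 7 ({1, 1, 1})
 * is one-register VLD1/VST1, while entry 0 ({4, 4, 1}) is VLD4/VST4
 * with four fully interleaved registers.
 */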
3728 /* Translate a NEON load/store element instruction. Return nonzero if the
3729 instruction is invalid. */
3730 static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
3732 int rd, rn, rm;
3733 int op;
3734 int nregs;
3735 int interleave;
3736 int spacing;
3737 int stride;
3738 int size;
3739 int reg;
3740 int pass;
3741 int load;
3742 int shift;
3743 int n;
3744 TCGv addr;
3745 TCGv tmp;
3746 TCGv tmp2;
3747 TCGv_i64 tmp64;
3749 if (!s->vfp_enabled)
3750 return 1;
3751 VFP_DREG_D(rd, insn);
3752 rn = (insn >> 16) & 0xf;
3753 rm = insn & 0xf;
3754 load = (insn & (1 << 21)) != 0;
3755 if ((insn & (1 << 23)) == 0) {
3756 /* Load store all elements. */
3757 op = (insn >> 8) & 0xf;
3758 size = (insn >> 6) & 3;
3759 if (op > 10)
3760 return 1;
3761 /* Catch UNDEF cases for bad values of align field */
3762 switch (op & 0xc) {
3763 case 4:
3764 if (((insn >> 5) & 1) == 1) {
3765 return 1;
3767 break;
3768 case 8:
3769 if (((insn >> 4) & 3) == 3) {
3770 return 1;
3772 break;
3773 default:
3774 break;
3776 nregs = neon_ls_element_type[op].nregs;
3777 interleave = neon_ls_element_type[op].interleave;
3778 spacing = neon_ls_element_type[op].spacing;
3779 if (size == 3 && (interleave | spacing) != 1)
3780 return 1;
3781 addr = tcg_temp_new_i32();
3782 load_reg_var(s, addr, rn);
3783 stride = (1 << size) * interleave;
3784 for (reg = 0; reg < nregs; reg++) {
3785 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3786 load_reg_var(s, addr, rn);
3787 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3788 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3789 load_reg_var(s, addr, rn);
3790 tcg_gen_addi_i32(addr, addr, 1 << size);
3792 if (size == 3) {
3793 if (load) {
3794 tmp64 = gen_ld64(addr, IS_USER(s));
3795 neon_store_reg64(tmp64, rd);
3796 tcg_temp_free_i64(tmp64);
3797 } else {
3798 tmp64 = tcg_temp_new_i64();
3799 neon_load_reg64(tmp64, rd);
3800 gen_st64(tmp64, addr, IS_USER(s));
3802 tcg_gen_addi_i32(addr, addr, stride);
3803 } else {
3804 for (pass = 0; pass < 2; pass++) {
3805 if (size == 2) {
3806 if (load) {
3807 tmp = gen_ld32(addr, IS_USER(s));
3808 neon_store_reg(rd, pass, tmp);
3809 } else {
3810 tmp = neon_load_reg(rd, pass);
3811 gen_st32(tmp, addr, IS_USER(s));
3813 tcg_gen_addi_i32(addr, addr, stride);
3814 } else if (size == 1) {
3815 if (load) {
3816 tmp = gen_ld16u(addr, IS_USER(s));
3817 tcg_gen_addi_i32(addr, addr, stride);
3818 tmp2 = gen_ld16u(addr, IS_USER(s));
3819 tcg_gen_addi_i32(addr, addr, stride);
3820 tcg_gen_shli_i32(tmp2, tmp2, 16);
3821 tcg_gen_or_i32(tmp, tmp, tmp2);
3822 tcg_temp_free_i32(tmp2);
3823 neon_store_reg(rd, pass, tmp);
3824 } else {
3825 tmp = neon_load_reg(rd, pass);
3826 tmp2 = tcg_temp_new_i32();
3827 tcg_gen_shri_i32(tmp2, tmp, 16);
3828 gen_st16(tmp, addr, IS_USER(s));
3829 tcg_gen_addi_i32(addr, addr, stride);
3830 gen_st16(tmp2, addr, IS_USER(s));
3831 tcg_gen_addi_i32(addr, addr, stride);
3833 } else /* size == 0 */ {
3834 if (load) {
3835 TCGV_UNUSED(tmp2);
3836 for (n = 0; n < 4; n++) {
3837 tmp = gen_ld8u(addr, IS_USER(s));
3838 tcg_gen_addi_i32(addr, addr, stride);
3839 if (n == 0) {
3840 tmp2 = tmp;
3841 } else {
3842 tcg_gen_shli_i32(tmp, tmp, n * 8);
3843 tcg_gen_or_i32(tmp2, tmp2, tmp);
3844 tcg_temp_free_i32(tmp);
3847 neon_store_reg(rd, pass, tmp2);
3848 } else {
3849 tmp2 = neon_load_reg(rd, pass);
3850 for (n = 0; n < 4; n++) {
3851 tmp = tcg_temp_new_i32();
3852 if (n == 0) {
3853 tcg_gen_mov_i32(tmp, tmp2);
3854 } else {
3855 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3857 gen_st8(tmp, addr, IS_USER(s));
3858 tcg_gen_addi_i32(addr, addr, stride);
3860 tcg_temp_free_i32(tmp2);
3865 rd += spacing;
3867 tcg_temp_free_i32(addr);
3868 stride = nregs * 8;
3869 } else {
3870 size = (insn >> 10) & 3;
3871 if (size == 3) {
3872 /* Load single element to all lanes. */
3873 int a = (insn >> 4) & 1;
3874 if (!load) {
3875 return 1;
3877 size = (insn >> 6) & 3;
3878 nregs = ((insn >> 8) & 3) + 1;
3880 if (size == 3) {
3881 if (nregs != 4 || a == 0) {
3882 return 1;
3884 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3885 size = 2;
3887 if (nregs == 1 && a == 1 && size == 0) {
3888 return 1;
3890 if (nregs == 3 && a == 1) {
3891 return 1;
3893 addr = tcg_temp_new_i32();
3894 load_reg_var(s, addr, rn);
3895 if (nregs == 1) {
3896 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3897 tmp = gen_load_and_replicate(s, addr, size);
3898 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3899 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3900 if (insn & (1 << 5)) {
3901 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3902 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3904 tcg_temp_free_i32(tmp);
3905 } else {
3906 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3907 stride = (insn & (1 << 5)) ? 2 : 1;
3908 for (reg = 0; reg < nregs; reg++) {
3909 tmp = gen_load_and_replicate(s, addr, size);
3910 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3911 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3912 tcg_temp_free_i32(tmp);
3913 tcg_gen_addi_i32(addr, addr, 1 << size);
3914 rd += stride;
3917 tcg_temp_free_i32(addr);
3918 stride = (1 << size) * nregs;
3919 } else {
3920 /* Single element. */
3921 int idx = (insn >> 4) & 0xf;
3922 pass = (insn >> 7) & 1;
3923 switch (size) {
3924 case 0:
3925 shift = ((insn >> 5) & 3) * 8;
3926 stride = 1;
3927 break;
3928 case 1:
3929 shift = ((insn >> 6) & 1) * 16;
3930 stride = (insn & (1 << 5)) ? 2 : 1;
3931 break;
3932 case 2:
3933 shift = 0;
3934 stride = (insn & (1 << 6)) ? 2 : 1;
3935 break;
3936 default:
3937 abort();
3939 nregs = ((insn >> 8) & 3) + 1;
3940 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3941 switch (nregs) {
3942 case 1:
3943 if (((idx & (1 << size)) != 0) ||
3944 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3945 return 1;
3947 break;
3948 case 3:
3949 if ((idx & 1) != 0) {
3950 return 1;
3952 /* fall through */
3953 case 2:
3954 if (size == 2 && (idx & 2) != 0) {
3955 return 1;
3957 break;
3958 case 4:
3959 if ((size == 2) && ((idx & 3) == 3)) {
3960 return 1;
3962 break;
3963 default:
3964 abort();
3966 if ((rd + stride * (nregs - 1)) > 31) {
3967 /* Attempts to write off the end of the register file
3968 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3969 * the neon_load_reg() would write off the end of the array.
3971 return 1;
3973 addr = tcg_temp_new_i32();
3974 load_reg_var(s, addr, rn);
3975 for (reg = 0; reg < nregs; reg++) {
3976 if (load) {
3977 switch (size) {
3978 case 0:
3979 tmp = gen_ld8u(addr, IS_USER(s));
3980 break;
3981 case 1:
3982 tmp = gen_ld16u(addr, IS_USER(s));
3983 break;
3984 case 2:
3985 tmp = gen_ld32(addr, IS_USER(s));
3986 break;
3987 default: /* Avoid compiler warnings. */
3988 abort();
3990 if (size != 2) {
3991 tmp2 = neon_load_reg(rd, pass);
3992 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3993 tcg_temp_free_i32(tmp2);
3995 neon_store_reg(rd, pass, tmp);
3996 } else { /* Store */
3997 tmp = neon_load_reg(rd, pass);
3998 if (shift)
3999 tcg_gen_shri_i32(tmp, tmp, shift);
4000 switch (size) {
4001 case 0:
4002 gen_st8(tmp, addr, IS_USER(s));
4003 break;
4004 case 1:
4005 gen_st16(tmp, addr, IS_USER(s));
4006 break;
4007 case 2:
4008 gen_st32(tmp, addr, IS_USER(s));
4009 break;
4012 rd += stride;
4013 tcg_gen_addi_i32(addr, addr, 1 << size);
4015 tcg_temp_free_i32(addr);
4016 stride = nregs * (1 << size);
4019 if (rm != 15) {
4020 TCGv base;
4022 base = load_reg(s, rn);
4023 if (rm == 13) {
4024 tcg_gen_addi_i32(base, base, stride);
4025 } else {
4026 TCGv index;
4027 index = load_reg(s, rm);
4028 tcg_gen_add_i32(base, base, index);
4029 tcg_temp_free_i32(index);
4031 store_reg(s, rn, base);
4033 return 0;
4036 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4037 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4039 tcg_gen_and_i32(t, t, c);
4040 tcg_gen_andc_i32(f, f, c);
4041 tcg_gen_or_i32(dest, t, f);
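/* Equivalently, for every bit position: dest = (t & c) | (f & ~c),
 * which matches the VBSL semantics when c holds the old destination
 * register contents.
 */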
4044 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4046 switch (size) {
4047 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4048 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4049 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4050 default: abort();
4054 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4056 switch (size) {
4057 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4058 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4059 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4060 default: abort();
4064 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4066 switch (size) {
4067 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4068 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4069 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4070 default: abort();
4074 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4076 switch (size) {
4077 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4078 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4079 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4080 default: abort();
4084 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4085 int q, int u)
4087 if (q) {
4088 if (u) {
4089 switch (size) {
4090 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4091 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4092 default: abort();
4094 } else {
4095 switch (size) {
4096 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4097 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4098 default: abort();
4101 } else {
4102 if (u) {
4103 switch (size) {
4104 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4105 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4106 default: abort();
4108 } else {
4109 switch (size) {
4110 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4111 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4112 default: abort();
4118 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4120 if (u) {
4121 switch (size) {
4122 case 0: gen_helper_neon_widen_u8(dest, src); break;
4123 case 1: gen_helper_neon_widen_u16(dest, src); break;
4124 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4125 default: abort();
4127 } else {
4128 switch (size) {
4129 case 0: gen_helper_neon_widen_s8(dest, src); break;
4130 case 1: gen_helper_neon_widen_s16(dest, src); break;
4131 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4132 default: abort();
4135 tcg_temp_free_i32(src);
4138 static inline void gen_neon_addl(int size)
4140 switch (size) {
4141 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4142 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4143 case 2: tcg_gen_add_i64(CPU_V001); break;
4144 default: abort();
4148 static inline void gen_neon_subl(int size)
4150 switch (size) {
4151 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4152 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4153 case 2: tcg_gen_sub_i64(CPU_V001); break;
4154 default: abort();
4158 static inline void gen_neon_negl(TCGv_i64 var, int size)
4160 switch (size) {
4161 case 0: gen_helper_neon_negl_u16(var, var); break;
4162 case 1: gen_helper_neon_negl_u32(var, var); break;
4163 case 2: gen_helper_neon_negl_u64(var, var); break;
4164 default: abort();
4168 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4170 switch (size) {
4171 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4172 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4173 default: abort();
4177 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4179 TCGv_i64 tmp;
4181 switch ((size << 1) | u) {
4182 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4183 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4184 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4185 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4186 case 4:
4187 tmp = gen_muls_i64_i32(a, b);
4188 tcg_gen_mov_i64(dest, tmp);
4189 tcg_temp_free_i64(tmp);
4190 break;
4191 case 5:
4192 tmp = gen_mulu_i64_i32(a, b);
4193 tcg_gen_mov_i64(dest, tmp);
4194 tcg_temp_free_i64(tmp);
4195 break;
4196 default: abort();
4199 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4200        Don't forget to free them now. */
4201 if (size < 2) {
4202 tcg_temp_free_i32(a);
4203 tcg_temp_free_i32(b);
4207 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4209 if (op) {
4210 if (u) {
4211 gen_neon_unarrow_sats(size, dest, src);
4212 } else {
4213 gen_neon_narrow(size, dest, src);
4215 } else {
4216 if (u) {
4217 gen_neon_narrow_satu(size, dest, src);
4218 } else {
4219 gen_neon_narrow_sats(size, dest, src);
4224 /* Symbolic constants for op fields for Neon 3-register same-length.
4225 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4226 * table A7-9.
4228 #define NEON_3R_VHADD 0
4229 #define NEON_3R_VQADD 1
4230 #define NEON_3R_VRHADD 2
4231 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4232 #define NEON_3R_VHSUB 4
4233 #define NEON_3R_VQSUB 5
4234 #define NEON_3R_VCGT 6
4235 #define NEON_3R_VCGE 7
4236 #define NEON_3R_VSHL 8
4237 #define NEON_3R_VQSHL 9
4238 #define NEON_3R_VRSHL 10
4239 #define NEON_3R_VQRSHL 11
4240 #define NEON_3R_VMAX 12
4241 #define NEON_3R_VMIN 13
4242 #define NEON_3R_VABD 14
4243 #define NEON_3R_VABA 15
4244 #define NEON_3R_VADD_VSUB 16
4245 #define NEON_3R_VTST_VCEQ 17
4246 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4247 #define NEON_3R_VMUL 19
4248 #define NEON_3R_VPMAX 20
4249 #define NEON_3R_VPMIN 21
4250 #define NEON_3R_VQDMULH_VQRDMULH 22
4251 #define NEON_3R_VPADD 23
4252 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4253 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4254 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4255 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4256 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4257 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4258 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4260 static const uint8_t neon_3r_sizes[] = {
4261 [NEON_3R_VHADD] = 0x7,
4262 [NEON_3R_VQADD] = 0xf,
4263 [NEON_3R_VRHADD] = 0x7,
4264 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4265 [NEON_3R_VHSUB] = 0x7,
4266 [NEON_3R_VQSUB] = 0xf,
4267 [NEON_3R_VCGT] = 0x7,
4268 [NEON_3R_VCGE] = 0x7,
4269 [NEON_3R_VSHL] = 0xf,
4270 [NEON_3R_VQSHL] = 0xf,
4271 [NEON_3R_VRSHL] = 0xf,
4272 [NEON_3R_VQRSHL] = 0xf,
4273 [NEON_3R_VMAX] = 0x7,
4274 [NEON_3R_VMIN] = 0x7,
4275 [NEON_3R_VABD] = 0x7,
4276 [NEON_3R_VABA] = 0x7,
4277 [NEON_3R_VADD_VSUB] = 0xf,
4278 [NEON_3R_VTST_VCEQ] = 0x7,
4279 [NEON_3R_VML] = 0x7,
4280 [NEON_3R_VMUL] = 0x7,
4281 [NEON_3R_VPMAX] = 0x7,
4282 [NEON_3R_VPMIN] = 0x7,
4283 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4284 [NEON_3R_VPADD] = 0x7,
4285 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4286 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4287 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4288 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4289 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4290 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4291 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
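/* The decoder checks (neon_3r_sizes[op] & (1 << size)) below: e.g.
 * the 0x7 entries allow element sizes 0-2 (8/16/32 bit) but not
 * 64 bit, and the 0x5 entries (float ops, "size bit 1 encodes op")
 * only accept size values 0 and 2.
 */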
4294 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4295 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4296 * table A7-13.
4298 #define NEON_2RM_VREV64 0
4299 #define NEON_2RM_VREV32 1
4300 #define NEON_2RM_VREV16 2
4301 #define NEON_2RM_VPADDL 4
4302 #define NEON_2RM_VPADDL_U 5
4303 #define NEON_2RM_VCLS 8
4304 #define NEON_2RM_VCLZ 9
4305 #define NEON_2RM_VCNT 10
4306 #define NEON_2RM_VMVN 11
4307 #define NEON_2RM_VPADAL 12
4308 #define NEON_2RM_VPADAL_U 13
4309 #define NEON_2RM_VQABS 14
4310 #define NEON_2RM_VQNEG 15
4311 #define NEON_2RM_VCGT0 16
4312 #define NEON_2RM_VCGE0 17
4313 #define NEON_2RM_VCEQ0 18
4314 #define NEON_2RM_VCLE0 19
4315 #define NEON_2RM_VCLT0 20
4316 #define NEON_2RM_VABS 22
4317 #define NEON_2RM_VNEG 23
4318 #define NEON_2RM_VCGT0_F 24
4319 #define NEON_2RM_VCGE0_F 25
4320 #define NEON_2RM_VCEQ0_F 26
4321 #define NEON_2RM_VCLE0_F 27
4322 #define NEON_2RM_VCLT0_F 28
4323 #define NEON_2RM_VABS_F 30
4324 #define NEON_2RM_VNEG_F 31
4325 #define NEON_2RM_VSWP 32
4326 #define NEON_2RM_VTRN 33
4327 #define NEON_2RM_VUZP 34
4328 #define NEON_2RM_VZIP 35
4329 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4330 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4331 #define NEON_2RM_VSHLL 38
4332 #define NEON_2RM_VCVT_F16_F32 44
4333 #define NEON_2RM_VCVT_F32_F16 46
4334 #define NEON_2RM_VRECPE 56
4335 #define NEON_2RM_VRSQRTE 57
4336 #define NEON_2RM_VRECPE_F 58
4337 #define NEON_2RM_VRSQRTE_F 59
4338 #define NEON_2RM_VCVT_FS 60
4339 #define NEON_2RM_VCVT_FU 61
4340 #define NEON_2RM_VCVT_SF 62
4341 #define NEON_2RM_VCVT_UF 63
4343 static int neon_2rm_is_float_op(int op)
4345 /* Return true if this neon 2reg-misc op is float-to-float */
4346 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4347 op >= NEON_2RM_VRECPE_F);
4350 /* Each entry in this array has bit n set if the insn allows
4351 * size value n (otherwise it will UNDEF). Since unallocated
4352 * op values will have no bits set they always UNDEF.
4354 static const uint8_t neon_2rm_sizes[] = {
4355 [NEON_2RM_VREV64] = 0x7,
4356 [NEON_2RM_VREV32] = 0x3,
4357 [NEON_2RM_VREV16] = 0x1,
4358 [NEON_2RM_VPADDL] = 0x7,
4359 [NEON_2RM_VPADDL_U] = 0x7,
4360 [NEON_2RM_VCLS] = 0x7,
4361 [NEON_2RM_VCLZ] = 0x7,
4362 [NEON_2RM_VCNT] = 0x1,
4363 [NEON_2RM_VMVN] = 0x1,
4364 [NEON_2RM_VPADAL] = 0x7,
4365 [NEON_2RM_VPADAL_U] = 0x7,
4366 [NEON_2RM_VQABS] = 0x7,
4367 [NEON_2RM_VQNEG] = 0x7,
4368 [NEON_2RM_VCGT0] = 0x7,
4369 [NEON_2RM_VCGE0] = 0x7,
4370 [NEON_2RM_VCEQ0] = 0x7,
4371 [NEON_2RM_VCLE0] = 0x7,
4372 [NEON_2RM_VCLT0] = 0x7,
4373 [NEON_2RM_VABS] = 0x7,
4374 [NEON_2RM_VNEG] = 0x7,
4375 [NEON_2RM_VCGT0_F] = 0x4,
4376 [NEON_2RM_VCGE0_F] = 0x4,
4377 [NEON_2RM_VCEQ0_F] = 0x4,
4378 [NEON_2RM_VCLE0_F] = 0x4,
4379 [NEON_2RM_VCLT0_F] = 0x4,
4380 [NEON_2RM_VABS_F] = 0x4,
4381 [NEON_2RM_VNEG_F] = 0x4,
4382 [NEON_2RM_VSWP] = 0x1,
4383 [NEON_2RM_VTRN] = 0x7,
4384 [NEON_2RM_VUZP] = 0x7,
4385 [NEON_2RM_VZIP] = 0x7,
4386 [NEON_2RM_VMOVN] = 0x7,
4387 [NEON_2RM_VQMOVN] = 0x7,
4388 [NEON_2RM_VSHLL] = 0x7,
4389 [NEON_2RM_VCVT_F16_F32] = 0x2,
4390 [NEON_2RM_VCVT_F32_F16] = 0x2,
4391 [NEON_2RM_VRECPE] = 0x4,
4392 [NEON_2RM_VRSQRTE] = 0x4,
4393 [NEON_2RM_VRECPE_F] = 0x4,
4394 [NEON_2RM_VRSQRTE_F] = 0x4,
4395 [NEON_2RM_VCVT_FS] = 0x4,
4396 [NEON_2RM_VCVT_FU] = 0x4,
4397 [NEON_2RM_VCVT_SF] = 0x4,
4398 [NEON_2RM_VCVT_UF] = 0x4,
4401 /* Translate a NEON data processing instruction. Return nonzero if the
4402 instruction is invalid.
4403 We process data in a mixture of 32-bit and 64-bit chunks.
4404 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4406 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4408 int op;
4409 int q;
4410 int rd, rn, rm;
4411 int size;
4412 int shift;
4413 int pass;
4414 int count;
4415 int pairwise;
4416 int u;
4417 uint32_t imm, mask;
4418 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4419 TCGv_i64 tmp64;
4421 if (!s->vfp_enabled)
4422 return 1;
4423 q = (insn & (1 << 6)) != 0;
4424 u = (insn >> 24) & 1;
4425 VFP_DREG_D(rd, insn);
4426 VFP_DREG_N(rn, insn);
4427 VFP_DREG_M(rm, insn);
4428 size = (insn >> 20) & 3;
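 /* Common field decode: q (bit 6) selects a 128-bit quadword operation,
  * u (bit 24) selects the unsigned/alternate form of an op, and size
  * (bits [21:20]) gives the element size or a sub-opcode. */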
4429 if ((insn & (1 << 23)) == 0) {
4430 /* Three register same length. */
4431 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
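 /* op is insn bits [11:8,4], matching the NEON_3R_* constants above. */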
4432 /* Catch invalid op and bad size combinations: UNDEF */
4433 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4434 return 1;
4436 /* All insns of this form UNDEF for either this condition or the
4437 * superset of cases "Q==1"; we catch the latter later.
4439 if (q && ((rd | rn | rm) & 1)) {
4440 return 1;
4442 if (size == 3 && op != NEON_3R_LOGIC) {
4443 /* 64-bit element instructions. */
4444 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4445 neon_load_reg64(cpu_V0, rn + pass);
4446 neon_load_reg64(cpu_V1, rm + pass);
4447 switch (op) {
4448 case NEON_3R_VQADD:
4449 if (u) {
4450 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4451 cpu_V0, cpu_V1);
4452 } else {
4453 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4454 cpu_V0, cpu_V1);
4456 break;
4457 case NEON_3R_VQSUB:
4458 if (u) {
4459 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4460 cpu_V0, cpu_V1);
4461 } else {
4462 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4463 cpu_V0, cpu_V1);
4465 break;
4466 case NEON_3R_VSHL:
4467 if (u) {
4468 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4469 } else {
4470 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4472 break;
4473 case NEON_3R_VQSHL:
4474 if (u) {
4475 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4476 cpu_V1, cpu_V0);
4477 } else {
4478 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4479 cpu_V1, cpu_V0);
4481 break;
4482 case NEON_3R_VRSHL:
4483 if (u) {
4484 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4485 } else {
4486 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4488 break;
4489 case NEON_3R_VQRSHL:
4490 if (u) {
4491 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4492 cpu_V1, cpu_V0);
4493 } else {
4494 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4495 cpu_V1, cpu_V0);
4497 break;
4498 case NEON_3R_VADD_VSUB:
4499 if (u) {
4500 tcg_gen_sub_i64(CPU_V001);
4501 } else {
4502 tcg_gen_add_i64(CPU_V001);
4504 break;
4505 default:
4506 abort();
4508 neon_store_reg64(cpu_V0, rd + pass);
4510 return 0;
4512 pairwise = 0;
4513 switch (op) {
4514 case NEON_3R_VSHL:
4515 case NEON_3R_VQSHL:
4516 case NEON_3R_VRSHL:
4517 case NEON_3R_VQRSHL:
4519 int rtmp;
4520 /* Shift instruction operands are reversed. */
4521 rtmp = rn;
4522 rn = rm;
4523 rm = rtmp;
4525 break;
4526 case NEON_3R_VPADD:
4527 if (u) {
4528 return 1;
4530 /* Fall through */
4531 case NEON_3R_VPMAX:
4532 case NEON_3R_VPMIN:
4533 pairwise = 1;
4534 break;
4535 case NEON_3R_FLOAT_ARITH:
4536 pairwise = (u && size < 2); /* if VPADD (float) */
4537 break;
4538 case NEON_3R_FLOAT_MINMAX:
4539 pairwise = u; /* if VPMIN/VPMAX (float) */
4540 break;
4541 case NEON_3R_FLOAT_CMP:
4542 if (!u && size) {
4543 /* no encoding for U=0 C=1x */
4544 return 1;
4546 break;
4547 case NEON_3R_FLOAT_ACMP:
4548 if (!u) {
4549 return 1;
4551 break;
4552 case NEON_3R_VRECPS_VRSQRTS:
4553 if (u) {
4554 return 1;
4556 break;
4557 case NEON_3R_VMUL:
4558 if (u && (size != 0)) {
4559 /* UNDEF on invalid size for polynomial subcase */
4560 return 1;
4562 break;
4563 case NEON_3R_VFM:
4564 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4565 return 1;
4567 break;
4568 default:
4569 break;
4572 if (pairwise && q) {
4573 /* All the pairwise insns UNDEF if Q is set */
4574 return 1;
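 /* Each pass below handles one 32-bit chunk: two passes for a D register,
  * four for a Q register. */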
4577 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4579 if (pairwise) {
4580 /* Pairwise. */
4581 if (pass < 1) {
4582 tmp = neon_load_reg(rn, 0);
4583 tmp2 = neon_load_reg(rn, 1);
4584 } else {
4585 tmp = neon_load_reg(rm, 0);
4586 tmp2 = neon_load_reg(rm, 1);
4588 } else {
4589 /* Elementwise. */
4590 tmp = neon_load_reg(rn, pass);
4591 tmp2 = neon_load_reg(rm, pass);
4593 switch (op) {
4594 case NEON_3R_VHADD:
4595 GEN_NEON_INTEGER_OP(hadd);
4596 break;
4597 case NEON_3R_VQADD:
4598 GEN_NEON_INTEGER_OP_ENV(qadd);
4599 break;
4600 case NEON_3R_VRHADD:
4601 GEN_NEON_INTEGER_OP(rhadd);
4602 break;
4603 case NEON_3R_LOGIC: /* Logic ops. */
4604 switch ((u << 2) | size) {
4605 case 0: /* VAND */
4606 tcg_gen_and_i32(tmp, tmp, tmp2);
4607 break;
4608 case 1: /* BIC */
4609 tcg_gen_andc_i32(tmp, tmp, tmp2);
4610 break;
4611 case 2: /* VORR */
4612 tcg_gen_or_i32(tmp, tmp, tmp2);
4613 break;
4614 case 3: /* VORN */
4615 tcg_gen_orc_i32(tmp, tmp, tmp2);
4616 break;
4617 case 4: /* VEOR */
4618 tcg_gen_xor_i32(tmp, tmp, tmp2);
4619 break;
4620 case 5: /* VBSL */
4621 tmp3 = neon_load_reg(rd, pass);
4622 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4623 tcg_temp_free_i32(tmp3);
4624 break;
4625 case 6: /* VBIT */
4626 tmp3 = neon_load_reg(rd, pass);
4627 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4628 tcg_temp_free_i32(tmp3);
4629 break;
4630 case 7: /* VBIF */
4631 tmp3 = neon_load_reg(rd, pass);
4632 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4633 tcg_temp_free_i32(tmp3);
4634 break;
4636 break;
4637 case NEON_3R_VHSUB:
4638 GEN_NEON_INTEGER_OP(hsub);
4639 break;
4640 case NEON_3R_VQSUB:
4641 GEN_NEON_INTEGER_OP_ENV(qsub);
4642 break;
4643 case NEON_3R_VCGT:
4644 GEN_NEON_INTEGER_OP(cgt);
4645 break;
4646 case NEON_3R_VCGE:
4647 GEN_NEON_INTEGER_OP(cge);
4648 break;
4649 case NEON_3R_VSHL:
4650 GEN_NEON_INTEGER_OP(shl);
4651 break;
4652 case NEON_3R_VQSHL:
4653 GEN_NEON_INTEGER_OP_ENV(qshl);
4654 break;
4655 case NEON_3R_VRSHL:
4656 GEN_NEON_INTEGER_OP(rshl);
4657 break;
4658 case NEON_3R_VQRSHL:
4659 GEN_NEON_INTEGER_OP_ENV(qrshl);
4660 break;
4661 case NEON_3R_VMAX:
4662 GEN_NEON_INTEGER_OP(max);
4663 break;
4664 case NEON_3R_VMIN:
4665 GEN_NEON_INTEGER_OP(min);
4666 break;
4667 case NEON_3R_VABD:
4668 GEN_NEON_INTEGER_OP(abd);
4669 break;
4670 case NEON_3R_VABA:
4671 GEN_NEON_INTEGER_OP(abd);
4672 tcg_temp_free_i32(tmp2);
4673 tmp2 = neon_load_reg(rd, pass);
4674 gen_neon_add(size, tmp, tmp2);
4675 break;
4676 case NEON_3R_VADD_VSUB:
4677 if (!u) { /* VADD */
4678 gen_neon_add(size, tmp, tmp2);
4679 } else { /* VSUB */
4680 switch (size) {
4681 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4682 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4683 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4684 default: abort();
4687 break;
4688 case NEON_3R_VTST_VCEQ:
4689 if (!u) { /* VTST */
4690 switch (size) {
4691 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4692 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4693 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4694 default: abort();
4696 } else { /* VCEQ */
4697 switch (size) {
4698 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4699 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4700 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4701 default: abort();
4704 break;
4705 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4706 switch (size) {
4707 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4708 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4709 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4710 default: abort();
4712 tcg_temp_free_i32(tmp2);
4713 tmp2 = neon_load_reg(rd, pass);
4714 if (u) { /* VMLS */
4715 gen_neon_rsb(size, tmp, tmp2);
4716 } else { /* VMLA */
4717 gen_neon_add(size, tmp, tmp2);
4719 break;
4720 case NEON_3R_VMUL:
4721 if (u) { /* polynomial */
4722 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4723 } else { /* Integer */
4724 switch (size) {
4725 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4726 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4727 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4728 default: abort();
4731 break;
4732 case NEON_3R_VPMAX:
4733 GEN_NEON_INTEGER_OP(pmax);
4734 break;
4735 case NEON_3R_VPMIN:
4736 GEN_NEON_INTEGER_OP(pmin);
4737 break;
4738 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4739 if (!u) { /* VQDMULH */
4740 switch (size) {
4741 case 1:
4742 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4743 break;
4744 case 2:
4745 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4746 break;
4747 default: abort();
4749 } else { /* VQRDMULH */
4750 switch (size) {
4751 case 1:
4752 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4753 break;
4754 case 2:
4755 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4756 break;
4757 default: abort();
4760 break;
4761 case NEON_3R_VPADD:
4762 switch (size) {
4763 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4764 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4765 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4766 default: abort();
4768 break;
4769 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4771 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4772 switch ((u << 2) | size) {
4773 case 0: /* VADD */
4774 case 4: /* VPADD */
4775 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4776 break;
4777 case 2: /* VSUB */
4778 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4779 break;
4780 case 6: /* VABD */
4781 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4782 break;
4783 default:
4784 abort();
4786 tcg_temp_free_ptr(fpstatus);
4787 break;
4789 case NEON_3R_FLOAT_MULTIPLY:
4791 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4792 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4793 if (!u) {
4794 tcg_temp_free_i32(tmp2);
4795 tmp2 = neon_load_reg(rd, pass);
4796 if (size == 0) {
4797 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4798 } else {
4799 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4802 tcg_temp_free_ptr(fpstatus);
4803 break;
4805 case NEON_3R_FLOAT_CMP:
4807 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4808 if (!u) {
4809 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4810 } else {
4811 if (size == 0) {
4812 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4813 } else {
4814 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4817 tcg_temp_free_ptr(fpstatus);
4818 break;
4820 case NEON_3R_FLOAT_ACMP:
4822 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4823 if (size == 0) {
4824 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4825 } else {
4826 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4828 tcg_temp_free_ptr(fpstatus);
4829 break;
4831 case NEON_3R_FLOAT_MINMAX:
4833 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4834 if (size == 0) {
4835 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4836 } else {
4837 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4839 tcg_temp_free_ptr(fpstatus);
4840 break;
4842 case NEON_3R_VRECPS_VRSQRTS:
4843 if (size == 0)
4844 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4845 else
4846 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4847 break;
4848 case NEON_3R_VFM:
4850 /* VFMA, VFMS: fused multiply-add */
4851 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4852 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4853 if (size) {
4854 /* VFMS */
4855 gen_helper_vfp_negs(tmp, tmp);
4857 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4858 tcg_temp_free_i32(tmp3);
4859 tcg_temp_free_ptr(fpstatus);
4860 break;
4862 default:
4863 abort();
4865 tcg_temp_free_i32(tmp2);
4867 /* Save the result. For elementwise operations we can put it
4868 straight into the destination register. For pairwise operations
4869 we have to be careful to avoid clobbering the source operands. */
4870 if (pairwise && rd == rm) {
4871 neon_store_scratch(pass, tmp);
4872 } else {
4873 neon_store_reg(rd, pass, tmp);
4876 } /* for pass */
4877 if (pairwise && rd == rm) {
4878 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4879 tmp = neon_load_scratch(pass);
4880 neon_store_reg(rd, pass, tmp);
4883 /* End of 3 register same size operations. */
4884 } else if (insn & (1 << 4)) {
4885 if ((insn & 0x00380080) != 0) {
4886 /* Two registers and shift. */
4887 op = (insn >> 8) & 0xf;
4888 if (insn & (1 << 7)) {
4889 /* 64-bit shift. */
4890 if (op > 7) {
4891 return 1;
4893 size = 3;
4894 } else {
4895 size = 2;
4896 while ((insn & (1 << (size + 19))) == 0)
4897 size--;
4899 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
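 /* imm6 is insn bits [21:16]: bit 7 (the L bit) selects 64-bit elements,
  * otherwise the highest set bit of imm6[5:3] selects the element size,
  * and the low (size + 3) bits of imm6 give the raw shift amount. */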
4900 /* To avoid excessive duplication of ops we implement shift
4901 by immediate using the variable shift operations. */
4902 if (op < 8) {
4903 /* Shift by immediate:
4904 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4905 if (q && ((rd | rm) & 1)) {
4906 return 1;
4908 if (!u && (op == 4 || op == 6)) {
4909 return 1;
4911 /* Right shifts are encoded as N - shift, where N is the
4912 element size in bits. */
4913 if (op <= 4)
4914 shift = shift - (1 << (size + 3));
4915 if (size == 3) {
4916 count = q + 1;
4917 } else {
4918 count = q ? 4 : 2;
4920 switch (size) {
4921 case 0:
4922 imm = (uint8_t) shift;
4923 imm |= imm << 8;
4924 imm |= imm << 16;
4925 break;
4926 case 1:
4927 imm = (uint16_t) shift;
4928 imm |= imm << 16;
4929 break;
4930 case 2:
4931 case 3:
4932 imm = shift;
4933 break;
4934 default:
4935 abort();
4938 for (pass = 0; pass < count; pass++) {
4939 if (size == 3) {
4940 neon_load_reg64(cpu_V0, rm + pass);
4941 tcg_gen_movi_i64(cpu_V1, imm);
4942 switch (op) {
4943 case 0: /* VSHR */
4944 case 1: /* VSRA */
4945 if (u)
4946 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4947 else
4948 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4949 break;
4950 case 2: /* VRSHR */
4951 case 3: /* VRSRA */
4952 if (u)
4953 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4954 else
4955 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4956 break;
4957 case 4: /* VSRI */
4958 case 5: /* VSHL, VSLI */
4959 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4960 break;
4961 case 6: /* VQSHLU */
4962 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4963 cpu_V0, cpu_V1);
4964 break;
4965 case 7: /* VQSHL */
4966 if (u) {
4967 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4968 cpu_V0, cpu_V1);
4969 } else {
4970 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4971 cpu_V0, cpu_V1);
4973 break;
4975 if (op == 1 || op == 3) {
4976 /* Accumulate. */
4977 neon_load_reg64(cpu_V1, rd + pass);
4978 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4979 } else if (op == 4 || (op == 5 && u)) {
4980 /* Insert */
4981 neon_load_reg64(cpu_V1, rd + pass);
4982 uint64_t mask;
4983 if (shift < -63 || shift > 63) {
4984 mask = 0;
4985 } else {
4986 if (op == 4) {
4987 mask = 0xffffffffffffffffull >> -shift;
4988 } else {
4989 mask = 0xffffffffffffffffull << shift;
4992 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4993 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4995 neon_store_reg64(cpu_V0, rd + pass);
4996 } else { /* size < 3 */
4997 /* Operands are in tmp and tmp2. */
4998 tmp = neon_load_reg(rm, pass);
4999 tmp2 = tcg_temp_new_i32();
5000 tcg_gen_movi_i32(tmp2, imm);
5001 switch (op) {
5002 case 0: /* VSHR */
5003 case 1: /* VSRA */
5004 GEN_NEON_INTEGER_OP(shl);
5005 break;
5006 case 2: /* VRSHR */
5007 case 3: /* VRSRA */
5008 GEN_NEON_INTEGER_OP(rshl);
5009 break;
5010 case 4: /* VSRI */
5011 case 5: /* VSHL, VSLI */
5012 switch (size) {
5013 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5014 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5015 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5016 default: abort();
5018 break;
5019 case 6: /* VQSHLU */
5020 switch (size) {
5021 case 0:
5022 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5023 tmp, tmp2);
5024 break;
5025 case 1:
5026 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5027 tmp, tmp2);
5028 break;
5029 case 2:
5030 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5031 tmp, tmp2);
5032 break;
5033 default:
5034 abort();
5036 break;
5037 case 7: /* VQSHL */
5038 GEN_NEON_INTEGER_OP_ENV(qshl);
5039 break;
5041 tcg_temp_free_i32(tmp2);
5043 if (op == 1 || op == 3) {
5044 /* Accumulate. */
5045 tmp2 = neon_load_reg(rd, pass);
5046 gen_neon_add(size, tmp, tmp2);
5047 tcg_temp_free_i32(tmp2);
5048 } else if (op == 4 || (op == 5 && u)) {
5049 /* Insert */
5050 switch (size) {
5051 case 0:
5052 if (op == 4)
5053 mask = 0xff >> -shift;
5054 else
5055 mask = (uint8_t)(0xff << shift);
5056 mask |= mask << 8;
5057 mask |= mask << 16;
5058 break;
5059 case 1:
5060 if (op == 4)
5061 mask = 0xffff >> -shift;
5062 else
5063 mask = (uint16_t)(0xffff << shift);
5064 mask |= mask << 16;
5065 break;
5066 case 2:
5067 if (shift < -31 || shift > 31) {
5068 mask = 0;
5069 } else {
5070 if (op == 4)
5071 mask = 0xffffffffu >> -shift;
5072 else
5073 mask = 0xffffffffu << shift;
5075 break;
5076 default:
5077 abort();
5079 tmp2 = neon_load_reg(rd, pass);
5080 tcg_gen_andi_i32(tmp, tmp, mask);
5081 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5082 tcg_gen_or_i32(tmp, tmp, tmp2);
5083 tcg_temp_free_i32(tmp2);
5085 neon_store_reg(rd, pass, tmp);
5087 } /* for pass */
5088 } else if (op < 10) {
5089 /* Shift by immediate and narrow:
5090 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5091 int input_unsigned = (op == 8) ? !u : u;
5092 if (rm & 1) {
5093 return 1;
5095 shift = shift - (1 << (size + 3));
5096 size++;
5097 if (size == 3) {
5098 tmp64 = tcg_const_i64(shift);
5099 neon_load_reg64(cpu_V0, rm);
5100 neon_load_reg64(cpu_V1, rm + 1);
5101 for (pass = 0; pass < 2; pass++) {
5102 TCGv_i64 in;
5103 if (pass == 0) {
5104 in = cpu_V0;
5105 } else {
5106 in = cpu_V1;
5108 if (q) {
5109 if (input_unsigned) {
5110 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5111 } else {
5112 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5114 } else {
5115 if (input_unsigned) {
5116 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5117 } else {
5118 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5121 tmp = tcg_temp_new_i32();
5122 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5123 neon_store_reg(rd, pass, tmp);
5124 } /* for pass */
5125 tcg_temp_free_i64(tmp64);
5126 } else {
5127 if (size == 1) {
5128 imm = (uint16_t)shift;
5129 imm |= imm << 16;
5130 } else {
5131 /* size == 2 */
5132 imm = (uint32_t)shift;
5134 tmp2 = tcg_const_i32(imm);
5135 tmp4 = neon_load_reg(rm + 1, 0);
5136 tmp5 = neon_load_reg(rm + 1, 1);
5137 for (pass = 0; pass < 2; pass++) {
5138 if (pass == 0) {
5139 tmp = neon_load_reg(rm, 0);
5140 } else {
5141 tmp = tmp4;
5143 gen_neon_shift_narrow(size, tmp, tmp2, q,
5144 input_unsigned);
5145 if (pass == 0) {
5146 tmp3 = neon_load_reg(rm, 1);
5147 } else {
5148 tmp3 = tmp5;
5150 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5151 input_unsigned);
5152 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5153 tcg_temp_free_i32(tmp);
5154 tcg_temp_free_i32(tmp3);
5155 tmp = tcg_temp_new_i32();
5156 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5157 neon_store_reg(rd, pass, tmp);
5158 } /* for pass */
5159 tcg_temp_free_i32(tmp2);
5161 } else if (op == 10) {
5162 /* VSHLL, VMOVL */
5163 if (q || (rd & 1)) {
5164 return 1;
5166 tmp = neon_load_reg(rm, 0);
5167 tmp2 = neon_load_reg(rm, 1);
5168 for (pass = 0; pass < 2; pass++) {
5169 if (pass == 1)
5170 tmp = tmp2;
5172 gen_neon_widen(cpu_V0, tmp, size, u);
5174 if (shift != 0) {
5175 /* The shift is less than the width of the source
5176 type, so we can just shift the whole register. */
5177 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5178 /* Widen the result of shift: we need to clear
5179 * the potential overflow bits resulting from
5180 * left bits of the narrow input appearing as
5181 * right bits of the left neighbour narrow
5182 * input. */
5183 if (size < 2 || !u) {
5184 uint64_t imm64;
5185 if (size == 0) {
5186 imm = (0xffu >> (8 - shift));
5187 imm |= imm << 16;
5188 } else if (size == 1) {
5189 imm = 0xffff >> (16 - shift);
5190 } else {
5191 /* size == 2 */
5192 imm = 0xffffffff >> (32 - shift);
5194 if (size < 2) {
5195 imm64 = imm | (((uint64_t)imm) << 32);
5196 } else {
5197 imm64 = imm;
5199 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5202 neon_store_reg64(cpu_V0, rd + pass);
5204 } else if (op >= 14) {
5205 /* VCVT fixed-point. */
5206 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5207 return 1;
5209 /* We have already masked out the must-be-1 top bit of imm6,
5210 * hence this 32-shift where the ARM ARM has 64-imm6.
5212 shift = 32 - shift;
5213 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5214 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5215 if (!(op & 1)) {
5216 if (u)
5217 gen_vfp_ulto(0, shift, 1);
5218 else
5219 gen_vfp_slto(0, shift, 1);
5220 } else {
5221 if (u)
5222 gen_vfp_toul(0, shift, 1);
5223 else
5224 gen_vfp_tosl(0, shift, 1);
5226 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5228 } else {
5229 return 1;
5231 } else { /* (insn & 0x00380080) == 0 */
5232 int invert;
5233 if (q && (rd & 1)) {
5234 return 1;
5237 op = (insn >> 8) & 0xf;
5238 /* One register and immediate. */
5239 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
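 /* Reassemble the 8-bit immediate from the a:bcdefgh fields
  * (insn bit 24, bits [18:16] and bits [3:0]). */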
5240 invert = (insn & (1 << 5)) != 0;
5241 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5242 * We choose to not special-case this and will behave as if a
5243 * valid constant encoding of 0 had been given.
5245 switch (op) {
5246 case 0: case 1:
5247 /* no-op */
5248 break;
5249 case 2: case 3:
5250 imm <<= 8;
5251 break;
5252 case 4: case 5:
5253 imm <<= 16;
5254 break;
5255 case 6: case 7:
5256 imm <<= 24;
5257 break;
5258 case 8: case 9:
5259 imm |= imm << 16;
5260 break;
5261 case 10: case 11:
5262 imm = (imm << 8) | (imm << 24);
5263 break;
5264 case 12:
5265 imm = (imm << 8) | 0xff;
5266 break;
5267 case 13:
5268 imm = (imm << 16) | 0xffff;
5269 break;
5270 case 14:
5271 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5272 if (invert)
5273 imm = ~imm;
5274 break;
5275 case 15:
5276 if (invert) {
5277 return 1;
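 /* Expand the 8 immediate bits into the 32-bit float VMOV immediate
  * encoding: bit 7 is the sign, bit 6 is inverted and replicated into
  * the exponent, and the low bits fill the exponent/fraction top bits. */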
5279 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5280 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5281 break;
5283 if (invert)
5284 imm = ~imm;
5286 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5287 if (op & 1 && op < 12) {
5288 tmp = neon_load_reg(rd, pass);
5289 if (invert) {
5290 /* The immediate value has already been inverted, so
5291 BIC becomes AND. */
5292 tcg_gen_andi_i32(tmp, tmp, imm);
5293 } else {
5294 tcg_gen_ori_i32(tmp, tmp, imm);
5296 } else {
5297 /* VMOV, VMVN. */
5298 tmp = tcg_temp_new_i32();
5299 if (op == 14 && invert) {
5300 int n;
5301 uint32_t val;
5302 val = 0;
5303 for (n = 0; n < 4; n++) {
5304 if (imm & (1 << (n + (pass & 1) * 4)))
5305 val |= 0xff << (n * 8);
5307 tcg_gen_movi_i32(tmp, val);
5308 } else {
5309 tcg_gen_movi_i32(tmp, imm);
5312 neon_store_reg(rd, pass, tmp);
5315 } else { /* (insn & 0x00800010 == 0x00800000) */
5316 if (size != 3) {
5317 op = (insn >> 8) & 0xf;
5318 if ((insn & (1 << 6)) == 0) {
5319 /* Three registers of different lengths. */
5320 int src1_wide;
5321 int src2_wide;
5322 int prewiden;
5323 /* undefreq: bit 0 : UNDEF if size != 0
5324 * bit 1 : UNDEF if size == 0
5325 * bit 2 : UNDEF if U == 1
5326 * Note that [1:0] set implies 'always UNDEF'
5328 int undefreq;
5329 /* prewiden, src1_wide, src2_wide, undefreq */
5330 static const int neon_3reg_wide[16][4] = {
5331 {1, 0, 0, 0}, /* VADDL */
5332 {1, 1, 0, 0}, /* VADDW */
5333 {1, 0, 0, 0}, /* VSUBL */
5334 {1, 1, 0, 0}, /* VSUBW */
5335 {0, 1, 1, 0}, /* VADDHN */
5336 {0, 0, 0, 0}, /* VABAL */
5337 {0, 1, 1, 0}, /* VSUBHN */
5338 {0, 0, 0, 0}, /* VABDL */
5339 {0, 0, 0, 0}, /* VMLAL */
5340 {0, 0, 0, 6}, /* VQDMLAL */
5341 {0, 0, 0, 0}, /* VMLSL */
5342 {0, 0, 0, 6}, /* VQDMLSL */
5343 {0, 0, 0, 0}, /* Integer VMULL */
5344 {0, 0, 0, 2}, /* VQDMULL */
5345 {0, 0, 0, 5}, /* Polynomial VMULL */
5346 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5349 prewiden = neon_3reg_wide[op][0];
5350 src1_wide = neon_3reg_wide[op][1];
5351 src2_wide = neon_3reg_wide[op][2];
5352 undefreq = neon_3reg_wide[op][3];
5354 if (((undefreq & 1) && (size != 0)) ||
5355 ((undefreq & 2) && (size == 0)) ||
5356 ((undefreq & 4) && u)) {
5357 return 1;
5359 if ((src1_wide && (rn & 1)) ||
5360 (src2_wide && (rm & 1)) ||
5361 (!src2_wide && (rd & 1))) {
5362 return 1;
5365 /* Avoid overlapping operands. Wide source operands are
5366 always aligned so will never overlap with wide
5367 destinations in problematic ways. */
5368 if (rd == rm && !src2_wide) {
5369 tmp = neon_load_reg(rm, 1);
5370 neon_store_scratch(2, tmp);
5371 } else if (rd == rn && !src1_wide) {
5372 tmp = neon_load_reg(rn, 1);
5373 neon_store_scratch(2, tmp);
5375 TCGV_UNUSED(tmp3);
5376 for (pass = 0; pass < 2; pass++) {
5377 if (src1_wide) {
5378 neon_load_reg64(cpu_V0, rn + pass);
5379 TCGV_UNUSED(tmp);
5380 } else {
5381 if (pass == 1 && rd == rn) {
5382 tmp = neon_load_scratch(2);
5383 } else {
5384 tmp = neon_load_reg(rn, pass);
5386 if (prewiden) {
5387 gen_neon_widen(cpu_V0, tmp, size, u);
5390 if (src2_wide) {
5391 neon_load_reg64(cpu_V1, rm + pass);
5392 TCGV_UNUSED(tmp2);
5393 } else {
5394 if (pass == 1 && rd == rm) {
5395 tmp2 = neon_load_scratch(2);
5396 } else {
5397 tmp2 = neon_load_reg(rm, pass);
5399 if (prewiden) {
5400 gen_neon_widen(cpu_V1, tmp2, size, u);
5403 switch (op) {
5404 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5405 gen_neon_addl(size);
5406 break;
5407 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5408 gen_neon_subl(size);
5409 break;
5410 case 5: case 7: /* VABAL, VABDL */
5411 switch ((size << 1) | u) {
5412 case 0:
5413 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5414 break;
5415 case 1:
5416 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5417 break;
5418 case 2:
5419 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5420 break;
5421 case 3:
5422 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5423 break;
5424 case 4:
5425 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5426 break;
5427 case 5:
5428 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5429 break;
5430 default: abort();
5432 tcg_temp_free_i32(tmp2);
5433 tcg_temp_free_i32(tmp);
5434 break;
5435 case 8: case 9: case 10: case 11: case 12: case 13:
5436 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5437 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5438 break;
5439 case 14: /* Polynomial VMULL */
5440 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5441 tcg_temp_free_i32(tmp2);
5442 tcg_temp_free_i32(tmp);
5443 break;
5444 default: /* 15 is RESERVED: caught earlier */
5445 abort();
5447 if (op == 13) {
5448 /* VQDMULL */
5449 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5450 neon_store_reg64(cpu_V0, rd + pass);
5451 } else if (op == 5 || (op >= 8 && op <= 11)) {
5452 /* Accumulate. */
5453 neon_load_reg64(cpu_V1, rd + pass);
5454 switch (op) {
5455 case 10: /* VMLSL */
5456 gen_neon_negl(cpu_V0, size);
5457 /* Fall through */
5458 case 5: case 8: /* VABAL, VMLAL */
5459 gen_neon_addl(size);
5460 break;
5461 case 9: case 11: /* VQDMLAL, VQDMLSL */
5462 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5463 if (op == 11) {
5464 gen_neon_negl(cpu_V0, size);
5466 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5467 break;
5468 default:
5469 abort();
5471 neon_store_reg64(cpu_V0, rd + pass);
5472 } else if (op == 4 || op == 6) {
5473 /* Narrowing operation. */
5474 tmp = tcg_temp_new_i32();
5475 if (!u) {
5476 switch (size) {
5477 case 0:
5478 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5479 break;
5480 case 1:
5481 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5482 break;
5483 case 2:
5484 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5485 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5486 break;
5487 default: abort();
5489 } else {
5490 switch (size) {
5491 case 0:
5492 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5493 break;
5494 case 1:
5495 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5496 break;
5497 case 2:
5498 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5499 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5500 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5501 break;
5502 default: abort();
5505 if (pass == 0) {
5506 tmp3 = tmp;
5507 } else {
5508 neon_store_reg(rd, 0, tmp3);
5509 neon_store_reg(rd, 1, tmp);
5511 } else {
5512 /* Write back the result. */
5513 neon_store_reg64(cpu_V0, rd + pass);
5516 } else {
5517 /* Two registers and a scalar. NB that for ops of this form
5518 * the ARM ARM labels bit 24 as Q, but it is in our variable
5519 * 'u', not 'q'.
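 /* No by-scalar operations exist for 8-bit elements, so size == 0 UNDEFs. */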
5521 if (size == 0) {
5522 return 1;
5524 switch (op) {
5525 case 1: /* Float VMLA scalar */
5526 case 5: /* Float VMLS scalar */
5527 case 9: /* Float VMUL scalar */
5528 if (size == 1) {
5529 return 1;
5531 /* fall through */
5532 case 0: /* Integer VMLA scalar */
5533 case 4: /* Integer VMLS scalar */
5534 case 8: /* Integer VMUL scalar */
5535 case 12: /* VQDMULH scalar */
5536 case 13: /* VQRDMULH scalar */
5537 if (u && ((rd | rn) & 1)) {
5538 return 1;
5540 tmp = neon_get_scalar(size, rm);
5541 neon_store_scratch(0, tmp);
5542 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5543 tmp = neon_load_scratch(0);
5544 tmp2 = neon_load_reg(rn, pass);
5545 if (op == 12) {
5546 if (size == 1) {
5547 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5548 } else {
5549 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5551 } else if (op == 13) {
5552 if (size == 1) {
5553 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5554 } else {
5555 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5557 } else if (op & 1) {
5558 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5560 tcg_temp_free_ptr(fpstatus);
5561 } else {
5562 switch (size) {
5563 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5564 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5565 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5566 default: abort();
5569 tcg_temp_free_i32(tmp2);
5570 if (op < 8) {
5571 /* Accumulate. */
5572 tmp2 = neon_load_reg(rd, pass);
5573 switch (op) {
5574 case 0:
5575 gen_neon_add(size, tmp, tmp2);
5576 break;
5577 case 1:
5579 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5580 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5581 tcg_temp_free_ptr(fpstatus);
5582 break;
5584 case 4:
5585 gen_neon_rsb(size, tmp, tmp2);
5586 break;
5587 case 5:
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5591 tcg_temp_free_ptr(fpstatus);
5592 break;
5594 default:
5595 abort();
5597 tcg_temp_free_i32(tmp2);
5599 neon_store_reg(rd, pass, tmp);
5601 break;
5602 case 3: /* VQDMLAL scalar */
5603 case 7: /* VQDMLSL scalar */
5604 case 11: /* VQDMULL scalar */
5605 if (u == 1) {
5606 return 1;
5608 /* fall through */
5609 case 2: /* VMLAL scalar */
5610 case 6: /* VMLSL scalar */
5611 case 10: /* VMULL scalar */
5612 if (rd & 1) {
5613 return 1;
5615 tmp2 = neon_get_scalar(size, rm);
5616 /* We need a copy of tmp2 because gen_neon_mull
5617 * deletes it during pass 0. */
5618 tmp4 = tcg_temp_new_i32();
5619 tcg_gen_mov_i32(tmp4, tmp2);
5620 tmp3 = neon_load_reg(rn, 1);
5622 for (pass = 0; pass < 2; pass++) {
5623 if (pass == 0) {
5624 tmp = neon_load_reg(rn, 0);
5625 } else {
5626 tmp = tmp3;
5627 tmp2 = tmp4;
5629 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5630 if (op != 11) {
5631 neon_load_reg64(cpu_V1, rd + pass);
5633 switch (op) {
5634 case 6:
5635 gen_neon_negl(cpu_V0, size);
5636 /* Fall through */
5637 case 2:
5638 gen_neon_addl(size);
5639 break;
5640 case 3: case 7:
5641 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5642 if (op == 7) {
5643 gen_neon_negl(cpu_V0, size);
5645 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5646 break;
5647 case 10:
5648 /* no-op */
5649 break;
5650 case 11:
5651 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5652 break;
5653 default:
5654 abort();
5656 neon_store_reg64(cpu_V0, rd + pass);
5660 break;
5661 default: /* 14 and 15 are RESERVED */
5662 return 1;
5665 } else { /* size == 3 */
5666 if (!u) {
5667 /* Extract. */
5668 imm = (insn >> 8) & 0xf;
5670 if (imm > 7 && !q)
5671 return 1;
5673 if (q && ((rd | rn | rm) & 1)) {
5674 return 1;
5677 if (imm == 0) {
5678 neon_load_reg64(cpu_V0, rn);
5679 if (q) {
5680 neon_load_reg64(cpu_V1, rn + 1);
5682 } else if (imm == 8) {
5683 neon_load_reg64(cpu_V0, rn + 1);
5684 if (q) {
5685 neon_load_reg64(cpu_V1, rm);
5687 } else if (q) {
5688 tmp64 = tcg_temp_new_i64();
5689 if (imm < 8) {
5690 neon_load_reg64(cpu_V0, rn);
5691 neon_load_reg64(tmp64, rn + 1);
5692 } else {
5693 neon_load_reg64(cpu_V0, rn + 1);
5694 neon_load_reg64(tmp64, rm);
5696 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5697 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5698 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5699 if (imm < 8) {
5700 neon_load_reg64(cpu_V1, rm);
5701 } else {
5702 neon_load_reg64(cpu_V1, rm + 1);
5703 imm -= 8;
5705 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5706 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5707 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5708 tcg_temp_free_i64(tmp64);
5709 } else {
5710 /* Non-quad extract with 0 < imm < 8: combine the two 64-bit sources. */
5711 neon_load_reg64(cpu_V0, rn);
5712 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5713 neon_load_reg64(cpu_V1, rm);
5714 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5715 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5717 neon_store_reg64(cpu_V0, rd);
5718 if (q) {
5719 neon_store_reg64(cpu_V1, rd + 1);
5721 } else if ((insn & (1 << 11)) == 0) {
5722 /* Two register misc. */
5723 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5724 size = (insn >> 18) & 3;
5725 /* UNDEF for unknown op values and bad op-size combinations */
5726 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5727 return 1;
5729 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5730 q && ((rm | rd) & 1)) {
5731 return 1;
5733 switch (op) {
5734 case NEON_2RM_VREV64:
5735 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5736 tmp = neon_load_reg(rm, pass * 2);
5737 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5738 switch (size) {
5739 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5740 case 1: gen_swap_half(tmp); break;
5741 case 2: /* no-op */ break;
5742 default: abort();
5744 neon_store_reg(rd, pass * 2 + 1, tmp);
5745 if (size == 2) {
5746 neon_store_reg(rd, pass * 2, tmp2);
5747 } else {
5748 switch (size) {
5749 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5750 case 1: gen_swap_half(tmp2); break;
5751 default: abort();
5753 neon_store_reg(rd, pass * 2, tmp2);
5756 break;
5757 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5758 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5759 for (pass = 0; pass < q + 1; pass++) {
5760 tmp = neon_load_reg(rm, pass * 2);
5761 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5762 tmp = neon_load_reg(rm, pass * 2 + 1);
5763 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5764 switch (size) {
5765 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5766 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5767 case 2: tcg_gen_add_i64(CPU_V001); break;
5768 default: abort();
5770 if (op >= NEON_2RM_VPADAL) {
5771 /* Accumulate. */
5772 neon_load_reg64(cpu_V1, rd + pass);
5773 gen_neon_addl(size);
5775 neon_store_reg64(cpu_V0, rd + pass);
5777 break;
5778 case NEON_2RM_VTRN:
5779 if (size == 2) {
5780 int n;
5781 for (n = 0; n < (q ? 4 : 2); n += 2) {
5782 tmp = neon_load_reg(rm, n);
5783 tmp2 = neon_load_reg(rd, n + 1);
5784 neon_store_reg(rm, n, tmp2);
5785 neon_store_reg(rd, n + 1, tmp);
5787 } else {
5788 goto elementwise;
5790 break;
5791 case NEON_2RM_VUZP:
5792 if (gen_neon_unzip(rd, rm, size, q)) {
5793 return 1;
5795 break;
5796 case NEON_2RM_VZIP:
5797 if (gen_neon_zip(rd, rm, size, q)) {
5798 return 1;
5800 break;
5801 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5802 /* also VQMOVUN; op field and mnemonics don't line up */
5803 if (rm & 1) {
5804 return 1;
5806 TCGV_UNUSED(tmp2);
5807 for (pass = 0; pass < 2; pass++) {
5808 neon_load_reg64(cpu_V0, rm + pass);
5809 tmp = tcg_temp_new_i32();
5810 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5811 tmp, cpu_V0);
5812 if (pass == 0) {
5813 tmp2 = tmp;
5814 } else {
5815 neon_store_reg(rd, 0, tmp2);
5816 neon_store_reg(rd, 1, tmp);
5819 break;
5820 case NEON_2RM_VSHLL:
5821 if (q || (rd & 1)) {
5822 return 1;
5824 tmp = neon_load_reg(rm, 0);
5825 tmp2 = neon_load_reg(rm, 1);
5826 for (pass = 0; pass < 2; pass++) {
5827 if (pass == 1)
5828 tmp = tmp2;
5829 gen_neon_widen(cpu_V0, tmp, size, 1);
5830 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5831 neon_store_reg64(cpu_V0, rd + pass);
5833 break;
5834 case NEON_2RM_VCVT_F16_F32:
5835 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5836 q || (rm & 1)) {
5837 return 1;
5839 tmp = tcg_temp_new_i32();
5840 tmp2 = tcg_temp_new_i32();
5841 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5842 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5843 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5844 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5845 tcg_gen_shli_i32(tmp2, tmp2, 16);
5846 tcg_gen_or_i32(tmp2, tmp2, tmp);
5847 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5848 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5849 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5850 neon_store_reg(rd, 0, tmp2);
5851 tmp2 = tcg_temp_new_i32();
5852 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5853 tcg_gen_shli_i32(tmp2, tmp2, 16);
5854 tcg_gen_or_i32(tmp2, tmp2, tmp);
5855 neon_store_reg(rd, 1, tmp2);
5856 tcg_temp_free_i32(tmp);
5857 break;
5858 case NEON_2RM_VCVT_F32_F16:
5859 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5860 q || (rd & 1)) {
5861 return 1;
5863 tmp3 = tcg_temp_new_i32();
5864 tmp = neon_load_reg(rm, 0);
5865 tmp2 = neon_load_reg(rm, 1);
5866 tcg_gen_ext16u_i32(tmp3, tmp);
5867 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5868 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5869 tcg_gen_shri_i32(tmp3, tmp, 16);
5870 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5871 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5872 tcg_temp_free_i32(tmp);
5873 tcg_gen_ext16u_i32(tmp3, tmp2);
5874 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5875 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5876 tcg_gen_shri_i32(tmp3, tmp2, 16);
5877 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5878 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5879 tcg_temp_free_i32(tmp2);
5880 tcg_temp_free_i32(tmp3);
5881 break;
5882 default:
5883 elementwise:
5884 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5885 if (neon_2rm_is_float_op(op)) {
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5887 neon_reg_offset(rm, pass));
5888 TCGV_UNUSED(tmp);
5889 } else {
5890 tmp = neon_load_reg(rm, pass);
5892 switch (op) {
5893 case NEON_2RM_VREV32:
5894 switch (size) {
5895 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5896 case 1: gen_swap_half(tmp); break;
5897 default: abort();
5899 break;
5900 case NEON_2RM_VREV16:
5901 gen_rev16(tmp);
5902 break;
5903 case NEON_2RM_VCLS:
5904 switch (size) {
5905 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5906 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5907 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5908 default: abort();
5910 break;
5911 case NEON_2RM_VCLZ:
5912 switch (size) {
5913 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5914 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5915 case 2: gen_helper_clz(tmp, tmp); break;
5916 default: abort();
5918 break;
5919 case NEON_2RM_VCNT:
5920 gen_helper_neon_cnt_u8(tmp, tmp);
5921 break;
5922 case NEON_2RM_VMVN:
5923 tcg_gen_not_i32(tmp, tmp);
5924 break;
5925 case NEON_2RM_VQABS:
5926 switch (size) {
5927 case 0:
5928 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5929 break;
5930 case 1:
5931 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5932 break;
5933 case 2:
5934 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5935 break;
5936 default: abort();
5938 break;
5939 case NEON_2RM_VQNEG:
5940 switch (size) {
5941 case 0:
5942 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5943 break;
5944 case 1:
5945 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5946 break;
5947 case 2:
5948 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5949 break;
5950 default: abort();
5952 break;
5953 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5954 tmp2 = tcg_const_i32(0);
5955 switch (size) {
5956 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5957 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5958 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5959 default: abort();
5961 tcg_temp_free(tmp2);
5962 if (op == NEON_2RM_VCLE0) {
5963 tcg_gen_not_i32(tmp, tmp);
5965 break;
5966 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
5967 tmp2 = tcg_const_i32(0);
5968 switch (size) {
5969 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5970 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5971 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5972 default: abort();
5974 tcg_temp_free(tmp2);
5975 if (op == NEON_2RM_VCLT0) {
5976 tcg_gen_not_i32(tmp, tmp);
5978 break;
5979 case NEON_2RM_VCEQ0:
5980 tmp2 = tcg_const_i32(0);
5981 switch (size) {
5982 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5983 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5984 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5985 default: abort();
5987 tcg_temp_free(tmp2);
5988 break;
5989 case NEON_2RM_VABS:
5990 switch (size) {
5991 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5992 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5993 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5994 default: abort();
5996 break;
5997 case NEON_2RM_VNEG:
5998 tmp2 = tcg_const_i32(0);
5999 gen_neon_rsb(size, tmp, tmp2);
6000 tcg_temp_free(tmp2);
6001 break;
6002 case NEON_2RM_VCGT0_F:
6004 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6005 tmp2 = tcg_const_i32(0);
6006 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6007 tcg_temp_free(tmp2);
6008 tcg_temp_free_ptr(fpstatus);
6009 break;
6011 case NEON_2RM_VCGE0_F:
6013 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6014 tmp2 = tcg_const_i32(0);
6015 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6016 tcg_temp_free(tmp2);
6017 tcg_temp_free_ptr(fpstatus);
6018 break;
6020 case NEON_2RM_VCEQ0_F:
6022 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6023 tmp2 = tcg_const_i32(0);
6024 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6025 tcg_temp_free(tmp2);
6026 tcg_temp_free_ptr(fpstatus);
6027 break;
6029 case NEON_2RM_VCLE0_F:
6031 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6032 tmp2 = tcg_const_i32(0);
6033 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6034 tcg_temp_free(tmp2);
6035 tcg_temp_free_ptr(fpstatus);
6036 break;
6038 case NEON_2RM_VCLT0_F:
6040 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6041 tmp2 = tcg_const_i32(0);
6042 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6043 tcg_temp_free(tmp2);
6044 tcg_temp_free_ptr(fpstatus);
6045 break;
6047 case NEON_2RM_VABS_F:
6048 gen_vfp_abs(0);
6049 break;
6050 case NEON_2RM_VNEG_F:
6051 gen_vfp_neg(0);
6052 break;
6053 case NEON_2RM_VSWP:
6054 tmp2 = neon_load_reg(rd, pass);
6055 neon_store_reg(rm, pass, tmp2);
6056 break;
6057 case NEON_2RM_VTRN:
6058 tmp2 = neon_load_reg(rd, pass);
6059 switch (size) {
6060 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6061 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6062 default: abort();
6064 neon_store_reg(rm, pass, tmp2);
6065 break;
6066 case NEON_2RM_VRECPE:
6067 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6068 break;
6069 case NEON_2RM_VRSQRTE:
6070 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6071 break;
6072 case NEON_2RM_VRECPE_F:
6073 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6074 break;
6075 case NEON_2RM_VRSQRTE_F:
6076 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6077 break;
6078 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6079 gen_vfp_sito(0, 1);
6080 break;
6081 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6082 gen_vfp_uito(0, 1);
6083 break;
6084 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6085 gen_vfp_tosiz(0, 1);
6086 break;
6087 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6088 gen_vfp_touiz(0, 1);
6089 break;
6090 default:
6091 /* Reserved op values were caught by the
6092 * neon_2rm_sizes[] check earlier.
6094 abort();
6096 if (neon_2rm_is_float_op(op)) {
6097 tcg_gen_st_f32(cpu_F0s, cpu_env,
6098 neon_reg_offset(rd, pass));
6099 } else {
6100 neon_store_reg(rd, pass, tmp);
6103 break;
6105 } else if ((insn & (1 << 10)) == 0) {
6106 /* VTBL, VTBX. */
6107 int n = ((insn >> 8) & 3) + 1;
6108 if ((rn + n) > 32) {
6109 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6110 * helper function running off the end of the register file.
6112 return 1;
6114 n <<= 3;
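 /* n is now the table length in bytes (8 bytes per D register). */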
6115 if (insn & (1 << 6)) {
6116 tmp = neon_load_reg(rd, 0);
6117 } else {
6118 tmp = tcg_temp_new_i32();
6119 tcg_gen_movi_i32(tmp, 0);
6121 tmp2 = neon_load_reg(rm, 0);
6122 tmp4 = tcg_const_i32(rn);
6123 tmp5 = tcg_const_i32(n);
6124 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6125 tcg_temp_free_i32(tmp);
6126 if (insn & (1 << 6)) {
6127 tmp = neon_load_reg(rd, 1);
6128 } else {
6129 tmp = tcg_temp_new_i32();
6130 tcg_gen_movi_i32(tmp, 0);
6132 tmp3 = neon_load_reg(rm, 1);
6133 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6134 tcg_temp_free_i32(tmp5);
6135 tcg_temp_free_i32(tmp4);
6136 neon_store_reg(rd, 0, tmp2);
6137 neon_store_reg(rd, 1, tmp3);
6138 tcg_temp_free_i32(tmp);
6139 } else if ((insn & 0x380) == 0) {
6140 /* VDUP */
6141 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6142 return 1;
6144 if (insn & (1 << 19)) {
6145 tmp = neon_load_reg(rm, 1);
6146 } else {
6147 tmp = neon_load_reg(rm, 0);
6149 if (insn & (1 << 16)) {
6150 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6151 } else if (insn & (1 << 17)) {
6152 if ((insn >> 18) & 1)
6153 gen_neon_dup_high16(tmp);
6154 else
6155 gen_neon_dup_low16(tmp);
6157 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6158 tmp2 = tcg_temp_new_i32();
6159 tcg_gen_mov_i32(tmp2, tmp);
6160 neon_store_reg(rd, pass, tmp2);
6162 tcg_temp_free_i32(tmp);
6163 } else {
6164 return 1;
6168 return 0;
6171 static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
6173 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6174 const ARMCPRegInfo *ri;
6175 ARMCPU *cpu = arm_env_get_cpu(env);
6177 cpnum = (insn >> 8) & 0xf;
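 /* On XScale the CPAR register can disable access to individual
  * coprocessors; an access to a disabled coprocessor UNDEFs. */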
6178 if (arm_feature(env, ARM_FEATURE_XSCALE)
6179 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6180 return 1;
6182 /* First check for coprocessor space used for actual instructions */
6183 switch (cpnum) {
6184 case 0:
6185 case 1:
6186 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6187 return disas_iwmmxt_insn(env, s, insn);
6188 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6189 return disas_dsp_insn(env, s, insn);
6191 return 1;
6192 case 10:
6193 case 11:
6194 return disas_vfp_insn(env, s, insn);
6195 default:
6196 break;
6199 /* Otherwise treat as a generic register access */
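 /* Bit 25 is clear for the two-register MCRR/MRRC forms and set for
  * MCR/MRC/CDP. */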
6200 is64 = (insn & (1 << 25)) == 0;
6201 if (!is64 && ((insn & (1 << 4)) == 0)) {
6202 /* cdp */
6203 return 1;
6206 crm = insn & 0xf;
6207 if (is64) {
6208 crn = 0;
6209 opc1 = (insn >> 4) & 0xf;
6210 opc2 = 0;
6211 rt2 = (insn >> 16) & 0xf;
6212 } else {
6213 crn = (insn >> 16) & 0xf;
6214 opc1 = (insn >> 21) & 7;
6215 opc2 = (insn >> 5) & 7;
6216 rt2 = 0;
6218 isread = (insn >> 20) & 1;
6219 rt = (insn >> 12) & 0xf;
6221 ri = get_arm_cp_reginfo(cpu,
6222 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
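 /* A NULL reginfo means this CPU has no definition for the register;
  * the access falls through to the UNDEF return below. */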
6223 if (ri) {
6224 /* Check access permissions */
6225 if (!cp_access_ok(env, ri, isread)) {
6226 return 1;
6229 /* Handle special cases first */
6230 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6231 case ARM_CP_NOP:
6232 return 0;
6233 case ARM_CP_WFI:
6234 if (isread) {
6235 return 1;
6237 gen_set_pc_im(s->pc);
6238 s->is_jmp = DISAS_WFI;
6239 return 0;
6240 default:
6241 break;
6244 if (isread) {
6245 /* Read */
6246 if (is64) {
6247 TCGv_i64 tmp64;
6248 TCGv_i32 tmp;
6249 if (ri->type & ARM_CP_CONST) {
6250 tmp64 = tcg_const_i64(ri->resetvalue);
6251 } else if (ri->readfn) {
6252 TCGv_ptr tmpptr;
6253 gen_set_pc_im(s->pc);
6254 tmp64 = tcg_temp_new_i64();
6255 tmpptr = tcg_const_ptr(ri);
6256 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6257 tcg_temp_free_ptr(tmpptr);
6258 } else {
6259 tmp64 = tcg_temp_new_i64();
6260 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6262 tmp = tcg_temp_new_i32();
6263 tcg_gen_trunc_i64_i32(tmp, tmp64);
6264 store_reg(s, rt, tmp);
6265 tcg_gen_shri_i64(tmp64, tmp64, 32);
6266 tmp = tcg_temp_new_i32();
6267 tcg_gen_trunc_i64_i32(tmp, tmp64);
6268 tcg_temp_free_i64(tmp64);
6269 store_reg(s, rt2, tmp);
6270 } else {
6271 TCGv tmp;
6272 if (ri->type & ARM_CP_CONST) {
6273 tmp = tcg_const_i32(ri->resetvalue);
6274 } else if (ri->readfn) {
6275 TCGv_ptr tmpptr;
6276 gen_set_pc_im(s->pc);
6277 tmp = tcg_temp_new_i32();
6278 tmpptr = tcg_const_ptr(ri);
6279 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6280 tcg_temp_free_ptr(tmpptr);
6281 } else {
6282 tmp = load_cpu_offset(ri->fieldoffset);
6284 if (rt == 15) {
6285 /* Destination register of r15 for 32 bit loads sets
6286 * the condition codes from the high 4 bits of the value
6288 gen_set_nzcv(tmp);
6289 tcg_temp_free_i32(tmp);
6290 } else {
6291 store_reg(s, rt, tmp);
6294 } else {
6295 /* Write */
6296 if (ri->type & ARM_CP_CONST) {
6297 /* If not forbidden by access permissions, treat as WI */
6298 return 0;
6301 if (is64) {
6302 TCGv tmplo, tmphi;
6303 TCGv_i64 tmp64 = tcg_temp_new_i64();
6304 tmplo = load_reg(s, rt);
6305 tmphi = load_reg(s, rt2);
6306 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6307 tcg_temp_free_i32(tmplo);
6308 tcg_temp_free_i32(tmphi);
6309 if (ri->writefn) {
6310 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6311 gen_set_pc_im(s->pc);
6312 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6313 tcg_temp_free_ptr(tmpptr);
6314 } else {
6315 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6317 tcg_temp_free_i64(tmp64);
6318 } else {
6319 if (ri->writefn) {
6320 TCGv tmp;
6321 TCGv_ptr tmpptr;
6322 gen_set_pc_im(s->pc);
6323 tmp = load_reg(s, rt);
6324 tmpptr = tcg_const_ptr(ri);
6325 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6326 tcg_temp_free_ptr(tmpptr);
6327 tcg_temp_free_i32(tmp);
6328 } else {
6329 TCGv tmp = load_reg(s, rt);
6330 store_cpu_offset(tmp, ri->fieldoffset);
6333 /* We default to ending the TB on a coprocessor register write,
6334 * but allow this to be suppressed by the register definition
6335 * (usually only necessary to work around guest bugs).
6337 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6338 gen_lookup_tb(s);
6341 return 0;
6344 return 1;
6348 /* Store a 64-bit value to a register pair. Clobbers val. */
6349 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6351 TCGv tmp;
6352 tmp = tcg_temp_new_i32();
6353 tcg_gen_trunc_i64_i32(tmp, val);
6354 store_reg(s, rlow, tmp);
6355 tmp = tcg_temp_new_i32();
6356 tcg_gen_shri_i64(val, val, 32);
6357 tcg_gen_trunc_i64_i32(tmp, val);
6358 store_reg(s, rhigh, tmp);
6361 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6362 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6364 TCGv_i64 tmp;
6365 TCGv tmp2;
6367 /* Load value and extend to 64 bits. */
6368 tmp = tcg_temp_new_i64();
6369 tmp2 = load_reg(s, rlow);
6370 tcg_gen_extu_i32_i64(tmp, tmp2);
6371 tcg_temp_free_i32(tmp2);
6372 tcg_gen_add_i64(val, val, tmp);
6373 tcg_temp_free_i64(tmp);
6376 /* load and add a 64-bit value from a register pair. */
6377 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6379 TCGv_i64 tmp;
6380 TCGv tmpl;
6381 TCGv tmph;
6383 /* Load 64-bit value rd:rn. */
6384 tmpl = load_reg(s, rlow);
6385 tmph = load_reg(s, rhigh);
6386 tmp = tcg_temp_new_i64();
6387 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6388 tcg_temp_free_i32(tmpl);
6389 tcg_temp_free_i32(tmph);
6390 tcg_gen_add_i64(val, val, tmp);
6391 tcg_temp_free_i64(tmp);
6394 /* Set N and Z flags from a 64-bit value. */
6395 static void gen_logicq_cc(TCGv_i64 val)
6397 TCGv tmp = tcg_temp_new_i32();
6398 gen_helper_logicq_cc(tmp, val);
6399 gen_logic_CC(tmp);
6400 tcg_temp_free_i32(tmp);
6403 /* Load/Store exclusive instructions are implemented by remembering
6404 the value/address loaded, and seeing if these are the same
6405 when the store is performed. This should be sufficient to implement
6406 the architecturally mandated semantics, and avoids having to monitor
6407 regular stores.
6409 In system emulation mode only one CPU will be running at once, so
6410 this sequence is effectively atomic. In user emulation mode we
6411 throw an exception and handle the atomic operation elsewhere. */
6412 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6413 TCGv addr, int size)
6415 TCGv tmp;
6417 switch (size) {
6418 case 0:
6419 tmp = gen_ld8u(addr, IS_USER(s));
6420 break;
6421 case 1:
6422 tmp = gen_ld16u(addr, IS_USER(s));
6423 break;
6424 case 2:
6425 case 3:
6426 tmp = gen_ld32(addr, IS_USER(s));
6427 break;
6428 default:
6429 abort();
6431 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6432 store_reg(s, rt, tmp);
6433 if (size == 3) {
6434 TCGv tmp2 = tcg_temp_new_i32();
6435 tcg_gen_addi_i32(tmp2, addr, 4);
6436 tmp = gen_ld32(tmp2, IS_USER(s));
6437 tcg_temp_free_i32(tmp2);
6438 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6439 store_reg(s, rt2, tmp);
6441 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6444 static void gen_clrex(DisasContext *s)
6446 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6449 #ifdef CONFIG_USER_ONLY
6450 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6451 TCGv addr, int size)
6453 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6454 tcg_gen_movi_i32(cpu_exclusive_info,
6455 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6456 gen_exception_insn(s, 4, EXCP_STREX);
6458 #else
6459 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6460 TCGv addr, int size)
6462 TCGv tmp;
6463 int done_label;
6464 int fail_label;
6466 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6467 [addr] = {Rt};
6468 {Rd} = 0;
6469 } else {
6470 {Rd} = 1;
6471 } */
6472 fail_label = gen_new_label();
6473 done_label = gen_new_label();
6474 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6475 switch (size) {
6476 case 0:
6477 tmp = gen_ld8u(addr, IS_USER(s));
6478 break;
6479 case 1:
6480 tmp = gen_ld16u(addr, IS_USER(s));
6481 break;
6482 case 2:
6483 case 3:
6484 tmp = gen_ld32(addr, IS_USER(s));
6485 break;
6486 default:
6487 abort();
6489 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6490 tcg_temp_free_i32(tmp);
6491 if (size == 3) {
6492 TCGv tmp2 = tcg_temp_new_i32();
6493 tcg_gen_addi_i32(tmp2, addr, 4);
6494 tmp = gen_ld32(tmp2, IS_USER(s));
6495 tcg_temp_free_i32(tmp2);
6496 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6497 tcg_temp_free_i32(tmp);
6499 tmp = load_reg(s, rt);
6500 switch (size) {
6501 case 0:
6502 gen_st8(tmp, addr, IS_USER(s));
6503 break;
6504 case 1:
6505 gen_st16(tmp, addr, IS_USER(s));
6506 break;
6507 case 2:
6508 case 3:
6509 gen_st32(tmp, addr, IS_USER(s));
6510 break;
6511 default:
6512 abort();
6514 if (size == 3) {
6515 tcg_gen_addi_i32(addr, addr, 4);
6516 tmp = load_reg(s, rt2);
6517 gen_st32(tmp, addr, IS_USER(s));
6519 tcg_gen_movi_i32(cpu_R[rd], 0);
6520 tcg_gen_br(done_label);
6521 gen_set_label(fail_label);
6522 tcg_gen_movi_i32(cpu_R[rd], 1);
6523 gen_set_label(done_label);
6524 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6526 #endif
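/* Decode and translate one ARM (non-Thumb) instruction at s->pc,
   emitting the corresponding TCG ops.  Undefined encodings reach the
   illegal_op label and generate an EXCP_UDEF exception.  */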
6528 static void disas_arm_insn(CPUARMState * env, DisasContext *s)
6530 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6531 TCGv tmp;
6532 TCGv tmp2;
6533 TCGv tmp3;
6534 TCGv addr;
6535 TCGv_i64 tmp64;
6537 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6538 s->pc += 4;
6540 /* M variants do not implement ARM mode. */
6541 if (IS_M(env))
6542 goto illegal_op;
6543 cond = insn >> 28;
6544 if (cond == 0xf){
6545 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6546 * choose to UNDEF. In ARMv5 and above the space is used
6547 * for miscellaneous unconditional instructions.
6549 ARCH(5);
6551 /* Unconditional instructions. */
6552 if (((insn >> 25) & 7) == 1) {
6553 /* NEON Data processing. */
6554 if (!arm_feature(env, ARM_FEATURE_NEON))
6555 goto illegal_op;
6557 if (disas_neon_data_insn(env, s, insn))
6558 goto illegal_op;
6559 return;
6561 if ((insn & 0x0f100000) == 0x04000000) {
6562 /* NEON load/store. */
6563 if (!arm_feature(env, ARM_FEATURE_NEON))
6564 goto illegal_op;
6566 if (disas_neon_ls_insn(env, s, insn))
6567 goto illegal_op;
6568 return;
6570 if (((insn & 0x0f30f000) == 0x0510f000) ||
6571 ((insn & 0x0f30f010) == 0x0710f000)) {
6572 if ((insn & (1 << 22)) == 0) {
6573 /* PLDW; v7MP */
6574 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6575 goto illegal_op;
6578 /* Otherwise PLD; v5TE+ */
6579 ARCH(5TE);
6580 return;
6582 if (((insn & 0x0f70f000) == 0x0450f000) ||
6583 ((insn & 0x0f70f010) == 0x0650f000)) {
6584 ARCH(7);
6585 return; /* PLI; V7 */
6587 if (((insn & 0x0f700000) == 0x04100000) ||
6588 ((insn & 0x0f700010) == 0x06100000)) {
6589 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6590 goto illegal_op;
6592 return; /* v7MP: Unallocated memory hint: must NOP */
6595 if ((insn & 0x0ffffdff) == 0x01010000) {
6596 ARCH(6);
6597 /* setend */
6598 if (((insn >> 9) & 1) != s->bswap_code) {
6599 /* Dynamic endianness switching not implemented. */
6600 goto illegal_op;
6602 return;
6603 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6604 switch ((insn >> 4) & 0xf) {
6605 case 1: /* clrex */
6606 ARCH(6K);
6607 gen_clrex(s);
6608 return;
6609 case 4: /* dsb */
6610 case 5: /* dmb */
6611 case 6: /* isb */
6612 ARCH(7);
6613 /* We don't emulate caches so these are a no-op. */
6614 return;
6615 default:
6616 goto illegal_op;
6618 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6619 /* srs */
6620 int32_t offset;
6621 if (IS_USER(s))
6622 goto illegal_op;
6623 ARCH(6);
6624 op1 = (insn & 0x1f);
6625 addr = tcg_temp_new_i32();
6626 tmp = tcg_const_i32(op1);
6627 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6628 tcg_temp_free_i32(tmp);
6629 i = (insn >> 23) & 3;
6630 switch (i) {
6631 case 0: offset = -4; break; /* DA */
6632 case 1: offset = 0; break; /* IA */
6633 case 2: offset = -8; break; /* DB */
6634 case 3: offset = 4; break; /* IB */
6635 default: abort();
6637 if (offset)
6638 tcg_gen_addi_i32(addr, addr, offset);
6639 tmp = load_reg(s, 14);
6640 gen_st32(tmp, addr, 0);
6641 tmp = load_cpu_field(spsr);
6642 tcg_gen_addi_i32(addr, addr, 4);
6643 gen_st32(tmp, addr, 0);
6644 if (insn & (1 << 21)) {
6645 /* Base writeback. */
6646 switch (i) {
6647 case 0: offset = -8; break;
6648 case 1: offset = 4; break;
6649 case 2: offset = -4; break;
6650 case 3: offset = 0; break;
6651 default: abort();
6653 if (offset)
6654 tcg_gen_addi_i32(addr, addr, offset);
6655 tmp = tcg_const_i32(op1);
6656 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6657 tcg_temp_free_i32(tmp);
6658 tcg_temp_free_i32(addr);
6659 } else {
6660 tcg_temp_free_i32(addr);
6662 return;
6663 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6664 /* rfe */
6665 int32_t offset;
6666 if (IS_USER(s))
6667 goto illegal_op;
6668 ARCH(6);
6669 rn = (insn >> 16) & 0xf;
6670 addr = load_reg(s, rn);
6671 i = (insn >> 23) & 3;
6672 switch (i) {
6673 case 0: offset = -4; break; /* DA */
6674 case 1: offset = 0; break; /* IA */
6675 case 2: offset = -8; break; /* DB */
6676 case 3: offset = 4; break; /* IB */
6677 default: abort();
6679 if (offset)
6680 tcg_gen_addi_i32(addr, addr, offset);
6681 /* Load PC into tmp and CPSR into tmp2. */
6682 tmp = gen_ld32(addr, 0);
6683 tcg_gen_addi_i32(addr, addr, 4);
6684 tmp2 = gen_ld32(addr, 0);
6685 if (insn & (1 << 21)) {
6686 /* Base writeback. */
6687 switch (i) {
6688 case 0: offset = -8; break;
6689 case 1: offset = 4; break;
6690 case 2: offset = -4; break;
6691 case 3: offset = 0; break;
6692 default: abort();
6694 if (offset)
6695 tcg_gen_addi_i32(addr, addr, offset);
6696 store_reg(s, rn, addr);
6697 } else {
6698 tcg_temp_free_i32(addr);
6700 gen_rfe(s, tmp, tmp2);
6701 return;
6702 } else if ((insn & 0x0e000000) == 0x0a000000) {
6703 /* branch link and change to thumb (blx <offset>) */
6704 int32_t offset;
6706 val = (uint32_t)s->pc;
6707 tmp = tcg_temp_new_i32();
6708 tcg_gen_movi_i32(tmp, val);
6709 store_reg(s, 14, tmp);
6710 /* Sign-extend the 24-bit offset */
6711 offset = (((int32_t)insn) << 8) >> 8;
6712 /* offset * 4 + bit24 * 2 + (thumb bit) */
6713 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6714 /* pipeline offset */
6715 val += 4;
6716 /* protected by ARCH(5); above, near the start of uncond block */
6717 gen_bx_im(s, val);
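/* i.e. the branch target is the address of this instruction plus 8
   (the ARM-state PC offset), plus four times the sign-extended 24-bit
   immediate, plus 2 if the H bit (bit 24) is set; bit 0 is set so that
   gen_bx_im switches to Thumb state.  */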
6718 return;
6719 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6720 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6721 /* iWMMXt register transfer. */
6722 if (env->cp15.c15_cpar & (1 << 1))
6723 if (!disas_iwmmxt_insn(env, s, insn))
6724 return;
6726 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6727 /* Coprocessor double register transfer. */
6728 ARCH(5TE);
6729 } else if ((insn & 0x0f000010) == 0x0e000010) {
6730 /* Additional coprocessor register transfer. */
6731 } else if ((insn & 0x0ff10020) == 0x01000000) {
6732 uint32_t mask;
6733 uint32_t val;
6734 /* cps (privileged) */
6735 if (IS_USER(s))
6736 return;
6737 mask = val = 0;
6738 if (insn & (1 << 19)) {
6739 if (insn & (1 << 8))
6740 mask |= CPSR_A;
6741 if (insn & (1 << 7))
6742 mask |= CPSR_I;
6743 if (insn & (1 << 6))
6744 mask |= CPSR_F;
6745 if (insn & (1 << 18))
6746 val |= mask;
6748 if (insn & (1 << 17)) {
6749 mask |= CPSR_M;
6750 val |= (insn & 0x1f);
6752 if (mask) {
6753 gen_set_psr_im(s, mask, 0, val);
6755 return;
6757 goto illegal_op;
6759 if (cond != 0xe) {
6760 /* if not always executed, we generate a conditional jump to
6761 the next instruction */
6762 s->condlabel = gen_new_label();
6763 gen_test_cc(cond ^ 1, s->condlabel);
6764 s->condjmp = 1;
6766 if ((insn & 0x0f900000) == 0x03000000) {
6767 if ((insn & (1 << 21)) == 0) {
6768 ARCH(6T2);
6769 rd = (insn >> 12) & 0xf;
6770 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6771 if ((insn & (1 << 22)) == 0) {
6772 /* MOVW */
6773 tmp = tcg_temp_new_i32();
6774 tcg_gen_movi_i32(tmp, val);
6775 } else {
6776 /* MOVT */
6777 tmp = load_reg(s, rd);
6778 tcg_gen_ext16u_i32(tmp, tmp);
6779 tcg_gen_ori_i32(tmp, tmp, val << 16);
6781 store_reg(s, rd, tmp);
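/* For example (illustrative values): "movw r0, #0x1234" sets
   r0 = 0x00001234, and a following "movt r0, #0x5678" replaces only
   the top halfword, giving r0 = 0x56781234.  */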
6782 } else {
6783 if (((insn >> 12) & 0xf) != 0xf)
6784 goto illegal_op;
6785 if (((insn >> 16) & 0xf) == 0) {
6786 gen_nop_hint(s, insn & 0xff);
6787 } else {
6788 /* CPSR = immediate */
6789 val = insn & 0xff;
6790 shift = ((insn >> 8) & 0xf) * 2;
6791 if (shift)
6792 val = (val >> shift) | (val << (32 - shift));
6793 i = ((insn & (1 << 22)) != 0);
6794 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6795 goto illegal_op;
6798 } else if ((insn & 0x0f900000) == 0x01000000
6799 && (insn & 0x00000090) != 0x00000090) {
6800 /* miscellaneous instructions */
6801 op1 = (insn >> 21) & 3;
6802 sh = (insn >> 4) & 0xf;
6803 rm = insn & 0xf;
6804 switch (sh) {
6805 case 0x0: /* move program status register */
6806 if (op1 & 1) {
6807 /* PSR = reg */
6808 tmp = load_reg(s, rm);
6809 i = ((op1 & 2) != 0);
6810 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6811 goto illegal_op;
6812 } else {
6813 /* reg = PSR */
6814 rd = (insn >> 12) & 0xf;
6815 if (op1 & 2) {
6816 if (IS_USER(s))
6817 goto illegal_op;
6818 tmp = load_cpu_field(spsr);
6819 } else {
6820 tmp = tcg_temp_new_i32();
6821 gen_helper_cpsr_read(tmp, cpu_env);
6823 store_reg(s, rd, tmp);
6825 break;
6826 case 0x1:
6827 if (op1 == 1) {
6828 /* branch/exchange thumb (bx). */
6829 ARCH(4T);
6830 tmp = load_reg(s, rm);
6831 gen_bx(s, tmp);
6832 } else if (op1 == 3) {
6833 /* clz */
6834 ARCH(5);
6835 rd = (insn >> 12) & 0xf;
6836 tmp = load_reg(s, rm);
6837 gen_helper_clz(tmp, tmp);
6838 store_reg(s, rd, tmp);
6839 } else {
6840 goto illegal_op;
6842 break;
6843 case 0x2:
6844 if (op1 == 1) {
6845 ARCH(5J); /* bxj */
6846 /* Trivial implementation equivalent to bx. */
6847 tmp = load_reg(s, rm);
6848 gen_bx(s, tmp);
6849 } else {
6850 goto illegal_op;
6852 break;
6853 case 0x3:
6854 if (op1 != 1)
6855 goto illegal_op;
6857 ARCH(5);
6858 /* branch link/exchange thumb (blx) */
6859 tmp = load_reg(s, rm);
6860 tmp2 = tcg_temp_new_i32();
6861 tcg_gen_movi_i32(tmp2, s->pc);
6862 store_reg(s, 14, tmp2);
6863 gen_bx(s, tmp);
6864 break;
6865 case 0x5: /* saturating add/subtract */
6866 ARCH(5TE);
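/* Bit 0 of op1 selects subtract (QSUB/QDSUB rather than QADD/QDADD)
   and bit 1 selects the doubling forms, which saturate 2*Rn before
   the add or subtract.  */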
6867 rd = (insn >> 12) & 0xf;
6868 rn = (insn >> 16) & 0xf;
6869 tmp = load_reg(s, rm);
6870 tmp2 = load_reg(s, rn);
6871 if (op1 & 2)
6872 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
6873 if (op1 & 1)
6874 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
6875 else
6876 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
6877 tcg_temp_free_i32(tmp2);
6878 store_reg(s, rd, tmp);
6879 break;
6880 case 7:
6881 /* SMC instruction (op1 == 3)
6882 and undefined instructions (op1 == 0 || op1 == 2)
6883 will trap */
6884 if (op1 != 1) {
6885 goto illegal_op;
6887 /* bkpt */
6888 ARCH(5);
6889 gen_exception_insn(s, 4, EXCP_BKPT);
6890 break;
6891 case 0x8: /* signed multiply */
6892 case 0xa:
6893 case 0xc:
6894 case 0xe:
6895 ARCH(5TE);
6896 rs = (insn >> 8) & 0xf;
6897 rn = (insn >> 12) & 0xf;
6898 rd = (insn >> 16) & 0xf;
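/* op1 == 1 is SMULW<y>/SMLAW<y> ((32 x 16) >> 16, optionally
   accumulating); the other cases are SMUL<x><y>, SMLA<x><y> and the
   long-accumulate form SMLAL<x><y>.  */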
6899 if (op1 == 1) {
6900 /* (32 * 16) >> 16 */
6901 tmp = load_reg(s, rm);
6902 tmp2 = load_reg(s, rs);
6903 if (sh & 4)
6904 tcg_gen_sari_i32(tmp2, tmp2, 16);
6905 else
6906 gen_sxth(tmp2);
6907 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6908 tcg_gen_shri_i64(tmp64, tmp64, 16);
6909 tmp = tcg_temp_new_i32();
6910 tcg_gen_trunc_i64_i32(tmp, tmp64);
6911 tcg_temp_free_i64(tmp64);
6912 if ((sh & 2) == 0) {
6913 tmp2 = load_reg(s, rn);
6914 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
6915 tcg_temp_free_i32(tmp2);
6917 store_reg(s, rd, tmp);
6918 } else {
6919 /* 16 * 16 */
6920 tmp = load_reg(s, rm);
6921 tmp2 = load_reg(s, rs);
6922 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6923 tcg_temp_free_i32(tmp2);
6924 if (op1 == 2) {
6925 tmp64 = tcg_temp_new_i64();
6926 tcg_gen_ext_i32_i64(tmp64, tmp);
6927 tcg_temp_free_i32(tmp);
6928 gen_addq(s, tmp64, rn, rd);
6929 gen_storeq_reg(s, rn, rd, tmp64);
6930 tcg_temp_free_i64(tmp64);
6931 } else {
6932 if (op1 == 0) {
6933 tmp2 = load_reg(s, rn);
6934 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
6935 tcg_temp_free_i32(tmp2);
6937 store_reg(s, rd, tmp);
6940 break;
6941 default:
6942 goto illegal_op;
6944 } else if (((insn & 0x0e000000) == 0 &&
6945 (insn & 0x00000090) != 0x90) ||
6946 ((insn & 0x0e000000) == (1 << 25))) {
6947 int set_cc, logic_cc, shiftop;
6949 op1 = (insn >> 21) & 0xf;
6950 set_cc = (insn >> 20) & 1;
6951 logic_cc = table_logic_cc[op1] & set_cc;
6953 /* data processing instruction */
6954 if (insn & (1 << 25)) {
6955 /* immediate operand */
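/* The immediate is an 8-bit value rotated right by twice the 4-bit
   rotate field; e.g. (illustrative) imm8 = 0x01 with rotate = 4 gives
   a shift of 8 and the constant 0x01000000.  */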
6956 val = insn & 0xff;
6957 shift = ((insn >> 8) & 0xf) * 2;
6958 if (shift) {
6959 val = (val >> shift) | (val << (32 - shift));
6961 tmp2 = tcg_temp_new_i32();
6962 tcg_gen_movi_i32(tmp2, val);
6963 if (logic_cc && shift) {
6964 gen_set_CF_bit31(tmp2);
6966 } else {
6967 /* register */
6968 rm = (insn) & 0xf;
6969 tmp2 = load_reg(s, rm);
6970 shiftop = (insn >> 5) & 3;
6971 if (!(insn & (1 << 4))) {
6972 shift = (insn >> 7) & 0x1f;
6973 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6974 } else {
6975 rs = (insn >> 8) & 0xf;
6976 tmp = load_reg(s, rs);
6977 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6980 if (op1 != 0x0f && op1 != 0x0d) {
6981 rn = (insn >> 16) & 0xf;
6982 tmp = load_reg(s, rn);
6983 } else {
6984 TCGV_UNUSED(tmp);
6986 rd = (insn >> 12) & 0xf;
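/* op1 is the standard ARM data-processing opcode:
   0=AND 1=EOR 2=SUB 3=RSB 4=ADD 5=ADC 6=SBC 7=RSC
   8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN  */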
6987 switch(op1) {
6988 case 0x00:
6989 tcg_gen_and_i32(tmp, tmp, tmp2);
6990 if (logic_cc) {
6991 gen_logic_CC(tmp);
6993 store_reg_bx(env, s, rd, tmp);
6994 break;
6995 case 0x01:
6996 tcg_gen_xor_i32(tmp, tmp, tmp2);
6997 if (logic_cc) {
6998 gen_logic_CC(tmp);
7000 store_reg_bx(env, s, rd, tmp);
7001 break;
7002 case 0x02:
7003 if (set_cc && rd == 15) {
7004 /* SUBS r15, ... is used for exception return. */
7005 if (IS_USER(s)) {
7006 goto illegal_op;
7008 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
7009 gen_exception_return(s, tmp);
7010 } else {
7011 if (set_cc) {
7012 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
7013 } else {
7014 tcg_gen_sub_i32(tmp, tmp, tmp2);
7016 store_reg_bx(env, s, rd, tmp);
7018 break;
7019 case 0x03:
7020 if (set_cc) {
7021 gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
7022 } else {
7023 tcg_gen_sub_i32(tmp, tmp2, tmp);
7025 store_reg_bx(env, s, rd, tmp);
7026 break;
7027 case 0x04:
7028 if (set_cc) {
7029 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
7030 } else {
7031 tcg_gen_add_i32(tmp, tmp, tmp2);
7033 store_reg_bx(env, s, rd, tmp);
7034 break;
7035 case 0x05:
7036 if (set_cc) {
7037 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
7038 } else {
7039 gen_add_carry(tmp, tmp, tmp2);
7041 store_reg_bx(env, s, rd, tmp);
7042 break;
7043 case 0x06:
7044 if (set_cc) {
7045 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
7046 } else {
7047 gen_sub_carry(tmp, tmp, tmp2);
7049 store_reg_bx(env, s, rd, tmp);
7050 break;
7051 case 0x07:
7052 if (set_cc) {
7053 gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
7054 } else {
7055 gen_sub_carry(tmp, tmp2, tmp);
7057 store_reg_bx(env, s, rd, tmp);
7058 break;
7059 case 0x08:
7060 if (set_cc) {
7061 tcg_gen_and_i32(tmp, tmp, tmp2);
7062 gen_logic_CC(tmp);
7064 tcg_temp_free_i32(tmp);
7065 break;
7066 case 0x09:
7067 if (set_cc) {
7068 tcg_gen_xor_i32(tmp, tmp, tmp2);
7069 gen_logic_CC(tmp);
7071 tcg_temp_free_i32(tmp);
7072 break;
7073 case 0x0a:
7074 if (set_cc) {
7075 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
7077 tcg_temp_free_i32(tmp);
7078 break;
7079 case 0x0b:
7080 if (set_cc) {
7081 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
7083 tcg_temp_free_i32(tmp);
7084 break;
7085 case 0x0c:
7086 tcg_gen_or_i32(tmp, tmp, tmp2);
7087 if (logic_cc) {
7088 gen_logic_CC(tmp);
7090 store_reg_bx(env, s, rd, tmp);
7091 break;
7092 case 0x0d:
7093 if (logic_cc && rd == 15) {
7094 /* MOVS r15, ... is used for exception return. */
7095 if (IS_USER(s)) {
7096 goto illegal_op;
7098 gen_exception_return(s, tmp2);
7099 } else {
7100 if (logic_cc) {
7101 gen_logic_CC(tmp2);
7103 store_reg_bx(env, s, rd, tmp2);
7105 break;
7106 case 0x0e:
7107 tcg_gen_andc_i32(tmp, tmp, tmp2);
7108 if (logic_cc) {
7109 gen_logic_CC(tmp);
7111 store_reg_bx(env, s, rd, tmp);
7112 break;
7113 default:
7114 case 0x0f:
7115 tcg_gen_not_i32(tmp2, tmp2);
7116 if (logic_cc) {
7117 gen_logic_CC(tmp2);
7119 store_reg_bx(env, s, rd, tmp2);
7120 break;
7122 if (op1 != 0x0f && op1 != 0x0d) {
7123 tcg_temp_free_i32(tmp2);
7125 } else {
7126 /* other instructions */
7127 op1 = (insn >> 24) & 0xf;
7128 switch(op1) {
7129 case 0x0:
7130 case 0x1:
7131 /* multiplies, extra load/stores */
7132 sh = (insn >> 5) & 3;
7133 if (sh == 0) {
7134 if (op1 == 0x0) {
7135 rd = (insn >> 16) & 0xf;
7136 rn = (insn >> 12) & 0xf;
7137 rs = (insn >> 8) & 0xf;
7138 rm = (insn) & 0xf;
7139 op1 = (insn >> 20) & 0xf;
7140 switch (op1) {
7141 case 0: case 1: case 2: case 3: case 6:
7142 /* 32 bit mul */
7143 tmp = load_reg(s, rs);
7144 tmp2 = load_reg(s, rm);
7145 tcg_gen_mul_i32(tmp, tmp, tmp2);
7146 tcg_temp_free_i32(tmp2);
7147 if (insn & (1 << 22)) {
7148 /* Subtract (mls) */
7149 ARCH(6T2);
7150 tmp2 = load_reg(s, rn);
7151 tcg_gen_sub_i32(tmp, tmp2, tmp);
7152 tcg_temp_free_i32(tmp2);
7153 } else if (insn & (1 << 21)) {
7154 /* Add */
7155 tmp2 = load_reg(s, rn);
7156 tcg_gen_add_i32(tmp, tmp, tmp2);
7157 tcg_temp_free_i32(tmp2);
7159 if (insn & (1 << 20))
7160 gen_logic_CC(tmp);
7161 store_reg(s, rd, tmp);
7162 break;
7163 case 4:
7164 /* 64 bit mul double accumulate (UMAAL) */
7165 ARCH(6);
7166 tmp = load_reg(s, rs);
7167 tmp2 = load_reg(s, rm);
7168 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7169 gen_addq_lo(s, tmp64, rn);
7170 gen_addq_lo(s, tmp64, rd);
7171 gen_storeq_reg(s, rn, rd, tmp64);
7172 tcg_temp_free_i64(tmp64);
7173 break;
7174 case 8: case 9: case 10: case 11:
7175 case 12: case 13: case 14: case 15:
7176 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7177 tmp = load_reg(s, rs);
7178 tmp2 = load_reg(s, rm);
7179 if (insn & (1 << 22)) {
7180 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7181 } else {
7182 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7184 if (insn & (1 << 21)) { /* mult accumulate */
7185 gen_addq(s, tmp64, rn, rd);
7187 if (insn & (1 << 20)) {
7188 gen_logicq_cc(tmp64);
7190 gen_storeq_reg(s, rn, rd, tmp64);
7191 tcg_temp_free_i64(tmp64);
7192 break;
7193 default:
7194 goto illegal_op;
7196 } else {
7197 rn = (insn >> 16) & 0xf;
7198 rd = (insn >> 12) & 0xf;
7199 if (insn & (1 << 23)) {
7200 /* load/store exclusive */
7201 op1 = (insn >> 21) & 0x3;
7202 if (op1)
7203 ARCH(6K);
7204 else
7205 ARCH(6);
7206 addr = tcg_temp_local_new_i32();
7207 load_reg_var(s, addr, rn);
7208 if (insn & (1 << 20)) {
7209 switch (op1) {
7210 case 0: /* ldrex */
7211 gen_load_exclusive(s, rd, 15, addr, 2);
7212 break;
7213 case 1: /* ldrexd */
7214 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7215 break;
7216 case 2: /* ldrexb */
7217 gen_load_exclusive(s, rd, 15, addr, 0);
7218 break;
7219 case 3: /* ldrexh */
7220 gen_load_exclusive(s, rd, 15, addr, 1);
7221 break;
7222 default:
7223 abort();
7225 } else {
7226 rm = insn & 0xf;
7227 switch (op1) {
7228 case 0: /* strex */
7229 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7230 break;
7231 case 1: /* strexd */
7232 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7233 break;
7234 case 2: /* strexb */
7235 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7236 break;
7237 case 3: /* strexh */
7238 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7239 break;
7240 default:
7241 abort();
7244 tcg_temp_free(addr);
7245 } else {
7246 /* SWP instruction */
7247 rm = (insn) & 0xf;
7249 /* ??? This is not really atomic. However we know
7250 we never have multiple CPUs running in parallel,
7251 so it is good enough. */
7252 addr = load_reg(s, rn);
7253 tmp = load_reg(s, rm);
7254 if (insn & (1 << 22)) {
7255 tmp2 = gen_ld8u(addr, IS_USER(s));
7256 gen_st8(tmp, addr, IS_USER(s));
7257 } else {
7258 tmp2 = gen_ld32(addr, IS_USER(s));
7259 gen_st32(tmp, addr, IS_USER(s));
7261 tcg_temp_free_i32(addr);
7262 store_reg(s, rd, tmp2);
7265 } else {
7266 int address_offset;
7267 int load;
7268 /* Misc load/store */
7269 rn = (insn >> 16) & 0xf;
7270 rd = (insn >> 12) & 0xf;
7271 addr = load_reg(s, rn);
7272 if (insn & (1 << 24))
7273 gen_add_datah_offset(s, insn, 0, addr);
7274 address_offset = 0;
7275 if (insn & (1 << 20)) {
7276 /* load */
7277 switch(sh) {
7278 case 1:
7279 tmp = gen_ld16u(addr, IS_USER(s));
7280 break;
7281 case 2:
7282 tmp = gen_ld8s(addr, IS_USER(s));
7283 break;
7284 default:
7285 case 3:
7286 tmp = gen_ld16s(addr, IS_USER(s));
7287 break;
7289 load = 1;
7290 } else if (sh & 2) {
7291 ARCH(5TE);
7292 /* doubleword */
7293 if (sh & 1) {
7294 /* store */
7295 tmp = load_reg(s, rd);
7296 gen_st32(tmp, addr, IS_USER(s));
7297 tcg_gen_addi_i32(addr, addr, 4);
7298 tmp = load_reg(s, rd + 1);
7299 gen_st32(tmp, addr, IS_USER(s));
7300 load = 0;
7301 } else {
7302 /* load */
7303 tmp = gen_ld32(addr, IS_USER(s));
7304 store_reg(s, rd, tmp);
7305 tcg_gen_addi_i32(addr, addr, 4);
7306 tmp = gen_ld32(addr, IS_USER(s));
7307 rd++;
7308 load = 1;
7310 address_offset = -4;
7311 } else {
7312 /* store */
7313 tmp = load_reg(s, rd);
7314 gen_st16(tmp, addr, IS_USER(s));
7315 load = 0;
7317 /* Perform base writeback before the loaded value to
7318 ensure correct behavior with overlapping index registers.
7319 ldrd with base writeback is undefined if the
7320 destination and index registers overlap. */
7321 if (!(insn & (1 << 24))) {
7322 gen_add_datah_offset(s, insn, address_offset, addr);
7323 store_reg(s, rn, addr);
7324 } else if (insn & (1 << 21)) {
7325 if (address_offset)
7326 tcg_gen_addi_i32(addr, addr, address_offset);
7327 store_reg(s, rn, addr);
7328 } else {
7329 tcg_temp_free_i32(addr);
7331 if (load) {
7332 /* Complete the load. */
7333 store_reg(s, rd, tmp);
7336 break;
7337 case 0x4:
7338 case 0x5:
7339 goto do_ldst;
7340 case 0x6:
7341 case 0x7:
7342 if (insn & (1 << 4)) {
7343 ARCH(6);
7344 /* Armv6 Media instructions. */
7345 rm = insn & 0xf;
7346 rn = (insn >> 16) & 0xf;
7347 rd = (insn >> 12) & 0xf;
7348 rs = (insn >> 8) & 0xf;
7349 switch ((insn >> 23) & 3) {
7350 case 0: /* Parallel add/subtract. */
7351 op1 = (insn >> 20) & 7;
7352 tmp = load_reg(s, rn);
7353 tmp2 = load_reg(s, rm);
7354 sh = (insn >> 5) & 7;
7355 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7356 goto illegal_op;
7357 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7358 tcg_temp_free_i32(tmp2);
7359 store_reg(s, rd, tmp);
7360 break;
7361 case 1:
7362 if ((insn & 0x00700020) == 0) {
7363 /* Halfword pack. */
7364 tmp = load_reg(s, rn);
7365 tmp2 = load_reg(s, rm);
7366 shift = (insn >> 7) & 0x1f;
7367 if (insn & (1 << 6)) {
7368 /* pkhtb */
7369 if (shift == 0)
7370 shift = 31;
7371 tcg_gen_sari_i32(tmp2, tmp2, shift);
7372 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7373 tcg_gen_ext16u_i32(tmp2, tmp2);
7374 } else {
7375 /* pkhbt */
7376 if (shift)
7377 tcg_gen_shli_i32(tmp2, tmp2, shift);
7378 tcg_gen_ext16u_i32(tmp, tmp);
7379 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7381 tcg_gen_or_i32(tmp, tmp, tmp2);
7382 tcg_temp_free_i32(tmp2);
7383 store_reg(s, rd, tmp);
7384 } else if ((insn & 0x00200020) == 0x00200000) {
7385 /* [us]sat */
7386 tmp = load_reg(s, rm);
7387 shift = (insn >> 7) & 0x1f;
7388 if (insn & (1 << 6)) {
7389 if (shift == 0)
7390 shift = 31;
7391 tcg_gen_sari_i32(tmp, tmp, shift);
7392 } else {
7393 tcg_gen_shli_i32(tmp, tmp, shift);
7395 sh = (insn >> 16) & 0x1f;
7396 tmp2 = tcg_const_i32(sh);
7397 if (insn & (1 << 22))
7398 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7399 else
7400 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7401 tcg_temp_free_i32(tmp2);
7402 store_reg(s, rd, tmp);
7403 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7404 /* [us]sat16 */
7405 tmp = load_reg(s, rm);
7406 sh = (insn >> 16) & 0x1f;
7407 tmp2 = tcg_const_i32(sh);
7408 if (insn & (1 << 22))
7409 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7410 else
7411 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7412 tcg_temp_free_i32(tmp2);
7413 store_reg(s, rd, tmp);
7414 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7415 /* Select bytes. */
7416 tmp = load_reg(s, rn);
7417 tmp2 = load_reg(s, rm);
7418 tmp3 = tcg_temp_new_i32();
7419 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7420 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7421 tcg_temp_free_i32(tmp3);
7422 tcg_temp_free_i32(tmp2);
7423 store_reg(s, rd, tmp);
7424 } else if ((insn & 0x000003e0) == 0x00000060) {
7425 tmp = load_reg(s, rm);
7426 shift = (insn >> 10) & 3;
7427 /* ??? In many cases it's not necessary to do a
7428 rotate; a shift is sufficient. */
7429 if (shift != 0)
7430 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7431 op1 = (insn >> 20) & 7;
7432 switch (op1) {
7433 case 0: gen_sxtb16(tmp); break;
7434 case 2: gen_sxtb(tmp); break;
7435 case 3: gen_sxth(tmp); break;
7436 case 4: gen_uxtb16(tmp); break;
7437 case 6: gen_uxtb(tmp); break;
7438 case 7: gen_uxth(tmp); break;
7439 default: goto illegal_op;
7441 if (rn != 15) {
7442 tmp2 = load_reg(s, rn);
7443 if ((op1 & 3) == 0) {
7444 gen_add16(tmp, tmp2);
7445 } else {
7446 tcg_gen_add_i32(tmp, tmp, tmp2);
7447 tcg_temp_free_i32(tmp2);
7450 store_reg(s, rd, tmp);
7451 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7452 /* rev */
7453 tmp = load_reg(s, rm);
7454 if (insn & (1 << 22)) {
7455 if (insn & (1 << 7)) {
7456 gen_revsh(tmp);
7457 } else {
7458 ARCH(6T2);
7459 gen_helper_rbit(tmp, tmp);
7461 } else {
7462 if (insn & (1 << 7))
7463 gen_rev16(tmp);
7464 else
7465 tcg_gen_bswap32_i32(tmp, tmp);
7467 store_reg(s, rd, tmp);
7468 } else {
7469 goto illegal_op;
7471 break;
7472 case 2: /* Multiplies (Type 3). */
7473 switch ((insn >> 20) & 0x7) {
7474 case 5:
7475 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7476 /* op2 not 00x or 11x : UNDEF */
7477 goto illegal_op;
7479 /* Signed multiply most significant [accumulate].
7480 (SMMUL, SMMLA, SMMLS) */
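/* If bit 5 is set (the rounding variants SMMULR/SMMLAR/SMMLSR),
   0x80000000 is added below before the top 32 bits are taken, i.e.
   the high word is rounded rather than truncated.  */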
7481 tmp = load_reg(s, rm);
7482 tmp2 = load_reg(s, rs);
7483 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7485 if (rd != 15) {
7486 tmp = load_reg(s, rd);
7487 if (insn & (1 << 6)) {
7488 tmp64 = gen_subq_msw(tmp64, tmp);
7489 } else {
7490 tmp64 = gen_addq_msw(tmp64, tmp);
7493 if (insn & (1 << 5)) {
7494 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7496 tcg_gen_shri_i64(tmp64, tmp64, 32);
7497 tmp = tcg_temp_new_i32();
7498 tcg_gen_trunc_i64_i32(tmp, tmp64);
7499 tcg_temp_free_i64(tmp64);
7500 store_reg(s, rn, tmp);
7501 break;
7502 case 0:
7503 case 4:
7504 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7505 if (insn & (1 << 7)) {
7506 goto illegal_op;
7508 tmp = load_reg(s, rm);
7509 tmp2 = load_reg(s, rs);
7510 if (insn & (1 << 5))
7511 gen_swap_half(tmp2);
7512 gen_smul_dual(tmp, tmp2);
7513 if (insn & (1 << 6)) {
7514 /* This subtraction cannot overflow. */
7515 tcg_gen_sub_i32(tmp, tmp, tmp2);
7516 } else {
7517 /* This addition cannot overflow 32 bits;
7518 * however it may overflow when considered as a signed
7519 * operation, in which case we must set the Q flag.
7521 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7523 tcg_temp_free_i32(tmp2);
7524 if (insn & (1 << 22)) {
7525 /* smlald, smlsld */
7526 tmp64 = tcg_temp_new_i64();
7527 tcg_gen_ext_i32_i64(tmp64, tmp);
7528 tcg_temp_free_i32(tmp);
7529 gen_addq(s, tmp64, rd, rn);
7530 gen_storeq_reg(s, rd, rn, tmp64);
7531 tcg_temp_free_i64(tmp64);
7532 } else {
7533 /* smuad, smusd, smlad, smlsd */
7534 if (rd != 15)
7536 tmp2 = load_reg(s, rd);
7537 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7538 tcg_temp_free_i32(tmp2);
7540 store_reg(s, rn, tmp);
7542 break;
7543 case 1:
7544 case 3:
7545 /* SDIV, UDIV */
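/* Note: in this decode block the variable rn holds the architectural
   Rd field (bits [19:16]); bits [15:12] (rd) must be 0xf, and the
   quotient is stored via store_reg(s, rn, ...) below.  */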
7546 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7547 goto illegal_op;
7549 if (((insn >> 5) & 7) || (rd != 15)) {
7550 goto illegal_op;
7552 tmp = load_reg(s, rm);
7553 tmp2 = load_reg(s, rs);
7554 if (insn & (1 << 21)) {
7555 gen_helper_udiv(tmp, tmp, tmp2);
7556 } else {
7557 gen_helper_sdiv(tmp, tmp, tmp2);
7559 tcg_temp_free_i32(tmp2);
7560 store_reg(s, rn, tmp);
7561 break;
7562 default:
7563 goto illegal_op;
7565 break;
7566 case 3:
7567 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7568 switch (op1) {
7569 case 0: /* Unsigned sum of absolute differences. */
7570 ARCH(6);
7571 tmp = load_reg(s, rm);
7572 tmp2 = load_reg(s, rs);
7573 gen_helper_usad8(tmp, tmp, tmp2);
7574 tcg_temp_free_i32(tmp2);
7575 if (rd != 15) {
7576 tmp2 = load_reg(s, rd);
7577 tcg_gen_add_i32(tmp, tmp, tmp2);
7578 tcg_temp_free_i32(tmp2);
7580 store_reg(s, rn, tmp);
7581 break;
7582 case 0x20: case 0x24: case 0x28: case 0x2c:
7583 /* Bitfield insert/clear. */
7584 ARCH(6T2);
7585 shift = (insn >> 7) & 0x1f;
7586 i = (insn >> 16) & 0x1f;
7587 i = i + 1 - shift;
7588 if (rm == 15) {
7589 tmp = tcg_temp_new_i32();
7590 tcg_gen_movi_i32(tmp, 0);
7591 } else {
7592 tmp = load_reg(s, rm);
7594 if (i != 32) {
7595 tmp2 = load_reg(s, rd);
7596 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7597 tcg_temp_free_i32(tmp2);
7599 store_reg(s, rd, tmp);
7600 break;
7601 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7602 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7603 ARCH(6T2);
7604 tmp = load_reg(s, rm);
7605 shift = (insn >> 7) & 0x1f;
7606 i = ((insn >> 16) & 0x1f) + 1;
7607 if (shift + i > 32)
7608 goto illegal_op;
7609 if (i < 32) {
7610 if (op1 & 0x20) {
7611 gen_ubfx(tmp, shift, (1u << i) - 1);
7612 } else {
7613 gen_sbfx(tmp, shift, i);
7616 store_reg(s, rd, tmp);
7617 break;
7618 default:
7619 goto illegal_op;
7621 break;
7623 break;
7625 do_ldst:
7626 /* Check for undefined extension instructions
7627 * per the ARM Bible, i.e.:
7628 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7630 sh = (0xf << 20) | (0xf << 4);
7631 if (op1 == 0x7 && ((insn & sh) == sh))
7633 goto illegal_op;
7635 /* load/store byte/word */
7636 rn = (insn >> 16) & 0xf;
7637 rd = (insn >> 12) & 0xf;
7638 tmp2 = load_reg(s, rn);
7639 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7640 if (insn & (1 << 24))
7641 gen_add_data_offset(s, insn, tmp2);
7642 if (insn & (1 << 20)) {
7643 /* load */
7644 if (insn & (1 << 22)) {
7645 tmp = gen_ld8u(tmp2, i);
7646 } else {
7647 tmp = gen_ld32(tmp2, i);
7649 } else {
7650 /* store */
7651 tmp = load_reg(s, rd);
7652 if (insn & (1 << 22))
7653 gen_st8(tmp, tmp2, i);
7654 else
7655 gen_st32(tmp, tmp2, i);
7657 if (!(insn & (1 << 24))) {
7658 gen_add_data_offset(s, insn, tmp2);
7659 store_reg(s, rn, tmp2);
7660 } else if (insn & (1 << 21)) {
7661 store_reg(s, rn, tmp2);
7662 } else {
7663 tcg_temp_free_i32(tmp2);
7665 if (insn & (1 << 20)) {
7666 /* Complete the load. */
7667 store_reg_from_load(env, s, rd, tmp);
7669 break;
7670 case 0x08:
7671 case 0x09:
7673 int j, n, user, loaded_base;
7674 TCGv loaded_var;
7675 /* load/store multiple words */
7676 /* XXX: store correct base if write back */
7677 user = 0;
7678 if (insn & (1 << 22)) {
7679 if (IS_USER(s))
7680 goto illegal_op; /* only usable in supervisor mode */
7682 if ((insn & (1 << 15)) == 0)
7683 user = 1;
7685 rn = (insn >> 16) & 0xf;
7686 addr = load_reg(s, rn);
7688 /* compute total size */
7689 loaded_base = 0;
7690 TCGV_UNUSED(loaded_var);
7691 n = 0;
7692 for(i=0;i<16;i++) {
7693 if (insn & (1 << i))
7694 n++;
7696 /* XXX: test invalid n == 0 case ? */
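/* The adjustments below leave addr pointing at the lowest word
   transferred for all four addressing modes (IA/IB/DA/DB); the
   transfer loop then simply steps upwards by 4 after each word.  */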
7697 if (insn & (1 << 23)) {
7698 if (insn & (1 << 24)) {
7699 /* pre increment */
7700 tcg_gen_addi_i32(addr, addr, 4);
7701 } else {
7702 /* post increment */
7704 } else {
7705 if (insn & (1 << 24)) {
7706 /* pre decrement */
7707 tcg_gen_addi_i32(addr, addr, -(n * 4));
7708 } else {
7709 /* post decrement */
7710 if (n != 1)
7711 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7714 j = 0;
7715 for(i=0;i<16;i++) {
7716 if (insn & (1 << i)) {
7717 if (insn & (1 << 20)) {
7718 /* load */
7719 tmp = gen_ld32(addr, IS_USER(s));
7720 if (user) {
7721 tmp2 = tcg_const_i32(i);
7722 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7723 tcg_temp_free_i32(tmp2);
7724 tcg_temp_free_i32(tmp);
7725 } else if (i == rn) {
7726 loaded_var = tmp;
7727 loaded_base = 1;
7728 } else {
7729 store_reg_from_load(env, s, i, tmp);
7731 } else {
7732 /* store */
7733 if (i == 15) {
7734 /* special case: r15 = PC + 8 */
7735 val = (long)s->pc + 4;
7736 tmp = tcg_temp_new_i32();
7737 tcg_gen_movi_i32(tmp, val);
7738 } else if (user) {
7739 tmp = tcg_temp_new_i32();
7740 tmp2 = tcg_const_i32(i);
7741 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7742 tcg_temp_free_i32(tmp2);
7743 } else {
7744 tmp = load_reg(s, i);
7746 gen_st32(tmp, addr, IS_USER(s));
7748 j++;
7749 /* no need to add after the last transfer */
7750 if (j != n)
7751 tcg_gen_addi_i32(addr, addr, 4);
7754 if (insn & (1 << 21)) {
7755 /* write back */
7756 if (insn & (1 << 23)) {
7757 if (insn & (1 << 24)) {
7758 /* pre increment */
7759 } else {
7760 /* post increment */
7761 tcg_gen_addi_i32(addr, addr, 4);
7763 } else {
7764 if (insn & (1 << 24)) {
7765 /* pre decrement */
7766 if (n != 1)
7767 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7768 } else {
7769 /* post decrement */
7770 tcg_gen_addi_i32(addr, addr, -(n * 4));
7773 store_reg(s, rn, addr);
7774 } else {
7775 tcg_temp_free_i32(addr);
7777 if (loaded_base) {
7778 store_reg(s, rn, loaded_var);
7780 if ((insn & (1 << 22)) && !user) {
7781 /* Restore CPSR from SPSR. */
7782 tmp = load_cpu_field(spsr);
7783 gen_set_cpsr(tmp, 0xffffffff);
7784 tcg_temp_free_i32(tmp);
7785 s->is_jmp = DISAS_UPDATE;
7788 break;
7789 case 0xa:
7790 case 0xb:
7792 int32_t offset;
7794 /* branch (and link) */
7795 val = (int32_t)s->pc;
7796 if (insn & (1 << 24)) {
7797 tmp = tcg_temp_new_i32();
7798 tcg_gen_movi_i32(tmp, val);
7799 store_reg(s, 14, tmp);
7801 offset = (((int32_t)insn << 8) >> 8);
7802 val += (offset << 2) + 4;
7803 gen_jmp(s, val);
7805 break;
7806 case 0xc:
7807 case 0xd:
7808 case 0xe:
7809 /* Coprocessor. */
7810 if (disas_coproc_insn(env, s, insn))
7811 goto illegal_op;
7812 break;
7813 case 0xf:
7814 /* swi */
7815 gen_set_pc_im(s->pc);
7816 s->is_jmp = DISAS_SWI;
7817 break;
7818 default:
7819 illegal_op:
7820 gen_exception_insn(s, 4, EXCP_UDEF);
7821 break;
7826 /* Return true if this is a Thumb-2 logical op. */
7827 static int
7828 thumb2_logic_op(int op)
7830 return (op < 8);
7833 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7834 then set condition code flags based on the result of the operation.
7835 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7836 to the high bit of T1.
7837 Returns zero if the opcode is valid. */
7839 static int
7840 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7842 int logic_cc;
7844 logic_cc = 0;
7845 switch (op) {
7846 case 0: /* and */
7847 tcg_gen_and_i32(t0, t0, t1);
7848 logic_cc = conds;
7849 break;
7850 case 1: /* bic */
7851 tcg_gen_andc_i32(t0, t0, t1);
7852 logic_cc = conds;
7853 break;
7854 case 2: /* orr */
7855 tcg_gen_or_i32(t0, t0, t1);
7856 logic_cc = conds;
7857 break;
7858 case 3: /* orn */
7859 tcg_gen_orc_i32(t0, t0, t1);
7860 logic_cc = conds;
7861 break;
7862 case 4: /* eor */
7863 tcg_gen_xor_i32(t0, t0, t1);
7864 logic_cc = conds;
7865 break;
7866 case 8: /* add */
7867 if (conds)
7868 gen_helper_add_cc(t0, cpu_env, t0, t1);
7869 else
7870 tcg_gen_add_i32(t0, t0, t1);
7871 break;
7872 case 10: /* adc */
7873 if (conds)
7874 gen_helper_adc_cc(t0, cpu_env, t0, t1);
7875 else
7876 gen_adc(t0, t1);
7877 break;
7878 case 11: /* sbc */
7879 if (conds)
7880 gen_helper_sbc_cc(t0, cpu_env, t0, t1);
7881 else
7882 gen_sub_carry(t0, t0, t1);
7883 break;
7884 case 13: /* sub */
7885 if (conds)
7886 gen_helper_sub_cc(t0, cpu_env, t0, t1);
7887 else
7888 tcg_gen_sub_i32(t0, t0, t1);
7889 break;
7890 case 14: /* rsb */
7891 if (conds)
7892 gen_helper_sub_cc(t0, cpu_env, t1, t0);
7893 else
7894 tcg_gen_sub_i32(t0, t1, t0);
7895 break;
7896 default: /* 5, 6, 7, 9, 12, 15. */
7897 return 1;
7899 if (logic_cc) {
7900 gen_logic_CC(t0);
7901 if (shifter_out)
7902 gen_set_CF_bit31(t1);
7904 return 0;
7907 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7908 is not legal. */
7909 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
7911 uint32_t insn, imm, shift, offset;
7912 uint32_t rd, rn, rm, rs;
7913 TCGv tmp;
7914 TCGv tmp2;
7915 TCGv tmp3;
7916 TCGv addr;
7917 TCGv_i64 tmp64;
7918 int op;
7919 int shiftop;
7920 int conds;
7921 int logic_cc;
7923 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7924 || arm_feature (env, ARM_FEATURE_M))) {
7925 /* Thumb-1 cores may need to treat bl and blx as a pair of
7926 16-bit instructions to get correct prefetch abort behavior. */
7927 insn = insn_hw1;
7928 if ((insn & (1 << 12)) == 0) {
7929 ARCH(5);
7930 /* Second half of blx. */
7931 offset = ((insn & 0x7ff) << 1);
7932 tmp = load_reg(s, 14);
7933 tcg_gen_addi_i32(tmp, tmp, offset);
7934 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7936 tmp2 = tcg_temp_new_i32();
7937 tcg_gen_movi_i32(tmp2, s->pc | 1);
7938 store_reg(s, 14, tmp2);
7939 gen_bx(s, tmp);
7940 return 0;
7942 if (insn & (1 << 11)) {
7943 /* Second half of bl. */
7944 offset = ((insn & 0x7ff) << 1) | 1;
7945 tmp = load_reg(s, 14);
7946 tcg_gen_addi_i32(tmp, tmp, offset);
7948 tmp2 = tcg_temp_new_i32();
7949 tcg_gen_movi_i32(tmp2, s->pc | 1);
7950 store_reg(s, 14, tmp2);
7951 gen_bx(s, tmp);
7952 return 0;
7954 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7955 /* Instruction spans a page boundary. Implement it as two
7956 16-bit instructions in case the second half causes a
7957 prefetch abort. */
7958 offset = ((int32_t)insn << 21) >> 9;
7959 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7960 return 0;
7962 /* Fall through to 32-bit decode. */
7965 insn = arm_lduw_code(env, s->pc, s->bswap_code);
7966 s->pc += 2;
7967 insn |= (uint32_t)insn_hw1 << 16;
7969 if ((insn & 0xf800e800) != 0xf000e800) {
7970 ARCH(6T2);
7973 rn = (insn >> 16) & 0xf;
7974 rs = (insn >> 12) & 0xf;
7975 rd = (insn >> 8) & 0xf;
7976 rm = insn & 0xf;
7977 switch ((insn >> 25) & 0xf) {
7978 case 0: case 1: case 2: case 3:
7979 /* 16-bit instructions. Should never happen. */
7980 abort();
7981 case 4:
7982 if (insn & (1 << 22)) {
7983 /* Other load/store, table branch. */
7984 if (insn & 0x01200000) {
7985 /* Load/store doubleword. */
7986 if (rn == 15) {
7987 addr = tcg_temp_new_i32();
7988 tcg_gen_movi_i32(addr, s->pc & ~3);
7989 } else {
7990 addr = load_reg(s, rn);
7992 offset = (insn & 0xff) * 4;
7993 if ((insn & (1 << 23)) == 0)
7994 offset = -offset;
7995 if (insn & (1 << 24)) {
7996 tcg_gen_addi_i32(addr, addr, offset);
7997 offset = 0;
7999 if (insn & (1 << 20)) {
8000 /* ldrd */
8001 tmp = gen_ld32(addr, IS_USER(s));
8002 store_reg(s, rs, tmp);
8003 tcg_gen_addi_i32(addr, addr, 4);
8004 tmp = gen_ld32(addr, IS_USER(s));
8005 store_reg(s, rd, tmp);
8006 } else {
8007 /* strd */
8008 tmp = load_reg(s, rs);
8009 gen_st32(tmp, addr, IS_USER(s));
8010 tcg_gen_addi_i32(addr, addr, 4);
8011 tmp = load_reg(s, rd);
8012 gen_st32(tmp, addr, IS_USER(s));
8014 if (insn & (1 << 21)) {
8015 /* Base writeback. */
8016 if (rn == 15)
8017 goto illegal_op;
8018 tcg_gen_addi_i32(addr, addr, offset - 4);
8019 store_reg(s, rn, addr);
8020 } else {
8021 tcg_temp_free_i32(addr);
8023 } else if ((insn & (1 << 23)) == 0) {
8024 /* Load/store exclusive word. */
8025 addr = tcg_temp_local_new();
8026 load_reg_var(s, addr, rn);
8027 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8028 if (insn & (1 << 20)) {
8029 gen_load_exclusive(s, rs, 15, addr, 2);
8030 } else {
8031 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8033 tcg_temp_free(addr);
8034 } else if ((insn & (1 << 6)) == 0) {
8035 /* Table Branch. */
8036 if (rn == 15) {
8037 addr = tcg_temp_new_i32();
8038 tcg_gen_movi_i32(addr, s->pc);
8039 } else {
8040 addr = load_reg(s, rn);
8042 tmp = load_reg(s, rm);
8043 tcg_gen_add_i32(addr, addr, tmp);
8044 if (insn & (1 << 4)) {
8045 /* tbh */
8046 tcg_gen_add_i32(addr, addr, tmp);
8047 tcg_temp_free_i32(tmp);
8048 tmp = gen_ld16u(addr, IS_USER(s));
8049 } else { /* tbb */
8050 tcg_temp_free_i32(tmp);
8051 tmp = gen_ld8u(addr, IS_USER(s));
8053 tcg_temp_free_i32(addr);
8054 tcg_gen_shli_i32(tmp, tmp, 1);
8055 tcg_gen_addi_i32(tmp, tmp, s->pc);
8056 store_reg(s, 15, tmp);
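/* i.e. the new PC is the address of the table-branch instruction plus
   4, plus twice the byte (tbb) or halfword (tbh) table entry loaded
   from Rn + Rm (Rn + 2*Rm for tbh); Rn == 15 uses the PC as the table
   base.  */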
8057 } else {
8058 /* Load/store exclusive byte/halfword/doubleword. */
8059 ARCH(7);
8060 op = (insn >> 4) & 0x3;
8061 if (op == 2) {
8062 goto illegal_op;
8064 addr = tcg_temp_local_new();
8065 load_reg_var(s, addr, rn);
8066 if (insn & (1 << 20)) {
8067 gen_load_exclusive(s, rs, rd, addr, op);
8068 } else {
8069 gen_store_exclusive(s, rm, rs, rd, addr, op);
8071 tcg_temp_free(addr);
8073 } else {
8074 /* Load/store multiple, RFE, SRS. */
8075 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8076 /* Not available in user mode. */
8077 if (IS_USER(s))
8078 goto illegal_op;
8079 if (insn & (1 << 20)) {
8080 /* rfe */
8081 addr = load_reg(s, rn);
8082 if ((insn & (1 << 24)) == 0)
8083 tcg_gen_addi_i32(addr, addr, -8);
8084 /* Load PC into tmp and CPSR into tmp2. */
8085 tmp = gen_ld32(addr, 0);
8086 tcg_gen_addi_i32(addr, addr, 4);
8087 tmp2 = gen_ld32(addr, 0);
8088 if (insn & (1 << 21)) {
8089 /* Base writeback. */
8090 if (insn & (1 << 24)) {
8091 tcg_gen_addi_i32(addr, addr, 4);
8092 } else {
8093 tcg_gen_addi_i32(addr, addr, -4);
8095 store_reg(s, rn, addr);
8096 } else {
8097 tcg_temp_free_i32(addr);
8099 gen_rfe(s, tmp, tmp2);
8100 } else {
8101 /* srs */
8102 op = (insn & 0x1f);
8103 addr = tcg_temp_new_i32();
8104 tmp = tcg_const_i32(op);
8105 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8106 tcg_temp_free_i32(tmp);
8107 if ((insn & (1 << 24)) == 0) {
8108 tcg_gen_addi_i32(addr, addr, -8);
8110 tmp = load_reg(s, 14);
8111 gen_st32(tmp, addr, 0);
8112 tcg_gen_addi_i32(addr, addr, 4);
8113 tmp = tcg_temp_new_i32();
8114 gen_helper_cpsr_read(tmp, cpu_env);
8115 gen_st32(tmp, addr, 0);
8116 if (insn & (1 << 21)) {
8117 if ((insn & (1 << 24)) == 0) {
8118 tcg_gen_addi_i32(addr, addr, -4);
8119 } else {
8120 tcg_gen_addi_i32(addr, addr, 4);
8122 tmp = tcg_const_i32(op);
8123 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8124 tcg_temp_free_i32(tmp);
8125 } else {
8126 tcg_temp_free_i32(addr);
8129 } else {
8130 int i, loaded_base = 0;
8131 TCGv loaded_var;
8132 /* Load/store multiple. */
8133 addr = load_reg(s, rn);
8134 offset = 0;
8135 for (i = 0; i < 16; i++) {
8136 if (insn & (1 << i))
8137 offset += 4;
8139 if (insn & (1 << 24)) {
8140 tcg_gen_addi_i32(addr, addr, -offset);
8143 TCGV_UNUSED(loaded_var);
8144 for (i = 0; i < 16; i++) {
8145 if ((insn & (1 << i)) == 0)
8146 continue;
8147 if (insn & (1 << 20)) {
8148 /* Load. */
8149 tmp = gen_ld32(addr, IS_USER(s));
8150 if (i == 15) {
8151 gen_bx(s, tmp);
8152 } else if (i == rn) {
8153 loaded_var = tmp;
8154 loaded_base = 1;
8155 } else {
8156 store_reg(s, i, tmp);
8158 } else {
8159 /* Store. */
8160 tmp = load_reg(s, i);
8161 gen_st32(tmp, addr, IS_USER(s));
8163 tcg_gen_addi_i32(addr, addr, 4);
8165 if (loaded_base) {
8166 store_reg(s, rn, loaded_var);
8168 if (insn & (1 << 21)) {
8169 /* Base register writeback. */
8170 if (insn & (1 << 24)) {
8171 tcg_gen_addi_i32(addr, addr, -offset);
8173 /* Fault if writeback register is in register list. */
8174 if (insn & (1 << rn))
8175 goto illegal_op;
8176 store_reg(s, rn, addr);
8177 } else {
8178 tcg_temp_free_i32(addr);
8182 break;
8183 case 5:
8185 op = (insn >> 21) & 0xf;
8186 if (op == 6) {
8187 /* Halfword pack. */
8188 tmp = load_reg(s, rn);
8189 tmp2 = load_reg(s, rm);
8190 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8191 if (insn & (1 << 5)) {
8192 /* pkhtb */
8193 if (shift == 0)
8194 shift = 31;
8195 tcg_gen_sari_i32(tmp2, tmp2, shift);
8196 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8197 tcg_gen_ext16u_i32(tmp2, tmp2);
8198 } else {
8199 /* pkhbt */
8200 if (shift)
8201 tcg_gen_shli_i32(tmp2, tmp2, shift);
8202 tcg_gen_ext16u_i32(tmp, tmp);
8203 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8205 tcg_gen_or_i32(tmp, tmp, tmp2);
8206 tcg_temp_free_i32(tmp2);
8207 store_reg(s, rd, tmp);
8208 } else {
8209 /* Data processing register constant shift. */
8210 if (rn == 15) {
8211 tmp = tcg_temp_new_i32();
8212 tcg_gen_movi_i32(tmp, 0);
8213 } else {
8214 tmp = load_reg(s, rn);
8216 tmp2 = load_reg(s, rm);
8218 shiftop = (insn >> 4) & 3;
8219 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8220 conds = (insn & (1 << 20)) != 0;
8221 logic_cc = (conds && thumb2_logic_op(op));
8222 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8223 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8224 goto illegal_op;
8225 tcg_temp_free_i32(tmp2);
8226 if (rd != 15) {
8227 store_reg(s, rd, tmp);
8228 } else {
8229 tcg_temp_free_i32(tmp);
8232 break;
8233 case 13: /* Misc data processing. */
8234 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8235 if (op < 4 && (insn & 0xf000) != 0xf000)
8236 goto illegal_op;
8237 switch (op) {
8238 case 0: /* Register controlled shift. */
8239 tmp = load_reg(s, rn);
8240 tmp2 = load_reg(s, rm);
8241 if ((insn & 0x70) != 0)
8242 goto illegal_op;
8243 op = (insn >> 21) & 3;
8244 logic_cc = (insn & (1 << 20)) != 0;
8245 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8246 if (logic_cc)
8247 gen_logic_CC(tmp);
8248 store_reg_bx(env, s, rd, tmp);
8249 break;
8250 case 1: /* Sign/zero extend. */
8251 tmp = load_reg(s, rm);
8252 shift = (insn >> 4) & 3;
8253 /* ??? In many cases it's not necessary to do a
8254 rotate; a shift is sufficient. */
8255 if (shift != 0)
8256 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8257 op = (insn >> 20) & 7;
8258 switch (op) {
8259 case 0: gen_sxth(tmp); break;
8260 case 1: gen_uxth(tmp); break;
8261 case 2: gen_sxtb16(tmp); break;
8262 case 3: gen_uxtb16(tmp); break;
8263 case 4: gen_sxtb(tmp); break;
8264 case 5: gen_uxtb(tmp); break;
8265 default: goto illegal_op;
8267 if (rn != 15) {
8268 tmp2 = load_reg(s, rn);
8269 if ((op >> 1) == 1) {
8270 gen_add16(tmp, tmp2);
8271 } else {
8272 tcg_gen_add_i32(tmp, tmp, tmp2);
8273 tcg_temp_free_i32(tmp2);
8276 store_reg(s, rd, tmp);
8277 break;
8278 case 2: /* SIMD add/subtract. */
8279 op = (insn >> 20) & 7;
8280 shift = (insn >> 4) & 7;
8281 if ((op & 3) == 3 || (shift & 3) == 3)
8282 goto illegal_op;
8283 tmp = load_reg(s, rn);
8284 tmp2 = load_reg(s, rm);
8285 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8286 tcg_temp_free_i32(tmp2);
8287 store_reg(s, rd, tmp);
8288 break;
8289 case 3: /* Other data processing. */
8290 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8291 if (op < 4) {
8292 /* Saturating add/subtract. */
8293 tmp = load_reg(s, rn);
8294 tmp2 = load_reg(s, rm);
8295 if (op & 1)
8296 gen_helper_double_saturate(tmp, cpu_env, tmp);
8297 if (op & 2)
8298 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8299 else
8300 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8301 tcg_temp_free_i32(tmp2);
8302 } else {
8303 tmp = load_reg(s, rn);
8304 switch (op) {
8305 case 0x0a: /* rbit */
8306 gen_helper_rbit(tmp, tmp);
8307 break;
8308 case 0x08: /* rev */
8309 tcg_gen_bswap32_i32(tmp, tmp);
8310 break;
8311 case 0x09: /* rev16 */
8312 gen_rev16(tmp);
8313 break;
8314 case 0x0b: /* revsh */
8315 gen_revsh(tmp);
8316 break;
8317 case 0x10: /* sel */
8318 tmp2 = load_reg(s, rm);
8319 tmp3 = tcg_temp_new_i32();
8320 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8321 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8322 tcg_temp_free_i32(tmp3);
8323 tcg_temp_free_i32(tmp2);
8324 break;
8325 case 0x18: /* clz */
8326 gen_helper_clz(tmp, tmp);
8327 break;
8328 default:
8329 goto illegal_op;
8332 store_reg(s, rd, tmp);
8333 break;
8334 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8335 op = (insn >> 4) & 0xf;
8336 tmp = load_reg(s, rn);
8337 tmp2 = load_reg(s, rm);
8338 switch ((insn >> 20) & 7) {
8339 case 0: /* 32 x 32 -> 32 */
8340 tcg_gen_mul_i32(tmp, tmp, tmp2);
8341 tcg_temp_free_i32(tmp2);
8342 if (rs != 15) {
8343 tmp2 = load_reg(s, rs);
8344 if (op)
8345 tcg_gen_sub_i32(tmp, tmp2, tmp);
8346 else
8347 tcg_gen_add_i32(tmp, tmp, tmp2);
8348 tcg_temp_free_i32(tmp2);
8350 break;
8351 case 1: /* 16 x 16 -> 32 */
8352 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8353 tcg_temp_free_i32(tmp2);
8354 if (rs != 15) {
8355 tmp2 = load_reg(s, rs);
8356 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8357 tcg_temp_free_i32(tmp2);
8359 break;
8360 case 2: /* Dual multiply add. */
8361 case 4: /* Dual multiply subtract. */
8362 if (op)
8363 gen_swap_half(tmp2);
8364 gen_smul_dual(tmp, tmp2);
8365 if (insn & (1 << 22)) {
8366 /* This subtraction cannot overflow. */
8367 tcg_gen_sub_i32(tmp, tmp, tmp2);
8368 } else {
8369 /* This addition cannot overflow 32 bits;
8370 * however it may overflow when considered as a signed
8371 * operation, in which case we must set the Q flag.
8373 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8375 tcg_temp_free_i32(tmp2);
8376 if (rs != 15)
8378 tmp2 = load_reg(s, rs);
8379 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8380 tcg_temp_free_i32(tmp2);
8382 break;
8383 case 3: /* 32 * 16 -> 32msb */
8384 if (op)
8385 tcg_gen_sari_i32(tmp2, tmp2, 16);
8386 else
8387 gen_sxth(tmp2);
8388 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8389 tcg_gen_shri_i64(tmp64, tmp64, 16);
8390 tmp = tcg_temp_new_i32();
8391 tcg_gen_trunc_i64_i32(tmp, tmp64);
8392 tcg_temp_free_i64(tmp64);
8393 if (rs != 15)
8395 tmp2 = load_reg(s, rs);
8396 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8397 tcg_temp_free_i32(tmp2);
8399 break;
8400 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8401 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8402 if (rs != 15) {
8403 tmp = load_reg(s, rs);
8404 if (insn & (1 << 20)) {
8405 tmp64 = gen_addq_msw(tmp64, tmp);
8406 } else {
8407 tmp64 = gen_subq_msw(tmp64, tmp);
8410 if (insn & (1 << 4)) {
8411 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8413 tcg_gen_shri_i64(tmp64, tmp64, 32);
8414 tmp = tcg_temp_new_i32();
8415 tcg_gen_trunc_i64_i32(tmp, tmp64);
8416 tcg_temp_free_i64(tmp64);
8417 break;
8418 case 7: /* Unsigned sum of absolute differences. */
8419 gen_helper_usad8(tmp, tmp, tmp2);
8420 tcg_temp_free_i32(tmp2);
8421 if (rs != 15) {
8422 tmp2 = load_reg(s, rs);
8423 tcg_gen_add_i32(tmp, tmp, tmp2);
8424 tcg_temp_free_i32(tmp2);
8426 break;
8428 store_reg(s, rd, tmp);
8429 break;
8430 case 6: case 7: /* 64-bit multiply, Divide. */
8431 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8432 tmp = load_reg(s, rn);
8433 tmp2 = load_reg(s, rm);
8434 if ((op & 0x50) == 0x10) {
8435 /* sdiv, udiv */
8436 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8437 goto illegal_op;
8439 if (op & 0x20)
8440 gen_helper_udiv(tmp, tmp, tmp2);
8441 else
8442 gen_helper_sdiv(tmp, tmp, tmp2);
8443 tcg_temp_free_i32(tmp2);
8444 store_reg(s, rd, tmp);
8445 } else if ((op & 0xe) == 0xc) {
8446 /* Dual multiply accumulate long. */
8447 if (op & 1)
8448 gen_swap_half(tmp2);
8449 gen_smul_dual(tmp, tmp2);
8450 if (op & 0x10) {
8451 tcg_gen_sub_i32(tmp, tmp, tmp2);
8452 } else {
8453 tcg_gen_add_i32(tmp, tmp, tmp2);
8455 tcg_temp_free_i32(tmp2);
8456 /* BUGFIX */
8457 tmp64 = tcg_temp_new_i64();
8458 tcg_gen_ext_i32_i64(tmp64, tmp);
8459 tcg_temp_free_i32(tmp);
8460 gen_addq(s, tmp64, rs, rd);
8461 gen_storeq_reg(s, rs, rd, tmp64);
8462 tcg_temp_free_i64(tmp64);
8463 } else {
8464 if (op & 0x20) {
8465 /* Unsigned 64-bit multiply */
8466 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8467 } else {
8468 if (op & 8) {
8469 /* smlalxy */
8470 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8471 tcg_temp_free_i32(tmp2);
8472 tmp64 = tcg_temp_new_i64();
8473 tcg_gen_ext_i32_i64(tmp64, tmp);
8474 tcg_temp_free_i32(tmp);
8475 } else {
8476 /* Signed 64-bit multiply */
8477 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8480 if (op & 4) {
8481 /* umaal */
8482 gen_addq_lo(s, tmp64, rs);
8483 gen_addq_lo(s, tmp64, rd);
8484 } else if (op & 0x40) {
8485 /* 64-bit accumulate. */
8486 gen_addq(s, tmp64, rs, rd);
8488 gen_storeq_reg(s, rs, rd, tmp64);
8489 tcg_temp_free_i64(tmp64);
8491 break;
8493 break;
8494 case 6: case 7: case 14: case 15:
8495 /* Coprocessor. */
8496 if (((insn >> 24) & 3) == 3) {
8497 /* Translate into the equivalent ARM encoding. */
8498 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8499 if (disas_neon_data_insn(env, s, insn))
8500 goto illegal_op;
8501 } else {
8502 if (insn & (1 << 28))
8503 goto illegal_op;
8504 if (disas_coproc_insn (env, s, insn))
8505 goto illegal_op;
8507 break;
8508 case 8: case 9: case 10: case 11:
8509 if (insn & (1 << 15)) {
8510 /* Branches, misc control. */
8511 if (insn & 0x5000) {
8512 /* Unconditional branch. */
8513 /* signextend(hw1[10:0]) -> offset[:12]. */
8514 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8515 /* hw1[10:0] -> offset[11:1]. */
8516 offset |= (insn & 0x7ff) << 1;
8517 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8518 offset[24:22] already have the same value because of the
8519 sign extension above. */
8520 offset ^= ((~insn) & (1 << 13)) << 10;
8521 offset ^= ((~insn) & (1 << 11)) << 11;
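/* The two XORs above implement I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S) from the Thumb-2 branch encoding: hw2 bits 13
   (J1) and 11 (J2) are inverted and combined with the sign bit, which
   the sign extension above already copied into offset bits 23 and 22.  */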
8523 if (insn & (1 << 14)) {
8524 /* Branch and link. */
8525 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8528 offset += s->pc;
8529 if (insn & (1 << 12)) {
8530 /* b/bl */
8531 gen_jmp(s, offset);
8532 } else {
8533 /* blx */
8534 offset &= ~(uint32_t)2;
8535 /* thumb2 bx, no need to check */
8536 gen_bx_im(s, offset);
8538 } else if (((insn >> 23) & 7) == 7) {
8539 /* Misc control */
8540 if (insn & (1 << 13))
8541 goto illegal_op;
8543 if (insn & (1 << 26)) {
8544 /* Secure monitor call (v6Z) */
8545 goto illegal_op; /* not implemented. */
8546 } else {
8547 op = (insn >> 20) & 7;
8548 switch (op) {
8549 case 0: /* msr cpsr. */
8550 if (IS_M(env)) {
8551 tmp = load_reg(s, rn);
8552 addr = tcg_const_i32(insn & 0xff);
8553 gen_helper_v7m_msr(cpu_env, addr, tmp);
8554 tcg_temp_free_i32(addr);
8555 tcg_temp_free_i32(tmp);
8556 gen_lookup_tb(s);
8557 break;
8559 /* fall through */
8560 case 1: /* msr spsr. */
8561 if (IS_M(env))
8562 goto illegal_op;
8563 tmp = load_reg(s, rn);
8564 if (gen_set_psr(s,
8565 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8566 op == 1, tmp))
8567 goto illegal_op;
8568 break;
8569 case 2: /* cps, nop-hint. */
8570 if (((insn >> 8) & 7) == 0) {
8571 gen_nop_hint(s, insn & 0xff);
8573 /* Implemented as NOP in user mode. */
8574 if (IS_USER(s))
8575 break;
8576 offset = 0;
8577 imm = 0;
8578 if (insn & (1 << 10)) {
8579 if (insn & (1 << 7))
8580 offset |= CPSR_A;
8581 if (insn & (1 << 6))
8582 offset |= CPSR_I;
8583 if (insn & (1 << 5))
8584 offset |= CPSR_F;
8585 if (insn & (1 << 9))
8586 imm = CPSR_A | CPSR_I | CPSR_F;
8588 if (insn & (1 << 8)) {
8589 offset |= 0x1f;
8590 imm |= (insn & 0x1f);
8592 if (offset) {
8593 gen_set_psr_im(s, offset, 0, imm);
8595 break;
8596 case 3: /* Special control operations. */
8597 ARCH(7);
8598 op = (insn >> 4) & 0xf;
8599 switch (op) {
8600 case 2: /* clrex */
8601 gen_clrex(s);
8602 break;
8603 case 4: /* dsb */
8604 case 5: /* dmb */
8605 case 6: /* isb */
8606 /* These execute as NOPs. */
8607 break;
8608 default:
8609 goto illegal_op;
8611 break;
8612 case 4: /* bxj */
8613 /* Trivial implementation equivalent to bx. */
8614 tmp = load_reg(s, rn);
8615 gen_bx(s, tmp);
8616 break;
8617 case 5: /* Exception return. */
8618 if (IS_USER(s)) {
8619 goto illegal_op;
8621 if (rn != 14 || rd != 15) {
8622 goto illegal_op;
8624 tmp = load_reg(s, rn);
8625 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8626 gen_exception_return(s, tmp);
8627 break;
8628 case 6: /* mrs cpsr. */
8629 tmp = tcg_temp_new_i32();
8630 if (IS_M(env)) {
8631 addr = tcg_const_i32(insn & 0xff);
8632 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8633 tcg_temp_free_i32(addr);
8634 } else {
8635 gen_helper_cpsr_read(tmp, cpu_env);
8637 store_reg(s, rd, tmp);
8638 break;
8639 case 7: /* mrs spsr. */
8640 /* Not accessible in user mode. */
8641 if (IS_USER(s) || IS_M(env))
8642 goto illegal_op;
8643 tmp = load_cpu_field(spsr);
8644 store_reg(s, rd, tmp);
8645 break;
8648 } else {
8649 /* Conditional branch. */
8650 op = (insn >> 22) & 0xf;
8651 /* Generate a conditional jump to next instruction. */
8652 s->condlabel = gen_new_label();
8653 gen_test_cc(op ^ 1, s->condlabel);
8654 s->condjmp = 1;
8656 /* offset[11:1] = insn[10:0] */
8657 offset = (insn & 0x7ff) << 1;
8658 /* offset[17:12] = insn[21:16]. */
8659 offset |= (insn & 0x003f0000) >> 4;
8660 /* offset[31:20] = insn[26]. */
8661 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8662 /* offset[18] = insn[13]. */
8663 offset |= (insn & (1 << 13)) << 5;
8664 /* offset[19] = insn[11]. */
8665 offset |= (insn & (1 << 11)) << 8;
8667 /* jump to the offset */
8668 gen_jmp(s, s->pc + offset);
8670 } else {
8671 /* Data processing immediate. */
8672 if (insn & (1 << 25)) {
8673 if (insn & (1 << 24)) {
8674 if (insn & (1 << 20))
8675 goto illegal_op;
8676 /* Bitfield/Saturate. */
8677 op = (insn >> 21) & 7;
8678 imm = insn & 0x1f;
8679 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8680 if (rn == 15) {
8681 tmp = tcg_temp_new_i32();
8682 tcg_gen_movi_i32(tmp, 0);
8683 } else {
8684 tmp = load_reg(s, rn);
8686 switch (op) {
8687 case 2: /* Signed bitfield extract. */
8688 imm++;
8689 if (shift + imm > 32)
8690 goto illegal_op;
8691 if (imm < 32)
8692 gen_sbfx(tmp, shift, imm);
8693 break;
8694 case 6: /* Unsigned bitfield extract. */
8695 imm++;
8696 if (shift + imm > 32)
8697 goto illegal_op;
8698 if (imm < 32)
8699 gen_ubfx(tmp, shift, (1u << imm) - 1);
8700 break;
8701 case 3: /* Bitfield insert/clear. */
8702 if (imm < shift)
8703 goto illegal_op;
8704 imm = imm + 1 - shift;
8705 if (imm != 32) {
8706 tmp2 = load_reg(s, rd);
8707 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8708 tcg_temp_free_i32(tmp2);
8710 break;
8711 case 7:
8712 goto illegal_op;
8713 default: /* Saturate. */
8714 if (shift) {
8715 if (op & 1)
8716 tcg_gen_sari_i32(tmp, tmp, shift);
8717 else
8718 tcg_gen_shli_i32(tmp, tmp, shift);
8720 tmp2 = tcg_const_i32(imm);
8721 if (op & 4) {
8722 /* Unsigned. */
8723 if ((op & 1) && shift == 0)
8724 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8725 else
8726 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8727 } else {
8728 /* Signed. */
8729 if ((op & 1) && shift == 0)
8730 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8731 else
8732 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8734 tcg_temp_free_i32(tmp2);
8735 break;
8737 store_reg(s, rd, tmp);
8738 } else {
8739 imm = ((insn & 0x04000000) >> 15)
8740 | ((insn & 0x7000) >> 4) | (insn & 0xff);
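/* Combine the i:imm3:imm8 fields (insn[26], insn[14:12], insn[7:0])
 * into a single 12-bit immediate. */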
8741 if (insn & (1 << 22)) {
8742 /* 16-bit immediate. */
8743 imm |= (insn >> 4) & 0xf000;
8744 if (insn & (1 << 23)) {
8745 /* movt */
8746 tmp = load_reg(s, rd);
8747 tcg_gen_ext16u_i32(tmp, tmp);
8748 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8749 } else {
8750 /* movw */
8751 tmp = tcg_temp_new_i32();
8752 tcg_gen_movi_i32(tmp, imm);
8754 } else {
8755 /* Add/sub 12-bit immediate. */
8756 if (rn == 15) {
8757 offset = s->pc & ~(uint32_t)3;
8758 if (insn & (1 << 23))
8759 offset -= imm;
8760 else
8761 offset += imm;
8762 tmp = tcg_temp_new_i32();
8763 tcg_gen_movi_i32(tmp, offset);
8764 } else {
8765 tmp = load_reg(s, rn);
8766 if (insn & (1 << 23))
8767 tcg_gen_subi_i32(tmp, tmp, imm);
8768 else
8769 tcg_gen_addi_i32(tmp, tmp, imm);
8772 store_reg(s, rd, tmp);
8774 } else {
8775 int shifter_out = 0;
8776 /* modified 12-bit immediate. */
8777 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8778 imm = (insn & 0xff);
8779 switch (shift) {
8780 case 0: /* 000000XY */
8781 /* Nothing to do. */
8782 break;
8783 case 1: /* 00XY00XY */
8784 imm |= imm << 16;
8785 break;
8786 case 2: /* XY00XY00 */
8787 imm |= imm << 16;
8788 imm <<= 8;
8789 break;
8790 case 3: /* XYXYXYXY */
8791 imm |= imm << 16;
8792 imm |= imm << 8;
8793 break;
8794 default: /* Rotated constant. */
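/* ThumbExpandImm rotated form: imm8 with bit 7 forced to 1, rotated right
 * by i:imm3:a (a = bit 7 of imm8); since the rotation is at least 8, the
 * rotate reduces to imm << (32 - shift). */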
8795 shift = (shift << 1) | (imm >> 7);
8796 imm |= 0x80;
8797 imm = imm << (32 - shift);
8798 shifter_out = 1;
8799 break;
8801 tmp2 = tcg_temp_new_i32();
8802 tcg_gen_movi_i32(tmp2, imm);
8803 rn = (insn >> 16) & 0xf;
8804 if (rn == 15) {
8805 tmp = tcg_temp_new_i32();
8806 tcg_gen_movi_i32(tmp, 0);
8807 } else {
8808 tmp = load_reg(s, rn);
8810 op = (insn >> 21) & 0xf;
8811 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8812 shifter_out, tmp, tmp2))
8813 goto illegal_op;
8814 tcg_temp_free_i32(tmp2);
8815 rd = (insn >> 8) & 0xf;
8816 if (rd != 15) {
8817 store_reg(s, rd, tmp);
8818 } else {
8819 tcg_temp_free_i32(tmp);
8823 break;
8824 case 12: /* Load/store single data item. */
8826 int postinc = 0;
8827 int writeback = 0;
8828 int user;
8829 if ((insn & 0x01100000) == 0x01000000) {
8830 if (disas_neon_ls_insn(env, s, insn))
8831 goto illegal_op;
8832 break;
8834 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
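/* op[1:0] is the access size from insn[22:21]; op[2] is the
 * sign-extension bit taken from insn[24]. */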
8835 if (rs == 15) {
8836 if (!(insn & (1 << 20))) {
8837 goto illegal_op;
8839 if (op != 2) {
8840 /* Byte or halfword load space with dest == r15 : memory hints.
8841 * Catch them early so we don't emit pointless addressing code.
8842 * This space is a mix of:
8843 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8844 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8845 * cores)
8846 * unallocated hints, which must be treated as NOPs
8847 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8848 * which is easiest for the decoding logic
8849 * Some space which must UNDEF
8850 */
8851 int op1 = (insn >> 23) & 3;
8852 int op2 = (insn >> 6) & 0x3f;
8853 if (op & 2) {
8854 goto illegal_op;
8856 if (rn == 15) {
8857 /* UNPREDICTABLE, unallocated hint or
8858 * PLD/PLDW/PLI (literal)
8859 */
8860 return 0;
8862 if (op1 & 1) {
8863 return 0; /* PLD/PLDW/PLI or unallocated hint */
8865 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8866 return 0; /* PLD/PLDW/PLI or unallocated hint */
8868 /* UNDEF space, or an UNPREDICTABLE */
8869 return 1;
8872 user = IS_USER(s);
8873 if (rn == 15) {
8874 addr = tcg_temp_new_i32();
8875 /* PC relative. */
8876 /* s->pc has already been incremented by 4. */
8877 imm = s->pc & 0xfffffffc;
8878 if (insn & (1 << 23))
8879 imm += insn & 0xfff;
8880 else
8881 imm -= insn & 0xfff;
8882 tcg_gen_movi_i32(addr, imm);
8883 } else {
8884 addr = load_reg(s, rn);
8885 if (insn & (1 << 23)) {
8886 /* Positive offset. */
8887 imm = insn & 0xfff;
8888 tcg_gen_addi_i32(addr, addr, imm);
8889 } else {
8890 imm = insn & 0xff;
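/* The remaining forms carry an 8-bit immediate; insn[11:8] selects
 * shifted-register, negative-offset, unprivileged, or pre/post-indexed
 * addressing below. */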
8891 switch ((insn >> 8) & 0xf) {
8892 case 0x0: /* Shifted Register. */
8893 shift = (insn >> 4) & 0xf;
8894 if (shift > 3) {
8895 tcg_temp_free_i32(addr);
8896 goto illegal_op;
8898 tmp = load_reg(s, rm);
8899 if (shift)
8900 tcg_gen_shli_i32(tmp, tmp, shift);
8901 tcg_gen_add_i32(addr, addr, tmp);
8902 tcg_temp_free_i32(tmp);
8903 break;
8904 case 0xc: /* Negative offset. */
8905 tcg_gen_addi_i32(addr, addr, -imm);
8906 break;
8907 case 0xe: /* User privilege. */
8908 tcg_gen_addi_i32(addr, addr, imm);
8909 user = 1;
8910 break;
8911 case 0x9: /* Post-decrement. */
8912 imm = -imm;
8913 /* Fall through. */
8914 case 0xb: /* Post-increment. */
8915 postinc = 1;
8916 writeback = 1;
8917 break;
8918 case 0xd: /* Pre-decrement. */
8919 imm = -imm;
8920 /* Fall through. */
8921 case 0xf: /* Pre-increment. */
8922 tcg_gen_addi_i32(addr, addr, imm);
8923 writeback = 1;
8924 break;
8925 default:
8926 tcg_temp_free_i32(addr);
8927 goto illegal_op;
8931 if (insn & (1 << 20)) {
8932 /* Load. */
8933 switch (op) {
8934 case 0: tmp = gen_ld8u(addr, user); break;
8935 case 4: tmp = gen_ld8s(addr, user); break;
8936 case 1: tmp = gen_ld16u(addr, user); break;
8937 case 5: tmp = gen_ld16s(addr, user); break;
8938 case 2: tmp = gen_ld32(addr, user); break;
8939 default:
8940 tcg_temp_free_i32(addr);
8941 goto illegal_op;
8943 if (rs == 15) {
8944 gen_bx(s, tmp);
8945 } else {
8946 store_reg(s, rs, tmp);
8948 } else {
8949 /* Store. */
8950 tmp = load_reg(s, rs);
8951 switch (op) {
8952 case 0: gen_st8(tmp, addr, user); break;
8953 case 1: gen_st16(tmp, addr, user); break;
8954 case 2: gen_st32(tmp, addr, user); break;
8955 default:
8956 tcg_temp_free_i32(addr);
8957 goto illegal_op;
8960 if (postinc)
8961 tcg_gen_addi_i32(addr, addr, imm);
8962 if (writeback) {
8963 store_reg(s, rn, addr);
8964 } else {
8965 tcg_temp_free_i32(addr);
8968 break;
8969 default:
8970 goto illegal_op;
8972 return 0;
8973 illegal_op:
8974 return 1;
8977 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
8979 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8980 int32_t offset;
8981 int i;
8982 TCGv tmp;
8983 TCGv tmp2;
8984 TCGv addr;
8986 if (s->condexec_mask) {
8987 cond = s->condexec_cond;
8988 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8989 s->condlabel = gen_new_label();
8990 gen_test_cc(cond ^ 1, s->condlabel);
8991 s->condjmp = 1;
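/* Instructions inside an IT block are predicated: branch over the code
 * generated for this insn when the block's condition is false. */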
8995 insn = arm_lduw_code(env, s->pc, s->bswap_code);
8996 s->pc += 2;
8998 switch (insn >> 12) {
8999 case 0: case 1:
9001 rd = insn & 7;
9002 op = (insn >> 11) & 3;
9003 if (op == 3) {
9004 /* add/subtract */
9005 rn = (insn >> 3) & 7;
9006 tmp = load_reg(s, rn);
9007 if (insn & (1 << 10)) {
9008 /* immediate */
9009 tmp2 = tcg_temp_new_i32();
9010 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9011 } else {
9012 /* reg */
9013 rm = (insn >> 6) & 7;
9014 tmp2 = load_reg(s, rm);
9016 if (insn & (1 << 9)) {
9017 if (s->condexec_mask)
9018 tcg_gen_sub_i32(tmp, tmp, tmp2);
9019 else
9020 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9021 } else {
9022 if (s->condexec_mask)
9023 tcg_gen_add_i32(tmp, tmp, tmp2);
9024 else
9025 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
9027 tcg_temp_free_i32(tmp2);
9028 store_reg(s, rd, tmp);
9029 } else {
9030 /* shift immediate */
9031 rm = (insn >> 3) & 7;
9032 shift = (insn >> 6) & 0x1f;
9033 tmp = load_reg(s, rm);
9034 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9035 if (!s->condexec_mask)
9036 gen_logic_CC(tmp);
9037 store_reg(s, rd, tmp);
9039 break;
9040 case 2: case 3:
9041 /* arithmetic large immediate */
9042 op = (insn >> 11) & 3;
9043 rd = (insn >> 8) & 0x7;
9044 if (op == 0) { /* mov */
9045 tmp = tcg_temp_new_i32();
9046 tcg_gen_movi_i32(tmp, insn & 0xff);
9047 if (!s->condexec_mask)
9048 gen_logic_CC(tmp);
9049 store_reg(s, rd, tmp);
9050 } else {
9051 tmp = load_reg(s, rd);
9052 tmp2 = tcg_temp_new_i32();
9053 tcg_gen_movi_i32(tmp2, insn & 0xff);
9054 switch (op) {
9055 case 1: /* cmp */
9056 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9057 tcg_temp_free_i32(tmp);
9058 tcg_temp_free_i32(tmp2);
9059 break;
9060 case 2: /* add */
9061 if (s->condexec_mask)
9062 tcg_gen_add_i32(tmp, tmp, tmp2);
9063 else
9064 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
9065 tcg_temp_free_i32(tmp2);
9066 store_reg(s, rd, tmp);
9067 break;
9068 case 3: /* sub */
9069 if (s->condexec_mask)
9070 tcg_gen_sub_i32(tmp, tmp, tmp2);
9071 else
9072 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9073 tcg_temp_free_i32(tmp2);
9074 store_reg(s, rd, tmp);
9075 break;
9078 break;
9079 case 4:
9080 if (insn & (1 << 11)) {
9081 rd = (insn >> 8) & 7;
9082 /* load pc-relative. Bit 1 of PC is ignored. */
9083 val = s->pc + 2 + ((insn & 0xff) * 4);
9084 val &= ~(uint32_t)2;
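/* s->pc already points past this insn, so pc + 2 is the architectural PC
 * (insn address + 4); LDR (literal) uses it word-aligned as the base. */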
9085 addr = tcg_temp_new_i32();
9086 tcg_gen_movi_i32(addr, val);
9087 tmp = gen_ld32(addr, IS_USER(s));
9088 tcg_temp_free_i32(addr);
9089 store_reg(s, rd, tmp);
9090 break;
9092 if (insn & (1 << 10)) {
9093 /* data processing extended or blx */
9094 rd = (insn & 7) | ((insn >> 4) & 8);
9095 rm = (insn >> 3) & 0xf;
9096 op = (insn >> 8) & 3;
9097 switch (op) {
9098 case 0: /* add */
9099 tmp = load_reg(s, rd);
9100 tmp2 = load_reg(s, rm);
9101 tcg_gen_add_i32(tmp, tmp, tmp2);
9102 tcg_temp_free_i32(tmp2);
9103 store_reg(s, rd, tmp);
9104 break;
9105 case 1: /* cmp */
9106 tmp = load_reg(s, rd);
9107 tmp2 = load_reg(s, rm);
9108 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9109 tcg_temp_free_i32(tmp2);
9110 tcg_temp_free_i32(tmp);
9111 break;
9112 case 2: /* mov/cpy */
9113 tmp = load_reg(s, rm);
9114 store_reg(s, rd, tmp);
9115 break;
9116 case 3:/* branch [and link] exchange thumb register */
9117 tmp = load_reg(s, rm);
9118 if (insn & (1 << 7)) {
9119 ARCH(5);
9120 val = (uint32_t)s->pc | 1;
9121 tmp2 = tcg_temp_new_i32();
9122 tcg_gen_movi_i32(tmp2, val);
9123 store_reg(s, 14, tmp2);
9125 /* already thumb, no need to check */
9126 gen_bx(s, tmp);
9127 break;
9129 break;
9132 /* data processing register */
9133 rd = insn & 7;
9134 rm = (insn >> 3) & 7;
9135 op = (insn >> 6) & 0xf;
9136 if (op == 2 || op == 3 || op == 4 || op == 7) {
9137 /* the shift/rotate ops want the operands backwards */
9138 val = rm;
9139 rm = rd;
9140 rd = val;
9141 val = 1;
9142 } else {
9143 val = 0;
9146 if (op == 9) { /* neg */
9147 tmp = tcg_temp_new_i32();
9148 tcg_gen_movi_i32(tmp, 0);
9149 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9150 tmp = load_reg(s, rd);
9151 } else {
9152 TCGV_UNUSED(tmp);
9155 tmp2 = load_reg(s, rm);
9156 switch (op) {
9157 case 0x0: /* and */
9158 tcg_gen_and_i32(tmp, tmp, tmp2);
9159 if (!s->condexec_mask)
9160 gen_logic_CC(tmp);
9161 break;
9162 case 0x1: /* eor */
9163 tcg_gen_xor_i32(tmp, tmp, tmp2);
9164 if (!s->condexec_mask)
9165 gen_logic_CC(tmp);
9166 break;
9167 case 0x2: /* lsl */
9168 if (s->condexec_mask) {
9169 gen_helper_shl(tmp2, cpu_env, tmp2, tmp);
9170 } else {
9171 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9172 gen_logic_CC(tmp2);
9174 break;
9175 case 0x3: /* lsr */
9176 if (s->condexec_mask) {
9177 gen_helper_shr(tmp2, cpu_env, tmp2, tmp);
9178 } else {
9179 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9180 gen_logic_CC(tmp2);
9182 break;
9183 case 0x4: /* asr */
9184 if (s->condexec_mask) {
9185 gen_helper_sar(tmp2, cpu_env, tmp2, tmp);
9186 } else {
9187 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9188 gen_logic_CC(tmp2);
9190 break;
9191 case 0x5: /* adc */
9192 if (s->condexec_mask)
9193 gen_adc(tmp, tmp2);
9194 else
9195 gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
9196 break;
9197 case 0x6: /* sbc */
9198 if (s->condexec_mask)
9199 gen_sub_carry(tmp, tmp, tmp2);
9200 else
9201 gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
9202 break;
9203 case 0x7: /* ror */
9204 if (s->condexec_mask) {
9205 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9206 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9207 } else {
9208 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9209 gen_logic_CC(tmp2);
9211 break;
9212 case 0x8: /* tst */
9213 tcg_gen_and_i32(tmp, tmp, tmp2);
9214 gen_logic_CC(tmp);
9215 rd = 16;
9216 break;
9217 case 0x9: /* neg */
9218 if (s->condexec_mask)
9219 tcg_gen_neg_i32(tmp, tmp2);
9220 else
9221 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9222 break;
9223 case 0xa: /* cmp */
9224 gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
9225 rd = 16;
9226 break;
9227 case 0xb: /* cmn */
9228 gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
9229 rd = 16;
9230 break;
9231 case 0xc: /* orr */
9232 tcg_gen_or_i32(tmp, tmp, tmp2);
9233 if (!s->condexec_mask)
9234 gen_logic_CC(tmp);
9235 break;
9236 case 0xd: /* mul */
9237 tcg_gen_mul_i32(tmp, tmp, tmp2);
9238 if (!s->condexec_mask)
9239 gen_logic_CC(tmp);
9240 break;
9241 case 0xe: /* bic */
9242 tcg_gen_andc_i32(tmp, tmp, tmp2);
9243 if (!s->condexec_mask)
9244 gen_logic_CC(tmp);
9245 break;
9246 case 0xf: /* mvn */
9247 tcg_gen_not_i32(tmp2, tmp2);
9248 if (!s->condexec_mask)
9249 gen_logic_CC(tmp2);
9250 val = 1;
9251 rm = rd;
9252 break;
9254 if (rd != 16) {
9255 if (val) {
9256 store_reg(s, rm, tmp2);
9257 if (op != 0xf)
9258 tcg_temp_free_i32(tmp);
9259 } else {
9260 store_reg(s, rd, tmp);
9261 tcg_temp_free_i32(tmp2);
9263 } else {
9264 tcg_temp_free_i32(tmp);
9265 tcg_temp_free_i32(tmp2);
9267 break;
9269 case 5:
9270 /* load/store register offset. */
9271 rd = insn & 7;
9272 rn = (insn >> 3) & 7;
9273 rm = (insn >> 6) & 7;
9274 op = (insn >> 9) & 7;
9275 addr = load_reg(s, rn);
9276 tmp = load_reg(s, rm);
9277 tcg_gen_add_i32(addr, addr, tmp);
9278 tcg_temp_free_i32(tmp);
9280 if (op < 3) /* store */
9281 tmp = load_reg(s, rd);
9283 switch (op) {
9284 case 0: /* str */
9285 gen_st32(tmp, addr, IS_USER(s));
9286 break;
9287 case 1: /* strh */
9288 gen_st16(tmp, addr, IS_USER(s));
9289 break;
9290 case 2: /* strb */
9291 gen_st8(tmp, addr, IS_USER(s));
9292 break;
9293 case 3: /* ldrsb */
9294 tmp = gen_ld8s(addr, IS_USER(s));
9295 break;
9296 case 4: /* ldr */
9297 tmp = gen_ld32(addr, IS_USER(s));
9298 break;
9299 case 5: /* ldrh */
9300 tmp = gen_ld16u(addr, IS_USER(s));
9301 break;
9302 case 6: /* ldrb */
9303 tmp = gen_ld8u(addr, IS_USER(s));
9304 break;
9305 case 7: /* ldrsh */
9306 tmp = gen_ld16s(addr, IS_USER(s));
9307 break;
9309 if (op >= 3) /* load */
9310 store_reg(s, rd, tmp);
9311 tcg_temp_free_i32(addr);
9312 break;
9314 case 6:
9315 /* load/store word immediate offset */
9316 rd = insn & 7;
9317 rn = (insn >> 3) & 7;
9318 addr = load_reg(s, rn);
9319 val = (insn >> 4) & 0x7c;
9320 tcg_gen_addi_i32(addr, addr, val);
9322 if (insn & (1 << 11)) {
9323 /* load */
9324 tmp = gen_ld32(addr, IS_USER(s));
9325 store_reg(s, rd, tmp);
9326 } else {
9327 /* store */
9328 tmp = load_reg(s, rd);
9329 gen_st32(tmp, addr, IS_USER(s));
9331 tcg_temp_free_i32(addr);
9332 break;
9334 case 7:
9335 /* load/store byte immediate offset */
9336 rd = insn & 7;
9337 rn = (insn >> 3) & 7;
9338 addr = load_reg(s, rn);
9339 val = (insn >> 6) & 0x1f;
9340 tcg_gen_addi_i32(addr, addr, val);
9342 if (insn & (1 << 11)) {
9343 /* load */
9344 tmp = gen_ld8u(addr, IS_USER(s));
9345 store_reg(s, rd, tmp);
9346 } else {
9347 /* store */
9348 tmp = load_reg(s, rd);
9349 gen_st8(tmp, addr, IS_USER(s));
9351 tcg_temp_free_i32(addr);
9352 break;
9354 case 8:
9355 /* load/store halfword immediate offset */
9356 rd = insn & 7;
9357 rn = (insn >> 3) & 7;
9358 addr = load_reg(s, rn);
9359 val = (insn >> 5) & 0x3e;
9360 tcg_gen_addi_i32(addr, addr, val);
9362 if (insn & (1 << 11)) {
9363 /* load */
9364 tmp = gen_ld16u(addr, IS_USER(s));
9365 store_reg(s, rd, tmp);
9366 } else {
9367 /* store */
9368 tmp = load_reg(s, rd);
9369 gen_st16(tmp, addr, IS_USER(s));
9371 tcg_temp_free_i32(addr);
9372 break;
9374 case 9:
9375 /* load/store from stack */
9376 rd = (insn >> 8) & 7;
9377 addr = load_reg(s, 13);
9378 val = (insn & 0xff) * 4;
9379 tcg_gen_addi_i32(addr, addr, val);
9381 if (insn & (1 << 11)) {
9382 /* load */
9383 tmp = gen_ld32(addr, IS_USER(s));
9384 store_reg(s, rd, tmp);
9385 } else {
9386 /* store */
9387 tmp = load_reg(s, rd);
9388 gen_st32(tmp, addr, IS_USER(s));
9390 tcg_temp_free_i32(addr);
9391 break;
9393 case 10:
9394 /* add to high reg */
9395 rd = (insn >> 8) & 7;
9396 if (insn & (1 << 11)) {
9397 /* SP */
9398 tmp = load_reg(s, 13);
9399 } else {
9400 /* PC. bit 1 is ignored. */
9401 tmp = tcg_temp_new_i32();
9402 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9404 val = (insn & 0xff) * 4;
9405 tcg_gen_addi_i32(tmp, tmp, val);
9406 store_reg(s, rd, tmp);
9407 break;
9409 case 11:
9410 /* misc */
9411 op = (insn >> 8) & 0xf;
9412 switch (op) {
9413 case 0:
9414 /* adjust stack pointer */
9415 tmp = load_reg(s, 13);
9416 val = (insn & 0x7f) * 4;
9417 if (insn & (1 << 7))
9418 val = -(int32_t)val;
9419 tcg_gen_addi_i32(tmp, tmp, val);
9420 store_reg(s, 13, tmp);
9421 break;
9423 case 2: /* sign/zero extend. */
9424 ARCH(6);
9425 rd = insn & 7;
9426 rm = (insn >> 3) & 7;
9427 tmp = load_reg(s, rm);
9428 switch ((insn >> 6) & 3) {
9429 case 0: gen_sxth(tmp); break;
9430 case 1: gen_sxtb(tmp); break;
9431 case 2: gen_uxth(tmp); break;
9432 case 3: gen_uxtb(tmp); break;
9434 store_reg(s, rd, tmp);
9435 break;
9436 case 4: case 5: case 0xc: case 0xd:
9437 /* push/pop */
9438 addr = load_reg(s, 13);
9439 if (insn & (1 << 8))
9440 offset = 4;
9441 else
9442 offset = 0;
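/* Count the bytes to transfer (bit 8 adds LR on push / PC on pop) so a
 * push can pre-decrement SP before the registers are stored. */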
9443 for (i = 0; i < 8; i++) {
9444 if (insn & (1 << i))
9445 offset += 4;
9447 if ((insn & (1 << 11)) == 0) {
9448 tcg_gen_addi_i32(addr, addr, -offset);
9450 for (i = 0; i < 8; i++) {
9451 if (insn & (1 << i)) {
9452 if (insn & (1 << 11)) {
9453 /* pop */
9454 tmp = gen_ld32(addr, IS_USER(s));
9455 store_reg(s, i, tmp);
9456 } else {
9457 /* push */
9458 tmp = load_reg(s, i);
9459 gen_st32(tmp, addr, IS_USER(s));
9461 /* advance to the next address. */
9462 tcg_gen_addi_i32(addr, addr, 4);
9465 TCGV_UNUSED(tmp);
9466 if (insn & (1 << 8)) {
9467 if (insn & (1 << 11)) {
9468 /* pop pc */
9469 tmp = gen_ld32(addr, IS_USER(s));
9470 /* don't set the pc until the rest of the instruction
9471 has completed */
9472 } else {
9473 /* push lr */
9474 tmp = load_reg(s, 14);
9475 gen_st32(tmp, addr, IS_USER(s));
9477 tcg_gen_addi_i32(addr, addr, 4);
9479 if ((insn & (1 << 11)) == 0) {
9480 tcg_gen_addi_i32(addr, addr, -offset);
9482 /* write back the new stack pointer */
9483 store_reg(s, 13, addr);
9484 /* set the new PC value */
9485 if ((insn & 0x0900) == 0x0900) {
9486 store_reg_from_load(env, s, 15, tmp);
9488 break;
9490 case 1: case 3: case 9: case 11: /* cbz/cbnz (compare and branch on zero/nonzero) */
9491 rm = insn & 7;
9492 tmp = load_reg(s, rm);
9493 s->condlabel = gen_new_label();
9494 s->condjmp = 1;
9495 if (insn & (1 << 11))
9496 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9497 else
9498 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9499 tcg_temp_free_i32(tmp);
9500 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
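/* Reassemble the branch offset: imm5 (insn[7:3]) scaled by 2, with the
 * i bit (insn[9]) providing offset bit 6. */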
9501 val = (uint32_t)s->pc + 2;
9502 val += offset;
9503 gen_jmp(s, val);
9504 break;
9506 case 15: /* IT, nop-hint. */
9507 if ((insn & 0xf) == 0) {
9508 gen_nop_hint(s, (insn >> 4) & 0xf);
9509 break;
9511 /* If Then. */
9512 s->condexec_cond = (insn >> 4) & 0xe;
9513 s->condexec_mask = insn & 0x1f;
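/* firstcond[3:1] goes in condexec_cond; its low bit rides in bit 4 of the
 * 5-bit mask and is recombined (and the mask shifted) per instruction by
 * the translation loop. */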
9514 /* No actual code generated for this insn, just setup state. */
9515 break;
9517 case 0xe: /* bkpt */
9518 ARCH(5);
9519 gen_exception_insn(s, 2, EXCP_BKPT);
9520 break;
9522 case 0xa: /* rev */
9523 ARCH(6);
9524 rn = (insn >> 3) & 0x7;
9525 rd = insn & 0x7;
9526 tmp = load_reg(s, rn);
9527 switch ((insn >> 6) & 3) {
9528 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9529 case 1: gen_rev16(tmp); break;
9530 case 3: gen_revsh(tmp); break;
9531 default: goto illegal_op;
9533 store_reg(s, rd, tmp);
9534 break;
9536 case 6:
9537 switch ((insn >> 5) & 7) {
9538 case 2:
9539 /* setend */
9540 ARCH(6);
9541 if (((insn >> 3) & 1) != s->bswap_code) {
9542 /* Dynamic endianness switching not implemented. */
9543 goto illegal_op;
9545 break;
9546 case 3:
9547 /* cps */
9548 ARCH(6);
9549 if (IS_USER(s)) {
9550 break;
9552 if (IS_M(env)) {
9553 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9554 /* FAULTMASK */
9555 if (insn & 1) {
9556 addr = tcg_const_i32(19);
9557 gen_helper_v7m_msr(cpu_env, addr, tmp);
9558 tcg_temp_free_i32(addr);
9560 /* PRIMASK */
9561 if (insn & 2) {
9562 addr = tcg_const_i32(16);
9563 gen_helper_v7m_msr(cpu_env, addr, tmp);
9564 tcg_temp_free_i32(addr);
9566 tcg_temp_free_i32(tmp);
9567 gen_lookup_tb(s);
9568 } else {
9569 if (insn & (1 << 4)) {
9570 shift = CPSR_A | CPSR_I | CPSR_F;
9571 } else {
9572 shift = 0;
9574 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9576 break;
9577 default:
9578 goto undef;
9580 break;
9582 default:
9583 goto undef;
9585 break;
9587 case 12:
9589 /* load/store multiple */
9590 TCGv loaded_var;
9591 TCGV_UNUSED(loaded_var);
9592 rn = (insn >> 8) & 0x7;
9593 addr = load_reg(s, rn);
9594 for (i = 0; i < 8; i++) {
9595 if (insn & (1 << i)) {
9596 if (insn & (1 << 11)) {
9597 /* load */
9598 tmp = gen_ld32(addr, IS_USER(s));
9599 if (i == rn) {
9600 loaded_var = tmp;
9601 } else {
9602 store_reg(s, i, tmp);
9604 } else {
9605 /* store */
9606 tmp = load_reg(s, i);
9607 gen_st32(tmp, addr, IS_USER(s));
9609 /* advance to the next address */
9610 tcg_gen_addi_i32(addr, addr, 4);
9613 if ((insn & (1 << rn)) == 0) {
9614 /* base reg not in list: base register writeback */
9615 store_reg(s, rn, addr);
9616 } else {
9617 /* base reg in list: if load, complete it now */
9618 if (insn & (1 << 11)) {
9619 store_reg(s, rn, loaded_var);
9621 tcg_temp_free_i32(addr);
9623 break;
9625 case 13:
9626 /* conditional branch or swi */
9627 cond = (insn >> 8) & 0xf;
9628 if (cond == 0xe)
9629 goto undef;
9631 if (cond == 0xf) {
9632 /* swi */
9633 gen_set_pc_im(s->pc);
9634 s->is_jmp = DISAS_SWI;
9635 break;
9637 /* generate a conditional jump to next instruction */
9638 s->condlabel = gen_new_label();
9639 gen_test_cc(cond ^ 1, s->condlabel);
9640 s->condjmp = 1;
9642 /* jump to the offset */
9643 val = (uint32_t)s->pc + 2;
9644 offset = ((int32_t)insn << 24) >> 24;
9645 val += offset << 1;
9646 gen_jmp(s, val);
9647 break;
9649 case 14:
9650 if (insn & (1 << 11)) {
9651 if (disas_thumb2_insn(env, s, insn))
9652 goto undef32;
9653 break;
9655 /* unconditional branch */
9656 val = (uint32_t)s->pc;
9657 offset = ((int32_t)insn << 21) >> 21;
9658 val += (offset << 1) + 2;
9659 gen_jmp(s, val);
9660 break;
9662 case 15:
9663 if (disas_thumb2_insn(env, s, insn))
9664 goto undef32;
9665 break;
9667 return;
9668 undef32:
9669 gen_exception_insn(s, 4, EXCP_UDEF);
9670 return;
9671 illegal_op:
9672 undef:
9673 gen_exception_insn(s, 2, EXCP_UDEF);
9676 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9677 basic block 'tb'. If search_pc is TRUE, also generate PC
9678 information for each intermediate instruction. */
9679 static inline void gen_intermediate_code_internal(CPUARMState *env,
9680 TranslationBlock *tb,
9681 int search_pc)
9683 DisasContext dc1, *dc = &dc1;
9684 CPUBreakpoint *bp;
9685 uint16_t *gen_opc_end;
9686 int j, lj;
9687 target_ulong pc_start;
9688 uint32_t next_page_start;
9689 int num_insns;
9690 int max_insns;
9692 /* generate intermediate code */
9693 pc_start = tb->pc;
9695 dc->tb = tb;
9697 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9699 dc->is_jmp = DISAS_NEXT;
9700 dc->pc = pc_start;
9701 dc->singlestep_enabled = env->singlestep_enabled;
9702 dc->condjmp = 0;
9703 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9704 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
9705 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9706 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9707 #if !defined(CONFIG_USER_ONLY)
9708 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9709 #endif
9710 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9711 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9712 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9713 cpu_F0s = tcg_temp_new_i32();
9714 cpu_F1s = tcg_temp_new_i32();
9715 cpu_F0d = tcg_temp_new_i64();
9716 cpu_F1d = tcg_temp_new_i64();
9717 cpu_V0 = cpu_F0d;
9718 cpu_V1 = cpu_F1d;
9719 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9720 cpu_M0 = tcg_temp_new_i64();
9721 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9722 lj = -1;
9723 num_insns = 0;
9724 max_insns = tb->cflags & CF_COUNT_MASK;
9725 if (max_insns == 0)
9726 max_insns = CF_COUNT_MASK;
9728 gen_icount_start();
9730 tcg_clear_temp_count();
9732 /* A note on handling of the condexec (IT) bits:
9734 * We want to avoid the overhead of having to write the updated condexec
9735 * bits back to the CPUARMState for every instruction in an IT block. So:
9736 * (1) if the condexec bits are not already zero then we write
9737 * zero back into the CPUARMState now. This avoids complications trying
9738 * to do it at the end of the block. (For example if we don't do this
9739 * it's hard to identify whether we can safely skip writing condexec
9740 * at the end of the TB, which we definitely want to do for the case
9741 * where a TB doesn't do anything with the IT state at all.)
9742 * (2) if we are going to leave the TB then we call gen_set_condexec()
9743 * which will write the correct value into CPUARMState if zero is wrong.
9744 * This is done both for leaving the TB at the end, and for leaving
9745 * it because of an exception we know will happen, which is done in
9746 * gen_exception_insn(). The latter is necessary because we need to
9747 * leave the TB with the PC/IT state just prior to execution of the
9748 * instruction which caused the exception.
9749 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9750 * then the CPUARMState will be wrong and we need to reset it.
9751 * This is handled in the same way as restoration of the
9752 * PC in these situations: we will be called again with search_pc=1
9753 * and generate a mapping of the condexec bits for each PC in
9754 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9755 * this to restore the condexec bits.
9757 * Note that there are no instructions which can read the condexec
9758 * bits, and none which can write non-static values to them, so
9759 * we don't need to care about whether CPUARMState is correct in the
9760 * middle of a TB.
9761 */
9763 /* Reset the conditional execution bits immediately. This avoids
9764 complications trying to do it at the end of the block. */
9765 if (dc->condexec_mask || dc->condexec_cond)
9767 TCGv tmp = tcg_temp_new_i32();
9768 tcg_gen_movi_i32(tmp, 0);
9769 store_cpu_field(tmp, condexec_bits);
9771 do {
9772 #ifdef CONFIG_USER_ONLY
9773 /* Intercept jump to the magic kernel page. */
9774 if (dc->pc >= 0xffff0000) {
9775 /* We always get here via a jump, so know we are not in a
9776 conditional execution block. */
9777 gen_exception(EXCP_KERNEL_TRAP);
9778 dc->is_jmp = DISAS_UPDATE;
9779 break;
9781 #else
9782 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9783 /* We always get here via a jump, so know we are not in a
9784 conditional execution block. */
9785 gen_exception(EXCP_EXCEPTION_EXIT);
9786 dc->is_jmp = DISAS_UPDATE;
9787 break;
9789 #endif
9791 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9792 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9793 if (bp->pc == dc->pc) {
9794 gen_exception_insn(dc, 0, EXCP_DEBUG);
9795 /* Advance PC so that clearing the breakpoint will
9796 invalidate this TB. */
9797 dc->pc += 2;
9798 goto done_generating;
9799 break;
9803 if (search_pc) {
9804 j = gen_opc_ptr - gen_opc_buf;
9805 if (lj < j) {
9806 lj++;
9807 while (lj < j)
9808 gen_opc_instr_start[lj++] = 0;
9810 gen_opc_pc[lj] = dc->pc;
9811 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9812 gen_opc_instr_start[lj] = 1;
9813 gen_opc_icount[lj] = num_insns;
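/* Record the pc, IT-state bits and instruction count for this op index so
 * restore_state_to_opc() can rebuild CPU state after a fault mid-TB. */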
9816 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9817 gen_io_start();
9819 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
9820 tcg_gen_debug_insn_start(dc->pc);
9823 if (dc->thumb) {
9824 disas_thumb_insn(env, dc);
9825 if (dc->condexec_mask) {
9826 dc->condexec_cond = (dc->condexec_cond & 0xe)
9827 | ((dc->condexec_mask >> 4) & 1);
9828 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9829 if (dc->condexec_mask == 0) {
9830 dc->condexec_cond = 0;
9833 } else {
9834 disas_arm_insn(env, dc);
9837 if (dc->condjmp && !dc->is_jmp) {
9838 gen_set_label(dc->condlabel);
9839 dc->condjmp = 0;
9842 if (tcg_check_temp_count()) {
9843 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9846 /* Translation stops when a conditional branch is encountered.
9847 * Otherwise the subsequent code could get translated several times.
9848 * Also stop translation when a page boundary is reached. This
9849 * ensures prefetch aborts occur at the right place. */
9850 num_insns ++;
9851 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9852 !env->singlestep_enabled &&
9853 !singlestep &&
9854 dc->pc < next_page_start &&
9855 num_insns < max_insns);
9857 if (tb->cflags & CF_LAST_IO) {
9858 if (dc->condjmp) {
9859 /* FIXME: This can theoretically happen with self-modifying
9860 code. */
9861 cpu_abort(env, "IO on conditional branch instruction");
9863 gen_io_end();
9866 /* At this stage dc->condjmp will only be set when the skipped
9867 instruction was a conditional branch or trap, and the PC has
9868 already been written. */
9869 if (unlikely(env->singlestep_enabled)) {
9870 /* Make sure the pc is updated, and raise a debug exception. */
9871 if (dc->condjmp) {
9872 gen_set_condexec(dc);
9873 if (dc->is_jmp == DISAS_SWI) {
9874 gen_exception(EXCP_SWI);
9875 } else {
9876 gen_exception(EXCP_DEBUG);
9878 gen_set_label(dc->condlabel);
9880 if (dc->condjmp || !dc->is_jmp) {
9881 gen_set_pc_im(dc->pc);
9882 dc->condjmp = 0;
9884 gen_set_condexec(dc);
9885 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9886 gen_exception(EXCP_SWI);
9887 } else {
9888 /* FIXME: Single stepping a WFI insn will not halt
9889 the CPU. */
9890 gen_exception(EXCP_DEBUG);
9892 } else {
9893 /* While branches must always occur at the end of an IT block,
9894 there are a few other things that can cause us to terminate
9895 the TB in the middle of an IT block:
9896 - Exception generating instructions (bkpt, swi, undefined).
9897 - Page boundaries.
9898 - Hardware watchpoints.
9899 Hardware breakpoints have already been handled and skip this code.
9900 */
9901 gen_set_condexec(dc);
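/* Emit the TB epilogue according to how translation ended: chain to the
 * next TB, exit to the main loop, or handle a pending WFI/SWI. */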
9902 switch(dc->is_jmp) {
9903 case DISAS_NEXT:
9904 gen_goto_tb(dc, 1, dc->pc);
9905 break;
9906 default:
9907 case DISAS_JUMP:
9908 case DISAS_UPDATE:
9909 /* indicate that the hash table must be used to find the next TB */
9910 tcg_gen_exit_tb(0);
9911 break;
9912 case DISAS_TB_JUMP:
9913 /* nothing more to generate */
9914 break;
9915 case DISAS_WFI:
9916 gen_helper_wfi(cpu_env);
9917 break;
9918 case DISAS_SWI:
9919 gen_exception(EXCP_SWI);
9920 break;
9922 if (dc->condjmp) {
9923 gen_set_label(dc->condlabel);
9924 gen_set_condexec(dc);
9925 gen_goto_tb(dc, 1, dc->pc);
9926 dc->condjmp = 0;
9930 done_generating:
9931 gen_icount_end(tb, num_insns);
9932 *gen_opc_ptr = INDEX_op_end;
9934 #ifdef DEBUG_DISAS
9935 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9936 qemu_log("----------------\n");
9937 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9938 log_target_disas(pc_start, dc->pc - pc_start,
9939 dc->thumb | (dc->bswap_code << 1));
9940 qemu_log("\n");
9942 #endif
9943 if (search_pc) {
9944 j = gen_opc_ptr - gen_opc_buf;
9945 lj++;
9946 while (lj <= j)
9947 gen_opc_instr_start[lj++] = 0;
9948 } else {
9949 tb->size = dc->pc - pc_start;
9950 tb->icount = num_insns;
9954 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
9956 gen_intermediate_code_internal(env, tb, 0);
9959 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
9961 gen_intermediate_code_internal(env, tb, 1);
9964 static const char *cpu_mode_names[16] = {
9965 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9966 "???", "???", "???", "und", "???", "???", "???", "sys"
9969 void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
9970 int flags)
9972 int i;
9973 #if 0
9974 union {
9975 uint32_t i;
9976 float s;
9977 } s0, s1;
9978 CPU_DoubleU d;
9979 /* ??? This assumes float64 and double have the same layout.
9980 Oh well, it's only debug dumps. */
9981 union {
9982 float64 f64;
9983 double d;
9984 } d0;
9985 #endif
9986 uint32_t psr;
9988 for (i = 0; i < 16; i++) {
9989 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9990 if ((i % 4) == 3)
9991 cpu_fprintf(f, "\n");
9992 else
9993 cpu_fprintf(f, " ");
9995 psr = cpsr_read(env);
9996 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9997 psr,
9998 psr & (1 << 31) ? 'N' : '-',
9999 psr & (1 << 30) ? 'Z' : '-',
10000 psr & (1 << 29) ? 'C' : '-',
10001 psr & (1 << 28) ? 'V' : '-',
10002 psr & CPSR_T ? 'T' : 'A',
10003 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
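/* The final field prints 32 or 26 depending on mode-field bit 4 (legacy
 * 26-bit vs 32-bit modes); the 'T'/'A' column marks Thumb vs ARM state. */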
10005 #if 0
10006 for (i = 0; i < 16; i++) {
10007 d.d = env->vfp.regs[i];
10008 s0.i = d.l.lower;
10009 s1.i = d.l.upper;
10010 d0.f64 = d.d;
10011 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10012 i * 2, (int)s0.i, s0.s,
10013 i * 2 + 1, (int)s1.i, s1.s,
10014 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10015 d0.d);
10017 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10018 #endif
10021 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10023 env->regs[15] = gen_opc_pc[pc_pos];
10024 env->condexec_bits = gen_opc_condexec_bits[pc_pos];