target-arm: Handle UNDEF cases for Neon 3-regs-same insns
[qemu.git] / target-arm / translate.c
blob 5ffbace5ae6f88256683ae45fa6b0c649a966a84
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
47 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
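/* For example, ARCH(6T2) branches to the illegal_op label (so the insn UNDEFs)
   on cores without Thumb-2 (ARM_FEATURE_THUMB2). */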
49 /* internal defines */
50 typedef struct DisasContext {
51 target_ulong pc;
52 int is_jmp;
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
57 /* Thumb-2 conditional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
60 struct TranslationBlock *tb;
61 int singlestep_enabled;
62 int thumb;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
94 #endif
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
100 #include "gen-icount.h"
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
109 int i;
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129 #endif
131 #define GEN_HELPER 2
132 #include "helpers.h"
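/* Load a 32-bit field of the CPU state at the given offset into a new temporary. */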
135 static inline TCGv load_cpu_offset(int offset)
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
144 static inline void store_cpu_offset(TCGv var, int offset)
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
156 if (reg == 15) {
157 uint32_t addr;
158 /* normally, since we updated PC, we need only to add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv var)
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
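/* Write var into the CPSR fields selected by mask, via the cpsr_write helper. */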
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
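/* Generate code that raises exception number excp. */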
208 static void gen_exception(int excp)
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
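/* Dual signed 16x16 multiply: on return a = lo(a)*lo(b) and b = hi(a)*hi(b).
   Both inputs are clobbered. */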
216 static void gen_smul_dual(TCGv a, TCGv b)
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
262 uint32_t signbit;
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
283 /* Return (b << 32) + a. Mark inputs as dead. */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
293 tcg_temp_free_i64(tmp64);
294 return a;
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
307 tcg_temp_free_i64(tmp64);
308 return a;
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
325 return tmp1;
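/* Signed 32x32->64 multiply. Marks inputs as dead. */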
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
339 return tmp1;
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
352 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp;
359 static void gen_add16(TCGv t0, TCGv t1)
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
379 gen_set_CF(tmp);
380 tcg_temp_free_i32(tmp);
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
390 /* T0 += T1 + CF. */
391 static void gen_adc(TCGv t0, TCGv t1)
393 TCGv tmp;
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
413 TCGv tmp;
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
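/* Set the carry flag to bit 'shift' of var (the carry out of an immediate shift). */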
424 static void shifter_out_im(TCGv var, int shift)
426 TCGv tmp = tcg_temp_new_i32();
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
429 } else {
430 tcg_gen_shri_i32(tmp, var, shift);
431 if (shift != 31)
432 tcg_gen_andi_i32(tmp, tmp, 1);
434 gen_set_CF(tmp);
435 tcg_temp_free_i32(tmp);
438 /* Shift by immediate. Includes special handling for shift == 0. */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
476 } else {
477 TCGv tmp = load_cpu_field(CF);
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
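/* Shift var by a register-specified amount. If flags is set, the *_cc helpers
   also update the carry flag. The shift temporary is freed. */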
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
507 tcg_temp_free_i32(shift);
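/* ARM parallel add/subtract (SIMD) operations: op1 selects the signed,
   saturating, unsigned or halving helper prefix, and op2 selects the
   operation expanded by PAS_OP below. */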
510 #define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
521 TCGv_ptr tmp;
523 switch (op1) {
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
529 tcg_temp_free_ptr(tmp);
530 break;
531 case 5:
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
535 tcg_temp_free_ptr(tmp);
536 break;
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551 #undef gen_pas_helper
554 #undef PAS_OP
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
558 switch (op1) { \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
568 TCGv_ptr tmp;
570 switch (op2) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 4:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
601 #undef PAS_OP
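/* Generate a branch to 'label' if condition code cc passes, testing the
   cached NZCV flag values. */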
603 static void gen_test_cc(int cc, int label)
605 TCGv tmp;
606 TCGv tmp2;
607 int inv;
609 switch (cc) {
610 case 0: /* eq: Z */
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
613 break;
614 case 1: /* ne: !Z */
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 4: /* mi: N */
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
629 break;
630 case 5: /* pl: !N */
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
698 tcg_temp_free_i32(tmp);
701 static const uint8_t table_logic_cc[16] = {
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
723 TCGv tmp;
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
730 tcg_temp_free_i32(tmp);
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
771 static inline TCGv gen_ld8s(TCGv addr, int index)
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
777 static inline TCGv gen_ld8u(TCGv addr, int index)
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
783 static inline TCGv gen_ld16s(TCGv addr, int index)
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
789 static inline TCGv gen_ld16u(TCGv addr, int index)
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
795 static inline TCGv gen_ld32(TCGv addr, int index)
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
828 static inline void gen_set_pc_im(uint32_t val)
830 tcg_gen_movi_i32(cpu_R[15], val);
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
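/* Apply the immediate or shifted-register offset of an ARM load/store
   instruction to the address in var. */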
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
843 int val, rm, shift, shiftop;
844 TCGv offset;
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
851 if (val != 0)
852 tcg_gen_addi_i32(var, var, val);
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
862 else
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
869 int extra, TCGv var)
871 int val, rm;
872 TCGv offset;
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
879 val += extra;
880 if (val != 0)
881 tcg_gen_addi_i32(var, var, val);
882 } else {
883 /* register */
884 if (extra)
885 tcg_gen_addi_i32(var, var, extra);
886 rm = (insn) & 0xf;
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
890 else
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
896 #define VFP_OP2(name) \
897 static inline void gen_vfp_##name(int dp) \
899 if (dp) \
900 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
901 else \
902 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
905 VFP_OP2(add)
906 VFP_OP2(sub)
907 VFP_OP2(mul)
908 VFP_OP2(div)
910 #undef VFP_OP2
912 static inline void gen_vfp_abs(int dp)
914 if (dp)
915 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
920 static inline void gen_vfp_neg(int dp)
922 if (dp)
923 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
924 else
925 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
928 static inline void gen_vfp_sqrt(int dp)
930 if (dp)
931 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
932 else
933 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
936 static inline void gen_vfp_cmp(int dp)
938 if (dp)
939 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
944 static inline void gen_vfp_cmpe(int dp)
946 if (dp)
947 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
948 else
949 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
952 static inline void gen_vfp_F1_ld0(int dp)
954 if (dp)
955 tcg_gen_movi_i64(cpu_F1d, 0);
956 else
957 tcg_gen_movi_i32(cpu_F1s, 0);
960 static inline void gen_vfp_uito(int dp)
962 if (dp)
963 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
964 else
965 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
968 static inline void gen_vfp_sito(int dp)
970 if (dp)
971 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
972 else
973 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
976 static inline void gen_vfp_toui(int dp)
978 if (dp)
979 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
984 static inline void gen_vfp_touiz(int dp)
986 if (dp)
987 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
992 static inline void gen_vfp_tosi(int dp)
994 if (dp)
995 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
996 else
997 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1000 static inline void gen_vfp_tosiz(int dp)
1002 if (dp)
1003 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1004 else
1005 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1008 #define VFP_GEN_FIX(name) \
1009 static inline void gen_vfp_##name(int dp, int shift) \
1011 TCGv tmp_shift = tcg_const_i32(shift); \
1012 if (dp) \
1013 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1014 else \
1015 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1016 tcg_temp_free_i32(tmp_shift); \
1018 VFP_GEN_FIX(tosh)
1019 VFP_GEN_FIX(tosl)
1020 VFP_GEN_FIX(touh)
1021 VFP_GEN_FIX(toul)
1022 VFP_GEN_FIX(shto)
1023 VFP_GEN_FIX(slto)
1024 VFP_GEN_FIX(uhto)
1025 VFP_GEN_FIX(ulto)
1026 #undef VFP_GEN_FIX
1028 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1030 if (dp)
1031 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1032 else
1033 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1036 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1038 if (dp)
1039 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1040 else
1041 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1044 static inline long
1045 vfp_reg_offset (int dp, int reg)
1047 if (dp)
1048 return offsetof(CPUARMState, vfp.regs[reg]);
1049 else if (reg & 1) {
1050 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1051 + offsetof(CPU_DoubleU, l.upper);
1052 } else {
1053 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1054 + offsetof(CPU_DoubleU, l.lower);
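/* Thus single-precision register 2n is the low word of vfp.regs[n] and
   register 2n+1 the high word. */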
1058 /* Return the offset of a 32-bit piece of a NEON register.
1059 zero is the least significant end of the register. */
1060 static inline long
1061 neon_reg_offset (int reg, int n)
1063 int sreg;
1064 sreg = reg * 2 + n;
1065 return vfp_reg_offset(0, sreg);
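/* Read one 32-bit pass of a NEON register into a fresh temporary;
   neon_store_reg below writes a temporary back and frees it. */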
1068 static TCGv neon_load_reg(int reg, int pass)
1070 TCGv tmp = tcg_temp_new_i32();
1071 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1072 return tmp;
1075 static void neon_store_reg(int reg, int pass, TCGv var)
1077 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1078 tcg_temp_free_i32(var);
1081 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1083 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1086 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1088 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1091 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1092 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1093 #define tcg_gen_st_f32 tcg_gen_st_i32
1094 #define tcg_gen_st_f64 tcg_gen_st_i64
1096 static inline void gen_mov_F0_vreg(int dp, int reg)
1098 if (dp)
1099 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1100 else
1101 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1104 static inline void gen_mov_F1_vreg(int dp, int reg)
1106 if (dp)
1107 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1108 else
1109 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1112 static inline void gen_mov_vreg_F0(int dp, int reg)
1114 if (dp)
1115 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1116 else
1117 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1120 #define ARM_CP_RW_BIT (1 << 20)
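/* Move 64-bit iwMMXt data registers (wRn) and 32-bit control registers (wCx)
   between the CPU state and TCG temporaries. */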
1122 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1124 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1127 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1129 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1132 static inline TCGv iwmmxt_load_creg(int reg)
1134 TCGv var = tcg_temp_new_i32();
1135 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1136 return var;
1139 static inline void iwmmxt_store_creg(int reg, TCGv var)
1141 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1142 tcg_temp_free_i32(var);
1145 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1147 iwmmxt_store_reg(cpu_M0, rn);
1150 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1152 iwmmxt_load_reg(cpu_M0, rn);
1155 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1157 iwmmxt_load_reg(cpu_V1, rn);
1158 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1161 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1163 iwmmxt_load_reg(cpu_V1, rn);
1164 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1167 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1169 iwmmxt_load_reg(cpu_V1, rn);
1170 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1173 #define IWMMXT_OP(name) \
1174 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1180 #define IWMMXT_OP_SIZE(name) \
1181 IWMMXT_OP(name##b) \
1182 IWMMXT_OP(name##w) \
1183 IWMMXT_OP(name##l)
1185 #define IWMMXT_OP_1(name) \
1186 static inline void gen_op_iwmmxt_##name##_M0(void) \
1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
1191 IWMMXT_OP(maddsq)
1192 IWMMXT_OP(madduq)
1193 IWMMXT_OP(sadb)
1194 IWMMXT_OP(sadw)
1195 IWMMXT_OP(mulslw)
1196 IWMMXT_OP(mulshw)
1197 IWMMXT_OP(mululw)
1198 IWMMXT_OP(muluhw)
1199 IWMMXT_OP(macsw)
1200 IWMMXT_OP(macuw)
1202 IWMMXT_OP_SIZE(unpackl)
1203 IWMMXT_OP_SIZE(unpackh)
1205 IWMMXT_OP_1(unpacklub)
1206 IWMMXT_OP_1(unpackluw)
1207 IWMMXT_OP_1(unpacklul)
1208 IWMMXT_OP_1(unpackhub)
1209 IWMMXT_OP_1(unpackhuw)
1210 IWMMXT_OP_1(unpackhul)
1211 IWMMXT_OP_1(unpacklsb)
1212 IWMMXT_OP_1(unpacklsw)
1213 IWMMXT_OP_1(unpacklsl)
1214 IWMMXT_OP_1(unpackhsb)
1215 IWMMXT_OP_1(unpackhsw)
1216 IWMMXT_OP_1(unpackhsl)
1218 IWMMXT_OP_SIZE(cmpeq)
1219 IWMMXT_OP_SIZE(cmpgtu)
1220 IWMMXT_OP_SIZE(cmpgts)
1222 IWMMXT_OP_SIZE(mins)
1223 IWMMXT_OP_SIZE(minu)
1224 IWMMXT_OP_SIZE(maxs)
1225 IWMMXT_OP_SIZE(maxu)
1227 IWMMXT_OP_SIZE(subn)
1228 IWMMXT_OP_SIZE(addn)
1229 IWMMXT_OP_SIZE(subu)
1230 IWMMXT_OP_SIZE(addu)
1231 IWMMXT_OP_SIZE(subs)
1232 IWMMXT_OP_SIZE(adds)
1234 IWMMXT_OP(avgb0)
1235 IWMMXT_OP(avgb1)
1236 IWMMXT_OP(avgw0)
1237 IWMMXT_OP(avgw1)
1239 IWMMXT_OP(msadb)
1241 IWMMXT_OP(packuw)
1242 IWMMXT_OP(packul)
1243 IWMMXT_OP(packuq)
1244 IWMMXT_OP(packsw)
1245 IWMMXT_OP(packsl)
1246 IWMMXT_OP(packsq)
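/* gen_op_iwmmxt_set_mup sets bit 1 and gen_op_iwmmxt_set_cup sets bit 0 of the
   wCon control register, flagging iwMMXt register-file and control updates. */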
1248 static void gen_op_iwmmxt_set_mup(void)
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1256 static void gen_op_iwmmxt_set_cup(void)
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1264 static void gen_op_iwmmxt_setpsr_nz(void)
1266 TCGv tmp = tcg_temp_new_i32();
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1271 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1273 iwmmxt_load_reg(cpu_V1, rn);
1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
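/* Compute the effective address for an iwMMXt load/store into dest, handling
   pre- and post-indexed forms with optional base writeback. Returns nonzero
   if the addressing mode is invalid. */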
1278 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1280 int rd;
1281 uint32_t offset;
1282 TCGv tmp;
1284 rd = (insn >> 16) & 0xf;
1285 tmp = load_reg(s, rd);
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
1291 tcg_gen_addi_i32(tmp, tmp, offset);
1292 else
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
1295 if (insn & (1 << 21))
1296 store_reg(s, rd, tmp);
1297 else
1298 tcg_temp_free_i32(tmp);
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
1301 tcg_gen_mov_i32(dest, tmp);
1302 if (insn & (1 << 23))
1303 tcg_gen_addi_i32(tmp, tmp, offset);
1304 else
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
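/* Fetch the shift amount for an iwMMXt shift/rotate into dest: from a wCGR
   control register when bit 8 of the insn is set, otherwise from the low half
   of wRd; the value is masked with 'mask'. Returns nonzero for an invalid
   register. */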
1312 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1314 int rd = (insn >> 0) & 0xf;
1315 TCGv tmp;
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1319 return 1;
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1323 } else {
1324 tmp = tcg_temp_new_i32();
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
1330 tcg_temp_free_i32(tmp);
1331 return 0;
1334 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1335 (i.e. an undefined instruction). */
1336 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1353 } else { /* TMCRR */
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
1356 gen_op_iwmmxt_set_mup();
1358 return 0;
1361 wrd = (insn >> 12) & 0xf;
1362 addr = tcg_temp_new_i32();
1363 if (gen_iwmmxt_address(s, insn, addr)) {
1364 tcg_temp_free_i32(addr);
1365 return 1;
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1369 tmp = tcg_temp_new_i32();
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
1372 } else {
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1377 i = 0;
1378 } else { /* WLDRW wRd */
1379 tmp = gen_ld32(addr, IS_USER(s));
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
1383 tmp = gen_ld16u(addr, IS_USER(s));
1384 } else { /* WLDRB */
1385 tmp = gen_ld8u(addr, IS_USER(s));
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1390 tcg_temp_free_i32(tmp);
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
1400 tmp = tcg_temp_new_i32();
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
1403 tcg_temp_free_i32(tmp);
1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1407 gen_st32(tmp, addr, IS_USER(s));
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st16(tmp, addr, IS_USER(s));
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1415 gen_st8(tmp, addr, IS_USER(s));
1420 tcg_temp_free_i32(addr);
1421 return 0;
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
1455 tcg_temp_free_i32(tmp2);
1456 iwmmxt_store_creg(wrd, tmp);
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
1465 break;
1466 default:
1467 return 1;
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1668 tcg_temp_free_i32(tmp);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
1677 tmp = load_reg(s, rd);
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
1683 break;
1684 case 1:
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
1687 break;
1688 case 2:
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
1691 break;
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
1699 tcg_temp_free_i32(tmp);
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
1709 tmp = tcg_temp_new_i32();
1710 switch ((insn >> 22) & 3) {
1711 case 0:
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
1719 break;
1720 case 1:
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1728 break;
1729 case 2:
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1732 break;
1734 store_reg(s, rd, tmp);
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1738 return 1;
1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1740 switch ((insn >> 22) & 3) {
1741 case 0:
1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1743 break;
1744 case 1:
1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1746 break;
1747 case 2:
1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1749 break;
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
1753 tcg_temp_free_i32(tmp);
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 tmp = load_reg(s, rd);
1761 switch ((insn >> 6) & 3) {
1762 case 0:
1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1764 break;
1765 case 1:
1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1767 break;
1768 case 2:
1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1770 break;
1772 tcg_temp_free_i32(tmp);
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1778 return 1;
1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1780 tmp2 = tcg_temp_new_i32();
1781 tcg_gen_mov_i32(tmp2, tmp);
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
1794 break;
1795 case 2:
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 break;
1800 gen_set_nzcv(tmp);
1801 tcg_temp_free_i32(tmp2);
1802 tcg_temp_free_i32(tmp);
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1811 break;
1812 case 1:
1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1814 break;
1815 case 2:
1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1817 break;
1818 case 3:
1819 return 1;
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1826 return 1;
1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1828 tmp2 = tcg_temp_new_i32();
1829 tcg_gen_mov_i32(tmp2, tmp);
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
1842 break;
1843 case 2:
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 break;
1848 gen_set_nzcv(tmp);
1849 tcg_temp_free_i32(tmp2);
1850 tcg_temp_free_i32(tmp);
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 tmp = tcg_temp_new_i32();
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1862 break;
1863 case 1:
1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1865 break;
1866 case 2:
1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1868 break;
1870 store_reg(s, rd, tmp);
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tmp = tcg_temp_new_i32();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 tcg_temp_free_i32(tmp);
1976 return 1;
1978 switch ((insn >> 22) & 3) {
1979 case 1:
1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
1984 break;
1985 case 3:
1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
1987 break;
1989 tcg_temp_free_i32(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 tmp = tcg_temp_new_i32();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 tcg_temp_free_i32(tmp);
2004 return 1;
2006 switch ((insn >> 22) & 3) {
2007 case 1:
2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
2009 break;
2010 case 2:
2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
2012 break;
2013 case 3:
2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
2015 break;
2017 tcg_temp_free_i32(tmp);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 tmp = tcg_temp_new_i32();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 tcg_temp_free_i32(tmp);
2032 return 1;
2034 switch ((insn >> 22) & 3) {
2035 case 1:
2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
2037 break;
2038 case 2:
2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
2040 break;
2041 case 3:
2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
2043 break;
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 tmp = tcg_temp_new_i32();
2058 switch ((insn >> 22) & 3) {
2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 tcg_temp_free_i32(tmp);
2062 return 1;
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 tcg_temp_free_i32(tmp);
2069 return 1;
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 tcg_temp_free_i32(tmp);
2076 return 1;
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
2079 break;
2081 tcg_temp_free_i32(tmp);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
2324 return 1;
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2335 return 0;
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2351 if (acc != 0)
2352 return 1;
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2389 if (acc != 0)
2390 return 1;
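/* The DSP accumulator acc0 is 40 bits wide: MRA below masks the
 * high word down to bits [39:32], and MAR repacks rdlo:rdhi into
 * the 64-bit internal register.
 */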
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2402 return 0;
2405 return 1;
2408 /* Disassemble system coprocessor instruction. Return nonzero if
2409 the instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = tcg_temp_new_i32();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 tcg_temp_free_i32(tmp);
2438 return 0;
2441 static int cp15_user_ok(uint32_t insn)
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2458 return 0;
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2488 store_reg(s, rd, tmp);
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 tcg_temp_free_i32(tmp);
2504 return 0;
2507 return 1;
2510 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2511 the instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2539 */
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2543 */
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2549 return 0;
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2555 */
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2565 */
2568 rd = (insn >> 12) & 0xf;
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2573 tmp2 = tcg_const_i32(insn);
2574 if (insn & ARM_CP_RW_BIT) {
2575 tmp = tcg_temp_new_i32();
2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2577 /* If the destination register is r15 then this sets the condition codes. */
2578 if (rd != 15)
2579 store_reg(s, rd, tmp);
2580 else
2581 tcg_temp_free_i32(tmp);
2582 } else {
2583 tmp = load_reg(s, rd);
2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2585 tcg_temp_free_i32(tmp);
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
2593 tcg_temp_free_i32(tmp2);
2594 return 0;
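/* The VFP register fields below split a register number across a 4-bit
 * field plus one extra bit.  For single precision the extra bit is the
 * low bit (S0..S31); for double precision it is the high bit (D16..D31
 * exist only with VFP3, so a set "smallbit" is UNDEF on earlier cores).
 * E.g. VFP_DREG_D with insn[15:12] = 0x5 and insn[22] = 1 yields D21 on
 * a VFP3 core and returns 1 (UNDEF) otherwise.
 */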
2597 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598 #define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2610 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2617 /* Move between integer and VFP cores. */
2618 static TCGv gen_vfp_mrs(void)
2620 TCGv tmp = tcg_temp_new_i32();
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2625 static void gen_vfp_msr(TCGv tmp)
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
2628 tcg_temp_free_i32(tmp);
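/* The gen_neon_dup_* helpers replicate one element across a 32-bit value:
 * e.g. gen_neon_dup_u8 turns 0x000000ab into 0xabababab, and
 * gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd.
 */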
2631 static void gen_neon_dup_u8(TCGv var, int shift)
2633 TCGv tmp = tcg_temp_new_i32();
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
2636 tcg_gen_ext8u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 tcg_temp_free_i32(tmp);
2644 static void gen_neon_dup_low16(TCGv var)
2646 TCGv tmp = tcg_temp_new_i32();
2647 tcg_gen_ext16u_i32(var, var);
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
2650 tcg_temp_free_i32(tmp);
2653 static void gen_neon_dup_high16(TCGv var)
2655 TCGv tmp = tcg_temp_new_i32();
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
2659 tcg_temp_free_i32(tmp);
2662 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2664 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2681 return tmp;
2684 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
2690 TCGv addr;
2691 TCGv tmp;
2692 TCGv tmp2;
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2697 if (!s->vfp_enabled) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2760 break;
2761 case 2:
2762 break;
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 tcg_temp_free_i32(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 tcg_temp_free_i32(tmp2);
2793 break;
2794 case 2:
2795 break;
2797 neon_store_reg(rn, pass, tmp);
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all id registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 tcg_temp_free_i32(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 tcg_temp_free_i32(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2925 */
2926 rm = VFP_SREG_M(insn);
2927 } else {
2928 VFP_DREG_M(rm, insn);
2930 } else {
2931 rn = VFP_SREG_N(insn);
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2940 */
2941 rm = VFP_SREG_M(insn);
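/* VFP short-vector handling: s->vec_len and s->vec_stride cache the FPSCR
 * LEN and STRIDE fields.  An operation whose destination is in register
 * bank 0 is always scalar; otherwise it is repeated veclen times, stepping
 * rd (and rn) by delta_d within their bank and rm by delta_m, with
 * delta_m == 0 for the mixed scalar/vector case (rm in bank 0).
 */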
2944 veclen = s->vec_len;
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
2965 delta_d = (s->vec_stride >> 1) + 1;
2966 else
2967 delta_d = s->vec_stride + 1;
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
3013 break;
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_vfp_neg(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
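/* Case 14 below is the VFP3 VMOV-immediate form: the 8-bit immediate
 * (sign in insn[19], the remaining bits in insn[18:16] and insn[3:0]) is
 * expanded as per VFPExpandImm in the ARM ARM into a full single or
 * double constant; e.g. an immediate of 0x70 expands to 0x3f800000 (1.0f).
 */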
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
3081 tcg_gen_movi_i32(cpu_F0s, n);
3083 break;
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3112 tcg_temp_free_i32(tmp);
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
3117 tmp = tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
3123 tcg_temp_free_i32(tmp2);
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
3136 tcg_temp_free_i32(tmp2);
3137 gen_vfp_msr(tmp);
3138 break;
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3155 else
3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_shto(dp, 16 - rm);
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_slto(dp, 32 - rm);
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
3177 gen_vfp_uhto(dp, 16 - rm);
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_ulto(dp, 32 - rm);
3183 break;
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199 gen_vfp_tosh(dp, 16 - rm);
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
3204 gen_vfp_tosl(dp, 32 - rm);
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
3209 gen_vfp_touh(dp, 16 - rm);
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
3214 gen_vfp_toul(dp, 32 - rm);
3215 break;
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3249 break;
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3274 break;
3275 case 0xc:
3276 case 0xd:
3277 if ((insn & 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3287 if (insn & ARM_CP_RW_BIT) {
3288 /* vfp->arm */
3289 if (dp) {
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
3298 tmp = gen_vfp_mrs();
3299 store_reg(s, rd, tmp);
3300 gen_mov_F0_vreg(0, rm + 1);
3301 tmp = gen_vfp_mrs();
3302 store_reg(s, rn, tmp);
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
3313 } else {
3314 tmp = load_reg(s, rd);
3315 gen_vfp_msr(tmp);
3316 gen_mov_vreg_F0(0, rm);
3317 tmp = load_reg(s, rn);
3318 gen_vfp_msr(tmp);
3319 gen_mov_vreg_F0(0, rm + 1);
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
3326 VFP_DREG_D(rd, insn);
3327 else
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
3330 addr = tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr, s->pc & ~2);
3332 } else {
3333 addr = load_reg(s, rn);
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
3340 tcg_gen_addi_i32(addr, addr, offset);
3341 if (insn & (1 << 20)) {
3342 gen_vfp_ld(s, dp, addr);
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_st(s, dp, addr);
3348 tcg_temp_free_i32(addr);
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3356 if (insn & (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
3364 if (insn & ARM_CP_RW_BIT) {
3365 /* load */
3366 gen_vfp_ld(s, dp, addr);
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
3371 gen_vfp_st(s, dp, addr);
3373 tcg_gen_addi_i32(addr, addr, offset);
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3384 if (offset != 0)
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
3388 tcg_temp_free_i32(addr);
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3397 return 0;
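/* Direct block chaining (goto_tb plus a non-zero exit_tb value) is only
 * used when the destination is on the same guest page as this TB, so that
 * the link remains valid if either page is remapped; otherwise we just set
 * the PC and exit back to the main loop.
 */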
3400 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3402 TranslationBlock *tb;
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3406 tcg_gen_goto_tb(n);
3407 gen_set_pc_im(dest);
3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
3409 } else {
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb(0);
3415 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3417 if (unlikely(s->singlestep_enabled)) {
3418 /* An indirect jump so that we still trigger the debug exception. */
3419 if (s->thumb)
3420 dest |= 1;
3421 gen_bx_im(s, dest);
3422 } else {
3423 gen_goto_tb(s, 0, dest);
3424 s->is_jmp = DISAS_TB_JUMP;
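/* Helper for the signed 16x16->32 multiplies (SMULxy and friends): x and y
 * select the top or bottom halfword of each operand, which is sign-extended
 * before the multiply.
 */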
3428 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3430 if (x)
3431 tcg_gen_sari_i32(t0, t0, 16);
3432 else
3433 gen_sxth(t0);
3434 if (y)
3435 tcg_gen_sari_i32(t1, t1, 16);
3436 else
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
3441 /* Return the mask of PSR bits set by a MSR instruction. */
3442 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3443 uint32_t mask;
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
3455 /* Mask out undefined bits. */
3456 mask &= ~CPSR_RESERVED;
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
3461 if (!arm_feature(env, ARM_FEATURE_V6))
3462 mask &= ~(CPSR_E | CPSR_GE);
3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3464 mask &= ~CPSR_IT;
3465 /* Mask out execution state bits. */
3466 if (!spsr)
3467 mask &= ~CPSR_EXEC;
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
3470 mask &= CPSR_USER;
3471 return mask;
3474 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3477 TCGv tmp;
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
3487 store_cpu_field(tmp, spsr);
3488 } else {
3489 gen_set_cpsr(t0, mask);
3491 tcg_temp_free_i32(t0);
3492 gen_lookup_tb(s);
3493 return 0;
3496 /* Returns nonzero if access to the PSR is not permitted. */
3497 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3499 TCGv tmp;
3500 tmp = tcg_temp_new_i32();
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3505 /* Generate an old-style exception return. Marks pc as dead. */
3506 static void gen_exception_return(DisasContext *s, TCGv pc)
3508 TCGv tmp;
3509 store_reg(s, 15, pc);
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
3512 tcg_temp_free_i32(tmp);
3513 s->is_jmp = DISAS_UPDATE;
3516 /* Generate a v6 exception return. Marks both values as dead. */
3517 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3519 gen_set_cpsr(cpsr, 0xffffffff);
3520 tcg_temp_free_i32(cpsr);
3521 store_reg(s, 15, pc);
3522 s->is_jmp = DISAS_UPDATE;
3525 static inline void
3526 gen_set_condexec (DisasContext *s)
3528 if (s->condexec_mask) {
3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3530 TCGv tmp = tcg_temp_new_i32();
3531 tcg_gen_movi_i32(tmp, val);
3532 store_cpu_field(tmp, condexec_bits);
3536 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3544 static void gen_nop_hint(DisasContext *s, int val)
3546 switch (val) {
3547 case 3: /* wfi */
3548 gen_set_pc_im(s->pc);
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
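/* Shorthand for the common helper argument pattern "(cpu_V0, cpu_V0,
 * cpu_V1)", i.e. cpu_V0 is both the destination and the first source.
 */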
3559 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3561 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3563 switch (size) {
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3567 default: abort();
3571 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3573 switch (size) {
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3577 default: return;
3581 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3582 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
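/* The GEN_NEON_INTEGER_OP* macros dispatch on ((size << 1) | u) to pick
 * the s8/u8/s16/u16/s32/u32 variant of a helper operating on tmp and tmp2;
 * e.g. size == 1, u == 0 selects case 2, the _s16 helper.  size == 3
 * (64-bit elements) has no case here and makes the enclosing function
 * return 1 (UNDEF).
 */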
3587 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3591 break; \
3592 case 1: \
3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3594 break; \
3595 case 2: \
3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3597 break; \
3598 case 3: \
3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3600 break; \
3601 case 4: \
3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3603 break; \
3604 case 5: \
3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3606 break; \
3607 default: return 1; \
3608 }} while (0)
3610 #define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
3612 case 0: \
3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3614 break; \
3615 case 1: \
3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3617 break; \
3618 case 2: \
3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3620 break; \
3621 case 3: \
3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3623 break; \
3624 case 4: \
3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3626 break; \
3627 case 5: \
3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3629 break; \
3630 default: return 1; \
3631 }} while (0)
3633 static TCGv neon_load_scratch(int scratch)
3635 TCGv tmp = tcg_temp_new_i32();
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
3640 static void neon_store_scratch(int scratch, TCGv var)
3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3643 tcg_temp_free_i32(var);
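/* Load a Neon scalar operand: 'reg' packs the register number together
 * with the element index.  For 16-bit scalars the selected halfword is
 * duplicated into both halves of the returned 32-bit value.
 */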
3646 static inline TCGv neon_get_scalar(int size, int reg)
3648 TCGv tmp;
3649 if (size == 1) {
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
3652 gen_neon_dup_high16(tmp);
3653 } else {
3654 gen_neon_dup_low16(tmp);
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
3659 return tmp;
3662 static int gen_neon_unzip(int rd, int rm, int size, int q)
3664 TCGv tmp, tmp2;
3665 if (size == 3 || (!q && size == 2)) {
3666 return 1;
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
3673 gen_helper_neon_qunzip8(tmp, tmp2);
3674 break;
3675 case 1:
3676 gen_helper_neon_qunzip16(tmp, tmp2);
3677 break;
3678 case 2:
3679 gen_helper_neon_qunzip32(tmp, tmp2);
3680 break;
3681 default:
3682 abort();
3684 } else {
3685 switch (size) {
3686 case 0:
3687 gen_helper_neon_unzip8(tmp, tmp2);
3688 break;
3689 case 1:
3690 gen_helper_neon_unzip16(tmp, tmp2);
3691 break;
3692 default:
3693 abort();
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
3701 static int gen_neon_zip(int rd, int rm, int size, int q)
3703 TCGv tmp, tmp2;
3704 if (size == 3 || (!q && size == 2)) {
3705 return 1;
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
3712 gen_helper_neon_qzip8(tmp, tmp2);
3713 break;
3714 case 1:
3715 gen_helper_neon_qzip16(tmp, tmp2);
3716 break;
3717 case 2:
3718 gen_helper_neon_qzip32(tmp, tmp2);
3719 break;
3720 default:
3721 abort();
3723 } else {
3724 switch (size) {
3725 case 0:
3726 gen_helper_neon_zip8(tmp, tmp2);
3727 break;
3728 case 1:
3729 gen_helper_neon_zip16(tmp, tmp2);
3730 break;
3731 default:
3732 abort();
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
3740 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3742 TCGv rd, tmp;
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
3762 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3764 TCGv rd, tmp;
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
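/* Properties of each VLDn/VSTn (multiple structures) 'type' encoding: the
 * number of D registers accessed, the element interleave factor, and the
 * register spacing (1 = consecutive D registers, 2 = every other one).
 * Indexed by insn[11:8] in disas_neon_ls_insn() below; values above 10
 * are UNDEF.
 */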
3782 static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786 } neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3800 /* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
3808 int spacing;
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
3815 int n;
3816 TCGv addr;
3817 TCGv tmp;
3818 TCGv tmp2;
3819 TCGv_i64 tmp64;
3821 if (!s->vfp_enabled)
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828 /* Load/store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
3831 if (op > 10)
3832 return 1;
3833 nregs = neon_ls_element_type[op].nregs;
3834 interleave = neon_ls_element_type[op].interleave;
3835 spacing = neon_ls_element_type[op].spacing;
3836 if (size == 3 && (interleave | spacing) != 1)
3837 return 1;
3838 addr = tcg_temp_new_i32();
3839 load_reg_var(s, addr, rn);
3840 stride = (1 << size) * interleave;
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3845 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3846 load_reg_var(s, addr, rn);
3847 tcg_gen_addi_i32(addr, addr, 1 << size);
3849 if (size == 3) {
3850 if (load) {
3851 tmp64 = gen_ld64(addr, IS_USER(s));
3852 neon_store_reg64(tmp64, rd);
3853 tcg_temp_free_i64(tmp64);
3854 } else {
3855 tmp64 = tcg_temp_new_i64();
3856 neon_load_reg64(tmp64, rd);
3857 gen_st64(tmp64, addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 } else {
3861 for (pass = 0; pass < 2; pass++) {
3862 if (size == 2) {
3863 if (load) {
3864 tmp = gen_ld32(addr, IS_USER(s));
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 gen_st32(tmp, addr, IS_USER(s));
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 } else if (size == 1) {
3872 if (load) {
3873 tmp = gen_ld16u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 tmp2 = gen_ld16u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
3877 tcg_gen_shli_i32(tmp2, tmp2, 16);
3878 tcg_gen_or_i32(tmp, tmp, tmp2);
3879 tcg_temp_free_i32(tmp2);
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
3883 tmp2 = tcg_temp_new_i32();
3884 tcg_gen_shri_i32(tmp2, tmp, 16);
3885 gen_st16(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 gen_st16(tmp2, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3890 } else /* size == 0 */ {
3891 if (load) {
3892 TCGV_UNUSED(tmp2);
3893 for (n = 0; n < 4; n++) {
3894 tmp = gen_ld8u(addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 if (n == 0) {
3897 tmp2 = tmp;
3898 } else {
3899 tcg_gen_shli_i32(tmp, tmp, n * 8);
3900 tcg_gen_or_i32(tmp2, tmp2, tmp);
3901 tcg_temp_free_i32(tmp);
3904 neon_store_reg(rd, pass, tmp2);
3905 } else {
3906 tmp2 = neon_load_reg(rd, pass);
3907 for (n = 0; n < 4; n++) {
3908 tmp = tcg_temp_new_i32();
3909 if (n == 0) {
3910 tcg_gen_mov_i32(tmp, tmp2);
3911 } else {
3912 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3914 gen_st8(tmp, addr, IS_USER(s));
3915 tcg_gen_addi_i32(addr, addr, stride);
3917 tcg_temp_free_i32(tmp2);
3922 rd += spacing;
3924 tcg_temp_free_i32(addr);
3925 stride = nregs * 8;
3926 } else {
3927 size = (insn >> 10) & 3;
3928 if (size == 3) {
3929 /* Load single element to all lanes. */
3930 int a = (insn >> 4) & 1;
3931 if (!load) {
3932 return 1;
3934 size = (insn >> 6) & 3;
3935 nregs = ((insn >> 8) & 3) + 1;
3937 if (size == 3) {
3938 if (nregs != 4 || a == 0) {
3939 return 1;
3941 /* For VLD4 size==3 a == 1 means 32-bit elements at 16-byte alignment */
3942 size = 2;
3944 if (nregs == 1 && a == 1 && size == 0) {
3945 return 1;
3947 if (nregs == 3 && a == 1) {
3948 return 1;
3950 addr = tcg_temp_new_i32();
3951 load_reg_var(s, addr, rn);
3952 if (nregs == 1) {
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp = gen_load_and_replicate(s, addr, size);
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3957 if (insn & (1 << 5)) {
3958 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3961 tcg_temp_free_i32(tmp);
3962 } else {
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 for (reg = 0; reg < nregs; reg++) {
3966 tmp = gen_load_and_replicate(s, addr, size);
3967 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3969 tcg_temp_free_i32(tmp);
3970 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 rd += stride;
3974 tcg_temp_free_i32(addr);
3975 stride = (1 << size) * nregs;
3976 } else {
3977 /* Single element. */
3978 pass = (insn >> 7) & 1;
3979 switch (size) {
3980 case 0:
3981 shift = ((insn >> 5) & 3) * 8;
3982 stride = 1;
3983 break;
3984 case 1:
3985 shift = ((insn >> 6) & 1) * 16;
3986 stride = (insn & (1 << 5)) ? 2 : 1;
3987 break;
3988 case 2:
3989 shift = 0;
3990 stride = (insn & (1 << 6)) ? 2 : 1;
3991 break;
3992 default:
3993 abort();
3995 nregs = ((insn >> 8) & 3) + 1;
3996 addr = tcg_temp_new_i32();
3997 load_reg_var(s, addr, rn);
3998 for (reg = 0; reg < nregs; reg++) {
3999 if (load) {
4000 switch (size) {
4001 case 0:
4002 tmp = gen_ld8u(addr, IS_USER(s));
4003 break;
4004 case 1:
4005 tmp = gen_ld16u(addr, IS_USER(s));
4006 break;
4007 case 2:
4008 tmp = gen_ld32(addr, IS_USER(s));
4009 break;
4010 default: /* Avoid compiler warnings. */
4011 abort();
4013 if (size != 2) {
4014 tmp2 = neon_load_reg(rd, pass);
4015 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4016 tcg_temp_free_i32(tmp2);
4018 neon_store_reg(rd, pass, tmp);
4019 } else { /* Store */
4020 tmp = neon_load_reg(rd, pass);
4021 if (shift)
4022 tcg_gen_shri_i32(tmp, tmp, shift);
4023 switch (size) {
4024 case 0:
4025 gen_st8(tmp, addr, IS_USER(s));
4026 break;
4027 case 1:
4028 gen_st16(tmp, addr, IS_USER(s));
4029 break;
4030 case 2:
4031 gen_st32(tmp, addr, IS_USER(s));
4032 break;
4035 rd += stride;
4036 tcg_gen_addi_i32(addr, addr, 1 << size);
4038 tcg_temp_free_i32(addr);
4039 stride = nregs * (1 << size);
4042 if (rm != 15) {
4043 TCGv base;
4045 base = load_reg(s, rn);
4046 if (rm == 13) {
4047 tcg_gen_addi_i32(base, base, stride);
4048 } else {
4049 TCGv index;
4050 index = load_reg(s, rm);
4051 tcg_gen_add_i32(base, base, index);
4052 tcg_temp_free_i32(index);
4054 store_reg(s, rn, base);
4056 return 0;
4059 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4060 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4062 tcg_gen_and_i32(t, t, c);
4063 tcg_gen_andc_i32(f, f, c);
4064 tcg_gen_or_i32(dest, t, f);
4067 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4069 switch (size) {
4070 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4071 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4072 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4073 default: abort();
4077 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4079 switch (size) {
4080 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4081 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4082 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4083 default: abort();
4087 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4089 switch (size) {
4090 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4091 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4092 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4093 default: abort();
4097 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4099 switch (size) {
4100 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4101 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4102 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4103 default: abort();
4107 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4108 int q, int u)
4110 if (q) {
4111 if (u) {
4112 switch (size) {
4113 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4114 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4115 default: abort();
4117 } else {
4118 switch (size) {
4119 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4120 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4121 default: abort();
4124 } else {
4125 if (u) {
4126 switch (size) {
4127 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4128 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4129 default: abort();
4131 } else {
4132 switch (size) {
4133 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4134 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4135 default: abort();
4141 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4143 if (u) {
4144 switch (size) {
4145 case 0: gen_helper_neon_widen_u8(dest, src); break;
4146 case 1: gen_helper_neon_widen_u16(dest, src); break;
4147 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4148 default: abort();
4150 } else {
4151 switch (size) {
4152 case 0: gen_helper_neon_widen_s8(dest, src); break;
4153 case 1: gen_helper_neon_widen_s16(dest, src); break;
4154 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4155 default: abort();
4158 tcg_temp_free_i32(src);
4161 static inline void gen_neon_addl(int size)
4163 switch (size) {
4164 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4165 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4166 case 2: tcg_gen_add_i64(CPU_V001); break;
4167 default: abort();
4171 static inline void gen_neon_subl(int size)
4173 switch (size) {
4174 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4175 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4176 case 2: tcg_gen_sub_i64(CPU_V001); break;
4177 default: abort();
4181 static inline void gen_neon_negl(TCGv_i64 var, int size)
4183 switch (size) {
4184 case 0: gen_helper_neon_negl_u16(var, var); break;
4185 case 1: gen_helper_neon_negl_u32(var, var); break;
4186 case 2: gen_helper_neon_negl_u64(var, var); break;
4187 default: abort();
4191 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4193 switch (size) {
4194 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4195 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4196 default: abort();
4200 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4202 TCGv_i64 tmp;
4204 switch ((size << 1) | u) {
4205 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4206 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4207 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4208 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4209 case 4:
4210 tmp = gen_muls_i64_i32(a, b);
4211 tcg_gen_mov_i64(dest, tmp);
4212 tcg_temp_free_i64(tmp);
4213 break;
4214 case 5:
4215 tmp = gen_mulu_i64_i32(a, b);
4216 tcg_gen_mov_i64(dest, tmp);
4217 tcg_temp_free_i64(tmp);
4218 break;
4219 default: abort();
4222 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4223 Don't forget to clean them now. */
4224 if (size < 2) {
4225 tcg_temp_free_i32(a);
4226 tcg_temp_free_i32(b);
4230 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4232 if (op) {
4233 if (u) {
4234 gen_neon_unarrow_sats(size, dest, src);
4235 } else {
4236 gen_neon_narrow(size, dest, src);
4238 } else {
4239 if (u) {
4240 gen_neon_narrow_satu(size, dest, src);
4241 } else {
4242 gen_neon_narrow_sats(size, dest, src);
4247 /* Symbolic constants for op fields for Neon 3-register same-length.
4248 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4249 * table A7-9.
4250 */
4251 #define NEON_3R_VHADD 0
4252 #define NEON_3R_VQADD 1
4253 #define NEON_3R_VRHADD 2
4254 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4255 #define NEON_3R_VHSUB 4
4256 #define NEON_3R_VQSUB 5
4257 #define NEON_3R_VCGT 6
4258 #define NEON_3R_VCGE 7
4259 #define NEON_3R_VSHL 8
4260 #define NEON_3R_VQSHL 9
4261 #define NEON_3R_VRSHL 10
4262 #define NEON_3R_VQRSHL 11
4263 #define NEON_3R_VMAX 12
4264 #define NEON_3R_VMIN 13
4265 #define NEON_3R_VABD 14
4266 #define NEON_3R_VABA 15
4267 #define NEON_3R_VADD_VSUB 16
4268 #define NEON_3R_VTST_VCEQ 17
4269 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4270 #define NEON_3R_VMUL 19
4271 #define NEON_3R_VPMAX 20
4272 #define NEON_3R_VPMIN 21
4273 #define NEON_3R_VQDMULH_VQRDMULH 22
4274 #define NEON_3R_VPADD 23
4275 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4276 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4277 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4278 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4279 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4280 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
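/* For each 3-reg-same op, a bitmask of the valid 'size' field values:
 * bit n set means size == n is allowed.  0x7 permits the 8/16/32-bit
 * element forms but not 64-bit, 0xf permits all four, and the float ops
 * use 0x5 because size bit 1 is reused as part of the opcode.
 */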
4282 static const uint8_t neon_3r_sizes[] = {
4283 [NEON_3R_VHADD] = 0x7,
4284 [NEON_3R_VQADD] = 0xf,
4285 [NEON_3R_VRHADD] = 0x7,
4286 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4287 [NEON_3R_VHSUB] = 0x7,
4288 [NEON_3R_VQSUB] = 0xf,
4289 [NEON_3R_VCGT] = 0x7,
4290 [NEON_3R_VCGE] = 0x7,
4291 [NEON_3R_VSHL] = 0xf,
4292 [NEON_3R_VQSHL] = 0xf,
4293 [NEON_3R_VRSHL] = 0xf,
4294 [NEON_3R_VQRSHL] = 0xf,
4295 [NEON_3R_VMAX] = 0x7,
4296 [NEON_3R_VMIN] = 0x7,
4297 [NEON_3R_VABD] = 0x7,
4298 [NEON_3R_VABA] = 0x7,
4299 [NEON_3R_VADD_VSUB] = 0xf,
4300 [NEON_3R_VTST_VCEQ] = 0x7,
4301 [NEON_3R_VML] = 0x7,
4302 [NEON_3R_VMUL] = 0x7,
4303 [NEON_3R_VPMAX] = 0x7,
4304 [NEON_3R_VPMIN] = 0x7,
4305 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4306 [NEON_3R_VPADD] = 0x7,
4307 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4308 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4309 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4310 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4311 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4312 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4315 /* Translate a NEON data processing instruction. Return nonzero if the
4316 instruction is invalid.
4317 We process data in a mixture of 32-bit and 64-bit chunks.
4318 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4320 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4322 int op;
4323 int q;
4324 int rd, rn, rm;
4325 int size;
4326 int shift;
4327 int pass;
4328 int count;
4329 int pairwise;
4330 int u;
4331 int n;
4332 uint32_t imm, mask;
4333 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4334 TCGv_i64 tmp64;
4336 if (!s->vfp_enabled)
4337 return 1;
4338 q = (insn & (1 << 6)) != 0;
4339 u = (insn >> 24) & 1;
4340 VFP_DREG_D(rd, insn);
4341 VFP_DREG_N(rn, insn);
4342 VFP_DREG_M(rm, insn);
4343 size = (insn >> 20) & 3;
4344 if ((insn & (1 << 23)) == 0) {
4345 /* Three register same length. */
4346 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4347 /* Catch invalid op and bad size combinations: UNDEF */
4348 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4349 return 1;
4351 /* All insns of this form UNDEF for either this condition or the
4352 * superset of cases "Q==1"; we catch the latter later.
4353 */
4354 if (q && ((rd | rn | rm) & 1)) {
4355 return 1;
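/* Only ops whose neon_3r_sizes entry is 0xf can reach here with size == 3
 * (64-bit elements): VQADD/VQSUB, the four shift ops and VADD/VSUB.
 * NEON_3R_LOGIC also allows size == 3 but reuses the size field as part of
 * its opcode, so it is excluded and handled via the 32-bit path; anything
 * else was rejected above, hence the abort() default below.
 */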
4357 if (size == 3 && op != NEON_3R_LOGIC) {
4358 /* 64-bit element instructions. */
4359 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4360 neon_load_reg64(cpu_V0, rn + pass);
4361 neon_load_reg64(cpu_V1, rm + pass);
4362 switch (op) {
4363 case NEON_3R_VQADD:
4364 if (u) {
4365 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4366 } else {
4367 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4369 break;
4370 case NEON_3R_VQSUB:
4371 if (u) {
4372 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4373 } else {
4374 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4376 break;
4377 case NEON_3R_VSHL:
4378 if (u) {
4379 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4380 } else {
4381 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4383 break;
4384 case NEON_3R_VQSHL:
4385 if (u) {
4386 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4387 } else {
4388 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4390 break;
4391 case NEON_3R_VRSHL:
4392 if (u) {
4393 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4394 } else {
4395 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4397 break;
4398 case NEON_3R_VQRSHL:
4399 if (u) {
4400 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4401 } else {
4402 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4404 break;
4405 case NEON_3R_VADD_VSUB:
4406 if (u) {
4407 tcg_gen_sub_i64(CPU_V001);
4408 } else {
4409 tcg_gen_add_i64(CPU_V001);
4411 break;
4412 default:
4413 abort();
4415 neon_store_reg64(cpu_V0, rd + pass);
4417 return 0;
4419 pairwise = 0;
4420 switch (op) {
4421 case NEON_3R_VSHL:
4422 case NEON_3R_VQSHL:
4423 case NEON_3R_VRSHL:
4424 case NEON_3R_VQRSHL:
4426 int rtmp;
4427 /* Shift instruction operands are reversed. */
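/* (For the register-shift forms the value to be shifted comes from the
 * Vm operand and the per-element shift counts from Vn, i.e.
 * Dd = Dm <op> Dn; swapping rn and rm here lets the common per-element
 * code below keep its usual "first operand is the value" convention.)
 */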
4428 rtmp = rn;
4429 rn = rm;
4430 rm = rtmp;
4432 break;
4433 case NEON_3R_VPADD:
4434 if (u) {
4435 return 1;
4437 /* Fall through */
4438 case NEON_3R_VPMAX:
4439 case NEON_3R_VPMIN:
4440 pairwise = 1;
4441 break;
4442 case NEON_3R_FLOAT_ARITH:
4443 pairwise = (u && size < 2); /* if VPADD (float) */
4444 break;
4445 case NEON_3R_FLOAT_MINMAX:
4446 pairwise = u; /* if VPMIN/VPMAX (float) */
4447 break;
4448 case NEON_3R_FLOAT_CMP:
4449 if (!u && size) {
4450 /* no encoding for U=0 C=1x */
4451 return 1;
4453 break;
4454 case NEON_3R_FLOAT_ACMP:
4455 if (!u) {
4456 return 1;
4458 break;
4459 case NEON_3R_VRECPS_VRSQRTS:
4460 if (u) {
4461 return 1;
4463 break;
4464 case NEON_3R_VMUL:
4465 if (u && (size != 0)) {
4466 /* UNDEF on invalid size for polynomial subcase */
4467 return 1;
4469 break;
4470 default:
4471 break;
4474 if (pairwise && q) {
4475 /* All the pairwise insns UNDEF if Q is set */
4476 return 1;
4479 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4481 if (pairwise) {
4482 /* Pairwise. */
4483 if (q)
4484 n = (pass & 1) * 2;
4485 else
4486 n = 0;
4487 if (pass < q + 1) {
4488 tmp = neon_load_reg(rn, n);
4489 tmp2 = neon_load_reg(rn, n + 1);
4490 } else {
4491 tmp = neon_load_reg(rm, n);
4492 tmp2 = neon_load_reg(rm, n + 1);
4494 } else {
4495 /* Elementwise. */
4496 tmp = neon_load_reg(rn, pass);
4497 tmp2 = neon_load_reg(rm, pass);
4499 switch (op) {
4500 case NEON_3R_VHADD:
4501 GEN_NEON_INTEGER_OP(hadd);
4502 break;
4503 case NEON_3R_VQADD:
4504 GEN_NEON_INTEGER_OP(qadd);
4505 break;
4506 case NEON_3R_VRHADD:
4507 GEN_NEON_INTEGER_OP(rhadd);
4508 break;
4509 case NEON_3R_LOGIC: /* Logic ops. */
4510 switch ((u << 2) | size) {
4511 case 0: /* VAND */
4512 tcg_gen_and_i32(tmp, tmp, tmp2);
4513 break;
4514 case 1: /* BIC */
4515 tcg_gen_andc_i32(tmp, tmp, tmp2);
4516 break;
4517 case 2: /* VORR */
4518 tcg_gen_or_i32(tmp, tmp, tmp2);
4519 break;
4520 case 3: /* VORN */
4521 tcg_gen_orc_i32(tmp, tmp, tmp2);
4522 break;
4523 case 4: /* VEOR */
4524 tcg_gen_xor_i32(tmp, tmp, tmp2);
4525 break;
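/* VBSL, VBIT and VBIF are all bitwise selects and differ only in which
 * operand supplies the mask: VBSL keeps Vn bits where the old Vd bit is
 * set and Vm bits elsewhere; VBIT inserts Vn bits into Vd where Vm is
 * set; VBIF inserts them where Vm is clear.  The differing argument
 * orders passed to gen_neon_bsl() below implement these three cases.
 */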
4526 case 5: /* VBSL */
4527 tmp3 = neon_load_reg(rd, pass);
4528 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4529 tcg_temp_free_i32(tmp3);
4530 break;
4531 case 6: /* VBIT */
4532 tmp3 = neon_load_reg(rd, pass);
4533 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4534 tcg_temp_free_i32(tmp3);
4535 break;
4536 case 7: /* VBIF */
4537 tmp3 = neon_load_reg(rd, pass);
4538 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4539 tcg_temp_free_i32(tmp3);
4540 break;
4542 break;
4543 case NEON_3R_VHSUB:
4544 GEN_NEON_INTEGER_OP(hsub);
4545 break;
4546 case NEON_3R_VQSUB:
4547 GEN_NEON_INTEGER_OP(qsub);
4548 break;
4549 case NEON_3R_VCGT:
4550 GEN_NEON_INTEGER_OP(cgt);
4551 break;
4552 case NEON_3R_VCGE:
4553 GEN_NEON_INTEGER_OP(cge);
4554 break;
4555 case NEON_3R_VSHL:
4556 GEN_NEON_INTEGER_OP(shl);
4557 break;
4558 case NEON_3R_VQSHL:
4559 GEN_NEON_INTEGER_OP(qshl);
4560 break;
4561 case NEON_3R_VRSHL:
4562 GEN_NEON_INTEGER_OP(rshl);
4563 break;
4564 case NEON_3R_VQRSHL:
4565 GEN_NEON_INTEGER_OP(qrshl);
4566 break;
4567 case NEON_3R_VMAX:
4568 GEN_NEON_INTEGER_OP(max);
4569 break;
4570 case NEON_3R_VMIN:
4571 GEN_NEON_INTEGER_OP(min);
4572 break;
4573 case NEON_3R_VABD:
4574 GEN_NEON_INTEGER_OP(abd);
4575 break;
4576 case NEON_3R_VABA:
4577 GEN_NEON_INTEGER_OP(abd);
4578 tcg_temp_free_i32(tmp2);
4579 tmp2 = neon_load_reg(rd, pass);
4580 gen_neon_add(size, tmp, tmp2);
4581 break;
4582 case NEON_3R_VADD_VSUB:
4583 if (!u) { /* VADD */
4584 gen_neon_add(size, tmp, tmp2);
4585 } else { /* VSUB */
4586 switch (size) {
4587 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4588 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4589 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4590 default: abort();
4593 break;
4594 case NEON_3R_VTST_VCEQ:
4595 if (!u) { /* VTST */
4596 switch (size) {
4597 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4598 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4599 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4600 default: abort();
4602 } else { /* VCEQ */
4603 switch (size) {
4604 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4605 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4606 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4607 default: abort();
4610 break;
4611 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4612 switch (size) {
4613 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4614 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4615 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4616 default: abort();
4618 tcg_temp_free_i32(tmp2);
4619 tmp2 = neon_load_reg(rd, pass);
4620 if (u) { /* VMLS */
4621 gen_neon_rsb(size, tmp, tmp2);
4622 } else { /* VMLA */
4623 gen_neon_add(size, tmp, tmp2);
4625 break;
4626 case NEON_3R_VMUL:
4627 if (u) { /* polynomial */
4628 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4629 } else { /* Integer */
4630 switch (size) {
4631 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4632 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4633 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4634 default: abort();
4637 break;
4638 case NEON_3R_VPMAX:
4639 GEN_NEON_INTEGER_OP(pmax);
4640 break;
4641 case NEON_3R_VPMIN:
4642 GEN_NEON_INTEGER_OP(pmin);
4643 break;
4644 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4645 if (!u) { /* VQDMULH */
4646 switch (size) {
4647 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4648 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4649 default: abort();
4651 } else { /* VQRDMULH */
4652 switch (size) {
4653 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4654 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4655 default: abort();
4658 break;
4659 case NEON_3R_VPADD:
4660 switch (size) {
4661 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4662 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4663 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4664 default: abort();
4666 break;
4667 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4668 switch ((u << 2) | size) {
4669 case 0: /* VADD */
4670 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4671 break;
4672 case 2: /* VSUB */
4673 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4674 break;
4675 case 4: /* VPADD */
4676 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4677 break;
4678 case 6: /* VABD */
4679 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4680 break;
4681 default:
4682 abort();
4684 break;
4685 case NEON_3R_FLOAT_MULTIPLY:
4686 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4687 if (!u) {
4688 tcg_temp_free_i32(tmp2);
4689 tmp2 = neon_load_reg(rd, pass);
4690 if (size == 0) {
4691 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4692 } else {
4693 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4696 break;
4697 case NEON_3R_FLOAT_CMP:
4698 if (!u) {
4699 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4700 } else {
4701 if (size == 0)
4702 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4703 else
4704 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4706 break;
4707 case NEON_3R_FLOAT_ACMP:
4708 if (size == 0)
4709 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4710 else
4711 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4712 break;
4713 case NEON_3R_FLOAT_MINMAX:
4714 if (size == 0)
4715 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4716 else
4717 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4718 break;
4719 case NEON_3R_VRECPS_VRSQRTS:
4720 if (size == 0)
4721 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4722 else
4723 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4724 break;
4725 default:
4726 abort();
4728 tcg_temp_free_i32(tmp2);
4730 /* Save the result. For elementwise operations we can put it
4731 straight into the destination register. For pairwise operations
4732 we have to be careful to avoid clobbering the source operands. */
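/* e.g. VPADD d0, d1, d0: storing pass 0's result directly into d0[0]
 * would clobber rm[0] before pass 1 reads it, hence the scratch copy.
 */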
4733 if (pairwise && rd == rm) {
4734 neon_store_scratch(pass, tmp);
4735 } else {
4736 neon_store_reg(rd, pass, tmp);
4739 } /* for pass */
4740 if (pairwise && rd == rm) {
4741 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4742 tmp = neon_load_scratch(pass);
4743 neon_store_reg(rd, pass, tmp);
4746 /* End of 3 register same size operations. */
4747 } else if (insn & (1 << 4)) {
4748 if ((insn & 0x00380080) != 0) {
4749 /* Two registers and shift. */
4750 op = (insn >> 8) & 0xf;
4751 if (insn & (1 << 7)) {
4752 /* 64-bit shift. */
4753 size = 3;
4754 } else {
4755 size = 2;
4756 while ((insn & (1 << (size + 19))) == 0)
4757 size--;
4759 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4760 /* To avoid excessive duplication of ops we implement shift
4761 by immediate using the variable shift operations. */
4762 if (op < 8) {
4763 /* Shift by immediate:
4764 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4765 /* Right shifts are encoded as N - shift, where N is the
4766 element size in bits. */
4767 if (op <= 4)
4768 shift = shift - (1 << (size + 3));
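/* After this adjustment a right shift is held as a negative count, which
 * is what the variable-shift helpers expect: e.g. for 8-bit elements an
 * encoded field of 5 means "shift right by 8 - 5 = 3" and becomes
 * 5 - 8 = -3 here.
 */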
4769 if (size == 3) {
4770 count = q + 1;
4771 } else {
4772 count = q ? 4: 2;
4774 switch (size) {
4775 case 0:
4776 imm = (uint8_t) shift;
4777 imm |= imm << 8;
4778 imm |= imm << 16;
4779 break;
4780 case 1:
4781 imm = (uint16_t) shift;
4782 imm |= imm << 16;
4783 break;
4784 case 2:
4785 case 3:
4786 imm = shift;
4787 break;
4788 default:
4789 abort();
4792 for (pass = 0; pass < count; pass++) {
4793 if (size == 3) {
4794 neon_load_reg64(cpu_V0, rm + pass);
4795 tcg_gen_movi_i64(cpu_V1, imm);
4796 switch (op) {
4797 case 0: /* VSHR */
4798 case 1: /* VSRA */
4799 if (u)
4800 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4801 else
4802 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4803 break;
4804 case 2: /* VRSHR */
4805 case 3: /* VRSRA */
4806 if (u)
4807 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4808 else
4809 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4810 break;
4811 case 4: /* VSRI */
4812 if (!u)
4813 return 1;
4814 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4815 break;
4816 case 5: /* VSHL, VSLI */
4817 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4818 break;
4819 case 6: /* VQSHLU */
4820 if (u) {
4821 gen_helper_neon_qshlu_s64(cpu_V0,
4822 cpu_V0, cpu_V1);
4823 } else {
4824 return 1;
4826 break;
4827 case 7: /* VQSHL */
4828 if (u) {
4829 gen_helper_neon_qshl_u64(cpu_V0,
4830 cpu_V0, cpu_V1);
4831 } else {
4832 gen_helper_neon_qshl_s64(cpu_V0,
4833 cpu_V0, cpu_V1);
4835 break;
4837 if (op == 1 || op == 3) {
4838 /* Accumulate. */
4839 neon_load_reg64(cpu_V1, rd + pass);
4840 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4841 } else if (op == 4 || (op == 5 && u)) {
4842 /* Insert */
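/* VSRI keeps the top -shift bits of the old destination and takes the
 * rest from the right-shifted source; VSLI keeps the low shift bits.
 * 'mask' marks the result bits supplied by the shifted source (which is
 * already zero elsewhere), so clearing those bits in the old value and
 * ORing is sufficient.
 */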
4843 neon_load_reg64(cpu_V1, rd + pass);
4844 uint64_t mask;
4845 if (shift < -63 || shift > 63) {
4846 mask = 0;
4847 } else {
4848 if (op == 4) {
4849 mask = 0xffffffffffffffffull >> -shift;
4850 } else {
4851 mask = 0xffffffffffffffffull << shift;
4854 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4855 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4857 neon_store_reg64(cpu_V0, rd + pass);
4858 } else { /* size < 3 */
4859 /* Operands in tmp and tmp2. */
4860 tmp = neon_load_reg(rm, pass);
4861 tmp2 = tcg_temp_new_i32();
4862 tcg_gen_movi_i32(tmp2, imm);
4863 switch (op) {
4864 case 0: /* VSHR */
4865 case 1: /* VSRA */
4866 GEN_NEON_INTEGER_OP(shl);
4867 break;
4868 case 2: /* VRSHR */
4869 case 3: /* VRSRA */
4870 GEN_NEON_INTEGER_OP(rshl);
4871 break;
4872 case 4: /* VSRI */
4873 if (!u)
4874 return 1;
4875 GEN_NEON_INTEGER_OP(shl);
4876 break;
4877 case 5: /* VSHL, VSLI */
4878 switch (size) {
4879 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4880 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4881 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4882 default: return 1;
4884 break;
4885 case 6: /* VQSHLU */
4886 if (!u) {
4887 return 1;
4889 switch (size) {
4890 case 0:
4891 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
4892 break;
4893 case 1:
4894 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
4895 break;
4896 case 2:
4897 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
4898 break;
4899 default:
4900 return 1;
4902 break;
4903 case 7: /* VQSHL */
4904 GEN_NEON_INTEGER_OP(qshl);
4905 break;
4907 tcg_temp_free_i32(tmp2);
4909 if (op == 1 || op == 3) {
4910 /* Accumulate. */
4911 tmp2 = neon_load_reg(rd, pass);
4912 gen_neon_add(size, tmp, tmp2);
4913 tcg_temp_free_i32(tmp2);
4914 } else if (op == 4 || (op == 5 && u)) {
4915 /* Insert */
4916 switch (size) {
4917 case 0:
4918 if (op == 4)
4919 mask = 0xff >> -shift;
4920 else
4921 mask = (uint8_t)(0xff << shift);
4922 mask |= mask << 8;
4923 mask |= mask << 16;
4924 break;
4925 case 1:
4926 if (op == 4)
4927 mask = 0xffff >> -shift;
4928 else
4929 mask = (uint16_t)(0xffff << shift);
4930 mask |= mask << 16;
4931 break;
4932 case 2:
4933 if (shift < -31 || shift > 31) {
4934 mask = 0;
4935 } else {
4936 if (op == 4)
4937 mask = 0xffffffffu >> -shift;
4938 else
4939 mask = 0xffffffffu << shift;
4941 break;
4942 default:
4943 abort();
4945 tmp2 = neon_load_reg(rd, pass);
4946 tcg_gen_andi_i32(tmp, tmp, mask);
4947 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4948 tcg_gen_or_i32(tmp, tmp, tmp2);
4949 tcg_temp_free_i32(tmp2);
4951 neon_store_reg(rd, pass, tmp);
4953 } /* for pass */
4954 } else if (op < 10) {
4955 /* Shift by immediate and narrow:
4956 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4957 int input_unsigned = (op == 8) ? !u : u;
4959 shift = shift - (1 << (size + 3));
4960 size++;
4961 if (size == 3) {
4962 tmp64 = tcg_const_i64(shift);
4963 neon_load_reg64(cpu_V0, rm);
4964 neon_load_reg64(cpu_V1, rm + 1);
4965 for (pass = 0; pass < 2; pass++) {
4966 TCGv_i64 in;
4967 if (pass == 0) {
4968 in = cpu_V0;
4969 } else {
4970 in = cpu_V1;
4972 if (q) {
4973 if (input_unsigned) {
4974 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4975 } else {
4976 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4978 } else {
4979 if (input_unsigned) {
4980 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4981 } else {
4982 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4985 tmp = tcg_temp_new_i32();
4986 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4987 neon_store_reg(rd, pass, tmp);
4988 } /* for pass */
4989 tcg_temp_free_i64(tmp64);
4990 } else {
4991 if (size == 1) {
4992 imm = (uint16_t)shift;
4993 imm |= imm << 16;
4994 } else {
4995 /* size == 2 */
4996 imm = (uint32_t)shift;
4998 tmp2 = tcg_const_i32(imm);
4999 tmp4 = neon_load_reg(rm + 1, 0);
5000 tmp5 = neon_load_reg(rm + 1, 1);
5001 for (pass = 0; pass < 2; pass++) {
5002 if (pass == 0) {
5003 tmp = neon_load_reg(rm, 0);
5004 } else {
5005 tmp = tmp4;
5007 gen_neon_shift_narrow(size, tmp, tmp2, q,
5008 input_unsigned);
5009 if (pass == 0) {
5010 tmp3 = neon_load_reg(rm, 1);
5011 } else {
5012 tmp3 = tmp5;
5014 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5015 input_unsigned);
5016 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5017 tcg_temp_free_i32(tmp);
5018 tcg_temp_free_i32(tmp3);
5019 tmp = tcg_temp_new_i32();
5020 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5021 neon_store_reg(rd, pass, tmp);
5022 } /* for pass */
5023 tcg_temp_free_i32(tmp2);
5025 } else if (op == 10) {
5026 /* VSHLL */
5027 if (q || size == 3)
5028 return 1;
5029 tmp = neon_load_reg(rm, 0);
5030 tmp2 = neon_load_reg(rm, 1);
5031 for (pass = 0; pass < 2; pass++) {
5032 if (pass == 1)
5033 tmp = tmp2;
5035 gen_neon_widen(cpu_V0, tmp, size, u);
5037 if (shift != 0) {
5038 /* The shift is less than the width of the source
5039 type, so we can just shift the whole register. */
5040 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5041 /* Widen the result of shift: we need to clear
5042 * the potential overflow bits resulting from
5043 * left bits of the narrow input appearing as
5044 * right bits of left the neighbour narrow
5045 * input. */
5046 if (size < 2 || !u) {
5047 uint64_t imm64;
5048 if (size == 0) {
5049 imm = (0xffu >> (8 - shift));
5050 imm |= imm << 16;
5051 } else if (size == 1) {
5052 imm = 0xffff >> (16 - shift);
5053 } else {
5054 /* size == 2 */
5055 imm = 0xffffffff >> (32 - shift);
5057 if (size < 2) {
5058 imm64 = imm | (((uint64_t)imm) << 32);
5059 } else {
5060 imm64 = imm;
5062 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5065 neon_store_reg64(cpu_V0, rd + pass);
5067 } else if (op >= 14) {
5068 /* VCVT fixed-point. */
5069 /* We have already masked out the must-be-1 top bit of imm6,
5070 * hence this 32-shift where the ARM ARM has 64-imm6.
5071 */
5072 shift = 32 - shift;
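/* e.g. an encoded imm6 of 48 means 64 - 48 = 16 fraction bits; with the
 * must-be-1 top bit stripped the field is 48 - 32 = 16, and 32 - 16
 * recovers the same 16.
 */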
5073 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5074 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5075 if (!(op & 1)) {
5076 if (u)
5077 gen_vfp_ulto(0, shift);
5078 else
5079 gen_vfp_slto(0, shift);
5080 } else {
5081 if (u)
5082 gen_vfp_toul(0, shift);
5083 else
5084 gen_vfp_tosl(0, shift);
5086 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5088 } else {
5089 return 1;
5091 } else { /* (insn & 0x00380080) == 0 */
5092 int invert;
5094 op = (insn >> 8) & 0xf;
5095 /* One register and immediate. */
5096 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5097 invert = (insn & (1 << 5)) != 0;
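/* 'op' here is the architectural cmode field: it selects how the 8-bit
 * immediate is expanded to 32 bits (a single byte position, replicated
 * bytes or halfwords, the "shifted ones" forms 0xNNFF / 0xNNNNFFFF, or a
 * float constant).  In the per-pass loop further down, odd cmode values
 * below 12 are the VORR/VBIC forms and the rest are VMOV/VMVN, with
 * 'invert' selecting the inverted variant in each case.
 */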
5098 switch (op) {
5099 case 0: case 1:
5100 /* no-op */
5101 break;
5102 case 2: case 3:
5103 imm <<= 8;
5104 break;
5105 case 4: case 5:
5106 imm <<= 16;
5107 break;
5108 case 6: case 7:
5109 imm <<= 24;
5110 break;
5111 case 8: case 9:
5112 imm |= imm << 16;
5113 break;
5114 case 10: case 11:
5115 imm = (imm << 8) | (imm << 24);
5116 break;
5117 case 12:
5118 imm = (imm << 8) | 0xff;
5119 break;
5120 case 13:
5121 imm = (imm << 16) | 0xffff;
5122 break;
5123 case 14:
5124 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5125 if (invert)
5126 imm = ~imm;
5127 break;
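/* cmode 15 is the standard VFP modified-immediate float encoding:
 * abcdefgh -> sign a, exponent NOT(b):bbbbb:cd, fraction efgh000...,
 * i.e. values of the form +/-(16..31)/16 * 2^(-3..4).
 */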
5128 case 15:
5129 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5130 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5131 break;
5133 if (invert)
5134 imm = ~imm;
5136 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5137 if (op & 1 && op < 12) {
5138 tmp = neon_load_reg(rd, pass);
5139 if (invert) {
5140 /* The immediate value has already been inverted, so
5141 BIC becomes AND. */
5142 tcg_gen_andi_i32(tmp, tmp, imm);
5143 } else {
5144 tcg_gen_ori_i32(tmp, tmp, imm);
5146 } else {
5147 /* VMOV, VMVN. */
5148 tmp = tcg_temp_new_i32();
5149 if (op == 14 && invert) {
5150 uint32_t val;
5151 val = 0;
5152 for (n = 0; n < 4; n++) {
5153 if (imm & (1 << (n + (pass & 1) * 4)))
5154 val |= 0xff << (n * 8);
5156 tcg_gen_movi_i32(tmp, val);
5157 } else {
5158 tcg_gen_movi_i32(tmp, imm);
5161 neon_store_reg(rd, pass, tmp);
5164 } else { /* (insn & 0x00800010 == 0x00800000) */
5165 if (size != 3) {
5166 op = (insn >> 8) & 0xf;
5167 if ((insn & (1 << 6)) == 0) {
5168 /* Three registers of different lengths. */
5169 int src1_wide;
5170 int src2_wide;
5171 int prewiden;
5172 /* prewiden, src1_wide, src2_wide */
5173 static const int neon_3reg_wide[16][3] = {
5174 {1, 0, 0}, /* VADDL */
5175 {1, 1, 0}, /* VADDW */
5176 {1, 0, 0}, /* VSUBL */
5177 {1, 1, 0}, /* VSUBW */
5178 {0, 1, 1}, /* VADDHN */
5179 {0, 0, 0}, /* VABAL */
5180 {0, 1, 1}, /* VSUBHN */
5181 {0, 0, 0}, /* VABDL */
5182 {0, 0, 0}, /* VMLAL */
5183 {0, 0, 0}, /* VQDMLAL */
5184 {0, 0, 0}, /* VMLSL */
5185 {0, 0, 0}, /* VQDMLSL */
5186 {0, 0, 0}, /* Integer VMULL */
5187 {0, 0, 0}, /* VQDMULL */
5188 {0, 0, 0} /* Polynomial VMULL */
5191 prewiden = neon_3reg_wide[op][0];
5192 src1_wide = neon_3reg_wide[op][1];
5193 src2_wide = neon_3reg_wide[op][2];
5195 if (size == 0 && (op == 9 || op == 11 || op == 13))
5196 return 1;
5198 /* Avoid overlapping operands. Wide source operands are
5199 always aligned so will never overlap with wide
5200 destinations in problematic ways. */
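/* e.g. VADDL q0, d0, d2 has rd == rn: pass 0's 64-bit store to d0 would
 * destroy d0[1] before pass 1 reads it, so that element is stashed in a
 * scratch slot here.
 */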
5201 if (rd == rm && !src2_wide) {
5202 tmp = neon_load_reg(rm, 1);
5203 neon_store_scratch(2, tmp);
5204 } else if (rd == rn && !src1_wide) {
5205 tmp = neon_load_reg(rn, 1);
5206 neon_store_scratch(2, tmp);
5208 TCGV_UNUSED(tmp3);
5209 for (pass = 0; pass < 2; pass++) {
5210 if (src1_wide) {
5211 neon_load_reg64(cpu_V0, rn + pass);
5212 TCGV_UNUSED(tmp);
5213 } else {
5214 if (pass == 1 && rd == rn) {
5215 tmp = neon_load_scratch(2);
5216 } else {
5217 tmp = neon_load_reg(rn, pass);
5219 if (prewiden) {
5220 gen_neon_widen(cpu_V0, tmp, size, u);
5223 if (src2_wide) {
5224 neon_load_reg64(cpu_V1, rm + pass);
5225 TCGV_UNUSED(tmp2);
5226 } else {
5227 if (pass == 1 && rd == rm) {
5228 tmp2 = neon_load_scratch(2);
5229 } else {
5230 tmp2 = neon_load_reg(rm, pass);
5232 if (prewiden) {
5233 gen_neon_widen(cpu_V1, tmp2, size, u);
5236 switch (op) {
5237 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5238 gen_neon_addl(size);
5239 break;
5240 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5241 gen_neon_subl(size);
5242 break;
5243 case 5: case 7: /* VABAL, VABDL */
5244 switch ((size << 1) | u) {
5245 case 0:
5246 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5247 break;
5248 case 1:
5249 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5250 break;
5251 case 2:
5252 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5253 break;
5254 case 3:
5255 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5256 break;
5257 case 4:
5258 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5259 break;
5260 case 5:
5261 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5262 break;
5263 default: abort();
5265 tcg_temp_free_i32(tmp2);
5266 tcg_temp_free_i32(tmp);
5267 break;
5268 case 8: case 9: case 10: case 11: case 12: case 13:
5269 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5270 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5271 break;
5272 case 14: /* Polynomial VMULL */
5273 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5274 tcg_temp_free_i32(tmp2);
5275 tcg_temp_free_i32(tmp);
5276 break;
5277 default: /* 15 is RESERVED. */
5278 return 1;
5280 if (op == 13) {
5281 /* VQDMULL */
5282 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5283 neon_store_reg64(cpu_V0, rd + pass);
5284 } else if (op == 5 || (op >= 8 && op <= 11)) {
5285 /* Accumulate. */
5286 neon_load_reg64(cpu_V1, rd + pass);
5287 switch (op) {
5288 case 10: /* VMLSL */
5289 gen_neon_negl(cpu_V0, size);
5290 /* Fall through */
5291 case 5: case 8: /* VABAL, VMLAL */
5292 gen_neon_addl(size);
5293 break;
5294 case 9: case 11: /* VQDMLAL, VQDMLSL */
5295 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5296 if (op == 11) {
5297 gen_neon_negl(cpu_V0, size);
5299 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5300 break;
5301 default:
5302 abort();
5304 neon_store_reg64(cpu_V0, rd + pass);
5305 } else if (op == 4 || op == 6) {
5306 /* Narrowing operation. */
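/* VADDHN/VSUBHN take the high half of the double-width result; the
 * rounded variants (U set) first add half an output LSB, i.e.
 * 1 << (esize - 1), which for 32-bit elements is the explicit
 * 1u << 31 below (the narrower helpers round internally).
 */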
5307 tmp = tcg_temp_new_i32();
5308 if (!u) {
5309 switch (size) {
5310 case 0:
5311 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5312 break;
5313 case 1:
5314 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5315 break;
5316 case 2:
5317 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5318 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5319 break;
5320 default: abort();
5322 } else {
5323 switch (size) {
5324 case 0:
5325 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5326 break;
5327 case 1:
5328 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5329 break;
5330 case 2:
5331 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5332 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5333 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5334 break;
5335 default: abort();
5338 if (pass == 0) {
5339 tmp3 = tmp;
5340 } else {
5341 neon_store_reg(rd, 0, tmp3);
5342 neon_store_reg(rd, 1, tmp);
5344 } else {
5345 /* Write back the result. */
5346 neon_store_reg64(cpu_V0, rd + pass);
5349 } else {
5350 /* Two registers and a scalar. */
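/* The scalar operand is fetched once (for 16-bit elements it is
 * duplicated across the 32-bit value) and stashed in a scratch slot, so
 * each pass can reuse the ordinary element-wise multiply helpers below.
 */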
5351 switch (op) {
5352 case 0: /* Integer VMLA scalar */
5353 case 1: /* Float VMLA scalar */
5354 case 4: /* Integer VMLS scalar */
5355 case 5: /* Floating point VMLS scalar */
5356 case 8: /* Integer VMUL scalar */
5357 case 9: /* Floating point VMUL scalar */
5358 case 12: /* VQDMULH scalar */
5359 case 13: /* VQRDMULH scalar */
5360 tmp = neon_get_scalar(size, rm);
5361 neon_store_scratch(0, tmp);
5362 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5363 tmp = neon_load_scratch(0);
5364 tmp2 = neon_load_reg(rn, pass);
5365 if (op == 12) {
5366 if (size == 1) {
5367 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5368 } else {
5369 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5371 } else if (op == 13) {
5372 if (size == 1) {
5373 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5374 } else {
5375 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5377 } else if (op & 1) {
5378 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5379 } else {
5380 switch (size) {
5381 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5382 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5383 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5384 default: return 1;
5387 tcg_temp_free_i32(tmp2);
5388 if (op < 8) {
5389 /* Accumulate. */
5390 tmp2 = neon_load_reg(rd, pass);
5391 switch (op) {
5392 case 0:
5393 gen_neon_add(size, tmp, tmp2);
5394 break;
5395 case 1:
5396 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5397 break;
5398 case 4:
5399 gen_neon_rsb(size, tmp, tmp2);
5400 break;
5401 case 5:
5402 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5403 break;
5404 default:
5405 abort();
5407 tcg_temp_free_i32(tmp2);
5409 neon_store_reg(rd, pass, tmp);
5411 break;
5412 case 2: /* VMLAL scalar */
5413 case 3: /* VQDMLAL scalar */
5414 case 6: /* VMLSL scalar */
5415 case 7: /* VQDMLSL scalar */
5416 case 10: /* VMULL scalar */
5417 case 11: /* VQDMULL scalar */
5418 if (size == 0 && (op == 3 || op == 7 || op == 11))
5419 return 1;
5421 tmp2 = neon_get_scalar(size, rm);
5422 /* We need a copy of tmp2 because gen_neon_mull
5423 * deletes it during pass 0. */
5424 tmp4 = tcg_temp_new_i32();
5425 tcg_gen_mov_i32(tmp4, tmp2);
5426 tmp3 = neon_load_reg(rn, 1);
5428 for (pass = 0; pass < 2; pass++) {
5429 if (pass == 0) {
5430 tmp = neon_load_reg(rn, 0);
5431 } else {
5432 tmp = tmp3;
5433 tmp2 = tmp4;
5435 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5436 if (op != 11) {
5437 neon_load_reg64(cpu_V1, rd + pass);
5439 switch (op) {
5440 case 6:
5441 gen_neon_negl(cpu_V0, size);
5442 /* Fall through */
5443 case 2:
5444 gen_neon_addl(size);
5445 break;
5446 case 3: case 7:
5447 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5448 if (op == 7) {
5449 gen_neon_negl(cpu_V0, size);
5451 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5452 break;
5453 case 10:
5454 /* no-op */
5455 break;
5456 case 11:
5457 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5458 break;
5459 default:
5460 abort();
5462 neon_store_reg64(cpu_V0, rd + pass);
5466 break;
5467 default: /* 14 and 15 are RESERVED */
5468 return 1;
5471 } else { /* size == 3 */
5472 if (!u) {
5473 /* Extract. */
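/* VEXT: the result is bytes imm..imm+7 (imm..imm+15 for Q) of the
 * double-width value formed by placing rm above rn, i.e. effectively
 * (rm:rn) >> (8 * imm).
 */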
5474 imm = (insn >> 8) & 0xf;
5476 if (imm > 7 && !q)
5477 return 1;
5479 if (imm == 0) {
5480 neon_load_reg64(cpu_V0, rn);
5481 if (q) {
5482 neon_load_reg64(cpu_V1, rn + 1);
5484 } else if (imm == 8) {
5485 neon_load_reg64(cpu_V0, rn + 1);
5486 if (q) {
5487 neon_load_reg64(cpu_V1, rm);
5489 } else if (q) {
5490 tmp64 = tcg_temp_new_i64();
5491 if (imm < 8) {
5492 neon_load_reg64(cpu_V0, rn);
5493 neon_load_reg64(tmp64, rn + 1);
5494 } else {
5495 neon_load_reg64(cpu_V0, rn + 1);
5496 neon_load_reg64(tmp64, rm);
5498 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5499 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5500 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5501 if (imm < 8) {
5502 neon_load_reg64(cpu_V1, rm);
5503 } else {
5504 neon_load_reg64(cpu_V1, rm + 1);
5505 imm -= 8;
5507 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5508 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5509 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5510 tcg_temp_free_i64(tmp64);
5511 } else {
5512 /* BUGFIX */
5513 neon_load_reg64(cpu_V0, rn);
5514 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5515 neon_load_reg64(cpu_V1, rm);
5516 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5517 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5519 neon_store_reg64(cpu_V0, rd);
5520 if (q) {
5521 neon_store_reg64(cpu_V1, rd + 1);
5523 } else if ((insn & (1 << 11)) == 0) {
5524 /* Two register misc. */
5525 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5526 size = (insn >> 18) & 3;
5527 switch (op) {
5528 case 0: /* VREV64 */
5529 if (size == 3)
5530 return 1;
5531 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5532 tmp = neon_load_reg(rm, pass * 2);
5533 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5534 switch (size) {
5535 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5536 case 1: gen_swap_half(tmp); break;
5537 case 2: /* no-op */ break;
5538 default: abort();
5540 neon_store_reg(rd, pass * 2 + 1, tmp);
5541 if (size == 2) {
5542 neon_store_reg(rd, pass * 2, tmp2);
5543 } else {
5544 switch (size) {
5545 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5546 case 1: gen_swap_half(tmp2); break;
5547 default: abort();
5549 neon_store_reg(rd, pass * 2, tmp2);
5552 break;
5553 case 4: case 5: /* VPADDL */
5554 case 12: case 13: /* VPADAL */
5555 if (size == 3)
5556 return 1;
5557 for (pass = 0; pass < q + 1; pass++) {
5558 tmp = neon_load_reg(rm, pass * 2);
5559 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5560 tmp = neon_load_reg(rm, pass * 2 + 1);
5561 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5562 switch (size) {
5563 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5564 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5565 case 2: tcg_gen_add_i64(CPU_V001); break;
5566 default: abort();
5568 if (op >= 12) {
5569 /* Accumulate. */
5570 neon_load_reg64(cpu_V1, rd + pass);
5571 gen_neon_addl(size);
5573 neon_store_reg64(cpu_V0, rd + pass);
5575 break;
5576 case 33: /* VTRN */
5577 if (size == 2) {
5578 for (n = 0; n < (q ? 4 : 2); n += 2) {
5579 tmp = neon_load_reg(rm, n);
5580 tmp2 = neon_load_reg(rd, n + 1);
5581 neon_store_reg(rm, n, tmp2);
5582 neon_store_reg(rd, n + 1, tmp);
5584 } else {
5585 goto elementwise;
5587 break;
5588 case 34: /* VUZP */
5589 if (gen_neon_unzip(rd, rm, size, q)) {
5590 return 1;
5592 break;
5593 case 35: /* VZIP */
5594 if (gen_neon_zip(rd, rm, size, q)) {
5595 return 1;
5597 break;
5598 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5599 if (size == 3)
5600 return 1;
5601 TCGV_UNUSED(tmp2);
5602 for (pass = 0; pass < 2; pass++) {
5603 neon_load_reg64(cpu_V0, rm + pass);
5604 tmp = tcg_temp_new_i32();
5605 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5606 if (pass == 0) {
5607 tmp2 = tmp;
5608 } else {
5609 neon_store_reg(rd, 0, tmp2);
5610 neon_store_reg(rd, 1, tmp);
5613 break;
5614 case 38: /* VSHLL */
5615 if (q || size == 3)
5616 return 1;
5617 tmp = neon_load_reg(rm, 0);
5618 tmp2 = neon_load_reg(rm, 1);
5619 for (pass = 0; pass < 2; pass++) {
5620 if (pass == 1)
5621 tmp = tmp2;
5622 gen_neon_widen(cpu_V0, tmp, size, 1);
5623 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5624 neon_store_reg64(cpu_V0, rd + pass);
5626 break;
5627 case 44: /* VCVT.F16.F32 */
5628 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5629 return 1;
5630 tmp = tcg_temp_new_i32();
5631 tmp2 = tcg_temp_new_i32();
5632 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5633 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5634 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5635 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5636 tcg_gen_shli_i32(tmp2, tmp2, 16);
5637 tcg_gen_or_i32(tmp2, tmp2, tmp);
5638 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5639 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5640 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5641 neon_store_reg(rd, 0, tmp2);
5642 tmp2 = tcg_temp_new_i32();
5643 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5644 tcg_gen_shli_i32(tmp2, tmp2, 16);
5645 tcg_gen_or_i32(tmp2, tmp2, tmp);
5646 neon_store_reg(rd, 1, tmp2);
5647 tcg_temp_free_i32(tmp);
5648 break;
5649 case 46: /* VCVT.F32.F16 */
5650 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5651 return 1;
5652 tmp3 = tcg_temp_new_i32();
5653 tmp = neon_load_reg(rm, 0);
5654 tmp2 = neon_load_reg(rm, 1);
5655 tcg_gen_ext16u_i32(tmp3, tmp);
5656 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5657 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5658 tcg_gen_shri_i32(tmp3, tmp, 16);
5659 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5660 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5661 tcg_temp_free_i32(tmp);
5662 tcg_gen_ext16u_i32(tmp3, tmp2);
5663 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5664 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5665 tcg_gen_shri_i32(tmp3, tmp2, 16);
5666 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5667 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5668 tcg_temp_free_i32(tmp2);
5669 tcg_temp_free_i32(tmp3);
5670 break;
5671 default:
5672 elementwise:
5673 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5674 if (op == 30 || op == 31 || op >= 58) {
5675 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5676 neon_reg_offset(rm, pass));
5677 TCGV_UNUSED(tmp);
5678 } else {
5679 tmp = neon_load_reg(rm, pass);
5681 switch (op) {
5682 case 1: /* VREV32 */
5683 switch (size) {
5684 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5685 case 1: gen_swap_half(tmp); break;
5686 default: return 1;
5688 break;
5689 case 2: /* VREV16 */
5690 if (size != 0)
5691 return 1;
5692 gen_rev16(tmp);
5693 break;
5694 case 8: /* CLS */
5695 switch (size) {
5696 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5697 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5698 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5699 default: return 1;
5701 break;
5702 case 9: /* CLZ */
5703 switch (size) {
5704 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5705 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5706 case 2: gen_helper_clz(tmp, tmp); break;
5707 default: return 1;
5709 break;
5710 case 10: /* CNT */
5711 if (size != 0)
5712 return 1;
5713 gen_helper_neon_cnt_u8(tmp, tmp);
5714 break;
5715 case 11: /* VNOT */
5716 if (size != 0)
5717 return 1;
5718 tcg_gen_not_i32(tmp, tmp);
5719 break;
5720 case 14: /* VQABS */
5721 switch (size) {
5722 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5723 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5724 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5725 default: return 1;
5727 break;
5728 case 15: /* VQNEG */
5729 switch (size) {
5730 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5731 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5732 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5733 default: return 1;
5735 break;
5736 case 16: case 19: /* VCGT #0, VCLE #0 */
5737 tmp2 = tcg_const_i32(0);
5738 switch(size) {
5739 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5740 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5741 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5742 default: return 1;
5744 tcg_temp_free(tmp2);
5745 if (op == 19)
5746 tcg_gen_not_i32(tmp, tmp);
5747 break;
5748 case 17: case 20: /* VCGE #0, VCLT #0 */
5749 tmp2 = tcg_const_i32(0);
5750 switch(size) {
5751 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5752 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5753 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5754 default: return 1;
5756 tcg_temp_free(tmp2);
5757 if (op == 20)
5758 tcg_gen_not_i32(tmp, tmp);
5759 break;
5760 case 18: /* VCEQ #0 */
5761 tmp2 = tcg_const_i32(0);
5762 switch(size) {
5763 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5764 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5765 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5766 default: return 1;
5768 tcg_temp_free(tmp2);
5769 break;
5770 case 22: /* VABS */
5771 switch(size) {
5772 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5773 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5774 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5775 default: return 1;
5777 break;
5778 case 23: /* VNEG */
5779 if (size == 3)
5780 return 1;
5781 tmp2 = tcg_const_i32(0);
5782 gen_neon_rsb(size, tmp, tmp2);
5783 tcg_temp_free(tmp2);
5784 break;
5785 case 24: /* Float VCGT #0 */
5786 tmp2 = tcg_const_i32(0);
5787 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5788 tcg_temp_free(tmp2);
5789 break;
5790 case 25: /* Float VCGE #0 */
5791 tmp2 = tcg_const_i32(0);
5792 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5793 tcg_temp_free(tmp2);
5794 break;
5795 case 26: /* Float VCEQ #0 */
5796 tmp2 = tcg_const_i32(0);
5797 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5798 tcg_temp_free(tmp2);
5799 break;
5800 case 27: /* Float VCLE #0 */
5801 tmp2 = tcg_const_i32(0);
5802 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5803 tcg_temp_free(tmp2);
5804 break;
5805 case 28: /* Float VCLT #0 */
5806 tmp2 = tcg_const_i32(0);
5807 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5808 tcg_temp_free(tmp2);
5809 break;
5810 case 30: /* Float VABS */
5811 gen_vfp_abs(0);
5812 break;
5813 case 31: /* Float VNEG */
5814 gen_vfp_neg(0);
5815 break;
5816 case 32: /* VSWP */
5817 tmp2 = neon_load_reg(rd, pass);
5818 neon_store_reg(rm, pass, tmp2);
5819 break;
5820 case 33: /* VTRN */
5821 tmp2 = neon_load_reg(rd, pass);
5822 switch (size) {
5823 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5824 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5825 case 2: abort();
5826 default: return 1;
5828 neon_store_reg(rm, pass, tmp2);
5829 break;
5830 case 56: /* Integer VRECPE */
5831 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5832 break;
5833 case 57: /* Integer VRSQRTE */
5834 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5835 break;
5836 case 58: /* Float VRECPE */
5837 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5838 break;
5839 case 59: /* Float VRSQRTE */
5840 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5841 break;
5842 case 60: /* VCVT.F32.S32 */
5843 gen_vfp_sito(0);
5844 break;
5845 case 61: /* VCVT.F32.U32 */
5846 gen_vfp_uito(0);
5847 break;
5848 case 62: /* VCVT.S32.F32 */
5849 gen_vfp_tosiz(0);
5850 break;
5851 case 63: /* VCVT.U32.F32 */
5852 gen_vfp_touiz(0);
5853 break;
5854 default:
5855 /* Reserved: 21, 29, 39-56 */
5856 return 1;
5858 if (op == 30 || op == 31 || op >= 58) {
5859 tcg_gen_st_f32(cpu_F0s, cpu_env,
5860 neon_reg_offset(rd, pass));
5861 } else {
5862 neon_store_reg(rd, pass, tmp);
5865 break;
5867 } else if ((insn & (1 << 10)) == 0) {
5868 /* VTBL, VTBX. */
5869 n = ((insn >> 5) & 0x18) + 8;
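/* n is the table size in bytes, 8 * (len + 1).  Bit 6 selects VTBX
 * (out-of-range indices leave the destination byte unchanged, so the old
 * value is passed to the helper) rather than VTBL (out-of-range indices
 * produce zero).
 */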
5870 if (insn & (1 << 6)) {
5871 tmp = neon_load_reg(rd, 0);
5872 } else {
5873 tmp = tcg_temp_new_i32();
5874 tcg_gen_movi_i32(tmp, 0);
5876 tmp2 = neon_load_reg(rm, 0);
5877 tmp4 = tcg_const_i32(rn);
5878 tmp5 = tcg_const_i32(n);
5879 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5880 tcg_temp_free_i32(tmp);
5881 if (insn & (1 << 6)) {
5882 tmp = neon_load_reg(rd, 1);
5883 } else {
5884 tmp = tcg_temp_new_i32();
5885 tcg_gen_movi_i32(tmp, 0);
5887 tmp3 = neon_load_reg(rm, 1);
5888 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5889 tcg_temp_free_i32(tmp5);
5890 tcg_temp_free_i32(tmp4);
5891 neon_store_reg(rd, 0, tmp2);
5892 neon_store_reg(rd, 1, tmp3);
5893 tcg_temp_free_i32(tmp);
5894 } else if ((insn & 0x380) == 0) {
5895 /* VDUP */
5896 if (insn & (1 << 19)) {
5897 tmp = neon_load_reg(rm, 1);
5898 } else {
5899 tmp = neon_load_reg(rm, 0);
5901 if (insn & (1 << 16)) {
5902 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5903 } else if (insn & (1 << 17)) {
5904 if ((insn >> 18) & 1)
5905 gen_neon_dup_high16(tmp);
5906 else
5907 gen_neon_dup_low16(tmp);
5909 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5910 tmp2 = tcg_temp_new_i32();
5911 tcg_gen_mov_i32(tmp2, tmp);
5912 neon_store_reg(rd, pass, tmp2);
5914 tcg_temp_free_i32(tmp);
5915 } else {
5916 return 1;
5920 return 0;
5923 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5925 int crn = (insn >> 16) & 0xf;
5926 int crm = insn & 0xf;
5927 int op1 = (insn >> 21) & 7;
5928 int op2 = (insn >> 5) & 7;
5929 int rt = (insn >> 12) & 0xf;
5930 TCGv tmp;
5932 /* Minimal set of debug registers, since we don't support debug */
5933 if (op1 == 0 && crn == 0 && op2 == 0) {
5934 switch (crm) {
5935 case 0:
5936 /* DBGDIDR: just RAZ. In particular this means the
5937 * "debug architecture version" bits will read as
5938 * a reserved value, which should cause Linux to
5939 * not try to use the debug hardware.
5940 */
5941 tmp = tcg_const_i32(0);
5942 store_reg(s, rt, tmp);
5943 return 0;
5944 case 1:
5945 case 2:
5946 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5947 * don't implement memory mapped debug components
5948 */
5949 if (ENABLE_ARCH_7) {
5950 tmp = tcg_const_i32(0);
5951 store_reg(s, rt, tmp);
5952 return 0;
5954 break;
5955 default:
5956 break;
5960 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5961 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5962 /* TEECR */
5963 if (IS_USER(s))
5964 return 1;
5965 tmp = load_cpu_field(teecr);
5966 store_reg(s, rt, tmp);
5967 return 0;
5969 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5970 /* TEEHBR */
5971 if (IS_USER(s) && (env->teecr & 1))
5972 return 1;
5973 tmp = load_cpu_field(teehbr);
5974 store_reg(s, rt, tmp);
5975 return 0;
5978 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5979 op1, crn, crm, op2);
5980 return 1;
5983 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5985 int crn = (insn >> 16) & 0xf;
5986 int crm = insn & 0xf;
5987 int op1 = (insn >> 21) & 7;
5988 int op2 = (insn >> 5) & 7;
5989 int rt = (insn >> 12) & 0xf;
5990 TCGv tmp;
5992 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5993 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5994 /* TEECR */
5995 if (IS_USER(s))
5996 return 1;
5997 tmp = load_reg(s, rt);
5998 gen_helper_set_teecr(cpu_env, tmp);
5999 tcg_temp_free_i32(tmp);
6000 return 0;
6002 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6003 /* TEEHBR */
6004 if (IS_USER(s) && (env->teecr & 1))
6005 return 1;
6006 tmp = load_reg(s, rt);
6007 store_cpu_field(tmp, teehbr);
6008 return 0;
6011 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6012 op1, crn, crm, op2);
6013 return 1;
6016 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6018 int cpnum;
6020 cpnum = (insn >> 8) & 0xf;
6021 if (arm_feature(env, ARM_FEATURE_XSCALE)
6022 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6023 return 1;
6025 switch (cpnum) {
6026 case 0:
6027 case 1:
6028 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6029 return disas_iwmmxt_insn(env, s, insn);
6030 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6031 return disas_dsp_insn(env, s, insn);
6033 return 1;
6034 case 10:
6035 case 11:
6036 return disas_vfp_insn (env, s, insn);
6037 case 14:
6038 /* Coprocessors 7-15 are architecturally reserved by ARM.
6039 Unfortunately Intel decided to ignore this. */
6040 if (arm_feature(env, ARM_FEATURE_XSCALE))
6041 goto board;
6042 if (insn & (1 << 20))
6043 return disas_cp14_read(env, s, insn);
6044 else
6045 return disas_cp14_write(env, s, insn);
6046 case 15:
6047 return disas_cp15_insn (env, s, insn);
6048 default:
6049 board:
6050 /* Unknown coprocessor. See if the board has hooked it. */
6051 return disas_cp_insn (env, s, insn);
6056 /* Store a 64-bit value to a register pair. Clobbers val. */
6057 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6059 TCGv tmp;
6060 tmp = tcg_temp_new_i32();
6061 tcg_gen_trunc_i64_i32(tmp, val);
6062 store_reg(s, rlow, tmp);
6063 tmp = tcg_temp_new_i32();
6064 tcg_gen_shri_i64(val, val, 32);
6065 tcg_gen_trunc_i64_i32(tmp, val);
6066 store_reg(s, rhigh, tmp);
6069 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6070 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6072 TCGv_i64 tmp;
6073 TCGv tmp2;
6075 /* Load value and extend to 64 bits. */
6076 tmp = tcg_temp_new_i64();
6077 tmp2 = load_reg(s, rlow);
6078 tcg_gen_extu_i32_i64(tmp, tmp2);
6079 tcg_temp_free_i32(tmp2);
6080 tcg_gen_add_i64(val, val, tmp);
6081 tcg_temp_free_i64(tmp);
6084 /* load and add a 64-bit value from a register pair. */
6085 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6087 TCGv_i64 tmp;
6088 TCGv tmpl;
6089 TCGv tmph;
6091 /* Load 64-bit value rd:rn. */
6092 tmpl = load_reg(s, rlow);
6093 tmph = load_reg(s, rhigh);
6094 tmp = tcg_temp_new_i64();
6095 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6096 tcg_temp_free_i32(tmpl);
6097 tcg_temp_free_i32(tmph);
6098 tcg_gen_add_i64(val, val, tmp);
6099 tcg_temp_free_i64(tmp);
6102 /* Set N and Z flags from a 64-bit value. */
6103 static void gen_logicq_cc(TCGv_i64 val)
6105 TCGv tmp = tcg_temp_new_i32();
6106 gen_helper_logicq_cc(tmp, val);
6107 gen_logic_CC(tmp);
6108 tcg_temp_free_i32(tmp);
6111 /* Load/Store exclusive instructions are implemented by remembering
6112 the value/address loaded, and seeing if these are the same
6113 when the store is performed. This should be sufficient to implement
6114 the architecturally mandated semantics, and avoids having to monitor
6115 regular stores.
6117 In system emulation mode only one CPU will be running at once, so
6118 this sequence is effectively atomic. In user emulation mode we
6119 throw an exception and handle the atomic operation elsewhere. */
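/* e.g. LDREX r0, [r1] records the address in r1 and the value loaded;
 * a later STREX r2, r3, [r1] performs the store and writes 0 to r2 only
 * if both still match, otherwise it writes 1 and stores nothing.
 */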
6120 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6121 TCGv addr, int size)
6123 TCGv tmp;
6125 switch (size) {
6126 case 0:
6127 tmp = gen_ld8u(addr, IS_USER(s));
6128 break;
6129 case 1:
6130 tmp = gen_ld16u(addr, IS_USER(s));
6131 break;
6132 case 2:
6133 case 3:
6134 tmp = gen_ld32(addr, IS_USER(s));
6135 break;
6136 default:
6137 abort();
6139 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6140 store_reg(s, rt, tmp);
6141 if (size == 3) {
6142 TCGv tmp2 = tcg_temp_new_i32();
6143 tcg_gen_addi_i32(tmp2, addr, 4);
6144 tmp = gen_ld32(tmp2, IS_USER(s));
6145 tcg_temp_free_i32(tmp2);
6146 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6147 store_reg(s, rt2, tmp);
6149 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6152 static void gen_clrex(DisasContext *s)
6154 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6157 #ifdef CONFIG_USER_ONLY
6158 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6159 TCGv addr, int size)
6161 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6162 tcg_gen_movi_i32(cpu_exclusive_info,
6163 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6164 gen_exception_insn(s, 4, EXCP_STREX);
6166 #else
6167 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6168 TCGv addr, int size)
6170 TCGv tmp;
6171 int done_label;
6172 int fail_label;
6174 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6175 [addr] = {Rt};
6176 {Rd} = 0;
6177 } else {
6178 {Rd} = 1;
6179 } */
6180 fail_label = gen_new_label();
6181 done_label = gen_new_label();
6182 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6183 switch (size) {
6184 case 0:
6185 tmp = gen_ld8u(addr, IS_USER(s));
6186 break;
6187 case 1:
6188 tmp = gen_ld16u(addr, IS_USER(s));
6189 break;
6190 case 2:
6191 case 3:
6192 tmp = gen_ld32(addr, IS_USER(s));
6193 break;
6194 default:
6195 abort();
6197 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6198 tcg_temp_free_i32(tmp);
6199 if (size == 3) {
6200 TCGv tmp2 = tcg_temp_new_i32();
6201 tcg_gen_addi_i32(tmp2, addr, 4);
6202 tmp = gen_ld32(tmp2, IS_USER(s));
6203 tcg_temp_free_i32(tmp2);
6204 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6205 tcg_temp_free_i32(tmp);
6207 tmp = load_reg(s, rt);
6208 switch (size) {
6209 case 0:
6210 gen_st8(tmp, addr, IS_USER(s));
6211 break;
6212 case 1:
6213 gen_st16(tmp, addr, IS_USER(s));
6214 break;
6215 case 2:
6216 case 3:
6217 gen_st32(tmp, addr, IS_USER(s));
6218 break;
6219 default:
6220 abort();
6222 if (size == 3) {
6223 tcg_gen_addi_i32(addr, addr, 4);
6224 tmp = load_reg(s, rt2);
6225 gen_st32(tmp, addr, IS_USER(s));
6227 tcg_gen_movi_i32(cpu_R[rd], 0);
6228 tcg_gen_br(done_label);
6229 gen_set_label(fail_label);
6230 tcg_gen_movi_i32(cpu_R[rd], 1);
6231 gen_set_label(done_label);
6232 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6234 #endif
6236 static void disas_arm_insn(CPUState * env, DisasContext *s)
6238 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6239 TCGv tmp;
6240 TCGv tmp2;
6241 TCGv tmp3;
6242 TCGv addr;
6243 TCGv_i64 tmp64;
6245 insn = ldl_code(s->pc);
6246 s->pc += 4;
6248 /* M variants do not implement ARM mode. */
6249 if (IS_M(env))
6250 goto illegal_op;
6251 cond = insn >> 28;
6252 if (cond == 0xf){
6253 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6254 * choose to UNDEF. In ARMv5 and above the space is used
6255 * for miscellaneous unconditional instructions.
6256 */
6257 ARCH(5);
6259 /* Unconditional instructions. */
6260 if (((insn >> 25) & 7) == 1) {
6261 /* NEON Data processing. */
6262 if (!arm_feature(env, ARM_FEATURE_NEON))
6263 goto illegal_op;
6265 if (disas_neon_data_insn(env, s, insn))
6266 goto illegal_op;
6267 return;
6269 if ((insn & 0x0f100000) == 0x04000000) {
6270 /* NEON load/store. */
6271 if (!arm_feature(env, ARM_FEATURE_NEON))
6272 goto illegal_op;
6274 if (disas_neon_ls_insn(env, s, insn))
6275 goto illegal_op;
6276 return;
6278 if (((insn & 0x0f30f000) == 0x0510f000) ||
6279 ((insn & 0x0f30f010) == 0x0710f000)) {
6280 if ((insn & (1 << 22)) == 0) {
6281 /* PLDW; v7MP */
6282 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6283 goto illegal_op;
6286 /* Otherwise PLD; v5TE+ */
6287 ARCH(5TE);
6288 return;
6290 if (((insn & 0x0f70f000) == 0x0450f000) ||
6291 ((insn & 0x0f70f010) == 0x0650f000)) {
6292 ARCH(7);
6293 return; /* PLI; V7 */
6295 if (((insn & 0x0f700000) == 0x04100000) ||
6296 ((insn & 0x0f700010) == 0x06100000)) {
6297 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6298 goto illegal_op;
6300 return; /* v7MP: Unallocated memory hint: must NOP */
6303 if ((insn & 0x0ffffdff) == 0x01010000) {
6304 ARCH(6);
6305 /* setend */
6306 if (insn & (1 << 9)) {
6307 /* BE8 mode not implemented. */
6308 goto illegal_op;
6310 return;
6311 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6312 switch ((insn >> 4) & 0xf) {
6313 case 1: /* clrex */
6314 ARCH(6K);
6315 gen_clrex(s);
6316 return;
6317 case 4: /* dsb */
6318 case 5: /* dmb */
6319 case 6: /* isb */
6320 ARCH(7);
6321 /* We don't emulate caches so these are a no-op. */
6322 return;
6323 default:
6324 goto illegal_op;
6326 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6327 /* srs */
6328 int32_t offset;
6329 if (IS_USER(s))
6330 goto illegal_op;
6331 ARCH(6);
6332 op1 = (insn & 0x1f);
6333 addr = tcg_temp_new_i32();
6334 tmp = tcg_const_i32(op1);
6335 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6336 tcg_temp_free_i32(tmp);
6337 i = (insn >> 23) & 3;
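/* The offset gives the address of the lower of the two stored words
 * (LR, then SPSR at +4) relative to the banked SP for each addressing
 * mode: DA, IA, DB, IB.
 */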
6338 switch (i) {
6339 case 0: offset = -4; break; /* DA */
6340 case 1: offset = 0; break; /* IA */
6341 case 2: offset = -8; break; /* DB */
6342 case 3: offset = 4; break; /* IB */
6343 default: abort();
6345 if (offset)
6346 tcg_gen_addi_i32(addr, addr, offset);
6347 tmp = load_reg(s, 14);
6348 gen_st32(tmp, addr, 0);
6349 tmp = load_cpu_field(spsr);
6350 tcg_gen_addi_i32(addr, addr, 4);
6351 gen_st32(tmp, addr, 0);
6352 if (insn & (1 << 21)) {
6353 /* Base writeback. */
6354 switch (i) {
6355 case 0: offset = -8; break;
6356 case 1: offset = 4; break;
6357 case 2: offset = -4; break;
6358 case 3: offset = 0; break;
6359 default: abort();
6361 if (offset)
6362 tcg_gen_addi_i32(addr, addr, offset);
6363 tmp = tcg_const_i32(op1);
6364 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6365 tcg_temp_free_i32(tmp);
6366 tcg_temp_free_i32(addr);
6367 } else {
6368 tcg_temp_free_i32(addr);
6370 return;
6371 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6372 /* rfe */
6373 int32_t offset;
6374 if (IS_USER(s))
6375 goto illegal_op;
6376 ARCH(6);
6377 rn = (insn >> 16) & 0xf;
6378 addr = load_reg(s, rn);
6379 i = (insn >> 23) & 3;
6380 switch (i) {
6381 case 0: offset = -4; break; /* DA */
6382 case 1: offset = 0; break; /* IA */
6383 case 2: offset = -8; break; /* DB */
6384 case 3: offset = 4; break; /* IB */
6385 default: abort();
6387 if (offset)
6388 tcg_gen_addi_i32(addr, addr, offset);
6389 /* Load PC into tmp and CPSR into tmp2. */
6390 tmp = gen_ld32(addr, 0);
6391 tcg_gen_addi_i32(addr, addr, 4);
6392 tmp2 = gen_ld32(addr, 0);
6393 if (insn & (1 << 21)) {
6394 /* Base writeback. */
6395 switch (i) {
6396 case 0: offset = -8; break;
6397 case 1: offset = 4; break;
6398 case 2: offset = -4; break;
6399 case 3: offset = 0; break;
6400 default: abort();
6402 if (offset)
6403 tcg_gen_addi_i32(addr, addr, offset);
6404 store_reg(s, rn, addr);
6405 } else {
6406 tcg_temp_free_i32(addr);
6408 gen_rfe(s, tmp, tmp2);
6409 return;
6410 } else if ((insn & 0x0e000000) == 0x0a000000) {
6411 /* branch link and change to thumb (blx <offset>) */
6412 int32_t offset;
6414 val = (uint32_t)s->pc;
6415 tmp = tcg_temp_new_i32();
6416 tcg_gen_movi_i32(tmp, val);
6417 store_reg(s, 14, tmp);
6418 /* Sign-extend the 24-bit offset */
6419 offset = (((int32_t)insn) << 8) >> 8;
6420 /* offset * 4 + bit24 * 2 + (thumb bit) */
6421 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6422 /* pipeline offset */
6423 val += 4;
6424 /* protected by ARCH(5); above, near the start of uncond block */
6425 gen_bx_im(s, val);
6426 return;
6427 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6428 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6429 /* iWMMXt register transfer. */
6430 if (env->cp15.c15_cpar & (1 << 1))
6431 if (!disas_iwmmxt_insn(env, s, insn))
6432 return;
6434 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6435 /* Coprocessor double register transfer. */
6436 ARCH(5TE);
6437 } else if ((insn & 0x0f000010) == 0x0e000010) {
6438 /* Additional coprocessor register transfer. */
6439 } else if ((insn & 0x0ff10020) == 0x01000000) {
6440 uint32_t mask;
6441 uint32_t val;
6442 /* cps (privileged) */
6443 if (IS_USER(s))
6444 return;
6445 mask = val = 0;
6446 if (insn & (1 << 19)) {
6447 if (insn & (1 << 8))
6448 mask |= CPSR_A;
6449 if (insn & (1 << 7))
6450 mask |= CPSR_I;
6451 if (insn & (1 << 6))
6452 mask |= CPSR_F;
6453 if (insn & (1 << 18))
6454 val |= mask;
6456 if (insn & (1 << 17)) {
6457 mask |= CPSR_M;
6458 val |= (insn & 0x1f);
6460 if (mask) {
6461 gen_set_psr_im(s, mask, 0, val);
6463 return;
6465 goto illegal_op;
6467 if (cond != 0xe) {
6468 /* if not always execute, we generate a conditional jump to
6469 next instruction */
6470 s->condlabel = gen_new_label();
6471 gen_test_cc(cond ^ 1, s->condlabel);
6472 s->condjmp = 1;
6474 if ((insn & 0x0f900000) == 0x03000000) {
6475 if ((insn & (1 << 21)) == 0) {
6476 ARCH(6T2);
6477 rd = (insn >> 12) & 0xf;
6478 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6479 if ((insn & (1 << 22)) == 0) {
6480 /* MOVW */
6481 tmp = tcg_temp_new_i32();
6482 tcg_gen_movi_i32(tmp, val);
6483 } else {
6484 /* MOVT */
6485 tmp = load_reg(s, rd);
6486 tcg_gen_ext16u_i32(tmp, tmp);
6487 tcg_gen_ori_i32(tmp, tmp, val << 16);
6489 store_reg(s, rd, tmp);
6490 } else {
6491 if (((insn >> 12) & 0xf) != 0xf)
6492 goto illegal_op;
6493 if (((insn >> 16) & 0xf) == 0) {
6494 gen_nop_hint(s, insn & 0xff);
6495 } else {
6496 /* CPSR = immediate */
6497 val = insn & 0xff;
6498 shift = ((insn >> 8) & 0xf) * 2;
6499 if (shift)
6500 val = (val >> shift) | (val << (32 - shift));
6501 i = ((insn & (1 << 22)) != 0);
6502 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6503 goto illegal_op;
6506 } else if ((insn & 0x0f900000) == 0x01000000
6507 && (insn & 0x00000090) != 0x00000090) {
6508 /* miscellaneous instructions */
6509 op1 = (insn >> 21) & 3;
6510 sh = (insn >> 4) & 0xf;
6511 rm = insn & 0xf;
6512 switch (sh) {
6513 case 0x0: /* move program status register */
6514 if (op1 & 1) {
6515 /* PSR = reg */
6516 tmp = load_reg(s, rm);
6517 i = ((op1 & 2) != 0);
6518 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6519 goto illegal_op;
6520 } else {
6521 /* reg = PSR */
6522 rd = (insn >> 12) & 0xf;
6523 if (op1 & 2) {
6524 if (IS_USER(s))
6525 goto illegal_op;
6526 tmp = load_cpu_field(spsr);
6527 } else {
6528 tmp = tcg_temp_new_i32();
6529 gen_helper_cpsr_read(tmp);
6531 store_reg(s, rd, tmp);
6533 break;
6534 case 0x1:
6535 if (op1 == 1) {
6536 /* branch/exchange thumb (bx). */
6537 ARCH(4T);
6538 tmp = load_reg(s, rm);
6539 gen_bx(s, tmp);
6540 } else if (op1 == 3) {
6541 /* clz */
6542 ARCH(5);
6543 rd = (insn >> 12) & 0xf;
6544 tmp = load_reg(s, rm);
6545 gen_helper_clz(tmp, tmp);
6546 store_reg(s, rd, tmp);
6547 } else {
6548 goto illegal_op;
6550 break;
6551 case 0x2:
6552 if (op1 == 1) {
6553 ARCH(5J); /* bxj */
6554 /* Trivial implementation equivalent to bx. */
6555 tmp = load_reg(s, rm);
6556 gen_bx(s, tmp);
6557 } else {
6558 goto illegal_op;
6560 break;
6561 case 0x3:
6562 if (op1 != 1)
6563 goto illegal_op;
6565 ARCH(5);
6566 /* branch link/exchange thumb (blx) */
6567 tmp = load_reg(s, rm);
6568 tmp2 = tcg_temp_new_i32();
6569 tcg_gen_movi_i32(tmp2, s->pc);
6570 store_reg(s, 14, tmp2);
6571 gen_bx(s, tmp);
6572 break;
6573 case 0x5: /* saturating add/subtract */
6574 ARCH(5TE);
6575 rd = (insn >> 12) & 0xf;
6576 rn = (insn >> 16) & 0xf;
6577 tmp = load_reg(s, rm);
6578 tmp2 = load_reg(s, rn);
6579 if (op1 & 2)
6580 gen_helper_double_saturate(tmp2, tmp2);
6581 if (op1 & 1)
6582 gen_helper_sub_saturate(tmp, tmp, tmp2);
6583 else
6584 gen_helper_add_saturate(tmp, tmp, tmp2);
6585 tcg_temp_free_i32(tmp2);
6586 store_reg(s, rd, tmp);
6587 break;
6588 case 7:
6589 /* SMC instruction (op1 == 3)
6590 and undefined instructions (op1 == 0 || op1 == 2)
6591 will trap */
6592 if (op1 != 1) {
6593 goto illegal_op;
6595 /* bkpt */
6596 ARCH(5);
6597 gen_exception_insn(s, 4, EXCP_BKPT);
6598 break;
6599 case 0x8: /* signed multiply */
6600 case 0xa:
6601 case 0xc:
6602 case 0xe:
6603 ARCH(5TE);
6604 rs = (insn >> 8) & 0xf;
6605 rn = (insn >> 12) & 0xf;
6606 rd = (insn >> 16) & 0xf;
6607 if (op1 == 1) {
6608 /* (32 * 16) >> 16 */
6609 tmp = load_reg(s, rm);
6610 tmp2 = load_reg(s, rs);
6611 if (sh & 4)
6612 tcg_gen_sari_i32(tmp2, tmp2, 16);
6613 else
6614 gen_sxth(tmp2);
6615 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6616 tcg_gen_shri_i64(tmp64, tmp64, 16);
6617 tmp = tcg_temp_new_i32();
6618 tcg_gen_trunc_i64_i32(tmp, tmp64);
6619 tcg_temp_free_i64(tmp64);
6620 if ((sh & 2) == 0) {
6621 tmp2 = load_reg(s, rn);
6622 gen_helper_add_setq(tmp, tmp, tmp2);
6623 tcg_temp_free_i32(tmp2);
6625 store_reg(s, rd, tmp);
6626 } else {
6627 /* 16 * 16 */
6628 tmp = load_reg(s, rm);
6629 tmp2 = load_reg(s, rs);
6630 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6631 tcg_temp_free_i32(tmp2);
6632 if (op1 == 2) {
6633 tmp64 = tcg_temp_new_i64();
6634 tcg_gen_ext_i32_i64(tmp64, tmp);
6635 tcg_temp_free_i32(tmp);
6636 gen_addq(s, tmp64, rn, rd);
6637 gen_storeq_reg(s, rn, rd, tmp64);
6638 tcg_temp_free_i64(tmp64);
6639 } else {
6640 if (op1 == 0) {
6641 tmp2 = load_reg(s, rn);
6642 gen_helper_add_setq(tmp, tmp, tmp2);
6643 tcg_temp_free_i32(tmp2);
6645 store_reg(s, rd, tmp);
6648 break;
6649 default:
6650 goto illegal_op;
6652 } else if (((insn & 0x0e000000) == 0 &&
6653 (insn & 0x00000090) != 0x90) ||
6654 ((insn & 0x0e000000) == (1 << 25))) {
6655 int set_cc, logic_cc, shiftop;
6657 op1 = (insn >> 21) & 0xf;
6658 set_cc = (insn >> 20) & 1;
6659 logic_cc = table_logic_cc[op1] & set_cc;
6661 /* data processing instruction */
6662 if (insn & (1 << 25)) {
6663 /* immediate operand */
6664 val = insn & 0xff;
6665 shift = ((insn >> 8) & 0xf) * 2;
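/* rotate the 8-bit immediate right by 2 * rotate_field */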
6666 if (shift) {
6667 val = (val >> shift) | (val << (32 - shift));
6669 tmp2 = tcg_temp_new_i32();
6670 tcg_gen_movi_i32(tmp2, val);
6671 if (logic_cc && shift) {
6672 gen_set_CF_bit31(tmp2);
6674 } else {
6675 /* register */
6676 rm = (insn) & 0xf;
6677 tmp2 = load_reg(s, rm);
6678 shiftop = (insn >> 5) & 3;
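/* bit 4 clear: shift by a 5-bit immediate; bit 4 set: shift by a register */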
6679 if (!(insn & (1 << 4))) {
6680 shift = (insn >> 7) & 0x1f;
6681 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6682 } else {
6683 rs = (insn >> 8) & 0xf;
6684 tmp = load_reg(s, rs);
6685 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6688 if (op1 != 0x0f && op1 != 0x0d) {
6689 rn = (insn >> 16) & 0xf;
6690 tmp = load_reg(s, rn);
6691 } else {
6692 TCGV_UNUSED(tmp);
6694 rd = (insn >> 12) & 0xf;
6695 switch(op1) {
6696 case 0x00:
6697 tcg_gen_and_i32(tmp, tmp, tmp2);
6698 if (logic_cc) {
6699 gen_logic_CC(tmp);
6701 store_reg_bx(env, s, rd, tmp);
6702 break;
6703 case 0x01:
6704 tcg_gen_xor_i32(tmp, tmp, tmp2);
6705 if (logic_cc) {
6706 gen_logic_CC(tmp);
6708 store_reg_bx(env, s, rd, tmp);
6709 break;
6710 case 0x02:
6711 if (set_cc && rd == 15) {
6712 /* SUBS r15, ... is used for exception return. */
6713 if (IS_USER(s)) {
6714 goto illegal_op;
6716 gen_helper_sub_cc(tmp, tmp, tmp2);
6717 gen_exception_return(s, tmp);
6718 } else {
6719 if (set_cc) {
6720 gen_helper_sub_cc(tmp, tmp, tmp2);
6721 } else {
6722 tcg_gen_sub_i32(tmp, tmp, tmp2);
6724 store_reg_bx(env, s, rd, tmp);
6726 break;
6727 case 0x03:
6728 if (set_cc) {
6729 gen_helper_sub_cc(tmp, tmp2, tmp);
6730 } else {
6731 tcg_gen_sub_i32(tmp, tmp2, tmp);
6733 store_reg_bx(env, s, rd, tmp);
6734 break;
6735 case 0x04:
6736 if (set_cc) {
6737 gen_helper_add_cc(tmp, tmp, tmp2);
6738 } else {
6739 tcg_gen_add_i32(tmp, tmp, tmp2);
6741 store_reg_bx(env, s, rd, tmp);
6742 break;
6743 case 0x05:
6744 if (set_cc) {
6745 gen_helper_adc_cc(tmp, tmp, tmp2);
6746 } else {
6747 gen_add_carry(tmp, tmp, tmp2);
6749 store_reg_bx(env, s, rd, tmp);
6750 break;
6751 case 0x06:
6752 if (set_cc) {
6753 gen_helper_sbc_cc(tmp, tmp, tmp2);
6754 } else {
6755 gen_sub_carry(tmp, tmp, tmp2);
6757 store_reg_bx(env, s, rd, tmp);
6758 break;
6759 case 0x07:
6760 if (set_cc) {
6761 gen_helper_sbc_cc(tmp, tmp2, tmp);
6762 } else {
6763 gen_sub_carry(tmp, tmp2, tmp);
6765 store_reg_bx(env, s, rd, tmp);
6766 break;
6767 case 0x08:
6768 if (set_cc) {
6769 tcg_gen_and_i32(tmp, tmp, tmp2);
6770 gen_logic_CC(tmp);
6772 tcg_temp_free_i32(tmp);
6773 break;
6774 case 0x09:
6775 if (set_cc) {
6776 tcg_gen_xor_i32(tmp, tmp, tmp2);
6777 gen_logic_CC(tmp);
6779 tcg_temp_free_i32(tmp);
6780 break;
6781 case 0x0a:
6782 if (set_cc) {
6783 gen_helper_sub_cc(tmp, tmp, tmp2);
6785 tcg_temp_free_i32(tmp);
6786 break;
6787 case 0x0b:
6788 if (set_cc) {
6789 gen_helper_add_cc(tmp, tmp, tmp2);
6791 tcg_temp_free_i32(tmp);
6792 break;
6793 case 0x0c:
6794 tcg_gen_or_i32(tmp, tmp, tmp2);
6795 if (logic_cc) {
6796 gen_logic_CC(tmp);
6798 store_reg_bx(env, s, rd, tmp);
6799 break;
6800 case 0x0d:
6801 if (logic_cc && rd == 15) {
6802 /* MOVS r15, ... is used for exception return. */
6803 if (IS_USER(s)) {
6804 goto illegal_op;
6806 gen_exception_return(s, tmp2);
6807 } else {
6808 if (logic_cc) {
6809 gen_logic_CC(tmp2);
6811 store_reg_bx(env, s, rd, tmp2);
6813 break;
6814 case 0x0e:
6815 tcg_gen_andc_i32(tmp, tmp, tmp2);
6816 if (logic_cc) {
6817 gen_logic_CC(tmp);
6819 store_reg_bx(env, s, rd, tmp);
6820 break;
6821 default:
6822 case 0x0f:
6823 tcg_gen_not_i32(tmp2, tmp2);
6824 if (logic_cc) {
6825 gen_logic_CC(tmp2);
6827 store_reg_bx(env, s, rd, tmp2);
6828 break;
6830 if (op1 != 0x0f && op1 != 0x0d) {
6831 tcg_temp_free_i32(tmp2);
6833 } else {
6834 /* other instructions */
6835 op1 = (insn >> 24) & 0xf;
6836 switch(op1) {
6837 case 0x0:
6838 case 0x1:
6839 /* multiplies, extra load/stores */
6840 sh = (insn >> 5) & 3;
6841 if (sh == 0) {
6842 if (op1 == 0x0) {
6843 rd = (insn >> 16) & 0xf;
6844 rn = (insn >> 12) & 0xf;
6845 rs = (insn >> 8) & 0xf;
6846 rm = (insn) & 0xf;
6847 op1 = (insn >> 20) & 0xf;
6848 switch (op1) {
6849 case 0: case 1: case 2: case 3: case 6:
6850 /* 32 bit mul */
6851 tmp = load_reg(s, rs);
6852 tmp2 = load_reg(s, rm);
6853 tcg_gen_mul_i32(tmp, tmp, tmp2);
6854 tcg_temp_free_i32(tmp2);
6855 if (insn & (1 << 22)) {
6856 /* Subtract (mls) */
6857 ARCH(6T2);
6858 tmp2 = load_reg(s, rn);
6859 tcg_gen_sub_i32(tmp, tmp2, tmp);
6860 tcg_temp_free_i32(tmp2);
6861 } else if (insn & (1 << 21)) {
6862 /* Add */
6863 tmp2 = load_reg(s, rn);
6864 tcg_gen_add_i32(tmp, tmp, tmp2);
6865 tcg_temp_free_i32(tmp2);
6867 if (insn & (1 << 20))
6868 gen_logic_CC(tmp);
6869 store_reg(s, rd, tmp);
6870 break;
6871 case 4:
6872 /* 64 bit mul double accumulate (UMAAL) */
6873 ARCH(6);
6874 tmp = load_reg(s, rs);
6875 tmp2 = load_reg(s, rm);
6876 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6877 gen_addq_lo(s, tmp64, rn);
6878 gen_addq_lo(s, tmp64, rd);
6879 gen_storeq_reg(s, rn, rd, tmp64);
6880 tcg_temp_free_i64(tmp64);
6881 break;
6882 case 8: case 9: case 10: case 11:
6883 case 12: case 13: case 14: case 15:
6884 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6885 tmp = load_reg(s, rs);
6886 tmp2 = load_reg(s, rm);
6887 if (insn & (1 << 22)) {
6888 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6889 } else {
6890 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6892 if (insn & (1 << 21)) { /* mult accumulate */
6893 gen_addq(s, tmp64, rn, rd);
6895 if (insn & (1 << 20)) {
6896 gen_logicq_cc(tmp64);
6898 gen_storeq_reg(s, rn, rd, tmp64);
6899 tcg_temp_free_i64(tmp64);
6900 break;
6901 default:
6902 goto illegal_op;
6904 } else {
6905 rn = (insn >> 16) & 0xf;
6906 rd = (insn >> 12) & 0xf;
6907 if (insn & (1 << 23)) {
6908 /* load/store exclusive */
6909 op1 = (insn >> 21) & 0x3;
6910 if (op1)
6911 ARCH(6K);
6912 else
6913 ARCH(6);
6914 addr = tcg_temp_local_new_i32();
6915 load_reg_var(s, addr, rn);
6916 if (insn & (1 << 20)) {
6917 switch (op1) {
6918 case 0: /* ldrex */
6919 gen_load_exclusive(s, rd, 15, addr, 2);
6920 break;
6921 case 1: /* ldrexd */
6922 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6923 break;
6924 case 2: /* ldrexb */
6925 gen_load_exclusive(s, rd, 15, addr, 0);
6926 break;
6927 case 3: /* ldrexh */
6928 gen_load_exclusive(s, rd, 15, addr, 1);
6929 break;
6930 default:
6931 abort();
6933 } else {
6934 rm = insn & 0xf;
6935 switch (op1) {
6936 case 0: /* strex */
6937 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6938 break;
6939 case 1: /* strexd */
6940 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6941 break;
6942 case 2: /* strexb */
6943 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6944 break;
6945 case 3: /* strexh */
6946 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6947 break;
6948 default:
6949 abort();
6952 tcg_temp_free(addr);
6953 } else {
6954 /* SWP instruction */
6955 rm = (insn) & 0xf;
6957 /* ??? This is not really atomic. However, we know
6958 we never have multiple CPUs running in parallel,
6959 so it is good enough. */
6960 addr = load_reg(s, rn);
6961 tmp = load_reg(s, rm);
6962 if (insn & (1 << 22)) {
6963 tmp2 = gen_ld8u(addr, IS_USER(s));
6964 gen_st8(tmp, addr, IS_USER(s));
6965 } else {
6966 tmp2 = gen_ld32(addr, IS_USER(s));
6967 gen_st32(tmp, addr, IS_USER(s));
6969 tcg_temp_free_i32(addr);
6970 store_reg(s, rd, tmp2);
6973 } else {
6974 int address_offset;
6975 int load;
6976 /* Misc load/store */
6977 rn = (insn >> 16) & 0xf;
6978 rd = (insn >> 12) & 0xf;
6979 addr = load_reg(s, rn);
6980 if (insn & (1 << 24))
6981 gen_add_datah_offset(s, insn, 0, addr);
6982 address_offset = 0;
6983 if (insn & (1 << 20)) {
6984 /* load */
6985 switch(sh) {
6986 case 1:
6987 tmp = gen_ld16u(addr, IS_USER(s));
6988 break;
6989 case 2:
6990 tmp = gen_ld8s(addr, IS_USER(s));
6991 break;
6992 default:
6993 case 3:
6994 tmp = gen_ld16s(addr, IS_USER(s));
6995 break;
6997 load = 1;
6998 } else if (sh & 2) {
6999 ARCH(5TE);
7000 /* doubleword */
7001 if (sh & 1) {
7002 /* store */
7003 tmp = load_reg(s, rd);
7004 gen_st32(tmp, addr, IS_USER(s));
7005 tcg_gen_addi_i32(addr, addr, 4);
7006 tmp = load_reg(s, rd + 1);
7007 gen_st32(tmp, addr, IS_USER(s));
7008 load = 0;
7009 } else {
7010 /* load */
7011 tmp = gen_ld32(addr, IS_USER(s));
7012 store_reg(s, rd, tmp);
7013 tcg_gen_addi_i32(addr, addr, 4);
7014 tmp = gen_ld32(addr, IS_USER(s));
7015 rd++;
7016 load = 1;
7018 address_offset = -4;
7019 } else {
7020 /* store */
7021 tmp = load_reg(s, rd);
7022 gen_st16(tmp, addr, IS_USER(s));
7023 load = 0;
7025 /* Perform base writeback before the loaded value to
7026 ensure correct behavior with overlapping index registers.
7027 ldrd with base writeback is undefined if the
7028 destination and index registers overlap. */
7029 if (!(insn & (1 << 24))) {
7030 gen_add_datah_offset(s, insn, address_offset, addr);
7031 store_reg(s, rn, addr);
7032 } else if (insn & (1 << 21)) {
7033 if (address_offset)
7034 tcg_gen_addi_i32(addr, addr, address_offset);
7035 store_reg(s, rn, addr);
7036 } else {
7037 tcg_temp_free_i32(addr);
7039 if (load) {
7040 /* Complete the load. */
7041 store_reg(s, rd, tmp);
7044 break;
7045 case 0x4:
7046 case 0x5:
7047 goto do_ldst;
7048 case 0x6:
7049 case 0x7:
7050 if (insn & (1 << 4)) {
7051 ARCH(6);
7052 /* Armv6 Media instructions. */
7053 rm = insn & 0xf;
7054 rn = (insn >> 16) & 0xf;
7055 rd = (insn >> 12) & 0xf;
7056 rs = (insn >> 8) & 0xf;
7057 switch ((insn >> 23) & 3) {
7058 case 0: /* Parallel add/subtract. */
7059 op1 = (insn >> 20) & 7;
7060 tmp = load_reg(s, rn);
7061 tmp2 = load_reg(s, rm);
7062 sh = (insn >> 5) & 7;
7063 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7064 goto illegal_op;
7065 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7066 tcg_temp_free_i32(tmp2);
7067 store_reg(s, rd, tmp);
7068 break;
7069 case 1:
7070 if ((insn & 0x00700020) == 0) {
7071 /* Halfword pack. */
7072 tmp = load_reg(s, rn);
7073 tmp2 = load_reg(s, rm);
7074 shift = (insn >> 7) & 0x1f;
7075 if (insn & (1 << 6)) {
7076 /* pkhtb */
7077 if (shift == 0)
7078 shift = 31;
7079 tcg_gen_sari_i32(tmp2, tmp2, shift);
7080 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7081 tcg_gen_ext16u_i32(tmp2, tmp2);
7082 } else {
7083 /* pkhbt */
7084 if (shift)
7085 tcg_gen_shli_i32(tmp2, tmp2, shift);
7086 tcg_gen_ext16u_i32(tmp, tmp);
7087 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7089 tcg_gen_or_i32(tmp, tmp, tmp2);
7090 tcg_temp_free_i32(tmp2);
7091 store_reg(s, rd, tmp);
7092 } else if ((insn & 0x00200020) == 0x00200000) {
7093 /* [us]sat */
7094 tmp = load_reg(s, rm);
7095 shift = (insn >> 7) & 0x1f;
7096 if (insn & (1 << 6)) {
7097 if (shift == 0)
7098 shift = 31;
7099 tcg_gen_sari_i32(tmp, tmp, shift);
7100 } else {
7101 tcg_gen_shli_i32(tmp, tmp, shift);
7103 sh = (insn >> 16) & 0x1f;
7104 tmp2 = tcg_const_i32(sh);
7105 if (insn & (1 << 22))
7106 gen_helper_usat(tmp, tmp, tmp2);
7107 else
7108 gen_helper_ssat(tmp, tmp, tmp2);
7109 tcg_temp_free_i32(tmp2);
7110 store_reg(s, rd, tmp);
7111 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7112 /* [us]sat16 */
7113 tmp = load_reg(s, rm);
7114 sh = (insn >> 16) & 0x1f;
7115 tmp2 = tcg_const_i32(sh);
7116 if (insn & (1 << 22))
7117 gen_helper_usat16(tmp, tmp, tmp2);
7118 else
7119 gen_helper_ssat16(tmp, tmp, tmp2);
7120 tcg_temp_free_i32(tmp2);
7121 store_reg(s, rd, tmp);
7122 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7123 /* Select bytes. */
7124 tmp = load_reg(s, rn);
7125 tmp2 = load_reg(s, rm);
7126 tmp3 = tcg_temp_new_i32();
7127 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7128 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7129 tcg_temp_free_i32(tmp3);
7130 tcg_temp_free_i32(tmp2);
7131 store_reg(s, rd, tmp);
7132 } else if ((insn & 0x000003e0) == 0x00000060) {
7133 tmp = load_reg(s, rm);
7134 shift = (insn >> 10) & 3;
7135 /* ??? In many cases it's not necessary to do a
7136 rotate, a shift is sufficient. */
7137 if (shift != 0)
7138 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7139 op1 = (insn >> 20) & 7;
7140 switch (op1) {
7141 case 0: gen_sxtb16(tmp); break;
7142 case 2: gen_sxtb(tmp); break;
7143 case 3: gen_sxth(tmp); break;
7144 case 4: gen_uxtb16(tmp); break;
7145 case 6: gen_uxtb(tmp); break;
7146 case 7: gen_uxth(tmp); break;
7147 default: goto illegal_op;
7149 if (rn != 15) {
7150 tmp2 = load_reg(s, rn);
7151 if ((op1 & 3) == 0) {
7152 gen_add16(tmp, tmp2);
7153 } else {
7154 tcg_gen_add_i32(tmp, tmp, tmp2);
7155 tcg_temp_free_i32(tmp2);
7158 store_reg(s, rd, tmp);
7159 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7160 /* rev */
7161 tmp = load_reg(s, rm);
7162 if (insn & (1 << 22)) {
7163 if (insn & (1 << 7)) {
7164 gen_revsh(tmp);
7165 } else {
7166 ARCH(6T2);
7167 gen_helper_rbit(tmp, tmp);
7169 } else {
7170 if (insn & (1 << 7))
7171 gen_rev16(tmp);
7172 else
7173 tcg_gen_bswap32_i32(tmp, tmp);
7175 store_reg(s, rd, tmp);
7176 } else {
7177 goto illegal_op;
7179 break;
7180 case 2: /* Multiplies (Type 3). */
7181 tmp = load_reg(s, rm);
7182 tmp2 = load_reg(s, rs);
7183 if (insn & (1 << 20)) {
7184 /* Signed multiply most significant [accumulate].
7185 (SMMUL, SMMLA, SMMLS) */
7186 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7188 if (rd != 15) {
7189 tmp = load_reg(s, rd);
7190 if (insn & (1 << 6)) {
7191 tmp64 = gen_subq_msw(tmp64, tmp);
7192 } else {
7193 tmp64 = gen_addq_msw(tmp64, tmp);
7196 if (insn & (1 << 5)) {
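/* R bit set: add the rounding constant before taking the top 32 bits
   (SMMULR/SMMLAR/SMMLSR) */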
7197 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7199 tcg_gen_shri_i64(tmp64, tmp64, 32);
7200 tmp = tcg_temp_new_i32();
7201 tcg_gen_trunc_i64_i32(tmp, tmp64);
7202 tcg_temp_free_i64(tmp64);
7203 store_reg(s, rn, tmp);
7204 } else {
7205 if (insn & (1 << 5))
7206 gen_swap_half(tmp2);
7207 gen_smul_dual(tmp, tmp2);
7208 if (insn & (1 << 6)) {
7209 /* This subtraction cannot overflow. */
7210 tcg_gen_sub_i32(tmp, tmp, tmp2);
7211 } else {
7212 /* This addition cannot overflow 32 bits;
7213 * however it may overflow when considered as a signed
7214 * operation, in which case we must set the Q flag.
7216 gen_helper_add_setq(tmp, tmp, tmp2);
7218 tcg_temp_free_i32(tmp2);
7219 if (insn & (1 << 22)) {
7220 /* smlald, smlsld */
7221 tmp64 = tcg_temp_new_i64();
7222 tcg_gen_ext_i32_i64(tmp64, tmp);
7223 tcg_temp_free_i32(tmp);
7224 gen_addq(s, tmp64, rd, rn);
7225 gen_storeq_reg(s, rd, rn, tmp64);
7226 tcg_temp_free_i64(tmp64);
7227 } else {
7228 /* smuad, smusd, smlad, smlsd */
7229 if (rd != 15)
7231 tmp2 = load_reg(s, rd);
7232 gen_helper_add_setq(tmp, tmp, tmp2);
7233 tcg_temp_free_i32(tmp2);
7235 store_reg(s, rn, tmp);
7238 break;
7239 case 3:
7240 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7241 switch (op1) {
7242 case 0: /* Unsigned sum of absolute differences. */
7243 ARCH(6);
7244 tmp = load_reg(s, rm);
7245 tmp2 = load_reg(s, rs);
7246 gen_helper_usad8(tmp, tmp, tmp2);
7247 tcg_temp_free_i32(tmp2);
7248 if (rd != 15) {
7249 tmp2 = load_reg(s, rd);
7250 tcg_gen_add_i32(tmp, tmp, tmp2);
7251 tcg_temp_free_i32(tmp2);
7253 store_reg(s, rn, tmp);
7254 break;
7255 case 0x20: case 0x24: case 0x28: case 0x2c:
7256 /* Bitfield insert/clear. */
7257 ARCH(6T2);
7258 shift = (insn >> 7) & 0x1f;
7259 i = (insn >> 16) & 0x1f;
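/* convert the msb/lsb fields into a field width */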
7260 i = i + 1 - shift;
7261 if (rm == 15) {
7262 tmp = tcg_temp_new_i32();
7263 tcg_gen_movi_i32(tmp, 0);
7264 } else {
7265 tmp = load_reg(s, rm);
7267 if (i != 32) {
7268 tmp2 = load_reg(s, rd);
7269 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7270 tcg_temp_free_i32(tmp2);
7272 store_reg(s, rd, tmp);
7273 break;
7274 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7275 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7276 ARCH(6T2);
7277 tmp = load_reg(s, rm);
7278 shift = (insn >> 7) & 0x1f;
7279 i = ((insn >> 16) & 0x1f) + 1;
7280 if (shift + i > 32)
7281 goto illegal_op;
7282 if (i < 32) {
7283 if (op1 & 0x20) {
7284 gen_ubfx(tmp, shift, (1u << i) - 1);
7285 } else {
7286 gen_sbfx(tmp, shift, i);
7289 store_reg(s, rd, tmp);
7290 break;
7291 default:
7292 goto illegal_op;
7294 break;
7296 break;
7298 do_ldst:
7299 /* Check for undefined extension instructions
7300 * per the ARM Bible IE:
7301 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7303 sh = (0xf << 20) | (0xf << 4);
7304 if (op1 == 0x7 && ((insn & sh) == sh))
7306 goto illegal_op;
7308 /* load/store byte/word */
7309 rn = (insn >> 16) & 0xf;
7310 rd = (insn >> 12) & 0xf;
7311 tmp2 = load_reg(s, rn);
7312 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7313 if (insn & (1 << 24))
7314 gen_add_data_offset(s, insn, tmp2);
7315 if (insn & (1 << 20)) {
7316 /* load */
7317 if (insn & (1 << 22)) {
7318 tmp = gen_ld8u(tmp2, i);
7319 } else {
7320 tmp = gen_ld32(tmp2, i);
7322 } else {
7323 /* store */
7324 tmp = load_reg(s, rd);
7325 if (insn & (1 << 22))
7326 gen_st8(tmp, tmp2, i);
7327 else
7328 gen_st32(tmp, tmp2, i);
7330 if (!(insn & (1 << 24))) {
7331 gen_add_data_offset(s, insn, tmp2);
7332 store_reg(s, rn, tmp2);
7333 } else if (insn & (1 << 21)) {
7334 store_reg(s, rn, tmp2);
7335 } else {
7336 tcg_temp_free_i32(tmp2);
7338 if (insn & (1 << 20)) {
7339 /* Complete the load. */
7340 store_reg_from_load(env, s, rd, tmp);
7342 break;
7343 case 0x08:
7344 case 0x09:
7346 int j, n, user, loaded_base;
7347 TCGv loaded_var;
7348 /* load/store multiple words */
7349 /* XXX: store correct base if write back */
7350 user = 0;
7351 if (insn & (1 << 22)) {
7352 if (IS_USER(s))
7353 goto illegal_op; /* only usable in supervisor mode */
7355 if ((insn & (1 << 15)) == 0)
7356 user = 1;
7358 rn = (insn >> 16) & 0xf;
7359 addr = load_reg(s, rn);
7361 /* compute total size */
7362 loaded_base = 0;
7363 TCGV_UNUSED(loaded_var);
7364 n = 0;
7365 for(i=0;i<16;i++) {
7366 if (insn & (1 << i))
7367 n++;
7369 /* XXX: test invalid n == 0 case ? */
7370 if (insn & (1 << 23)) {
7371 if (insn & (1 << 24)) {
7372 /* pre increment */
7373 tcg_gen_addi_i32(addr, addr, 4);
7374 } else {
7375 /* post increment */
7377 } else {
7378 if (insn & (1 << 24)) {
7379 /* pre decrement */
7380 tcg_gen_addi_i32(addr, addr, -(n * 4));
7381 } else {
7382 /* post decrement */
7383 if (n != 1)
7384 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7387 j = 0;
7388 for(i=0;i<16;i++) {
7389 if (insn & (1 << i)) {
7390 if (insn & (1 << 20)) {
7391 /* load */
7392 tmp = gen_ld32(addr, IS_USER(s));
7393 if (user) {
7394 tmp2 = tcg_const_i32(i);
7395 gen_helper_set_user_reg(tmp2, tmp);
7396 tcg_temp_free_i32(tmp2);
7397 tcg_temp_free_i32(tmp);
7398 } else if (i == rn) {
7399 loaded_var = tmp;
7400 loaded_base = 1;
7401 } else {
7402 store_reg_from_load(env, s, i, tmp);
7404 } else {
7405 /* store */
7406 if (i == 15) {
7407 /* special case: r15 = PC + 8 */
7408 val = (long)s->pc + 4;
7409 tmp = tcg_temp_new_i32();
7410 tcg_gen_movi_i32(tmp, val);
7411 } else if (user) {
7412 tmp = tcg_temp_new_i32();
7413 tmp2 = tcg_const_i32(i);
7414 gen_helper_get_user_reg(tmp, tmp2);
7415 tcg_temp_free_i32(tmp2);
7416 } else {
7417 tmp = load_reg(s, i);
7419 gen_st32(tmp, addr, IS_USER(s));
7421 j++;
7422 /* no need to add after the last transfer */
7423 if (j != n)
7424 tcg_gen_addi_i32(addr, addr, 4);
7427 if (insn & (1 << 21)) {
7428 /* write back */
7429 if (insn & (1 << 23)) {
7430 if (insn & (1 << 24)) {
7431 /* pre increment */
7432 } else {
7433 /* post increment */
7434 tcg_gen_addi_i32(addr, addr, 4);
7436 } else {
7437 if (insn & (1 << 24)) {
7438 /* pre decrement */
7439 if (n != 1)
7440 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7441 } else {
7442 /* post decrement */
7443 tcg_gen_addi_i32(addr, addr, -(n * 4));
7446 store_reg(s, rn, addr);
7447 } else {
7448 tcg_temp_free_i32(addr);
7450 if (loaded_base) {
7451 store_reg(s, rn, loaded_var);
7453 if ((insn & (1 << 22)) && !user) {
7454 /* Restore CPSR from SPSR. */
7455 tmp = load_cpu_field(spsr);
7456 gen_set_cpsr(tmp, 0xffffffff);
7457 tcg_temp_free_i32(tmp);
7458 s->is_jmp = DISAS_UPDATE;
7461 break;
7462 case 0xa:
7463 case 0xb:
7465 int32_t offset;
7467 /* branch (and link) */
7468 val = (int32_t)s->pc;
7469 if (insn & (1 << 24)) {
7470 tmp = tcg_temp_new_i32();
7471 tcg_gen_movi_i32(tmp, val);
7472 store_reg(s, 14, tmp);
7474 offset = (((int32_t)insn << 8) >> 8);
7475 val += (offset << 2) + 4;
7476 gen_jmp(s, val);
7478 break;
7479 case 0xc:
7480 case 0xd:
7481 case 0xe:
7482 /* Coprocessor. */
7483 if (disas_coproc_insn(env, s, insn))
7484 goto illegal_op;
7485 break;
7486 case 0xf:
7487 /* swi */
7488 gen_set_pc_im(s->pc);
7489 s->is_jmp = DISAS_SWI;
7490 break;
7491 default:
7492 illegal_op:
7493 gen_exception_insn(s, 4, EXCP_UDEF);
7494 break;
7499 /* Return true if this is a Thumb-2 logical op. */
7500 static int
7501 thumb2_logic_op(int op)
7503 return (op < 8);
7506 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7507 then set condition code flags based on the result of the operation.
7508 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7509 to the high bit of T1.
7510 Returns zero if the opcode is valid. */
7512 static int
7513 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7515 int logic_cc;
7517 logic_cc = 0;
7518 switch (op) {
7519 case 0: /* and */
7520 tcg_gen_and_i32(t0, t0, t1);
7521 logic_cc = conds;
7522 break;
7523 case 1: /* bic */
7524 tcg_gen_andc_i32(t0, t0, t1);
7525 logic_cc = conds;
7526 break;
7527 case 2: /* orr */
7528 tcg_gen_or_i32(t0, t0, t1);
7529 logic_cc = conds;
7530 break;
7531 case 3: /* orn */
7532 tcg_gen_orc_i32(t0, t0, t1);
7533 logic_cc = conds;
7534 break;
7535 case 4: /* eor */
7536 tcg_gen_xor_i32(t0, t0, t1);
7537 logic_cc = conds;
7538 break;
7539 case 8: /* add */
7540 if (conds)
7541 gen_helper_add_cc(t0, t0, t1);
7542 else
7543 tcg_gen_add_i32(t0, t0, t1);
7544 break;
7545 case 10: /* adc */
7546 if (conds)
7547 gen_helper_adc_cc(t0, t0, t1);
7548 else
7549 gen_adc(t0, t1);
7550 break;
7551 case 11: /* sbc */
7552 if (conds)
7553 gen_helper_sbc_cc(t0, t0, t1);
7554 else
7555 gen_sub_carry(t0, t0, t1);
7556 break;
7557 case 13: /* sub */
7558 if (conds)
7559 gen_helper_sub_cc(t0, t0, t1);
7560 else
7561 tcg_gen_sub_i32(t0, t0, t1);
7562 break;
7563 case 14: /* rsb */
7564 if (conds)
7565 gen_helper_sub_cc(t0, t1, t0);
7566 else
7567 tcg_gen_sub_i32(t0, t1, t0);
7568 break;
7569 default: /* 5, 6, 7, 9, 12, 15. */
7570 return 1;
7572 if (logic_cc) {
7573 gen_logic_CC(t0);
7574 if (shifter_out)
7575 gen_set_CF_bit31(t1);
7577 return 0;
7580 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7581 is not legal. */
7582 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7584 uint32_t insn, imm, shift, offset;
7585 uint32_t rd, rn, rm, rs;
7586 TCGv tmp;
7587 TCGv tmp2;
7588 TCGv tmp3;
7589 TCGv addr;
7590 TCGv_i64 tmp64;
7591 int op;
7592 int shiftop;
7593 int conds;
7594 int logic_cc;
7596 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7597 || arm_feature (env, ARM_FEATURE_M))) {
7598 /* Thumb-1 cores may need to treat bl and blx as a pair of
7599 16-bit instructions to get correct prefetch abort behavior. */
7600 insn = insn_hw1;
7601 if ((insn & (1 << 12)) == 0) {
7602 ARCH(5);
7603 /* Second half of blx. */
7604 offset = ((insn & 0x7ff) << 1);
7605 tmp = load_reg(s, 14);
7606 tcg_gen_addi_i32(tmp, tmp, offset);
7607 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7609 tmp2 = tcg_temp_new_i32();
7610 tcg_gen_movi_i32(tmp2, s->pc | 1);
7611 store_reg(s, 14, tmp2);
7612 gen_bx(s, tmp);
7613 return 0;
7615 if (insn & (1 << 11)) {
7616 /* Second half of bl. */
7617 offset = ((insn & 0x7ff) << 1) | 1;
7618 tmp = load_reg(s, 14);
7619 tcg_gen_addi_i32(tmp, tmp, offset);
7621 tmp2 = tcg_temp_new_i32();
7622 tcg_gen_movi_i32(tmp2, s->pc | 1);
7623 store_reg(s, 14, tmp2);
7624 gen_bx(s, tmp);
7625 return 0;
7627 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7628 /* Instruction spans a page boundary. Implement it as two
7629 16-bit instructions in case the second half causes a
7630 prefetch abort. */
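/* hw1[10:0] is the top of the BL/BLX offset: sign-extend it into bits [22:12]. */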
7631 offset = ((int32_t)insn << 21) >> 9;
7632 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7633 return 0;
7635 /* Fall through to 32-bit decode. */
7638 insn = lduw_code(s->pc);
7639 s->pc += 2;
7640 insn |= (uint32_t)insn_hw1 << 16;
7642 if ((insn & 0xf800e800) != 0xf000e800) {
7643 ARCH(6T2);
7646 rn = (insn >> 16) & 0xf;
7647 rs = (insn >> 12) & 0xf;
7648 rd = (insn >> 8) & 0xf;
7649 rm = insn & 0xf;
7650 switch ((insn >> 25) & 0xf) {
7651 case 0: case 1: case 2: case 3:
7652 /* 16-bit instructions. Should never happen. */
7653 abort();
7654 case 4:
7655 if (insn & (1 << 22)) {
7656 /* Other load/store, table branch. */
7657 if (insn & 0x01200000) {
7658 /* Load/store doubleword. */
7659 if (rn == 15) {
7660 addr = tcg_temp_new_i32();
7661 tcg_gen_movi_i32(addr, s->pc & ~3);
7662 } else {
7663 addr = load_reg(s, rn);
7665 offset = (insn & 0xff) * 4;
7666 if ((insn & (1 << 23)) == 0)
7667 offset = -offset;
7668 if (insn & (1 << 24)) {
7669 tcg_gen_addi_i32(addr, addr, offset);
7670 offset = 0;
7672 if (insn & (1 << 20)) {
7673 /* ldrd */
7674 tmp = gen_ld32(addr, IS_USER(s));
7675 store_reg(s, rs, tmp);
7676 tcg_gen_addi_i32(addr, addr, 4);
7677 tmp = gen_ld32(addr, IS_USER(s));
7678 store_reg(s, rd, tmp);
7679 } else {
7680 /* strd */
7681 tmp = load_reg(s, rs);
7682 gen_st32(tmp, addr, IS_USER(s));
7683 tcg_gen_addi_i32(addr, addr, 4);
7684 tmp = load_reg(s, rd);
7685 gen_st32(tmp, addr, IS_USER(s));
7687 if (insn & (1 << 21)) {
7688 /* Base writeback. */
7689 if (rn == 15)
7690 goto illegal_op;
7691 tcg_gen_addi_i32(addr, addr, offset - 4);
7692 store_reg(s, rn, addr);
7693 } else {
7694 tcg_temp_free_i32(addr);
7696 } else if ((insn & (1 << 23)) == 0) {
7697 /* Load/store exclusive word. */
7698 addr = tcg_temp_local_new();
7699 load_reg_var(s, addr, rn);
7700 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7701 if (insn & (1 << 20)) {
7702 gen_load_exclusive(s, rs, 15, addr, 2);
7703 } else {
7704 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7706 tcg_temp_free(addr);
7707 } else if ((insn & (1 << 6)) == 0) {
7708 /* Table Branch. */
7709 if (rn == 15) {
7710 addr = tcg_temp_new_i32();
7711 tcg_gen_movi_i32(addr, s->pc);
7712 } else {
7713 addr = load_reg(s, rn);
7715 tmp = load_reg(s, rm);
7716 tcg_gen_add_i32(addr, addr, tmp);
7717 if (insn & (1 << 4)) {
7718 /* tbh */
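/* add Rm a second time: the table entries are halfwords, so the index is scaled by 2 */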
7719 tcg_gen_add_i32(addr, addr, tmp);
7720 tcg_temp_free_i32(tmp);
7721 tmp = gen_ld16u(addr, IS_USER(s));
7722 } else { /* tbb */
7723 tcg_temp_free_i32(tmp);
7724 tmp = gen_ld8u(addr, IS_USER(s));
7726 tcg_temp_free_i32(addr);
7727 tcg_gen_shli_i32(tmp, tmp, 1);
7728 tcg_gen_addi_i32(tmp, tmp, s->pc);
7729 store_reg(s, 15, tmp);
7730 } else {
7731 /* Load/store exclusive byte/halfword/doubleword. */
7732 ARCH(7);
7733 op = (insn >> 4) & 0x3;
7734 if (op == 2) {
7735 goto illegal_op;
7737 addr = tcg_temp_local_new();
7738 load_reg_var(s, addr, rn);
7739 if (insn & (1 << 20)) {
7740 gen_load_exclusive(s, rs, rd, addr, op);
7741 } else {
7742 gen_store_exclusive(s, rm, rs, rd, addr, op);
7744 tcg_temp_free(addr);
7746 } else {
7747 /* Load/store multiple, RFE, SRS. */
7748 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7749 /* Not available in user mode. */
7750 if (IS_USER(s))
7751 goto illegal_op;
7752 if (insn & (1 << 20)) {
7753 /* rfe */
7754 addr = load_reg(s, rn);
7755 if ((insn & (1 << 24)) == 0)
7756 tcg_gen_addi_i32(addr, addr, -8);
7757 /* Load PC into tmp and CPSR into tmp2. */
7758 tmp = gen_ld32(addr, 0);
7759 tcg_gen_addi_i32(addr, addr, 4);
7760 tmp2 = gen_ld32(addr, 0);
7761 if (insn & (1 << 21)) {
7762 /* Base writeback. */
7763 if (insn & (1 << 24)) {
7764 tcg_gen_addi_i32(addr, addr, 4);
7765 } else {
7766 tcg_gen_addi_i32(addr, addr, -4);
7768 store_reg(s, rn, addr);
7769 } else {
7770 tcg_temp_free_i32(addr);
7772 gen_rfe(s, tmp, tmp2);
7773 } else {
7774 /* srs */
7775 op = (insn & 0x1f);
7776 addr = tcg_temp_new_i32();
7777 tmp = tcg_const_i32(op);
7778 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7779 tcg_temp_free_i32(tmp);
7780 if ((insn & (1 << 24)) == 0) {
7781 tcg_gen_addi_i32(addr, addr, -8);
7783 tmp = load_reg(s, 14);
7784 gen_st32(tmp, addr, 0);
7785 tcg_gen_addi_i32(addr, addr, 4);
7786 tmp = tcg_temp_new_i32();
7787 gen_helper_cpsr_read(tmp);
7788 gen_st32(tmp, addr, 0);
7789 if (insn & (1 << 21)) {
7790 if ((insn & (1 << 24)) == 0) {
7791 tcg_gen_addi_i32(addr, addr, -4);
7792 } else {
7793 tcg_gen_addi_i32(addr, addr, 4);
7795 tmp = tcg_const_i32(op);
7796 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7797 tcg_temp_free_i32(tmp);
7798 } else {
7799 tcg_temp_free_i32(addr);
7802 } else {
7803 int i;
7804 /* Load/store multiple. */
7805 addr = load_reg(s, rn);
7806 offset = 0;
7807 for (i = 0; i < 16; i++) {
7808 if (insn & (1 << i))
7809 offset += 4;
7811 if (insn & (1 << 24)) {
7812 tcg_gen_addi_i32(addr, addr, -offset);
7815 for (i = 0; i < 16; i++) {
7816 if ((insn & (1 << i)) == 0)
7817 continue;
7818 if (insn & (1 << 20)) {
7819 /* Load. */
7820 tmp = gen_ld32(addr, IS_USER(s));
7821 if (i == 15) {
7822 gen_bx(s, tmp);
7823 } else {
7824 store_reg(s, i, tmp);
7826 } else {
7827 /* Store. */
7828 tmp = load_reg(s, i);
7829 gen_st32(tmp, addr, IS_USER(s));
7831 tcg_gen_addi_i32(addr, addr, 4);
7833 if (insn & (1 << 21)) {
7834 /* Base register writeback. */
7835 if (insn & (1 << 24)) {
7836 tcg_gen_addi_i32(addr, addr, -offset);
7838 /* Fault if writeback register is in register list. */
7839 if (insn & (1 << rn))
7840 goto illegal_op;
7841 store_reg(s, rn, addr);
7842 } else {
7843 tcg_temp_free_i32(addr);
7847 break;
7848 case 5:
7850 op = (insn >> 21) & 0xf;
7851 if (op == 6) {
7852 /* Halfword pack. */
7853 tmp = load_reg(s, rn);
7854 tmp2 = load_reg(s, rm);
7855 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7856 if (insn & (1 << 5)) {
7857 /* pkhtb */
7858 if (shift == 0)
7859 shift = 31;
7860 tcg_gen_sari_i32(tmp2, tmp2, shift);
7861 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7862 tcg_gen_ext16u_i32(tmp2, tmp2);
7863 } else {
7864 /* pkhbt */
7865 if (shift)
7866 tcg_gen_shli_i32(tmp2, tmp2, shift);
7867 tcg_gen_ext16u_i32(tmp, tmp);
7868 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7870 tcg_gen_or_i32(tmp, tmp, tmp2);
7871 tcg_temp_free_i32(tmp2);
7872 store_reg(s, rd, tmp);
7873 } else {
7874 /* Data processing register constant shift. */
7875 if (rn == 15) {
7876 tmp = tcg_temp_new_i32();
7877 tcg_gen_movi_i32(tmp, 0);
7878 } else {
7879 tmp = load_reg(s, rn);
7881 tmp2 = load_reg(s, rm);
7883 shiftop = (insn >> 4) & 3;
7884 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7885 conds = (insn & (1 << 20)) != 0;
7886 logic_cc = (conds && thumb2_logic_op(op));
7887 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7888 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7889 goto illegal_op;
7890 tcg_temp_free_i32(tmp2);
7891 if (rd != 15) {
7892 store_reg(s, rd, tmp);
7893 } else {
7894 tcg_temp_free_i32(tmp);
7897 break;
7898 case 13: /* Misc data processing. */
7899 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7900 if (op < 4 && (insn & 0xf000) != 0xf000)
7901 goto illegal_op;
7902 switch (op) {
7903 case 0: /* Register controlled shift. */
7904 tmp = load_reg(s, rn);
7905 tmp2 = load_reg(s, rm);
7906 if ((insn & 0x70) != 0)
7907 goto illegal_op;
7908 op = (insn >> 21) & 3;
7909 logic_cc = (insn & (1 << 20)) != 0;
7910 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7911 if (logic_cc)
7912 gen_logic_CC(tmp);
7913 store_reg_bx(env, s, rd, tmp);
7914 break;
7915 case 1: /* Sign/zero extend. */
7916 tmp = load_reg(s, rm);
7917 shift = (insn >> 4) & 3;
7918 /* ??? In many cases it's not necessary to do a
7919 rotate, a shift is sufficient. */
7920 if (shift != 0)
7921 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7922 op = (insn >> 20) & 7;
7923 switch (op) {
7924 case 0: gen_sxth(tmp); break;
7925 case 1: gen_uxth(tmp); break;
7926 case 2: gen_sxtb16(tmp); break;
7927 case 3: gen_uxtb16(tmp); break;
7928 case 4: gen_sxtb(tmp); break;
7929 case 5: gen_uxtb(tmp); break;
7930 default: goto illegal_op;
7932 if (rn != 15) {
7933 tmp2 = load_reg(s, rn);
7934 if ((op >> 1) == 1) {
7935 gen_add16(tmp, tmp2);
7936 } else {
7937 tcg_gen_add_i32(tmp, tmp, tmp2);
7938 tcg_temp_free_i32(tmp2);
7941 store_reg(s, rd, tmp);
7942 break;
7943 case 2: /* SIMD add/subtract. */
7944 op = (insn >> 20) & 7;
7945 shift = (insn >> 4) & 7;
7946 if ((op & 3) == 3 || (shift & 3) == 3)
7947 goto illegal_op;
7948 tmp = load_reg(s, rn);
7949 tmp2 = load_reg(s, rm);
7950 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7951 tcg_temp_free_i32(tmp2);
7952 store_reg(s, rd, tmp);
7953 break;
7954 case 3: /* Other data processing. */
7955 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7956 if (op < 4) {
7957 /* Saturating add/subtract. */
7958 tmp = load_reg(s, rn);
7959 tmp2 = load_reg(s, rm);
7960 if (op & 1)
7961 gen_helper_double_saturate(tmp, tmp);
7962 if (op & 2)
7963 gen_helper_sub_saturate(tmp, tmp2, tmp);
7964 else
7965 gen_helper_add_saturate(tmp, tmp, tmp2);
7966 tcg_temp_free_i32(tmp2);
7967 } else {
7968 tmp = load_reg(s, rn);
7969 switch (op) {
7970 case 0x0a: /* rbit */
7971 gen_helper_rbit(tmp, tmp);
7972 break;
7973 case 0x08: /* rev */
7974 tcg_gen_bswap32_i32(tmp, tmp);
7975 break;
7976 case 0x09: /* rev16 */
7977 gen_rev16(tmp);
7978 break;
7979 case 0x0b: /* revsh */
7980 gen_revsh(tmp);
7981 break;
7982 case 0x10: /* sel */
7983 tmp2 = load_reg(s, rm);
7984 tmp3 = tcg_temp_new_i32();
7985 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7986 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7987 tcg_temp_free_i32(tmp3);
7988 tcg_temp_free_i32(tmp2);
7989 break;
7990 case 0x18: /* clz */
7991 gen_helper_clz(tmp, tmp);
7992 break;
7993 default:
7994 goto illegal_op;
7997 store_reg(s, rd, tmp);
7998 break;
7999 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8000 op = (insn >> 4) & 0xf;
8001 tmp = load_reg(s, rn);
8002 tmp2 = load_reg(s, rm);
8003 switch ((insn >> 20) & 7) {
8004 case 0: /* 32 x 32 -> 32 */
8005 tcg_gen_mul_i32(tmp, tmp, tmp2);
8006 tcg_temp_free_i32(tmp2);
8007 if (rs != 15) {
8008 tmp2 = load_reg(s, rs);
8009 if (op)
8010 tcg_gen_sub_i32(tmp, tmp2, tmp);
8011 else
8012 tcg_gen_add_i32(tmp, tmp, tmp2);
8013 tcg_temp_free_i32(tmp2);
8015 break;
8016 case 1: /* 16 x 16 -> 32 */
8017 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8018 tcg_temp_free_i32(tmp2);
8019 if (rs != 15) {
8020 tmp2 = load_reg(s, rs);
8021 gen_helper_add_setq(tmp, tmp, tmp2);
8022 tcg_temp_free_i32(tmp2);
8024 break;
8025 case 2: /* Dual multiply add. */
8026 case 4: /* Dual multiply subtract. */
8027 if (op)
8028 gen_swap_half(tmp2);
8029 gen_smul_dual(tmp, tmp2);
8030 if (insn & (1 << 22)) {
8031 /* This subtraction cannot overflow. */
8032 tcg_gen_sub_i32(tmp, tmp, tmp2);
8033 } else {
8034 /* This addition cannot overflow 32 bits;
8035 * however it may overflow when considered as a signed
8036 * operation, in which case we must set the Q flag.
8038 gen_helper_add_setq(tmp, tmp, tmp2);
8040 tcg_temp_free_i32(tmp2);
8041 if (rs != 15)
8043 tmp2 = load_reg(s, rs);
8044 gen_helper_add_setq(tmp, tmp, tmp2);
8045 tcg_temp_free_i32(tmp2);
8047 break;
8048 case 3: /* 32 * 16 -> 32msb */
8049 if (op)
8050 tcg_gen_sari_i32(tmp2, tmp2, 16);
8051 else
8052 gen_sxth(tmp2);
8053 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8054 tcg_gen_shri_i64(tmp64, tmp64, 16);
8055 tmp = tcg_temp_new_i32();
8056 tcg_gen_trunc_i64_i32(tmp, tmp64);
8057 tcg_temp_free_i64(tmp64);
8058 if (rs != 15)
8060 tmp2 = load_reg(s, rs);
8061 gen_helper_add_setq(tmp, tmp, tmp2);
8062 tcg_temp_free_i32(tmp2);
8064 break;
8065 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8066 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8067 if (rs != 15) {
8068 tmp = load_reg(s, rs);
8069 if (insn & (1 << 20)) {
8070 tmp64 = gen_addq_msw(tmp64, tmp);
8071 } else {
8072 tmp64 = gen_subq_msw(tmp64, tmp);
8075 if (insn & (1 << 4)) {
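/* R (round) bit: bias by 0x80000000 before taking the high word */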
8076 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8078 tcg_gen_shri_i64(tmp64, tmp64, 32);
8079 tmp = tcg_temp_new_i32();
8080 tcg_gen_trunc_i64_i32(tmp, tmp64);
8081 tcg_temp_free_i64(tmp64);
8082 break;
8083 case 7: /* Unsigned sum of absolute differences. */
8084 gen_helper_usad8(tmp, tmp, tmp2);
8085 tcg_temp_free_i32(tmp2);
8086 if (rs != 15) {
8087 tmp2 = load_reg(s, rs);
8088 tcg_gen_add_i32(tmp, tmp, tmp2);
8089 tcg_temp_free_i32(tmp2);
8091 break;
8093 store_reg(s, rd, tmp);
8094 break;
8095 case 6: case 7: /* 64-bit multiply, Divide. */
8096 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8097 tmp = load_reg(s, rn);
8098 tmp2 = load_reg(s, rm);
8099 if ((op & 0x50) == 0x10) {
8100 /* sdiv, udiv */
8101 if (!arm_feature(env, ARM_FEATURE_DIV))
8102 goto illegal_op;
8103 if (op & 0x20)
8104 gen_helper_udiv(tmp, tmp, tmp2);
8105 else
8106 gen_helper_sdiv(tmp, tmp, tmp2);
8107 tcg_temp_free_i32(tmp2);
8108 store_reg(s, rd, tmp);
8109 } else if ((op & 0xe) == 0xc) {
8110 /* Dual multiply accumulate long. */
8111 if (op & 1)
8112 gen_swap_half(tmp2);
8113 gen_smul_dual(tmp, tmp2);
8114 if (op & 0x10) {
8115 tcg_gen_sub_i32(tmp, tmp, tmp2);
8116 } else {
8117 tcg_gen_add_i32(tmp, tmp, tmp2);
8119 tcg_temp_free_i32(tmp2);
8120 /* BUGFIX */
8121 tmp64 = tcg_temp_new_i64();
8122 tcg_gen_ext_i32_i64(tmp64, tmp);
8123 tcg_temp_free_i32(tmp);
8124 gen_addq(s, tmp64, rs, rd);
8125 gen_storeq_reg(s, rs, rd, tmp64);
8126 tcg_temp_free_i64(tmp64);
8127 } else {
8128 if (op & 0x20) {
8129 /* Unsigned 64-bit multiply */
8130 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8131 } else {
8132 if (op & 8) {
8133 /* smlalxy */
8134 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8135 tcg_temp_free_i32(tmp2);
8136 tmp64 = tcg_temp_new_i64();
8137 tcg_gen_ext_i32_i64(tmp64, tmp);
8138 tcg_temp_free_i32(tmp);
8139 } else {
8140 /* Signed 64-bit multiply */
8141 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8144 if (op & 4) {
8145 /* umaal */
8146 gen_addq_lo(s, tmp64, rs);
8147 gen_addq_lo(s, tmp64, rd);
8148 } else if (op & 0x40) {
8149 /* 64-bit accumulate. */
8150 gen_addq(s, tmp64, rs, rd);
8152 gen_storeq_reg(s, rs, rd, tmp64);
8153 tcg_temp_free_i64(tmp64);
8155 break;
8157 break;
8158 case 6: case 7: case 14: case 15:
8159 /* Coprocessor. */
8160 if (((insn >> 24) & 3) == 3) {
8161 /* Translate into the equivalent ARM encoding. */
8162 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8163 if (disas_neon_data_insn(env, s, insn))
8164 goto illegal_op;
8165 } else {
8166 if (insn & (1 << 28))
8167 goto illegal_op;
8168 if (disas_coproc_insn (env, s, insn))
8169 goto illegal_op;
8171 break;
8172 case 8: case 9: case 10: case 11:
8173 if (insn & (1 << 15)) {
8174 /* Branches, misc control. */
8175 if (insn & 0x5000) {
8176 /* Unconditional branch. */
8177 /* signextend(hw1[10:0]) -> offset[:12]. */
8178 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8179 /* hw1[10:0] -> offset[11:1]. */
8180 offset |= (insn & 0x7ff) << 1;
8181 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8182 offset[24:22] already have the same value because of the
8183 sign extension above. */
8184 offset ^= ((~insn) & (1 << 13)) << 10;
8185 offset ^= ((~insn) & (1 << 11)) << 11;
8187 if (insn & (1 << 14)) {
8188 /* Branch and link. */
8189 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8192 offset += s->pc;
8193 if (insn & (1 << 12)) {
8194 /* b/bl */
8195 gen_jmp(s, offset);
8196 } else {
8197 /* blx */
8198 offset &= ~(uint32_t)2;
8199 /* thumb2 bx, no need to check */
8200 gen_bx_im(s, offset);
8202 } else if (((insn >> 23) & 7) == 7) {
8203 /* Misc control */
8204 if (insn & (1 << 13))
8205 goto illegal_op;
8207 if (insn & (1 << 26)) {
8208 /* Secure monitor call (v6Z) */
8209 goto illegal_op; /* not implemented. */
8210 } else {
8211 op = (insn >> 20) & 7;
8212 switch (op) {
8213 case 0: /* msr cpsr. */
8214 if (IS_M(env)) {
8215 tmp = load_reg(s, rn);
8216 addr = tcg_const_i32(insn & 0xff);
8217 gen_helper_v7m_msr(cpu_env, addr, tmp);
8218 tcg_temp_free_i32(addr);
8219 tcg_temp_free_i32(tmp);
8220 gen_lookup_tb(s);
8221 break;
8223 /* fall through */
8224 case 1: /* msr spsr. */
8225 if (IS_M(env))
8226 goto illegal_op;
8227 tmp = load_reg(s, rn);
8228 if (gen_set_psr(s,
8229 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8230 op == 1, tmp))
8231 goto illegal_op;
8232 break;
8233 case 2: /* cps, nop-hint. */
8234 if (((insn >> 8) & 7) == 0) {
8235 gen_nop_hint(s, insn & 0xff);
8237 /* Implemented as NOP in user mode. */
8238 if (IS_USER(s))
8239 break;
8240 offset = 0;
8241 imm = 0;
8242 if (insn & (1 << 10)) {
8243 if (insn & (1 << 7))
8244 offset |= CPSR_A;
8245 if (insn & (1 << 6))
8246 offset |= CPSR_I;
8247 if (insn & (1 << 5))
8248 offset |= CPSR_F;
8249 if (insn & (1 << 9))
8250 imm = CPSR_A | CPSR_I | CPSR_F;
8252 if (insn & (1 << 8)) {
8253 offset |= 0x1f;
8254 imm |= (insn & 0x1f);
8256 if (offset) {
8257 gen_set_psr_im(s, offset, 0, imm);
8259 break;
8260 case 3: /* Special control operations. */
8261 ARCH(7);
8262 op = (insn >> 4) & 0xf;
8263 switch (op) {
8264 case 2: /* clrex */
8265 gen_clrex(s);
8266 break;
8267 case 4: /* dsb */
8268 case 5: /* dmb */
8269 case 6: /* isb */
8270 /* These execute as NOPs. */
8271 break;
8272 default:
8273 goto illegal_op;
8275 break;
8276 case 4: /* bxj */
8277 /* Trivial implementation equivalent to bx. */
8278 tmp = load_reg(s, rn);
8279 gen_bx(s, tmp);
8280 break;
8281 case 5: /* Exception return. */
8282 if (IS_USER(s)) {
8283 goto illegal_op;
8285 if (rn != 14 || rd != 15) {
8286 goto illegal_op;
8288 tmp = load_reg(s, rn);
8289 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8290 gen_exception_return(s, tmp);
8291 break;
8292 case 6: /* mrs cpsr. */
8293 tmp = tcg_temp_new_i32();
8294 if (IS_M(env)) {
8295 addr = tcg_const_i32(insn & 0xff);
8296 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8297 tcg_temp_free_i32(addr);
8298 } else {
8299 gen_helper_cpsr_read(tmp);
8301 store_reg(s, rd, tmp);
8302 break;
8303 case 7: /* mrs spsr. */
8304 /* Not accessible in user mode. */
8305 if (IS_USER(s) || IS_M(env))
8306 goto illegal_op;
8307 tmp = load_cpu_field(spsr);
8308 store_reg(s, rd, tmp);
8309 break;
8312 } else {
8313 /* Conditional branch. */
8314 op = (insn >> 22) & 0xf;
8315 /* Generate a conditional jump to next instruction. */
8316 s->condlabel = gen_new_label();
8317 gen_test_cc(op ^ 1, s->condlabel);
8318 s->condjmp = 1;
8320 /* offset[11:1] = insn[10:0] */
8321 offset = (insn & 0x7ff) << 1;
8322 /* offset[17:12] = insn[21:16]. */
8323 offset |= (insn & 0x003f0000) >> 4;
8324 /* offset[31:20] = insn[26]. */
8325 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8326 /* offset[18] = insn[13]. */
8327 offset |= (insn & (1 << 13)) << 5;
8328 /* offset[19] = insn[11]. */
8329 offset |= (insn & (1 << 11)) << 8;
8331 /* jump to the offset */
8332 gen_jmp(s, s->pc + offset);
8334 } else {
8335 /* Data processing immediate. */
8336 if (insn & (1 << 25)) {
8337 if (insn & (1 << 24)) {
8338 if (insn & (1 << 20))
8339 goto illegal_op;
8340 /* Bitfield/Saturate. */
8341 op = (insn >> 21) & 7;
8342 imm = insn & 0x1f;
8343 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8344 if (rn == 15) {
8345 tmp = tcg_temp_new_i32();
8346 tcg_gen_movi_i32(tmp, 0);
8347 } else {
8348 tmp = load_reg(s, rn);
8350 switch (op) {
8351 case 2: /* Signed bitfield extract. */
8352 imm++;
8353 if (shift + imm > 32)
8354 goto illegal_op;
8355 if (imm < 32)
8356 gen_sbfx(tmp, shift, imm);
8357 break;
8358 case 6: /* Unsigned bitfield extract. */
8359 imm++;
8360 if (shift + imm > 32)
8361 goto illegal_op;
8362 if (imm < 32)
8363 gen_ubfx(tmp, shift, (1u << imm) - 1);
8364 break;
8365 case 3: /* Bitfield insert/clear. */
8366 if (imm < shift)
8367 goto illegal_op;
8368 imm = imm + 1 - shift;
8369 if (imm != 32) {
8370 tmp2 = load_reg(s, rd);
8371 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8372 tcg_temp_free_i32(tmp2);
8374 break;
8375 case 7:
8376 goto illegal_op;
8377 default: /* Saturate. */
8378 if (shift) {
8379 if (op & 1)
8380 tcg_gen_sari_i32(tmp, tmp, shift);
8381 else
8382 tcg_gen_shli_i32(tmp, tmp, shift);
8384 tmp2 = tcg_const_i32(imm);
8385 if (op & 4) {
8386 /* Unsigned. */
8387 if ((op & 1) && shift == 0)
8388 gen_helper_usat16(tmp, tmp, tmp2);
8389 else
8390 gen_helper_usat(tmp, tmp, tmp2);
8391 } else {
8392 /* Signed. */
8393 if ((op & 1) && shift == 0)
8394 gen_helper_ssat16(tmp, tmp, tmp2);
8395 else
8396 gen_helper_ssat(tmp, tmp, tmp2);
8398 tcg_temp_free_i32(tmp2);
8399 break;
8401 store_reg(s, rd, tmp);
8402 } else {
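/* assemble the 12-bit immediate from the i:imm3:imm8 fields */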
8403 imm = ((insn & 0x04000000) >> 15)
8404 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8405 if (insn & (1 << 22)) {
8406 /* 16-bit immediate. */
8407 imm |= (insn >> 4) & 0xf000;
8408 if (insn & (1 << 23)) {
8409 /* movt */
8410 tmp = load_reg(s, rd);
8411 tcg_gen_ext16u_i32(tmp, tmp);
8412 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8413 } else {
8414 /* movw */
8415 tmp = tcg_temp_new_i32();
8416 tcg_gen_movi_i32(tmp, imm);
8418 } else {
8419 /* Add/sub 12-bit immediate. */
8420 if (rn == 15) {
8421 offset = s->pc & ~(uint32_t)3;
8422 if (insn & (1 << 23))
8423 offset -= imm;
8424 else
8425 offset += imm;
8426 tmp = tcg_temp_new_i32();
8427 tcg_gen_movi_i32(tmp, offset);
8428 } else {
8429 tmp = load_reg(s, rn);
8430 if (insn & (1 << 23))
8431 tcg_gen_subi_i32(tmp, tmp, imm);
8432 else
8433 tcg_gen_addi_i32(tmp, tmp, imm);
8436 store_reg(s, rd, tmp);
8438 } else {
8439 int shifter_out = 0;
8440 /* modified 12-bit immediate. */
8441 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8442 imm = (insn & 0xff);
8443 switch (shift) {
8444 case 0: /* XY */
8445 /* Nothing to do. */
8446 break;
8447 case 1: /* 00XY00XY */
8448 imm |= imm << 16;
8449 break;
8450 case 2: /* XY00XY00 */
8451 imm |= imm << 16;
8452 imm <<= 8;
8453 break;
8454 case 3: /* XYXYXYXY */
8455 imm |= imm << 16;
8456 imm |= imm << 8;
8457 break;
8458 default: /* Rotated constant. */
8459 shift = (shift << 1) | (imm >> 7);
8460 imm |= 0x80;
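/* shift is at least 8 here, so this left shift is equivalent to
   rotating the 8-bit constant right by 'shift' places */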
8461 imm = imm << (32 - shift);
8462 shifter_out = 1;
8463 break;
8465 tmp2 = tcg_temp_new_i32();
8466 tcg_gen_movi_i32(tmp2, imm);
8467 rn = (insn >> 16) & 0xf;
8468 if (rn == 15) {
8469 tmp = tcg_temp_new_i32();
8470 tcg_gen_movi_i32(tmp, 0);
8471 } else {
8472 tmp = load_reg(s, rn);
8474 op = (insn >> 21) & 0xf;
8475 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8476 shifter_out, tmp, tmp2))
8477 goto illegal_op;
8478 tcg_temp_free_i32(tmp2);
8479 rd = (insn >> 8) & 0xf;
8480 if (rd != 15) {
8481 store_reg(s, rd, tmp);
8482 } else {
8483 tcg_temp_free_i32(tmp);
8487 break;
8488 case 12: /* Load/store single data item. */
8490 int postinc = 0;
8491 int writeback = 0;
8492 int user;
8493 if ((insn & 0x01100000) == 0x01000000) {
8494 if (disas_neon_ls_insn(env, s, insn))
8495 goto illegal_op;
8496 break;
8498 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8499 if (rs == 15) {
8500 if (!(insn & (1 << 20))) {
8501 goto illegal_op;
8503 if (op != 2) {
8504 /* Byte or halfword load space with dest == r15: memory hints.
8505 * Catch them early so we don't emit pointless addressing code.
8506 * This space is a mix of:
8507 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8508 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8509 * cores)
8510 * unallocated hints, which must be treated as NOPs
8511 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8512 * which is easiest for the decoding logic
8513 * Some space which must UNDEF
8515 int op1 = (insn >> 23) & 3;
8516 int op2 = (insn >> 6) & 0x3f;
8517 if (op & 2) {
8518 goto illegal_op;
8520 if (rn == 15) {
8521 /* UNPREDICTABLE or unallocated hint */
8522 return 0;
8524 if (op1 & 1) {
8525 return 0; /* PLD* or unallocated hint */
8527 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8528 return 0; /* PLD* or unallocated hint */
8530 /* UNDEF space, or an UNPREDICTABLE */
8531 return 1;
8534 user = IS_USER(s);
8535 if (rn == 15) {
8536 addr = tcg_temp_new_i32();
8537 /* PC relative. */
8538 /* s->pc has already been incremented by 4. */
8539 imm = s->pc & 0xfffffffc;
8540 if (insn & (1 << 23))
8541 imm += insn & 0xfff;
8542 else
8543 imm -= insn & 0xfff;
8544 tcg_gen_movi_i32(addr, imm);
8545 } else {
8546 addr = load_reg(s, rn);
8547 if (insn & (1 << 23)) {
8548 /* Positive offset. */
8549 imm = insn & 0xfff;
8550 tcg_gen_addi_i32(addr, addr, imm);
8551 } else {
8552 imm = insn & 0xff;
8553 switch ((insn >> 8) & 0xf) {
8554 case 0x0: /* Shifted Register. */
8555 shift = (insn >> 4) & 0xf;
8556 if (shift > 3) {
8557 tcg_temp_free_i32(addr);
8558 goto illegal_op;
8560 tmp = load_reg(s, rm);
8561 if (shift)
8562 tcg_gen_shli_i32(tmp, tmp, shift);
8563 tcg_gen_add_i32(addr, addr, tmp);
8564 tcg_temp_free_i32(tmp);
8565 break;
8566 case 0xc: /* Negative offset. */
8567 tcg_gen_addi_i32(addr, addr, -imm);
8568 break;
8569 case 0xe: /* User privilege. */
8570 tcg_gen_addi_i32(addr, addr, imm);
8571 user = 1;
8572 break;
8573 case 0x9: /* Post-decrement. */
8574 imm = -imm;
8575 /* Fall through. */
8576 case 0xb: /* Post-increment. */
8577 postinc = 1;
8578 writeback = 1;
8579 break;
8580 case 0xd: /* Pre-decrement. */
8581 imm = -imm;
8582 /* Fall through. */
8583 case 0xf: /* Pre-increment. */
8584 tcg_gen_addi_i32(addr, addr, imm);
8585 writeback = 1;
8586 break;
8587 default:
8588 tcg_temp_free_i32(addr);
8589 goto illegal_op;
8593 if (insn & (1 << 20)) {
8594 /* Load. */
8595 switch (op) {
8596 case 0: tmp = gen_ld8u(addr, user); break;
8597 case 4: tmp = gen_ld8s(addr, user); break;
8598 case 1: tmp = gen_ld16u(addr, user); break;
8599 case 5: tmp = gen_ld16s(addr, user); break;
8600 case 2: tmp = gen_ld32(addr, user); break;
8601 default:
8602 tcg_temp_free_i32(addr);
8603 goto illegal_op;
8605 if (rs == 15) {
8606 gen_bx(s, tmp);
8607 } else {
8608 store_reg(s, rs, tmp);
8610 } else {
8611 /* Store. */
8612 tmp = load_reg(s, rs);
8613 switch (op) {
8614 case 0: gen_st8(tmp, addr, user); break;
8615 case 1: gen_st16(tmp, addr, user); break;
8616 case 2: gen_st32(tmp, addr, user); break;
8617 default:
8618 tcg_temp_free_i32(addr);
8619 goto illegal_op;
8622 if (postinc)
8623 tcg_gen_addi_i32(addr, addr, imm);
8624 if (writeback) {
8625 store_reg(s, rn, addr);
8626 } else {
8627 tcg_temp_free_i32(addr);
8630 break;
8631 default:
8632 goto illegal_op;
8634 return 0;
8635 illegal_op:
8636 return 1;
8639 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8641 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8642 int32_t offset;
8643 int i;
8644 TCGv tmp;
8645 TCGv tmp2;
8646 TCGv addr;
8648 if (s->condexec_mask) {
8649 cond = s->condexec_cond;
8650 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8651 s->condlabel = gen_new_label();
8652 gen_test_cc(cond ^ 1, s->condlabel);
8653 s->condjmp = 1;
8657 insn = lduw_code(s->pc);
8658 s->pc += 2;
8660 switch (insn >> 12) {
8661 case 0: case 1:
8663 rd = insn & 7;
8664 op = (insn >> 11) & 3;
8665 if (op == 3) {
8666 /* add/subtract */
8667 rn = (insn >> 3) & 7;
8668 tmp = load_reg(s, rn);
8669 if (insn & (1 << 10)) {
8670 /* immediate */
8671 tmp2 = tcg_temp_new_i32();
8672 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8673 } else {
8674 /* reg */
8675 rm = (insn >> 6) & 7;
8676 tmp2 = load_reg(s, rm);
8678 if (insn & (1 << 9)) {
8679 if (s->condexec_mask)
8680 tcg_gen_sub_i32(tmp, tmp, tmp2);
8681 else
8682 gen_helper_sub_cc(tmp, tmp, tmp2);
8683 } else {
8684 if (s->condexec_mask)
8685 tcg_gen_add_i32(tmp, tmp, tmp2);
8686 else
8687 gen_helper_add_cc(tmp, tmp, tmp2);
8689 tcg_temp_free_i32(tmp2);
8690 store_reg(s, rd, tmp);
8691 } else {
8692 /* shift immediate */
8693 rm = (insn >> 3) & 7;
8694 shift = (insn >> 6) & 0x1f;
8695 tmp = load_reg(s, rm);
8696 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8697 if (!s->condexec_mask)
8698 gen_logic_CC(tmp);
8699 store_reg(s, rd, tmp);
8701 break;
8702 case 2: case 3:
8703 /* arithmetic large immediate */
8704 op = (insn >> 11) & 3;
8705 rd = (insn >> 8) & 0x7;
8706 if (op == 0) { /* mov */
8707 tmp = tcg_temp_new_i32();
8708 tcg_gen_movi_i32(tmp, insn & 0xff);
8709 if (!s->condexec_mask)
8710 gen_logic_CC(tmp);
8711 store_reg(s, rd, tmp);
8712 } else {
8713 tmp = load_reg(s, rd);
8714 tmp2 = tcg_temp_new_i32();
8715 tcg_gen_movi_i32(tmp2, insn & 0xff);
8716 switch (op) {
8717 case 1: /* cmp */
8718 gen_helper_sub_cc(tmp, tmp, tmp2);
8719 tcg_temp_free_i32(tmp);
8720 tcg_temp_free_i32(tmp2);
8721 break;
8722 case 2: /* add */
8723 if (s->condexec_mask)
8724 tcg_gen_add_i32(tmp, tmp, tmp2);
8725 else
8726 gen_helper_add_cc(tmp, tmp, tmp2);
8727 tcg_temp_free_i32(tmp2);
8728 store_reg(s, rd, tmp);
8729 break;
8730 case 3: /* sub */
8731 if (s->condexec_mask)
8732 tcg_gen_sub_i32(tmp, tmp, tmp2);
8733 else
8734 gen_helper_sub_cc(tmp, tmp, tmp2);
8735 tcg_temp_free_i32(tmp2);
8736 store_reg(s, rd, tmp);
8737 break;
8740 break;
8741 case 4:
8742 if (insn & (1 << 11)) {
8743 rd = (insn >> 8) & 7;
8744 /* load pc-relative. Bit 1 of PC is ignored. */
8745 val = s->pc + 2 + ((insn & 0xff) * 4);
8746 val &= ~(uint32_t)2;
8747 addr = tcg_temp_new_i32();
8748 tcg_gen_movi_i32(addr, val);
8749 tmp = gen_ld32(addr, IS_USER(s));
8750 tcg_temp_free_i32(addr);
8751 store_reg(s, rd, tmp);
8752 break;
8754 if (insn & (1 << 10)) {
8755 /* data processing extended or blx */
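/* high-register forms: bit 3 of rd comes from the H1 bit (insn bit 7) */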
8756 rd = (insn & 7) | ((insn >> 4) & 8);
8757 rm = (insn >> 3) & 0xf;
8758 op = (insn >> 8) & 3;
8759 switch (op) {
8760 case 0: /* add */
8761 tmp = load_reg(s, rd);
8762 tmp2 = load_reg(s, rm);
8763 tcg_gen_add_i32(tmp, tmp, tmp2);
8764 tcg_temp_free_i32(tmp2);
8765 store_reg(s, rd, tmp);
8766 break;
8767 case 1: /* cmp */
8768 tmp = load_reg(s, rd);
8769 tmp2 = load_reg(s, rm);
8770 gen_helper_sub_cc(tmp, tmp, tmp2);
8771 tcg_temp_free_i32(tmp2);
8772 tcg_temp_free_i32(tmp);
8773 break;
8774 case 2: /* mov/cpy */
8775 tmp = load_reg(s, rm);
8776 store_reg(s, rd, tmp);
8777 break;
8778 case 3:/* branch [and link] exchange thumb register */
8779 tmp = load_reg(s, rm);
8780 if (insn & (1 << 7)) {
8781 ARCH(5);
8782 val = (uint32_t)s->pc | 1;
8783 tmp2 = tcg_temp_new_i32();
8784 tcg_gen_movi_i32(tmp2, val);
8785 store_reg(s, 14, tmp2);
8787 /* already thumb, no need to check */
8788 gen_bx(s, tmp);
8789 break;
8791 break;
8794 /* data processing register */
8795 rd = insn & 7;
8796 rm = (insn >> 3) & 7;
8797 op = (insn >> 6) & 0xf;
8798 if (op == 2 || op == 3 || op == 4 || op == 7) {
8799 /* the shift/rotate ops want the operands backwards */
8800 val = rm;
8801 rm = rd;
8802 rd = val;
8803 val = 1;
8804 } else {
8805 val = 0;
8808 if (op == 9) { /* neg */
8809 tmp = tcg_temp_new_i32();
8810 tcg_gen_movi_i32(tmp, 0);
8811 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8812 tmp = load_reg(s, rd);
8813 } else {
8814 TCGV_UNUSED(tmp);
8815 }
8817 tmp2 = load_reg(s, rm);
8818 switch (op) {
8819 case 0x0: /* and */
8820 tcg_gen_and_i32(tmp, tmp, tmp2);
8821 if (!s->condexec_mask)
8822 gen_logic_CC(tmp);
8823 break;
8824 case 0x1: /* eor */
8825 tcg_gen_xor_i32(tmp, tmp, tmp2);
8826 if (!s->condexec_mask)
8827 gen_logic_CC(tmp);
8828 break;
8829 case 0x2: /* lsl */
8830 if (s->condexec_mask) {
8831 gen_helper_shl(tmp2, tmp2, tmp);
8832 } else {
8833 gen_helper_shl_cc(tmp2, tmp2, tmp);
8834 gen_logic_CC(tmp2);
8835 }
8836 break;
8837 case 0x3: /* lsr */
8838 if (s->condexec_mask) {
8839 gen_helper_shr(tmp2, tmp2, tmp);
8840 } else {
8841 gen_helper_shr_cc(tmp2, tmp2, tmp);
8842 gen_logic_CC(tmp2);
8843 }
8844 break;
8845 case 0x4: /* asr */
8846 if (s->condexec_mask) {
8847 gen_helper_sar(tmp2, tmp2, tmp);
8848 } else {
8849 gen_helper_sar_cc(tmp2, tmp2, tmp);
8850 gen_logic_CC(tmp2);
8851 }
8852 break;
8853 case 0x5: /* adc */
8854 if (s->condexec_mask)
8855 gen_adc(tmp, tmp2);
8856 else
8857 gen_helper_adc_cc(tmp, tmp, tmp2);
8858 break;
8859 case 0x6: /* sbc */
8860 if (s->condexec_mask)
8861 gen_sub_carry(tmp, tmp, tmp2);
8862 else
8863 gen_helper_sbc_cc(tmp, tmp, tmp2);
8864 break;
8865 case 0x7: /* ror */
8866 if (s->condexec_mask) {
8867 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8868 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8869 } else {
8870 gen_helper_ror_cc(tmp2, tmp2, tmp);
8871 gen_logic_CC(tmp2);
8872 }
8873 break;
8874 case 0x8: /* tst */
8875 tcg_gen_and_i32(tmp, tmp, tmp2);
8876 gen_logic_CC(tmp);
8877 rd = 16;
8878 break;
8879 case 0x9: /* neg */
8880 if (s->condexec_mask)
8881 tcg_gen_neg_i32(tmp, tmp2);
8882 else
8883 gen_helper_sub_cc(tmp, tmp, tmp2);
8884 break;
8885 case 0xa: /* cmp */
8886 gen_helper_sub_cc(tmp, tmp, tmp2);
8887 rd = 16;
8888 break;
8889 case 0xb: /* cmn */
8890 gen_helper_add_cc(tmp, tmp, tmp2);
8891 rd = 16;
8892 break;
8893 case 0xc: /* orr */
8894 tcg_gen_or_i32(tmp, tmp, tmp2);
8895 if (!s->condexec_mask)
8896 gen_logic_CC(tmp);
8897 break;
8898 case 0xd: /* mul */
8899 tcg_gen_mul_i32(tmp, tmp, tmp2);
8900 if (!s->condexec_mask)
8901 gen_logic_CC(tmp);
8902 break;
8903 case 0xe: /* bic */
8904 tcg_gen_andc_i32(tmp, tmp, tmp2);
8905 if (!s->condexec_mask)
8906 gen_logic_CC(tmp);
8907 break;
8908 case 0xf: /* mvn */
8909 tcg_gen_not_i32(tmp2, tmp2);
8910 if (!s->condexec_mask)
8911 gen_logic_CC(tmp2);
8912 val = 1;
8913 rm = rd;
8914 break;
8915 }
8916 if (rd != 16) {
8917 if (val) {
8918 store_reg(s, rm, tmp2);
8919 if (op != 0xf)
8920 tcg_temp_free_i32(tmp);
8921 } else {
8922 store_reg(s, rd, tmp);
8923 tcg_temp_free_i32(tmp2);
8924 }
8925 } else {
8926 tcg_temp_free_i32(tmp);
8927 tcg_temp_free_i32(tmp2);
8928 }
8929 break;
8931 case 5:
8932 /* load/store register offset. */
8933 rd = insn & 7;
8934 rn = (insn >> 3) & 7;
8935 rm = (insn >> 6) & 7;
8936 op = (insn >> 9) & 7;
8937 addr = load_reg(s, rn);
8938 tmp = load_reg(s, rm);
8939 tcg_gen_add_i32(addr, addr, tmp);
8940 tcg_temp_free_i32(tmp);
8942 if (op < 3) /* store */
8943 tmp = load_reg(s, rd);
8945 switch (op) {
8946 case 0: /* str */
8947 gen_st32(tmp, addr, IS_USER(s));
8948 break;
8949 case 1: /* strh */
8950 gen_st16(tmp, addr, IS_USER(s));
8951 break;
8952 case 2: /* strb */
8953 gen_st8(tmp, addr, IS_USER(s));
8954 break;
8955 case 3: /* ldrsb */
8956 tmp = gen_ld8s(addr, IS_USER(s));
8957 break;
8958 case 4: /* ldr */
8959 tmp = gen_ld32(addr, IS_USER(s));
8960 break;
8961 case 5: /* ldrh */
8962 tmp = gen_ld16u(addr, IS_USER(s));
8963 break;
8964 case 6: /* ldrb */
8965 tmp = gen_ld8u(addr, IS_USER(s));
8966 break;
8967 case 7: /* ldrsh */
8968 tmp = gen_ld16s(addr, IS_USER(s));
8969 break;
8970 }
8971 if (op >= 3) /* load */
8972 store_reg(s, rd, tmp);
8973 tcg_temp_free_i32(addr);
8974 break;
8976 case 6:
8977 /* load/store word immediate offset */
8978 rd = insn & 7;
8979 rn = (insn >> 3) & 7;
8980 addr = load_reg(s, rn);
8981 val = (insn >> 4) & 0x7c;
8982 tcg_gen_addi_i32(addr, addr, val);
8984 if (insn & (1 << 11)) {
8985 /* load */
8986 tmp = gen_ld32(addr, IS_USER(s));
8987 store_reg(s, rd, tmp);
8988 } else {
8989 /* store */
8990 tmp = load_reg(s, rd);
8991 gen_st32(tmp, addr, IS_USER(s));
8992 }
8993 tcg_temp_free_i32(addr);
8994 break;
8996 case 7:
8997 /* load/store byte immediate offset */
8998 rd = insn & 7;
8999 rn = (insn >> 3) & 7;
9000 addr = load_reg(s, rn);
9001 val = (insn >> 6) & 0x1f;
9002 tcg_gen_addi_i32(addr, addr, val);
9004 if (insn & (1 << 11)) {
9005 /* load */
9006 tmp = gen_ld8u(addr, IS_USER(s));
9007 store_reg(s, rd, tmp);
9008 } else {
9009 /* store */
9010 tmp = load_reg(s, rd);
9011 gen_st8(tmp, addr, IS_USER(s));
9012 }
9013 tcg_temp_free_i32(addr);
9014 break;
9016 case 8:
9017 /* load/store halfword immediate offset */
9018 rd = insn & 7;
9019 rn = (insn >> 3) & 7;
9020 addr = load_reg(s, rn);
9021 val = (insn >> 5) & 0x3e;
9022 tcg_gen_addi_i32(addr, addr, val);
9024 if (insn & (1 << 11)) {
9025 /* load */
9026 tmp = gen_ld16u(addr, IS_USER(s));
9027 store_reg(s, rd, tmp);
9028 } else {
9029 /* store */
9030 tmp = load_reg(s, rd);
9031 gen_st16(tmp, addr, IS_USER(s));
9032 }
9033 tcg_temp_free_i32(addr);
9034 break;
9036 case 9:
9037 /* load/store from stack */
9038 rd = (insn >> 8) & 7;
9039 addr = load_reg(s, 13);
9040 val = (insn & 0xff) * 4;
9041 tcg_gen_addi_i32(addr, addr, val);
9043 if (insn & (1 << 11)) {
9044 /* load */
9045 tmp = gen_ld32(addr, IS_USER(s));
9046 store_reg(s, rd, tmp);
9047 } else {
9048 /* store */
9049 tmp = load_reg(s, rd);
9050 gen_st32(tmp, addr, IS_USER(s));
9051 }
9052 tcg_temp_free_i32(addr);
9053 break;
9055 case 10:
9056 /* add to high reg */
9057 rd = (insn >> 8) & 7;
9058 if (insn & (1 << 11)) {
9059 /* SP */
9060 tmp = load_reg(s, 13);
9061 } else {
9062 /* PC. bit 1 is ignored. */
9063 tmp = tcg_temp_new_i32();
9064 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9065 }
9066 val = (insn & 0xff) * 4;
9067 tcg_gen_addi_i32(tmp, tmp, val);
9068 store_reg(s, rd, tmp);
9069 break;
9071 case 11:
9072 /* misc */
9073 op = (insn >> 8) & 0xf;
9074 switch (op) {
9075 case 0:
9076 /* adjust stack pointer */
9077 tmp = load_reg(s, 13);
9078 val = (insn & 0x7f) * 4;
9079 if (insn & (1 << 7))
9080 val = -(int32_t)val;
9081 tcg_gen_addi_i32(tmp, tmp, val);
9082 store_reg(s, 13, tmp);
9083 break;
9085 case 2: /* sign/zero extend. */
9086 ARCH(6);
9087 rd = insn & 7;
9088 rm = (insn >> 3) & 7;
9089 tmp = load_reg(s, rm);
9090 switch ((insn >> 6) & 3) {
9091 case 0: gen_sxth(tmp); break;
9092 case 1: gen_sxtb(tmp); break;
9093 case 2: gen_uxth(tmp); break;
9094 case 3: gen_uxtb(tmp); break;
9095 }
9096 store_reg(s, rd, tmp);
9097 break;
9098 case 4: case 5: case 0xc: case 0xd:
9099 /* push/pop */
9100 addr = load_reg(s, 13);
9101 if (insn & (1 << 8))
9102 offset = 4;
9103 else
9104 offset = 0;
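/* First pass: total up the transfer size (the extra 4 for lr/pc was added
   above) so that a push can pre-decrement the address by the full amount. */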
9105 for (i = 0; i < 8; i++) {
9106 if (insn & (1 << i))
9107 offset += 4;
9108 }
9109 if ((insn & (1 << 11)) == 0) {
9110 tcg_gen_addi_i32(addr, addr, -offset);
9111 }
9112 for (i = 0; i < 8; i++) {
9113 if (insn & (1 << i)) {
9114 if (insn & (1 << 11)) {
9115 /* pop */
9116 tmp = gen_ld32(addr, IS_USER(s));
9117 store_reg(s, i, tmp);
9118 } else {
9119 /* push */
9120 tmp = load_reg(s, i);
9121 gen_st32(tmp, addr, IS_USER(s));
9122 }
9123 /* advance to the next address. */
9124 tcg_gen_addi_i32(addr, addr, 4);
9125 }
9126 }
9127 TCGV_UNUSED(tmp);
9128 if (insn & (1 << 8)) {
9129 if (insn & (1 << 11)) {
9130 /* pop pc */
9131 tmp = gen_ld32(addr, IS_USER(s));
9132 /* don't set the pc until the rest of the instruction
9133 has completed */
9134 } else {
9135 /* push lr */
9136 tmp = load_reg(s, 14);
9137 gen_st32(tmp, addr, IS_USER(s));
9138 }
9139 tcg_gen_addi_i32(addr, addr, 4);
9140 }
9141 if ((insn & (1 << 11)) == 0) {
9142 tcg_gen_addi_i32(addr, addr, -offset);
9143 }
9144 /* write back the new stack pointer */
9145 store_reg(s, 13, addr);
9146 /* set the new PC value */
9147 if ((insn & 0x0900) == 0x0900) {
9148 store_reg_from_load(env, s, 15, tmp);
9149 }
9150 break;
9152 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9153 rm = insn & 7;
9154 tmp = load_reg(s, rm);
9155 s->condlabel = gen_new_label();
9156 s->condjmp = 1;
9157 if (insn & (1 << 11))
9158 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9159 else
9160 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9161 tcg_temp_free_i32(tmp);
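/* The branch offset is imm5 (bits [7:3]) scaled by 2, with bit [9] supplying
   bit 6; the base is this insn's address + 4 (s->pc + 2 below). */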
9162 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9163 val = (uint32_t)s->pc + 2;
9164 val += offset;
9165 gen_jmp(s, val);
9166 break;
9168 case 15: /* IT, nop-hint. */
9169 if ((insn & 0xf) == 0) {
9170 gen_nop_hint(s, (insn >> 4) & 0xf);
9171 break;
9172 }
9173 /* If Then. */
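/* firstcond is insn[7:4]: its top three bits go into condexec_cond, and its
   low bit is kept together with the 4-bit mask in condexec_mask. */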
9174 s->condexec_cond = (insn >> 4) & 0xe;
9175 s->condexec_mask = insn & 0x1f;
9176 /* No actual code generated for this insn, just set up state. */
9177 break;
9179 case 0xe: /* bkpt */
9180 ARCH(5);
9181 gen_exception_insn(s, 2, EXCP_BKPT);
9182 break;
9184 case 0xa: /* rev */
9185 ARCH(6);
9186 rn = (insn >> 3) & 0x7;
9187 rd = insn & 0x7;
9188 tmp = load_reg(s, rn);
9189 switch ((insn >> 6) & 3) {
9190 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9191 case 1: gen_rev16(tmp); break;
9192 case 3: gen_revsh(tmp); break;
9193 default: goto illegal_op;
9194 }
9195 store_reg(s, rd, tmp);
9196 break;
9198 case 6: /* cps */
9199 ARCH(6);
9200 if (IS_USER(s))
9201 break;
9202 if (IS_M(env)) {
9203 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9204 /* PRIMASK */
9205 if (insn & 1) {
9206 addr = tcg_const_i32(16);
9207 gen_helper_v7m_msr(cpu_env, addr, tmp);
9208 tcg_temp_free_i32(addr);
9209 }
9210 /* FAULTMASK */
9211 if (insn & 2) {
9212 addr = tcg_const_i32(17);
9213 gen_helper_v7m_msr(cpu_env, addr, tmp);
9214 tcg_temp_free_i32(addr);
9215 }
9216 tcg_temp_free_i32(tmp);
9217 gen_lookup_tb(s);
9218 } else {
9219 if (insn & (1 << 4))
9220 shift = CPSR_A | CPSR_I | CPSR_F;
9221 else
9222 shift = 0;
9223 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9224 }
9225 break;
9227 default:
9228 goto undef;
9229 }
9230 break;
9232 case 12:
9233 /* load/store multiple */
9234 rn = (insn >> 8) & 0x7;
9235 addr = load_reg(s, rn);
9236 for (i = 0; i < 8; i++) {
9237 if (insn & (1 << i)) {
9238 if (insn & (1 << 11)) {
9239 /* load */
9240 tmp = gen_ld32(addr, IS_USER(s));
9241 store_reg(s, i, tmp);
9242 } else {
9243 /* store */
9244 tmp = load_reg(s, i);
9245 gen_st32(tmp, addr, IS_USER(s));
9246 }
9247 /* advance to the next address */
9248 tcg_gen_addi_i32(addr, addr, 4);
9249 }
9250 }
9251 /* Base register writeback. */
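/* Writeback is skipped when the base register is also in the transfer list,
   so a value just loaded into it is not overwritten. */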
9252 if ((insn & (1 << rn)) == 0) {
9253 store_reg(s, rn, addr);
9254 } else {
9255 tcg_temp_free_i32(addr);
9256 }
9257 break;
9259 case 13:
9260 /* conditional branch or swi */
9261 cond = (insn >> 8) & 0xf;
9262 if (cond == 0xe)
9263 goto undef;
9265 if (cond == 0xf) {
9266 /* swi */
9267 gen_set_pc_im(s->pc);
9268 s->is_jmp = DISAS_SWI;
9269 break;
9270 }
9271 /* generate a conditional jump to next instruction */
9272 s->condlabel = gen_new_label();
9273 gen_test_cc(cond ^ 1, s->condlabel);
9274 s->condjmp = 1;
9276 /* jump to the offset */
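/* imm8 is sign-extended (the shift pair below) and scaled by 2; s->pc + 2
   is the architectural PC, i.e. this insn's address + 4. */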
9277 val = (uint32_t)s->pc + 2;
9278 offset = ((int32_t)insn << 24) >> 24;
9279 val += offset << 1;
9280 gen_jmp(s, val);
9281 break;
9283 case 14:
9284 if (insn & (1 << 11)) {
9285 if (disas_thumb2_insn(env, s, insn))
9286 goto undef32;
9287 break;
9288 }
9289 /* unconditional branch */
9290 val = (uint32_t)s->pc;
9291 offset = ((int32_t)insn << 21) >> 21;
9292 val += (offset << 1) + 2;
9293 gen_jmp(s, val);
9294 break;
9296 case 15:
9297 if (disas_thumb2_insn(env, s, insn))
9298 goto undef32;
9299 break;
9300 }
9301 return;
9302 undef32:
9303 gen_exception_insn(s, 4, EXCP_UDEF);
9304 return;
9305 illegal_op:
9306 undef:
9307 gen_exception_insn(s, 2, EXCP_UDEF);
9308 }
9310 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9311 basic block 'tb'. If search_pc is TRUE, also generate PC
9312 information for each intermediate instruction. */
9313 static inline void gen_intermediate_code_internal(CPUState *env,
9314 TranslationBlock *tb,
9315 int search_pc)
9316 {
9317 DisasContext dc1, *dc = &dc1;
9318 CPUBreakpoint *bp;
9319 uint16_t *gen_opc_end;
9320 int j, lj;
9321 target_ulong pc_start;
9322 uint32_t next_page_start;
9323 int num_insns;
9324 int max_insns;
9326 /* generate intermediate code */
9327 pc_start = tb->pc;
9329 dc->tb = tb;
9331 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9333 dc->is_jmp = DISAS_NEXT;
9334 dc->pc = pc_start;
9335 dc->singlestep_enabled = env->singlestep_enabled;
9336 dc->condjmp = 0;
9337 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9338 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9339 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
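/* ARM_TBFLAG_CONDEXEC packs the IT state as condition bits above a 4-bit
   mask; keep the two halves split in the DisasContext, with the mask
   pre-shifted to match the per-insn advance done in the translation loop. */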
9340 #if !defined(CONFIG_USER_ONLY)
9341 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9342 #endif
9343 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9344 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9345 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9346 cpu_F0s = tcg_temp_new_i32();
9347 cpu_F1s = tcg_temp_new_i32();
9348 cpu_F0d = tcg_temp_new_i64();
9349 cpu_F1d = tcg_temp_new_i64();
9350 cpu_V0 = cpu_F0d;
9351 cpu_V1 = cpu_F1d;
9352 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9353 cpu_M0 = tcg_temp_new_i64();
9354 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9355 lj = -1;
9356 num_insns = 0;
9357 max_insns = tb->cflags & CF_COUNT_MASK;
9358 if (max_insns == 0)
9359 max_insns = CF_COUNT_MASK;
9361 gen_icount_start();
9363 tcg_clear_temp_count();
9365 /* A note on handling of the condexec (IT) bits:
9367 * We want to avoid the overhead of having to write the updated condexec
9368 * bits back to the CPUState for every instruction in an IT block. So:
9369 * (1) if the condexec bits are not already zero then we write
9370 * zero back into the CPUState now. This avoids complications trying
9371 * to do it at the end of the block. (For example if we don't do this
9372 * it's hard to identify whether we can safely skip writing condexec
9373 * at the end of the TB, which we definitely want to do for the case
9374 * where a TB doesn't do anything with the IT state at all.)
9375 * (2) if we are going to leave the TB then we call gen_set_condexec()
9376 * which will write the correct value into CPUState if zero is wrong.
9377 * This is done both for leaving the TB at the end, and for leaving
9378 * it because of an exception we know will happen, which is done in
9379 * gen_exception_insn(). The latter is necessary because we need to
9380 * leave the TB with the PC/IT state just prior to execution of the
9381 * instruction which caused the exception.
9382 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9383 * then the CPUState will be wrong and we need to reset it.
9384 * This is handled in the same way as restoration of the
9385 * PC in these situations: we will be called again with search_pc=1
9386 * and generate a mapping of the condexec bits for each PC in
9387 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9388 * the condexec bits.
9390 * Note that there are no instructions which can read the condexec
9391 * bits, and none which can write non-static values to them, so
9392 * we don't need to care about whether CPUState is correct in the
9393 * middle of a TB.
9394 */
9396 /* Reset the conditional execution bits immediately. This avoids
9397 complications trying to do it at the end of the block. */
9398 if (dc->condexec_mask || dc->condexec_cond)
9399 {
9400 TCGv tmp = tcg_temp_new_i32();
9401 tcg_gen_movi_i32(tmp, 0);
9402 store_cpu_field(tmp, condexec_bits);
9403 }
9404 do {
9405 #ifdef CONFIG_USER_ONLY
9406 /* Intercept jump to the magic kernel page. */
9407 if (dc->pc >= 0xffff0000) {
9408 /* We always get here via a jump, so we know we are not in a
9409 conditional execution block. */
9410 gen_exception(EXCP_KERNEL_TRAP);
9411 dc->is_jmp = DISAS_UPDATE;
9412 break;
9413 }
9414 #else
9415 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9416 /* We always get here via a jump, so we know we are not in a
9417 conditional execution block. */
9418 gen_exception(EXCP_EXCEPTION_EXIT);
9419 dc->is_jmp = DISAS_UPDATE;
9420 break;
9421 }
9422 #endif
9424 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9425 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9426 if (bp->pc == dc->pc) {
9427 gen_exception_insn(dc, 0, EXCP_DEBUG);
9428 /* Advance PC so that clearing the breakpoint will
9429 invalidate this TB. */
9430 dc->pc += 2;
9431 goto done_generating;
9432 break;
9433 }
9434 }
9435 }
9436 if (search_pc) {
9437 j = gen_opc_ptr - gen_opc_buf;
9438 if (lj < j) {
9439 lj++;
9440 while (lj < j)
9441 gen_opc_instr_start[lj++] = 0;
9442 }
9443 gen_opc_pc[lj] = dc->pc;
9444 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
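/* Record the IT state in the same packed format as the TB flags so that
   gen_pc_load() can restore env->condexec_bits after a fault. */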
9445 gen_opc_instr_start[lj] = 1;
9446 gen_opc_icount[lj] = num_insns;
9447 }
9449 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9450 gen_io_start();
9452 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9453 tcg_gen_debug_insn_start(dc->pc);
9454 }
9456 if (dc->thumb) {
9457 disas_thumb_insn(env, dc);
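/* Advance the IT state: shift the next condition-modifier bit out of the
   mask into the low bit of the condition, and clear the condition when the
   mask runs out (end of the IT block). */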
9458 if (dc->condexec_mask) {
9459 dc->condexec_cond = (dc->condexec_cond & 0xe)
9460 | ((dc->condexec_mask >> 4) & 1);
9461 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9462 if (dc->condexec_mask == 0) {
9463 dc->condexec_cond = 0;
9464 }
9465 }
9466 } else {
9467 disas_arm_insn(env, dc);
9468 }
9470 if (dc->condjmp && !dc->is_jmp) {
9471 gen_set_label(dc->condlabel);
9472 dc->condjmp = 0;
9473 }
9475 if (tcg_check_temp_count()) {
9476 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9477 }
9479 /* Translation stops when a conditional branch is encountered.
9480 * Otherwise the subsequent code could get translated several times.
9481 * Also stop translation when a page boundary is reached. This
9482 * ensures prefetch aborts occur at the right place. */
9483 num_insns ++;
9484 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9485 !env->singlestep_enabled &&
9486 !singlestep &&
9487 dc->pc < next_page_start &&
9488 num_insns < max_insns);
9490 if (tb->cflags & CF_LAST_IO) {
9491 if (dc->condjmp) {
9492 /* FIXME: This can theoretically happen with self-modifying
9493 code. */
9494 cpu_abort(env, "IO on conditional branch instruction");
9495 }
9496 gen_io_end();
9497 }
9499 /* At this stage dc->condjmp will only be set when the skipped
9500 instruction was a conditional branch or trap, and the PC has
9501 already been written. */
9502 if (unlikely(env->singlestep_enabled)) {
9503 /* Make sure the pc is updated, and raise a debug exception. */
9504 if (dc->condjmp) {
9505 gen_set_condexec(dc);
9506 if (dc->is_jmp == DISAS_SWI) {
9507 gen_exception(EXCP_SWI);
9508 } else {
9509 gen_exception(EXCP_DEBUG);
9510 }
9511 gen_set_label(dc->condlabel);
9512 }
9513 if (dc->condjmp || !dc->is_jmp) {
9514 gen_set_pc_im(dc->pc);
9515 dc->condjmp = 0;
9516 }
9517 gen_set_condexec(dc);
9518 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9519 gen_exception(EXCP_SWI);
9520 } else {
9521 /* FIXME: Single stepping a WFI insn will not halt
9522 the CPU. */
9523 gen_exception(EXCP_DEBUG);
9524 }
9525 } else {
9526 /* While branches must always occur at the end of an IT block,
9527 there are a few other things that can cause us to terminate
9528 the TB in the middle of an IT block:
9529 - Exception generating instructions (bkpt, swi, undefined).
9530 - Page boundaries.
9531 - Hardware watchpoints.
9532 Hardware breakpoints have already been handled and skip this code.
9533 */
9534 gen_set_condexec(dc);
9535 switch(dc->is_jmp) {
9536 case DISAS_NEXT:
9537 gen_goto_tb(dc, 1, dc->pc);
9538 break;
9539 default:
9540 case DISAS_JUMP:
9541 case DISAS_UPDATE:
9542 /* indicate that the hash table must be used to find the next TB */
9543 tcg_gen_exit_tb(0);
9544 break;
9545 case DISAS_TB_JUMP:
9546 /* nothing more to generate */
9547 break;
9548 case DISAS_WFI:
9549 gen_helper_wfi();
9550 break;
9551 case DISAS_SWI:
9552 gen_exception(EXCP_SWI);
9553 break;
9554 }
9555 if (dc->condjmp) {
9556 gen_set_label(dc->condlabel);
9557 gen_set_condexec(dc);
9558 gen_goto_tb(dc, 1, dc->pc);
9559 dc->condjmp = 0;
9560 }
9561 }
9563 done_generating:
9564 gen_icount_end(tb, num_insns);
9565 *gen_opc_ptr = INDEX_op_end;
9567 #ifdef DEBUG_DISAS
9568 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9569 qemu_log("----------------\n");
9570 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9571 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9572 qemu_log("\n");
9573 }
9574 #endif
9575 if (search_pc) {
9576 j = gen_opc_ptr - gen_opc_buf;
9577 lj++;
9578 while (lj <= j)
9579 gen_opc_instr_start[lj++] = 0;
9580 } else {
9581 tb->size = dc->pc - pc_start;
9582 tb->icount = num_insns;
9583 }
9584 }
9586 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9587 {
9588 gen_intermediate_code_internal(env, tb, 0);
9589 }
9591 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9592 {
9593 gen_intermediate_code_internal(env, tb, 1);
9594 }
9596 static const char *cpu_mode_names[16] = {
9597 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9598 "???", "???", "???", "und", "???", "???", "???", "sys"
9601 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9602 int flags)
9604 int i;
9605 #if 0
9606 union {
9607 uint32_t i;
9608 float s;
9609 } s0, s1;
9610 CPU_DoubleU d;
9611 /* ??? This assumes float64 and double have the same layout.
9612 Oh well, it's only debug dumps. */
9613 union {
9614 float64 f64;
9615 double d;
9616 } d0;
9617 #endif
9618 uint32_t psr;
9620 for(i=0;i<16;i++) {
9621 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9622 if ((i % 4) == 3)
9623 cpu_fprintf(f, "\n");
9624 else
9625 cpu_fprintf(f, " ");
9626 }
9627 psr = cpsr_read(env);
9628 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9629 psr,
9630 psr & (1 << 31) ? 'N' : '-',
9631 psr & (1 << 30) ? 'Z' : '-',
9632 psr & (1 << 29) ? 'C' : '-',
9633 psr & (1 << 28) ? 'V' : '-',
9634 psr & CPSR_T ? 'T' : 'A',
9635 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9637 #if 0
9638 for (i = 0; i < 16; i++) {
9639 d.d = env->vfp.regs[i];
9640 s0.i = d.l.lower;
9641 s1.i = d.l.upper;
9642 d0.f64 = d.d;
9643 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9644 i * 2, (int)s0.i, s0.s,
9645 i * 2 + 1, (int)s1.i, s1.s,
9646 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9647 d0.d);
9648 }
9649 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9650 #endif
9651 }
9653 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9654 unsigned long searched_pc, int pc_pos, void *puc)
9655 {
9656 env->regs[15] = gen_opc_pc[pc_pos];
9657 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9658 }