arm: basic support for ARMv4/ARMv4T emulation
target-arm/translate.c
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
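
/* A decoder arm can gate itself on an architecture level with ARCH().
 * Illustrative sketch (not a line from this section):
 *
 *     ARCH(5TE);   -- jumps to the decode function's illegal_op label
 *                     when the core lacks v5TE features
 *
 * so each ENABLE_ARCH_* test above doubles as an UNDEF check.  */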
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;
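
/* Sketch of how the condjmp/condlabel pair is used (inferred from the
 * field comments above; the emitting code lives later in this file):
 * on a conditionally executed instruction the translator allocates a
 * fresh label in condlabel, emits a branch to it that is taken when
 * the condition fails, sets condjmp, translates the instruction body
 * unconditionally, and binds the label after the body.  */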
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}
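
/* Note: "helpers.h" is deliberately included more than once.  With
 * GEN_HELPER == 1 (near the top of this file) it is expected to expand
 * each DEF_HELPER into the gen_helper_* wrapper used throughout this
 * file; with GEN_HELPER == 2 (just above, inside arm_translate_init)
 * it is expected to expand into per-helper registration statements,
 * which is why that include sits inside the function body.  This
 * description of the GEN_HELPER protocol is an assumption based on
 * QEMU's def-helper machinery, not on anything in this section.  */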
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
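
/* Reading r15 through load_reg_var() yields the architectural PC, i.e.
 * the current instruction's address plus 8 in ARM state or plus 4 in
 * Thumb state.  Since s->pc already points past the current
 * instruction, only one more instruction width is added above.  */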
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
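
/* Temporaries follow a simple ownership convention: load_reg() returns
 * a fresh temporary that the caller must eventually free (usually by
 * handing it to a store function), while store_reg() consumes and
 * frees its source.  Writes to r15 clear bit 0 and end the TB with
 * DISAS_JUMP.  */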
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new_i32();
    TCGv tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
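
/* The sign extension above uses the identity
 *     sext(x) = (x ^ signbit) - signbit
 * for an unsigned field x of the given width: the XOR flips the sign
 * bit, and the subtraction propagates it through the upper bits.  */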
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_temp_free_i32(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_temp_free_i32(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
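
/* Clearing bit 15 of both addends keeps the low halfword's addition
 * from carrying into the high halfword; tmp holds the XOR of the two
 * original bit-15 values, and the final XOR folds that back into bit
 * 15 of the sum without generating a carry across the halfword
 * boundary.  */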
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_temp_free_i32(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    tcg_temp_free_i32(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = tcg_temp_new_i32();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    tcg_temp_free_i32(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
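
/* shift == 0 is special in the immediate forms decoded above: LSR #0
 * and ASR #0 encode LSR #32 and ASR #32 respectively, and ROR #0
 * encodes RRX (rotate right with extend through the carry flag), which
 * is why the ROR arm falls back to the CF-merging sequence.  */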
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    tcg_temp_free_i32(tmp);
}
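
/* Flag storage convention relied on above (and by gen_logic_CC): N is
 * bit 31 of env->NF, Z is "env->ZF == 0", C is env->CF holding 0 or 1,
 * and V is bit 31 of env->VF.  That is why "eq" branches when ZF
 * compares equal to zero and "mi"/"vs" use signed comparisons against
 * zero.  */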
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
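
/* Indexed by the ARM data-processing opcode field (insn bits 24:21); a
 * 1 marks the logical operations, whose S-bit forms set N and Z from
 * the result via gen_logic_CC rather than from an adder.  Entries 8
 * and 9 ("andl"/"xorl") correspond to TST and TEQ.  */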
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUState *env, DisasContext *s,
                                       int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
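
/* On v5T and above a load into the PC behaves like BX: bit 0 of the
 * loaded value selects the new Thumb state.  store_reg_from_load()
 * implements exactly those LoadWritePC() cases; plain store_reg() is
 * kept for earlier architectures, where the loaded value is simply a
 * new ARM-state PC.  */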
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
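
/* In both helpers above, bit 23 of the instruction (the U bit) selects
 * adding versus subtracting the offset.  Word/byte forms use bit 25 to
 * pick the immediate or shifted-register offset, while the halfword
 * forms use bit 22 and split their 8-bit immediate across insn[11:8]
 * and insn[3:0].  */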
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)
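
/* Bit 20 of a coprocessor instruction is its load/direction bit, so
 * "insn & ARM_CP_RW_BIT" below distinguishes the register-read forms
 * (TMRRC, WLDR...) from the register-write forms (TMCRR, WSTR...).  */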
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_SIZE(name) \
IWMMXT_OP(name##b) \
IWMMXT_OP(name##w) \
IWMMXT_OP(name##l)

#define IWMMXT_OP_1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_SIZE(unpackl)
IWMMXT_OP_SIZE(unpackh)

IWMMXT_OP_1(unpacklub)
IWMMXT_OP_1(unpackluw)
IWMMXT_OP_1(unpacklul)
IWMMXT_OP_1(unpackhub)
IWMMXT_OP_1(unpackhuw)
IWMMXT_OP_1(unpackhul)
IWMMXT_OP_1(unpacklsb)
IWMMXT_OP_1(unpacklsw)
IWMMXT_OP_1(unpacklsl)
IWMMXT_OP_1(unpackhsb)
IWMMXT_OP_1(unpackhsw)
IWMMXT_OP_1(unpackhsl)

IWMMXT_OP_SIZE(cmpeq)
IWMMXT_OP_SIZE(cmpgtu)
IWMMXT_OP_SIZE(cmpgts)

IWMMXT_OP_SIZE(mins)
IWMMXT_OP_SIZE(minu)
IWMMXT_OP_SIZE(maxs)
IWMMXT_OP_SIZE(maxu)

IWMMXT_OP_SIZE(subn)
IWMMXT_OP_SIZE(addn)
IWMMXT_OP_SIZE(subu)
IWMMXT_OP_SIZE(addu)
IWMMXT_OP_SIZE(subs)
IWMMXT_OP_SIZE(adds)

IWMMXT_OP(avgb0)
IWMMXT_OP(avgb1)
IWMMXT_OP(avgw0)
IWMMXT_OP(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP(packuw)
IWMMXT_OP(packul)
IWMMXT_OP(packuq)
IWMMXT_OP(packsw)
IWMMXT_OP(packsl)
IWMMXT_OP(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
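
/* Address generation mirrors the standard coprocessor addressing
 * modes: bit 24 selects pre- versus post-indexed, bit 23 selects
 * add/subtract, and bit 21 requests base writeback; the final check
 * accepts the remaining non-indexed form only with the U bit set.  */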
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        tcg_temp_free_i32(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;
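
    /* The switch key below packs two opcode fields into one value:
     * ((insn >> 12) & 0xf00) places insn[23:20] in key bits 11:8 and
     * ((insn >> 4) & 0xff) places insn[11:4] in key bits 7:0, so e.g.
     * WOR decodes as 0x000 and TMCR as 0x011.  */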
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
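/* WSHUFH's 8-bit lane selector is split across the encoding: the high
   nibble comes from insn[19:16] and the low nibble from insn[3:0].
   Each 2-bit field of the reassembled byte selects the source halfword
   for one destination halfword. */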
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
2324 return 1;
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2335 return 0;
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2351 if (acc != 0)
2352 return 1;
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2389 if (acc != 0)
2390 return 1;
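/* The XScale internal accumulator (acc0) is 40 bits wide.  MRA returns
   the low 32 bits in RdLo and the remaining 8 bits in RdHi, hence the
   mask applied to rdhi below. */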
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2402 return 0;
2405 return 1;
2408 /* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = tcg_temp_new_i32();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 tcg_temp_free_i32(tmp);
2438 return 0;
2441 static int cp15_user_ok(uint32_t insn)
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
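/* Pack the coprocessor opcode fields into one value: opc2 (insn[7:5])
   in bits [2:0] and opc1 (insn[23:21]) in bits [5:3]. */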
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2458 return 0;
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
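/* With CRn == 13, CRm == 0 and opc1 == 0, op values 2, 3 and 4 select
   TPIDRURW, TPIDRURO and TPIDRPRW respectively (the user read/write,
   user read-only and privileged-only thread ID registers). */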
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2488 store_reg(s, rd, tmp);
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 tcg_temp_free_i32(tmp);
2504 return 0;
2507 return 1;
2510 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2511 instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must NOP.
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2549 return 0;
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2568 rd = (insn >> 12) & 0xf;
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2573 tmp2 = tcg_const_i32(insn);
2574 if (insn & ARM_CP_RW_BIT) {
2575 tmp = tcg_temp_new_i32();
2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2577 /* If the destination register is r15 then this sets the condition codes. */
2578 if (rd != 15)
2579 store_reg(s, rd, tmp);
2580 else
2581 tcg_temp_free_i32(tmp);
2582 } else {
2583 tmp = load_reg(s, rd);
2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2585 tcg_temp_free_i32(tmp);
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
2593 tcg_temp_free_i32(tmp2);
2594 return 0;
2597 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598 #define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2610 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
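/* For example, VFP_SREG_D(insn) assembles a single-precision register
   number as Sd = (insn[15:12] << 1) | insn[22].  The VFP_DREG variants
   instead use the extra bit as the high bit of the register number
   (VFP3 has 32 D registers); on pre-VFP3 cores a set high bit is
   rejected as an undefined instruction. */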
2617 /* Move between integer and VFP cores. */
2618 static TCGv gen_vfp_mrs(void)
2620 TCGv tmp = tcg_temp_new_i32();
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2625 static void gen_vfp_msr(TCGv tmp)
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
2628 tcg_temp_free_i32(tmp);
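/* The gen_neon_dup_* helpers below replicate one narrow element across
   all lanes of a 32-bit value (e.g. for u8 the byte 0x12 becomes
   0x12121212).  They are used for VDUP and for the load-and-replicate
   forms of the element load/store instructions. */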
2631 static void gen_neon_dup_u8(TCGv var, int shift)
2633 TCGv tmp = tcg_temp_new_i32();
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
2636 tcg_gen_ext8u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 tcg_temp_free_i32(tmp);
2644 static void gen_neon_dup_low16(TCGv var)
2646 TCGv tmp = tcg_temp_new_i32();
2647 tcg_gen_ext16u_i32(var, var);
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
2650 tcg_temp_free_i32(tmp);
2653 static void gen_neon_dup_high16(TCGv var)
2655 TCGv tmp = tcg_temp_new_i32();
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
2659 tcg_temp_free_i32(tmp);
2662 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2664 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2681 return tmp;
2684 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
2690 TCGv addr;
2691 TCGv tmp;
2692 TCGv tmp2;
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2697 if (!s->vfp_enabled) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2760 break;
2761 case 2:
2762 break;
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 tcg_temp_free_i32(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 tcg_temp_free_i32(tmp2);
2793 break;
2794 case 2:
2795 break;
2797 neon_store_reg(rn, pass, tmp);
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all ID registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 tcg_temp_free_i32(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 tcg_temp_free_i32(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
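/* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6]. */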
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2926 rm = VFP_SREG_M(insn);
2927 } else {
2928 VFP_DREG_M(rm, insn);
2930 } else {
2931 rn = VFP_SREG_N(insn);
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2941 rm = VFP_SREG_M(insn);
2944 veclen = s->vec_len;
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
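/* VFP short-vector mode: with a nonzero FPSCR.LEN the register file
   behaves as banks of 4 doubles or 8 singles.  An operation whose
   destination lies in bank 0 remains scalar; otherwise it iterates
   veclen times, stepping by the configured stride and wrapping
   within the bank. */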
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
2965 delta_d = (s->vec_stride >> 1) + 1;
2966 else
2967 delta_d = s->vec_stride + 1;
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
3013 break;
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_vfp_neg(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
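/* VFP3 VMOV immediate: the 8-bit value abcdefgh expands to a float
   with sign a, an exponent of NOT(b) followed by b replicated, and
   cdefgh as the top fraction bits (the rest zero).  The bit twiddling
   below builds that pattern for single and double precision. */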
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
3081 tcg_gen_movi_i32(cpu_F0s, n);
3083 break;
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3112 tcg_temp_free_i32(tmp);
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
3117 tmp = tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
3123 tcg_temp_free_i32(tmp2);
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
3136 tcg_temp_free_i32(tmp2);
3137 gen_vfp_msr(tmp);
3138 break;
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3155 else
3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_shto(dp, 16 - rm);
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_slto(dp, 32 - rm);
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
3177 gen_vfp_uhto(dp, 16 - rm);
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_ulto(dp, 32 - rm);
3183 break;
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199 gen_vfp_tosh(dp, 16 - rm);
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
3204 gen_vfp_tosl(dp, 32 - rm);
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
3209 gen_vfp_touh(dp, 16 - rm);
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
3214 gen_vfp_toul(dp, 32 - rm);
3215 break;
3216 default: /* undefined */
3217 printf("rn:%d\n", rn);
3218 return 1;
3220 break;
3221 default: /* undefined */
3222 printf("op:%d\n", op);
3223 return 1;
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3249 break;
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3274 break;
3275 case 0xc:
3276 case 0xd:
3277 if ((insn & 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3287 if (insn & ARM_CP_RW_BIT) {
3288 /* vfp->arm */
3289 if (dp) {
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
3298 tmp = gen_vfp_mrs();
3299 store_reg(s, rd, tmp);
3300 gen_mov_F0_vreg(0, rm + 1);
3301 tmp = gen_vfp_mrs();
3302 store_reg(s, rn, tmp);
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
3313 } else {
3314 tmp = load_reg(s, rd);
3315 gen_vfp_msr(tmp);
3316 gen_mov_vreg_F0(0, rm);
3317 tmp = load_reg(s, rn);
3318 gen_vfp_msr(tmp);
3319 gen_mov_vreg_F0(0, rm + 1);
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
3326 VFP_DREG_D(rd, insn);
3327 else
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
3330 addr = tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr, s->pc & ~2);
3332 } else {
3333 addr = load_reg(s, rn);
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
3340 tcg_gen_addi_i32(addr, addr, offset);
3341 if (insn & (1 << 20)) {
3342 gen_vfp_ld(s, dp, addr);
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_st(s, dp, addr);
3348 tcg_temp_free_i32(addr);
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3356 if (insn & (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
3364 if (insn & ARM_CP_RW_BIT) {
3365 /* load */
3366 gen_vfp_ld(s, dp, addr);
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
3371 gen_vfp_st(s, dp, addr);
3373 tcg_gen_addi_i32(addr, addr, offset);
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3384 if (offset != 0)
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
3388 tcg_temp_free_i32(addr);
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3397 return 0;
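/* If the branch destination is on the same guest page as the current
   TB we can use a direct (patchable) goto_tb and chain the blocks;
   otherwise we must set the PC and exit to the main loop so the
   target block can be looked up or translated. */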
3400 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3402 TranslationBlock *tb;
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3406 tcg_gen_goto_tb(n);
3407 gen_set_pc_im(dest);
3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
3409 } else {
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb(0);
3415 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3417 if (unlikely(s->singlestep_enabled)) {
3418 /* An indirect jump so that we still trigger the debug exception. */
3419 if (s->thumb)
3420 dest |= 1;
3421 gen_bx_im(s, dest);
3422 } else {
3423 gen_goto_tb(s, 0, dest);
3424 s->is_jmp = DISAS_TB_JUMP;
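/* gen_mulxy implements the SMULxy-style 16x16->32 multiplies: x and y
   select the top (1) or bottom (0) halfword of each operand, which is
   sign-extended before the multiply. */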
3428 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3430 if (x)
3431 tcg_gen_sari_i32(t0, t0, 16);
3432 else
3433 gen_sxth(t0);
3434 if (y)
3435 tcg_gen_sari_i32(t1, t1, 16);
3436 else
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
3441 /* Return the mask of PSR bits set by a MSR instruction. */
3442 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3443 uint32_t mask;
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
3455 /* Mask out undefined bits. */
3456 mask &= ~CPSR_RESERVED;
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
3461 if (!arm_feature(env, ARM_FEATURE_V6))
3462 mask &= ~(CPSR_E | CPSR_GE);
3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3464 mask &= ~CPSR_IT;
3465 /* Mask out execution state bits. */
3466 if (!spsr)
3467 mask &= ~CPSR_EXEC;
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
3470 mask &= CPSR_USER;
3471 return mask;
3474 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3477 TCGv tmp;
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
3487 store_cpu_field(tmp, spsr);
3488 } else {
3489 gen_set_cpsr(t0, mask);
3491 tcg_temp_free_i32(t0);
3492 gen_lookup_tb(s);
3493 return 0;
3496 /* Returns nonzero if access to the PSR is not permitted. */
3497 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3499 TCGv tmp;
3500 tmp = tcg_temp_new_i32();
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3505 /* Generate an old-style exception return. Marks pc as dead. */
3506 static void gen_exception_return(DisasContext *s, TCGv pc)
3508 TCGv tmp;
3509 store_reg(s, 15, pc);
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
3512 tcg_temp_free_i32(tmp);
3513 s->is_jmp = DISAS_UPDATE;
3516 /* Generate a v6 exception return. Marks both values as dead. */
3517 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3519 gen_set_cpsr(cpsr, 0xffffffff);
3520 tcg_temp_free_i32(cpsr);
3521 store_reg(s, 15, pc);
3522 s->is_jmp = DISAS_UPDATE;
3525 static inline void
3526 gen_set_condexec (DisasContext *s)
3528 if (s->condexec_mask) {
3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3530 TCGv tmp = tcg_temp_new_i32();
3531 tcg_gen_movi_i32(tmp, val);
3532 store_cpu_field(tmp, condexec_bits);
3536 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3544 static void gen_nop_hint(DisasContext *s, int val)
3546 switch (val) {
3547 case 3: /* wfi */
3548 gen_set_pc_im(s->pc);
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3559 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3561 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3563 switch (size) {
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3567 default: return 1;
3569 return 0;
3572 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3574 switch (size) {
3575 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3576 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3577 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3578 default: return;
3582 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3583 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3584 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3585 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3586 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
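/* These dispatch macros select a helper by (size << 1) | u, i.e.
   cases 0..5 map to the _s8/_u8/_s16/_u16/_s32/_u32 variants; any
   other size/signedness combination is rejected as undefined. */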
3588 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3589 switch ((size << 1) | u) { \
3590 case 0: \
3591 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3592 break; \
3593 case 1: \
3594 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3595 break; \
3596 case 2: \
3597 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3598 break; \
3599 case 3: \
3600 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3601 break; \
3602 case 4: \
3603 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3604 break; \
3605 case 5: \
3606 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3607 break; \
3608 default: return 1; \
3609 }} while (0)
3611 #define GEN_NEON_INTEGER_OP(name) do { \
3612 switch ((size << 1) | u) { \
3613 case 0: \
3614 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3615 break; \
3616 case 1: \
3617 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3618 break; \
3619 case 2: \
3620 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3621 break; \
3622 case 3: \
3623 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3624 break; \
3625 case 4: \
3626 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3627 break; \
3628 case 5: \
3629 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3630 break; \
3631 default: return 1; \
3632 }} while (0)
3634 static TCGv neon_load_scratch(int scratch)
3636 TCGv tmp = tcg_temp_new_i32();
3637 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3638 return tmp;
3641 static void neon_store_scratch(int scratch, TCGv var)
3643 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3644 tcg_temp_free_i32(var);
3647 static inline TCGv neon_get_scalar(int size, int reg)
3649 TCGv tmp;
3650 if (size == 1) {
3651 tmp = neon_load_reg(reg & 7, reg >> 4);
3652 if (reg & 8) {
3653 gen_neon_dup_high16(tmp);
3654 } else {
3655 gen_neon_dup_low16(tmp);
3657 } else {
3658 tmp = neon_load_reg(reg & 15, reg >> 4);
3660 return tmp;
3663 static int gen_neon_unzip(int rd, int rm, int size, int q)
3665 TCGv tmp, tmp2;
3666 if (size == 3 || (!q && size == 2)) {
3667 return 1;
3669 tmp = tcg_const_i32(rd);
3670 tmp2 = tcg_const_i32(rm);
3671 if (q) {
3672 switch (size) {
3673 case 0:
3674 gen_helper_neon_qunzip8(tmp, tmp2);
3675 break;
3676 case 1:
3677 gen_helper_neon_qunzip16(tmp, tmp2);
3678 break;
3679 case 2:
3680 gen_helper_neon_qunzip32(tmp, tmp2);
3681 break;
3682 default:
3683 abort();
3685 } else {
3686 switch (size) {
3687 case 0:
3688 gen_helper_neon_unzip8(tmp, tmp2);
3689 break;
3690 case 1:
3691 gen_helper_neon_unzip16(tmp, tmp2);
3692 break;
3693 default:
3694 abort();
3697 tcg_temp_free_i32(tmp);
3698 tcg_temp_free_i32(tmp2);
3699 return 0;
3702 static int gen_neon_zip(int rd, int rm, int size, int q)
3704 TCGv tmp, tmp2;
3705 if (size == 3 || (!q && size == 2)) {
3706 return 1;
3708 tmp = tcg_const_i32(rd);
3709 tmp2 = tcg_const_i32(rm);
3710 if (q) {
3711 switch (size) {
3712 case 0:
3713 gen_helper_neon_qzip8(tmp, tmp2);
3714 break;
3715 case 1:
3716 gen_helper_neon_qzip16(tmp, tmp2);
3717 break;
3718 case 2:
3719 gen_helper_neon_qzip32(tmp, tmp2);
3720 break;
3721 default:
3722 abort();
3724 } else {
3725 switch (size) {
3726 case 0:
3727 gen_helper_neon_zip8(tmp, tmp2);
3728 break;
3729 case 1:
3730 gen_helper_neon_zip16(tmp, tmp2);
3731 break;
3732 default:
3733 abort();
3736 tcg_temp_free_i32(tmp);
3737 tcg_temp_free_i32(tmp2);
3738 return 0;
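/* One 32-bit step of VTRN: corresponding pairs of 8-bit (or 16-bit)
   lanes in t0 and t1 are treated as 2x2 matrices and transposed in
   place. */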
3741 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3743 TCGv rd, tmp;
3745 rd = tcg_temp_new_i32();
3746 tmp = tcg_temp_new_i32();
3748 tcg_gen_shli_i32(rd, t0, 8);
3749 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3750 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3751 tcg_gen_or_i32(rd, rd, tmp);
3753 tcg_gen_shri_i32(t1, t1, 8);
3754 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3755 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3756 tcg_gen_or_i32(t1, t1, tmp);
3757 tcg_gen_mov_i32(t0, rd);
3759 tcg_temp_free_i32(tmp);
3760 tcg_temp_free_i32(rd);
3763 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3765 TCGv rd, tmp;
3767 rd = tcg_temp_new_i32();
3768 tmp = tcg_temp_new_i32();
3770 tcg_gen_shli_i32(rd, t0, 16);
3771 tcg_gen_andi_i32(tmp, t1, 0xffff);
3772 tcg_gen_or_i32(rd, rd, tmp);
3773 tcg_gen_shri_i32(t1, t1, 16);
3774 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3775 tcg_gen_or_i32(t1, t1, tmp);
3776 tcg_gen_mov_i32(t0, rd);
3778 tcg_temp_free_i32(tmp);
3779 tcg_temp_free_i32(rd);
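/* Layout of the "load/store all elements" forms of VLD1-4/VST1-4,
   indexed by the op field (insn[11:8]): the number of D registers
   accessed, the element interleave factor, and the register spacing
   (1 = consecutive registers, 2 = every other register). */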
3783 static struct {
3784 int nregs;
3785 int interleave;
3786 int spacing;
3787 } neon_ls_element_type[11] = {
3788 {4, 4, 1},
3789 {4, 4, 2},
3790 {4, 1, 1},
3791 {4, 2, 1},
3792 {3, 3, 1},
3793 {3, 3, 2},
3794 {3, 1, 1},
3795 {1, 1, 1},
3796 {2, 2, 1},
3797 {2, 2, 2},
3798 {2, 1, 1}
3801 /* Translate a NEON load/store element instruction. Return nonzero if the
3802 instruction is invalid. */
3803 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3805 int rd, rn, rm;
3806 int op;
3807 int nregs;
3808 int interleave;
3809 int spacing;
3810 int stride;
3811 int size;
3812 int reg;
3813 int pass;
3814 int load;
3815 int shift;
3816 int n;
3817 TCGv addr;
3818 TCGv tmp;
3819 TCGv tmp2;
3820 TCGv_i64 tmp64;
3822 if (!s->vfp_enabled)
3823 return 1;
3824 VFP_DREG_D(rd, insn);
3825 rn = (insn >> 16) & 0xf;
3826 rm = insn & 0xf;
3827 load = (insn & (1 << 21)) != 0;
3828 if ((insn & (1 << 23)) == 0) {
3829 /* Load store all elements. */
3830 op = (insn >> 8) & 0xf;
3831 size = (insn >> 6) & 3;
3832 if (op > 10)
3833 return 1;
3834 nregs = neon_ls_element_type[op].nregs;
3835 interleave = neon_ls_element_type[op].interleave;
3836 spacing = neon_ls_element_type[op].spacing;
3837 if (size == 3 && (interleave | spacing) != 1)
3838 return 1;
3839 addr = tcg_temp_new_i32();
3840 load_reg_var(s, addr, rn);
3841 stride = (1 << size) * interleave;
3842 for (reg = 0; reg < nregs; reg++) {
3843 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3844 load_reg_var(s, addr, rn);
3845 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3846 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3847 load_reg_var(s, addr, rn);
3848 tcg_gen_addi_i32(addr, addr, 1 << size);
3850 if (size == 3) {
3851 if (load) {
3852 tmp64 = gen_ld64(addr, IS_USER(s));
3853 neon_store_reg64(tmp64, rd);
3854 tcg_temp_free_i64(tmp64);
3855 } else {
3856 tmp64 = tcg_temp_new_i64();
3857 neon_load_reg64(tmp64, rd);
3858 gen_st64(tmp64, addr, IS_USER(s));
3860 tcg_gen_addi_i32(addr, addr, stride);
3861 } else {
3862 for (pass = 0; pass < 2; pass++) {
3863 if (size == 2) {
3864 if (load) {
3865 tmp = gen_ld32(addr, IS_USER(s));
3866 neon_store_reg(rd, pass, tmp);
3867 } else {
3868 tmp = neon_load_reg(rd, pass);
3869 gen_st32(tmp, addr, IS_USER(s));
3871 tcg_gen_addi_i32(addr, addr, stride);
3872 } else if (size == 1) {
3873 if (load) {
3874 tmp = gen_ld16u(addr, IS_USER(s));
3875 tcg_gen_addi_i32(addr, addr, stride);
3876 tmp2 = gen_ld16u(addr, IS_USER(s));
3877 tcg_gen_addi_i32(addr, addr, stride);
3878 tcg_gen_shli_i32(tmp2, tmp2, 16);
3879 tcg_gen_or_i32(tmp, tmp, tmp2);
3880 tcg_temp_free_i32(tmp2);
3881 neon_store_reg(rd, pass, tmp);
3882 } else {
3883 tmp = neon_load_reg(rd, pass);
3884 tmp2 = tcg_temp_new_i32();
3885 tcg_gen_shri_i32(tmp2, tmp, 16);
3886 gen_st16(tmp, addr, IS_USER(s));
3887 tcg_gen_addi_i32(addr, addr, stride);
3888 gen_st16(tmp2, addr, IS_USER(s));
3889 tcg_gen_addi_i32(addr, addr, stride);
3891 } else /* size == 0 */ {
3892 if (load) {
3893 TCGV_UNUSED(tmp2);
3894 for (n = 0; n < 4; n++) {
3895 tmp = gen_ld8u(addr, IS_USER(s));
3896 tcg_gen_addi_i32(addr, addr, stride);
3897 if (n == 0) {
3898 tmp2 = tmp;
3899 } else {
3900 tcg_gen_shli_i32(tmp, tmp, n * 8);
3901 tcg_gen_or_i32(tmp2, tmp2, tmp);
3902 tcg_temp_free_i32(tmp);
3905 neon_store_reg(rd, pass, tmp2);
3906 } else {
3907 tmp2 = neon_load_reg(rd, pass);
3908 for (n = 0; n < 4; n++) {
3909 tmp = tcg_temp_new_i32();
3910 if (n == 0) {
3911 tcg_gen_mov_i32(tmp, tmp2);
3912 } else {
3913 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3915 gen_st8(tmp, addr, IS_USER(s));
3916 tcg_gen_addi_i32(addr, addr, stride);
3918 tcg_temp_free_i32(tmp2);
3923 rd += spacing;
3925 tcg_temp_free_i32(addr);
3926 stride = nregs * 8;
3927 } else {
3928 size = (insn >> 10) & 3;
3929 if (size == 3) {
3930 /* Load single element to all lanes. */
3931 int a = (insn >> 4) & 1;
3932 if (!load) {
3933 return 1;
3935 size = (insn >> 6) & 3;
3936 nregs = ((insn >> 8) & 3) + 1;
3938 if (size == 3) {
3939 if (nregs != 4 || a == 0) {
3940 return 1;
3942 /* For VLD4, size == 3 with a == 1 means 32 bits at 16 byte alignment */
3943 size = 2;
3945 if (nregs == 1 && a == 1 && size == 0) {
3946 return 1;
3948 if (nregs == 3 && a == 1) {
3949 return 1;
3951 addr = tcg_temp_new_i32();
3952 load_reg_var(s, addr, rn);
3953 if (nregs == 1) {
3954 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3955 tmp = gen_load_and_replicate(s, addr, size);
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3957 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3958 if (insn & (1 << 5)) {
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3960 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3962 tcg_temp_free_i32(tmp);
3963 } else {
3964 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3965 stride = (insn & (1 << 5)) ? 2 : 1;
3966 for (reg = 0; reg < nregs; reg++) {
3967 tmp = gen_load_and_replicate(s, addr, size);
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3969 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3970 tcg_temp_free_i32(tmp);
3971 tcg_gen_addi_i32(addr, addr, 1 << size);
3972 rd += stride;
3975 tcg_temp_free_i32(addr);
3976 stride = (1 << size) * nregs;
3977 } else {
3978 /* Single element. */
3979 pass = (insn >> 7) & 1;
3980 switch (size) {
3981 case 0:
3982 shift = ((insn >> 5) & 3) * 8;
3983 stride = 1;
3984 break;
3985 case 1:
3986 shift = ((insn >> 6) & 1) * 16;
3987 stride = (insn & (1 << 5)) ? 2 : 1;
3988 break;
3989 case 2:
3990 shift = 0;
3991 stride = (insn & (1 << 6)) ? 2 : 1;
3992 break;
3993 default:
3994 abort();
3996 nregs = ((insn >> 8) & 3) + 1;
3997 addr = tcg_temp_new_i32();
3998 load_reg_var(s, addr, rn);
3999 for (reg = 0; reg < nregs; reg++) {
4000 if (load) {
4001 switch (size) {
4002 case 0:
4003 tmp = gen_ld8u(addr, IS_USER(s));
4004 break;
4005 case 1:
4006 tmp = gen_ld16u(addr, IS_USER(s));
4007 break;
4008 case 2:
4009 tmp = gen_ld32(addr, IS_USER(s));
4010 break;
4011 default: /* Avoid compiler warnings. */
4012 abort();
4014 if (size != 2) {
4015 tmp2 = neon_load_reg(rd, pass);
4016 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4017 tcg_temp_free_i32(tmp2);
4019 neon_store_reg(rd, pass, tmp);
4020 } else { /* Store */
4021 tmp = neon_load_reg(rd, pass);
4022 if (shift)
4023 tcg_gen_shri_i32(tmp, tmp, shift);
4024 switch (size) {
4025 case 0:
4026 gen_st8(tmp, addr, IS_USER(s));
4027 break;
4028 case 1:
4029 gen_st16(tmp, addr, IS_USER(s));
4030 break;
4031 case 2:
4032 gen_st32(tmp, addr, IS_USER(s));
4033 break;
4036 rd += stride;
4037 tcg_gen_addi_i32(addr, addr, 1 << size);
4039 tcg_temp_free_i32(addr);
4040 stride = nregs * (1 << size);
4043 if (rm != 15) {
4044 TCGv base;
4046 base = load_reg(s, rn);
4047 if (rm == 13) {
4048 tcg_gen_addi_i32(base, base, stride);
4049 } else {
4050 TCGv index;
4051 index = load_reg(s, rm);
4052 tcg_gen_add_i32(base, base, index);
4053 tcg_temp_free_i32(index);
4055 store_reg(s, rn, base);
4057 return 0;
4060 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4061 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4063 tcg_gen_and_i32(t, t, c);
4064 tcg_gen_andc_i32(f, f, c);
4065 tcg_gen_or_i32(dest, t, f);
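/* The narrowing helpers below halve the element size of a 64-bit
   source into a 32-bit result.  The _sats/_satu variants saturate
   signed and unsigned values respectively, and "unarrow" narrows a
   signed source with unsigned saturation (as VQMOVUN requires). */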
4068 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4070 switch (size) {
4071 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4072 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4073 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4074 default: abort();
4078 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4080 switch (size) {
4081 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4082 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4083 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4084 default: abort();
4088 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4090 switch (size) {
4091 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4092 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4093 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4094 default: abort();
4098 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4100 switch (size) {
4101 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4102 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4103 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4104 default: abort();
4108 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4109 int q, int u)
4111 if (q) {
4112 if (u) {
4113 switch (size) {
4114 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4115 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4116 default: abort();
4118 } else {
4119 switch (size) {
4120 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4121 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4122 default: abort();
4125 } else {
4126 if (u) {
4127 switch (size) {
4128 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4129 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4130 default: abort();
4132 } else {
4133 switch (size) {
4134 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4135 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4136 default: abort();
4142 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4144 if (u) {
4145 switch (size) {
4146 case 0: gen_helper_neon_widen_u8(dest, src); break;
4147 case 1: gen_helper_neon_widen_u16(dest, src); break;
4148 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4149 default: abort();
4151 } else {
4152 switch (size) {
4153 case 0: gen_helper_neon_widen_s8(dest, src); break;
4154 case 1: gen_helper_neon_widen_s16(dest, src); break;
4155 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4156 default: abort();
4159 tcg_temp_free_i32(src);
4162 static inline void gen_neon_addl(int size)
4164 switch (size) {
4165 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4166 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4167 case 2: tcg_gen_add_i64(CPU_V001); break;
4168 default: abort();
4172 static inline void gen_neon_subl(int size)
4174 switch (size) {
4175 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4176 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4177 case 2: tcg_gen_sub_i64(CPU_V001); break;
4178 default: abort();
4182 static inline void gen_neon_negl(TCGv_i64 var, int size)
4184 switch (size) {
4185 case 0: gen_helper_neon_negl_u16(var, var); break;
4186 case 1: gen_helper_neon_negl_u32(var, var); break;
4187 case 2: gen_helper_neon_negl_u64(var, var); break;
4188 default: abort();
4192 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4194 switch (size) {
4195 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4196 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4197 default: abort();
4201 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4203 TCGv_i64 tmp;
4205 switch ((size << 1) | u) {
4206 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4207 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4208 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4209 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4210 case 4:
4211 tmp = gen_muls_i64_i32(a, b);
4212 tcg_gen_mov_i64(dest, tmp);
4213 tcg_temp_free_i64(tmp);
4214 break;
4215 case 5:
4216 tmp = gen_mulu_i64_i32(a, b);
4217 tcg_gen_mov_i64(dest, tmp);
4218 tcg_temp_free_i64(tmp);
4219 break;
4220 default: abort();
4223 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4224 Don't forget to clean them now. */
4225 if (size < 2) {
4226 tcg_temp_free_i32(a);
4227 tcg_temp_free_i32(b);
4231 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4233 if (op) {
4234 if (u) {
4235 gen_neon_unarrow_sats(size, dest, src);
4236 } else {
4237 gen_neon_narrow(size, dest, src);
4239 } else {
4240 if (u) {
4241 gen_neon_narrow_satu(size, dest, src);
4242 } else {
4243 gen_neon_narrow_sats(size, dest, src);
4248 /* Translate a NEON data processing instruction. Return nonzero if the
4249 instruction is invalid.
4250 We process data in a mixture of 32-bit and 64-bit chunks.
4251 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4253 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4255 int op;
4256 int q;
4257 int rd, rn, rm;
4258 int size;
4259 int shift;
4260 int pass;
4261 int count;
4262 int pairwise;
4263 int u;
4264 int n;
4265 uint32_t imm, mask;
4266 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4267 TCGv_i64 tmp64;
4269 if (!s->vfp_enabled)
4270 return 1;
4271 q = (insn & (1 << 6)) != 0;
4272 u = (insn >> 24) & 1;
4273 VFP_DREG_D(rd, insn);
4274 VFP_DREG_N(rn, insn);
4275 VFP_DREG_M(rm, insn);
4276 size = (insn >> 20) & 3;
4277 if ((insn & (1 << 23)) == 0) {
4278 /* Three register same length. */
4279 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4280 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4281 || op == 10 || op == 11 || op == 16)) {
4282 /* 64-bit element instructions. */
4283 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4284 neon_load_reg64(cpu_V0, rn + pass);
4285 neon_load_reg64(cpu_V1, rm + pass);
4286 switch (op) {
4287 case 1: /* VQADD */
4288 if (u) {
4289 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4290 } else {
4291 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4293 break;
4294 case 5: /* VQSUB */
4295 if (u) {
4296 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4297 } else {
4298 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4300 break;
4301 case 8: /* VSHL */
4302 if (u) {
4303 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4304 } else {
4305 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4307 break;
4308 case 9: /* VQSHL */
4309 if (u) {
4310 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4311 } else {
4312 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4314 break;
4315 case 10: /* VRSHL */
4316 if (u) {
4317 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4318 } else {
4319 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4321 break;
4322 case 11: /* VQRSHL */
4323 if (u) {
4324 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4325 } else {
4326 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4328 break;
4329 case 16:
4330 if (u) {
4331 tcg_gen_sub_i64(CPU_V001);
4332 } else {
4333 tcg_gen_add_i64(CPU_V001);
4335 break;
4336 default:
4337 abort();
4339 neon_store_reg64(cpu_V0, rd + pass);
4341 return 0;
4343 switch (op) {
4344 case 8: /* VSHL */
4345 case 9: /* VQSHL */
4346 case 10: /* VRSHL */
4347 case 11: /* VQRSHL */
4349 int rtmp;
4350 /* Shift instruction operands are reversed. */
4351 rtmp = rn;
4352 rn = rm;
4353 rm = rtmp;
4354 pairwise = 0;
4356 break;
4357 case 20: /* VPMAX */
4358 case 21: /* VPMIN */
4359 case 23: /* VPADD */
4360 pairwise = 1;
4361 break;
4362 case 26: /* VPADD (float) */
4363 pairwise = (u && size < 2);
4364 break;
4365 case 30: /* VPMIN/VPMAX (float) */
4366 pairwise = u;
4367 break;
4368 default:
4369 pairwise = 0;
4370 break;
4373 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4375 if (pairwise) {
4376 /* Pairwise. */
4377 if (q)
4378 n = (pass & 1) * 2;
4379 else
4380 n = 0;
4381 if (pass < q + 1) {
4382 tmp = neon_load_reg(rn, n);
4383 tmp2 = neon_load_reg(rn, n + 1);
4384 } else {
4385 tmp = neon_load_reg(rm, n);
4386 tmp2 = neon_load_reg(rm, n + 1);
4388 } else {
4389 /* Elementwise. */
4390 tmp = neon_load_reg(rn, pass);
4391 tmp2 = neon_load_reg(rm, pass);
4393 switch (op) {
4394 case 0: /* VHADD */
4395 GEN_NEON_INTEGER_OP(hadd);
4396 break;
4397 case 1: /* VQADD */
4398 GEN_NEON_INTEGER_OP(qadd);
4399 break;
4400 case 2: /* VRHADD */
4401 GEN_NEON_INTEGER_OP(rhadd);
4402 break;
4403 case 3: /* Logic ops. */
4404 switch ((u << 2) | size) {
4405 case 0: /* VAND */
4406 tcg_gen_and_i32(tmp, tmp, tmp2);
4407 break;
4408 case 1: /* BIC */
4409 tcg_gen_andc_i32(tmp, tmp, tmp2);
4410 break;
4411 case 2: /* VORR */
4412 tcg_gen_or_i32(tmp, tmp, tmp2);
4413 break;
4414 case 3: /* VORN */
4415 tcg_gen_orc_i32(tmp, tmp, tmp2);
4416 break;
4417 case 4: /* VEOR */
4418 tcg_gen_xor_i32(tmp, tmp, tmp2);
4419 break;
4420 case 5: /* VBSL */
4421 tmp3 = neon_load_reg(rd, pass);
4422 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4423 tcg_temp_free_i32(tmp3);
4424 break;
4425 case 6: /* VBIT */
4426 tmp3 = neon_load_reg(rd, pass);
4427 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4428 tcg_temp_free_i32(tmp3);
4429 break;
4430 case 7: /* VBIF */
4431 tmp3 = neon_load_reg(rd, pass);
4432 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4433 tcg_temp_free_i32(tmp3);
4434 break;
4436 break;
4437 case 4: /* VHSUB */
4438 GEN_NEON_INTEGER_OP(hsub);
4439 break;
4440 case 5: /* VQSUB */
4441 GEN_NEON_INTEGER_OP(qsub);
4442 break;
4443 case 6: /* VCGT */
4444 GEN_NEON_INTEGER_OP(cgt);
4445 break;
4446 case 7: /* VCGE */
4447 GEN_NEON_INTEGER_OP(cge);
4448 break;
4449 case 8: /* VSHL */
4450 GEN_NEON_INTEGER_OP(shl);
4451 break;
4452 case 9: /* VQSHL */
4453 GEN_NEON_INTEGER_OP(qshl);
4454 break;
4455 case 10: /* VRSHL */
4456 GEN_NEON_INTEGER_OP(rshl);
4457 break;
4458 case 11: /* VQRSHL */
4459 GEN_NEON_INTEGER_OP(qrshl);
4460 break;
4461 case 12: /* VMAX */
4462 GEN_NEON_INTEGER_OP(max);
4463 break;
4464 case 13: /* VMIN */
4465 GEN_NEON_INTEGER_OP(min);
4466 break;
4467 case 14: /* VABD */
4468 GEN_NEON_INTEGER_OP(abd);
4469 break;
4470 case 15: /* VABA */
4471 GEN_NEON_INTEGER_OP(abd);
4472 tcg_temp_free_i32(tmp2);
4473 tmp2 = neon_load_reg(rd, pass);
4474 gen_neon_add(size, tmp, tmp2);
4475 break;
4476 case 16:
4477 if (!u) { /* VADD */
4478 if (gen_neon_add(size, tmp, tmp2))
4479 return 1;
4480 } else { /* VSUB */
4481 switch (size) {
4482 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4483 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4484 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4485 default: return 1;
4488 break;
4489 case 17:
4490 if (!u) { /* VTST */
4491 switch (size) {
4492 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4493 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4494 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4495 default: return 1;
4497 } else { /* VCEQ */
4498 switch (size) {
4499 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4500 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4501 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4502 default: return 1;
4505 break;
4506 case 18: /* Multiply. */
4507 switch (size) {
4508 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4509 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4510 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4511 default: return 1;
4513 tcg_temp_free_i32(tmp2);
4514 tmp2 = neon_load_reg(rd, pass);
4515 if (u) { /* VMLS */
4516 gen_neon_rsb(size, tmp, tmp2);
4517 } else { /* VMLA */
4518 gen_neon_add(size, tmp, tmp2);
4520 break;
4521 case 19: /* VMUL */
4522 if (u) { /* polynomial */
4523 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4524 } else { /* Integer */
4525 switch (size) {
4526 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4527 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4528 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4529 default: return 1;
4532 break;
4533 case 20: /* VPMAX */
4534 GEN_NEON_INTEGER_OP(pmax);
4535 break;
4536 case 21: /* VPMIN */
4537 GEN_NEON_INTEGER_OP(pmin);
4538 break;
4539 case 22: /* Multiply high. */
4540 if (!u) { /* VQDMULH */
4541 switch (size) {
4542 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4543 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4544 default: return 1;
4545 }
4546 } else { /* VQRDMULH */
4547 switch (size) {
4548 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4549 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4550 default: return 1;
4551 }
4552 }
4553 break;
4554 case 23: /* VPADD */
4555 if (u)
4556 return 1;
4557 switch (size) {
4558 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4559 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4560 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4561 default: return 1;
4563 break;
4564 case 26: /* Floating point arithmetic. */
4565 switch ((u << 2) | size) {
4566 case 0: /* VADD */
4567 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4568 break;
4569 case 2: /* VSUB */
4570 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4571 break;
4572 case 4: /* VPADD */
4573 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4574 break;
4575 case 6: /* VABD */
4576 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4577 break;
4578 default:
4579 return 1;
4581 break;
4582 case 27: /* Float multiply. */
4583 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4584 if (!u) {
4585 tcg_temp_free_i32(tmp2);
4586 tmp2 = neon_load_reg(rd, pass);
4587 if (size == 0) {
4588 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4589 } else {
4590 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4593 break;
4594 case 28: /* Float compare. */
4595 if (!u) {
4596 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4597 } else {
4598 if (size == 0)
4599 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4600 else
4601 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4603 break;
4604 case 29: /* Float compare absolute. */
4605 if (!u)
4606 return 1;
4607 if (size == 0)
4608 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4609 else
4610 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4611 break;
4612 case 30: /* Float min/max. */
4613 if (size == 0)
4614 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4615 else
4616 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4617 break;
4618 case 31:
4619 if (size == 0)
4620 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4621 else
4622 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4623 break;
4624 default:
4625 abort();
4627 tcg_temp_free_i32(tmp2);
4629 /* Save the result. For elementwise operations we can put it
4630 straight into the destination register. For pairwise operations
4631 we have to be careful to avoid clobbering the source operands. */
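/* For example, VPADD d0, d1, d0: pass 0 would overwrite d0[0] while
   pass 1 still needs to read both elements of d0 (== rm), so the
   results are staged in the scratch buffer first. */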
4632 if (pairwise && rd == rm) {
4633 neon_store_scratch(pass, tmp);
4634 } else {
4635 neon_store_reg(rd, pass, tmp);
4638 } /* for pass */
4639 if (pairwise && rd == rm) {
4640 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4641 tmp = neon_load_scratch(pass);
4642 neon_store_reg(rd, pass, tmp);
4645 /* End of 3 register same size operations. */
4646 } else if (insn & (1 << 4)) {
4647 if ((insn & 0x00380080) != 0) {
4648 /* Two registers and shift. */
4649 op = (insn >> 8) & 0xf;
4650 if (insn & (1 << 7)) {
4651 /* 64-bit shift. */
4652 size = 3;
4653 } else {
4654 size = 2;
4655 while ((insn & (1 << (size + 19))) == 0)
4656 size--;
4658 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4659 /* To avoid excessive duplication of ops we implement shift
4660 by immediate using the variable shift operations. */
4661 if (op < 8) {
4662 /* Shift by immediate:
4663 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4664 /* Right shifts are encoded as N - shift, where N is the
4665 element size in bits. */
4666 if (op <= 4)
4667 shift = shift - (1 << (size + 3));
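/* For example, VSHR.S16 #3 is encoded with imm6 = 32 - 3 = 0b011101:
   the leading set bit gives size == 1, the field above reads 13, and
   13 - 16 = -3 selects a right shift of 3 (the variable shift helpers
   treat negative counts as right shifts). */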
4668 if (size == 3) {
4669 count = q + 1;
4670 } else {
4671 count = q ? 4: 2;
4673 switch (size) {
4674 case 0:
4675 imm = (uint8_t) shift;
4676 imm |= imm << 8;
4677 imm |= imm << 16;
4678 break;
4679 case 1:
4680 imm = (uint16_t) shift;
4681 imm |= imm << 16;
4682 break;
4683 case 2:
4684 case 3:
4685 imm = shift;
4686 break;
4687 default:
4688 abort();
4691 for (pass = 0; pass < count; pass++) {
4692 if (size == 3) {
4693 neon_load_reg64(cpu_V0, rm + pass);
4694 tcg_gen_movi_i64(cpu_V1, imm);
4695 switch (op) {
4696 case 0: /* VSHR */
4697 case 1: /* VSRA */
4698 if (u)
4699 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4700 else
4701 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4702 break;
4703 case 2: /* VRSHR */
4704 case 3: /* VRSRA */
4705 if (u)
4706 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4707 else
4708 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4709 break;
4710 case 4: /* VSRI */
4711 if (!u)
4712 return 1;
4713 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4714 break;
4715 case 5: /* VSHL, VSLI */
4716 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4717 break;
4718 case 6: /* VQSHLU */
4719 if (u) {
4720 gen_helper_neon_qshlu_s64(cpu_V0,
4721 cpu_V0, cpu_V1);
4722 } else {
4723 return 1;
4725 break;
4726 case 7: /* VQSHL */
4727 if (u) {
4728 gen_helper_neon_qshl_u64(cpu_V0,
4729 cpu_V0, cpu_V1);
4730 } else {
4731 gen_helper_neon_qshl_s64(cpu_V0,
4732 cpu_V0, cpu_V1);
4734 break;
4736 if (op == 1 || op == 3) {
4737 /* Accumulate. */
4738 neon_load_reg64(cpu_V1, rd + pass);
4739 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4740 } else if (op == 4 || (op == 5 && u)) {
4741 /* Insert */
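/* The mask covers exactly the bits produced by the shifted value;
   the destination keeps its bits outside the mask.  For example,
   VSRI.64 #8 has shift == -8, so mask = ~0ull >> 8: bits [55:0] of
   Dd come from the shifted source and the top 8 bits are
   preserved. */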
4742 neon_load_reg64(cpu_V1, rd + pass);
4743 uint64_t mask;
4744 if (shift < -63 || shift > 63) {
4745 mask = 0;
4746 } else {
4747 if (op == 4) {
4748 mask = 0xffffffffffffffffull >> -shift;
4749 } else {
4750 mask = 0xffffffffffffffffull << shift;
4753 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4754 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4756 neon_store_reg64(cpu_V0, rd + pass);
4757 } else { /* size < 3 */
4758 /* Operands in tmp and tmp2. */
4759 tmp = neon_load_reg(rm, pass);
4760 tmp2 = tcg_temp_new_i32();
4761 tcg_gen_movi_i32(tmp2, imm);
4762 switch (op) {
4763 case 0: /* VSHR */
4764 case 1: /* VSRA */
4765 GEN_NEON_INTEGER_OP(shl);
4766 break;
4767 case 2: /* VRSHR */
4768 case 3: /* VRSRA */
4769 GEN_NEON_INTEGER_OP(rshl);
4770 break;
4771 case 4: /* VSRI */
4772 if (!u)
4773 return 1;
4774 GEN_NEON_INTEGER_OP(shl);
4775 break;
4776 case 5: /* VSHL, VSLI */
4777 switch (size) {
4778 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4779 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4780 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4781 default: return 1;
4783 break;
4784 case 6: /* VQSHLU */
4785 if (!u) {
4786 return 1;
4788 switch (size) {
4789 case 0:
4790 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
4791 break;
4792 case 1:
4793 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
4794 break;
4795 case 2:
4796 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
4797 break;
4798 default:
4799 return 1;
4801 break;
4802 case 7: /* VQSHL */
4803 GEN_NEON_INTEGER_OP(qshl);
4804 break;
4806 tcg_temp_free_i32(tmp2);
4808 if (op == 1 || op == 3) {
4809 /* Accumulate. */
4810 tmp2 = neon_load_reg(rd, pass);
4811 gen_neon_add(size, tmp, tmp2);
4812 tcg_temp_free_i32(tmp2);
4813 } else if (op == 4 || (op == 5 && u)) {
4814 /* Insert */
4815 switch (size) {
4816 case 0:
4817 if (op == 4)
4818 mask = 0xff >> -shift;
4819 else
4820 mask = (uint8_t)(0xff << shift);
4821 mask |= mask << 8;
4822 mask |= mask << 16;
4823 break;
4824 case 1:
4825 if (op == 4)
4826 mask = 0xffff >> -shift;
4827 else
4828 mask = (uint16_t)(0xffff << shift);
4829 mask |= mask << 16;
4830 break;
4831 case 2:
4832 if (shift < -31 || shift > 31) {
4833 mask = 0;
4834 } else {
4835 if (op == 4)
4836 mask = 0xffffffffu >> -shift;
4837 else
4838 mask = 0xffffffffu << shift;
4840 break;
4841 default:
4842 abort();
4844 tmp2 = neon_load_reg(rd, pass);
4845 tcg_gen_andi_i32(tmp, tmp, mask);
4846 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4847 tcg_gen_or_i32(tmp, tmp, tmp2);
4848 tcg_temp_free_i32(tmp2);
4850 neon_store_reg(rd, pass, tmp);
4852 } /* for pass */
4853 } else if (op < 10) {
4854 /* Shift by immediate and narrow:
4855 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4856 int input_unsigned = (op == 8) ? !u : u;
4858 shift = shift - (1 << (size + 3));
4859 size++;
4860 if (size == 3) {
4861 tmp64 = tcg_const_i64(shift);
4862 neon_load_reg64(cpu_V0, rm);
4863 neon_load_reg64(cpu_V1, rm + 1);
4864 for (pass = 0; pass < 2; pass++) {
4865 TCGv_i64 in;
4866 if (pass == 0) {
4867 in = cpu_V0;
4868 } else {
4869 in = cpu_V1;
4871 if (q) {
4872 if (input_unsigned) {
4873 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4874 } else {
4875 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4877 } else {
4878 if (input_unsigned) {
4879 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4880 } else {
4881 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4884 tmp = tcg_temp_new_i32();
4885 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4886 neon_store_reg(rd, pass, tmp);
4887 } /* for pass */
4888 tcg_temp_free_i64(tmp64);
4889 } else {
4890 if (size == 1) {
4891 imm = (uint16_t)shift;
4892 imm |= imm << 16;
4893 } else {
4894 /* size == 2 */
4895 imm = (uint32_t)shift;
4897 tmp2 = tcg_const_i32(imm);
4898 tmp4 = neon_load_reg(rm + 1, 0);
4899 tmp5 = neon_load_reg(rm + 1, 1);
4900 for (pass = 0; pass < 2; pass++) {
4901 if (pass == 0) {
4902 tmp = neon_load_reg(rm, 0);
4903 } else {
4904 tmp = tmp4;
4906 gen_neon_shift_narrow(size, tmp, tmp2, q,
4907 input_unsigned);
4908 if (pass == 0) {
4909 tmp3 = neon_load_reg(rm, 1);
4910 } else {
4911 tmp3 = tmp5;
4913 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4914 input_unsigned);
4915 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4916 tcg_temp_free_i32(tmp);
4917 tcg_temp_free_i32(tmp3);
4918 tmp = tcg_temp_new_i32();
4919 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4920 neon_store_reg(rd, pass, tmp);
4921 } /* for pass */
4922 tcg_temp_free_i32(tmp2);
4924 } else if (op == 10) {
4925 /* VSHLL */
4926 if (q || size == 3)
4927 return 1;
4928 tmp = neon_load_reg(rm, 0);
4929 tmp2 = neon_load_reg(rm, 1);
4930 for (pass = 0; pass < 2; pass++) {
4931 if (pass == 1)
4932 tmp = tmp2;
4934 gen_neon_widen(cpu_V0, tmp, size, u);
4936 if (shift != 0) {
4937 /* The shift is less than the width of the source
4938 type, so we can just shift the whole register. */
4939 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4940 /* Widen the result of shift: we need to clear
4941 * the potential overflow bits resulting from
4942 * left bits of the narrow input appearing as
4943 * right bits of the left neighbour narrow
4944 * input. */
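/* For example, VSHLL.S8 #4: each 16-bit lane holds a sign-extended
   byte, and shifting the whole 64-bit register left by 4 lets the
   sign bits of one lane spill into the low 4 bits of the lane above,
   so those bits (imm64 == 0x000f000f000f000f here) are cleared. */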
4945 if (size < 2 || !u) {
4946 uint64_t imm64;
4947 if (size == 0) {
4948 imm = (0xffu >> (8 - shift));
4949 imm |= imm << 16;
4950 } else if (size == 1) {
4951 imm = 0xffff >> (16 - shift);
4952 } else {
4953 /* size == 2 */
4954 imm = 0xffffffff >> (32 - shift);
4956 if (size < 2) {
4957 imm64 = imm | (((uint64_t)imm) << 32);
4958 } else {
4959 imm64 = imm;
4961 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4964 neon_store_reg64(cpu_V0, rd + pass);
4966 } else if (op >= 14) {
4967 /* VCVT fixed-point. */
4968 /* We have already masked out the must-be-1 top bit of imm6,
4969 * hence this 32-shift where the ARM ARM has 64-imm6.
4970 */
4971 shift = 32 - shift;
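/* For example, imm6 = 0b110000 encodes 64 - 48 = 16 fraction bits:
   with the top bit already masked off the field reads 16, and
   32 - 16 gives the same result. */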
4972 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4973 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4974 if (!(op & 1)) {
4975 if (u)
4976 gen_vfp_ulto(0, shift);
4977 else
4978 gen_vfp_slto(0, shift);
4979 } else {
4980 if (u)
4981 gen_vfp_toul(0, shift);
4982 else
4983 gen_vfp_tosl(0, shift);
4985 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4987 } else {
4988 return 1;
4990 } else { /* (insn & 0x00380080) == 0 */
4991 int invert;
4993 op = (insn >> 8) & 0xf;
4994 /* One register and immediate. */
4995 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4996 invert = (insn & (1 << 5)) != 0;
4997 switch (op) {
4998 case 0: case 1:
4999 /* no-op */
5000 break;
5001 case 2: case 3:
5002 imm <<= 8;
5003 break;
5004 case 4: case 5:
5005 imm <<= 16;
5006 break;
5007 case 6: case 7:
5008 imm <<= 24;
5009 break;
5010 case 8: case 9:
5011 imm |= imm << 16;
5012 break;
5013 case 10: case 11:
5014 imm = (imm << 8) | (imm << 24);
5015 break;
5016 case 12:
5017 imm = (imm << 8) | 0xff;
5018 break;
5019 case 13:
5020 imm = (imm << 16) | 0xffff;
5021 break;
5022 case 14:
5023 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5024 if (invert)
5025 imm = ~imm;
5026 break;
5027 case 15:
5028 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5029 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5030 break;
5032 if (invert)
5033 imm = ~imm;
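/* For example, op 12 with imm8 = 0x55 expands to 0x55ff (the
   "ones-extended" shifted byte), and op 14 replicates the byte into
   all four byte lanes; op 15 expands the standard 8-bit
   floating-point immediate into full single-precision format. */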
5035 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5036 if (op & 1 && op < 12) {
5037 tmp = neon_load_reg(rd, pass);
5038 if (invert) {
5039 /* The immediate value has already been inverted, so
5040 BIC becomes AND. */
5041 tcg_gen_andi_i32(tmp, tmp, imm);
5042 } else {
5043 tcg_gen_ori_i32(tmp, tmp, imm);
5045 } else {
5046 /* VMOV, VMVN. */
5047 tmp = tcg_temp_new_i32();
5048 if (op == 14 && invert) {
5049 uint32_t val;
5050 val = 0;
5051 for (n = 0; n < 4; n++) {
5052 if (imm & (1 << (n + (pass & 1) * 4)))
5053 val |= 0xff << (n * 8);
5055 tcg_gen_movi_i32(tmp, val);
5056 } else {
5057 tcg_gen_movi_i32(tmp, imm);
5060 neon_store_reg(rd, pass, tmp);
5063 } else { /* (insn & 0x00800010 == 0x00800000) */
5064 if (size != 3) {
5065 op = (insn >> 8) & 0xf;
5066 if ((insn & (1 << 6)) == 0) {
5067 /* Three registers of different lengths. */
5068 int src1_wide;
5069 int src2_wide;
5070 int prewiden;
5071 /* prewiden, src1_wide, src2_wide */
5072 static const int neon_3reg_wide[16][3] = {
5073 {1, 0, 0}, /* VADDL */
5074 {1, 1, 0}, /* VADDW */
5075 {1, 0, 0}, /* VSUBL */
5076 {1, 1, 0}, /* VSUBW */
5077 {0, 1, 1}, /* VADDHN */
5078 {0, 0, 0}, /* VABAL */
5079 {0, 1, 1}, /* VSUBHN */
5080 {0, 0, 0}, /* VABDL */
5081 {0, 0, 0}, /* VMLAL */
5082 {0, 0, 0}, /* VQDMLAL */
5083 {0, 0, 0}, /* VMLSL */
5084 {0, 0, 0}, /* VQDMLSL */
5085 {0, 0, 0}, /* Integer VMULL */
5086 {0, 0, 0}, /* VQDMULL */
5087 {0, 0, 0} /* Polynomial VMULL */
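/* For example, op 1 (VADDW) has src1_wide set: the first operand is
   already a full quadword, so only Dm is widened before the add,
   while VADDHN (op 4) takes two wide sources and narrows the
   result. */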
5090 prewiden = neon_3reg_wide[op][0];
5091 src1_wide = neon_3reg_wide[op][1];
5092 src2_wide = neon_3reg_wide[op][2];
5094 if (size == 0 && (op == 9 || op == 11 || op == 13))
5095 return 1;
5097 /* Avoid overlapping operands. Wide source operands are
5098 always aligned so will never overlap with wide
5099 destinations in problematic ways. */
5100 if (rd == rm && !src2_wide) {
5101 tmp = neon_load_reg(rm, 1);
5102 neon_store_scratch(2, tmp);
5103 } else if (rd == rn && !src1_wide) {
5104 tmp = neon_load_reg(rn, 1);
5105 neon_store_scratch(2, tmp);
5107 TCGV_UNUSED(tmp3);
5108 for (pass = 0; pass < 2; pass++) {
5109 if (src1_wide) {
5110 neon_load_reg64(cpu_V0, rn + pass);
5111 TCGV_UNUSED(tmp);
5112 } else {
5113 if (pass == 1 && rd == rn) {
5114 tmp = neon_load_scratch(2);
5115 } else {
5116 tmp = neon_load_reg(rn, pass);
5118 if (prewiden) {
5119 gen_neon_widen(cpu_V0, tmp, size, u);
5122 if (src2_wide) {
5123 neon_load_reg64(cpu_V1, rm + pass);
5124 TCGV_UNUSED(tmp2);
5125 } else {
5126 if (pass == 1 && rd == rm) {
5127 tmp2 = neon_load_scratch(2);
5128 } else {
5129 tmp2 = neon_load_reg(rm, pass);
5131 if (prewiden) {
5132 gen_neon_widen(cpu_V1, tmp2, size, u);
5135 switch (op) {
5136 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5137 gen_neon_addl(size);
5138 break;
5139 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5140 gen_neon_subl(size);
5141 break;
5142 case 5: case 7: /* VABAL, VABDL */
5143 switch ((size << 1) | u) {
5144 case 0:
5145 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5146 break;
5147 case 1:
5148 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5149 break;
5150 case 2:
5151 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5152 break;
5153 case 3:
5154 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5155 break;
5156 case 4:
5157 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5158 break;
5159 case 5:
5160 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5161 break;
5162 default: abort();
5164 tcg_temp_free_i32(tmp2);
5165 tcg_temp_free_i32(tmp);
5166 break;
5167 case 8: case 9: case 10: case 11: case 12: case 13:
5168 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5169 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5170 break;
5171 case 14: /* Polynomial VMULL */
5172 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5173 tcg_temp_free_i32(tmp2);
5174 tcg_temp_free_i32(tmp);
5175 break;
5176 default: /* 15 is RESERVED. */
5177 return 1;
5179 if (op == 13) {
5180 /* VQDMULL */
5181 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5182 neon_store_reg64(cpu_V0, rd + pass);
5183 } else if (op == 5 || (op >= 8 && op <= 11)) {
5184 /* Accumulate. */
5185 neon_load_reg64(cpu_V1, rd + pass);
5186 switch (op) {
5187 case 10: /* VMLSL */
5188 gen_neon_negl(cpu_V0, size);
5189 /* Fall through */
5190 case 5: case 8: /* VABAL, VMLAL */
5191 gen_neon_addl(size);
5192 break;
5193 case 9: case 11: /* VQDMLAL, VQDMLSL */
5194 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5195 if (op == 11) {
5196 gen_neon_negl(cpu_V0, size);
5198 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5199 break;
5200 default:
5201 abort();
5203 neon_store_reg64(cpu_V0, rd + pass);
5204 } else if (op == 4 || op == 6) {
5205 /* Narrowing operation. */
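/* VADDHN/VSUBHN keep the high half of each double-width result,
   e.g. for 64-bit intermediate lanes the top 32 bits of each sum;
   the rounding forms (u set) add 1 << (narrow esize - 1) first. */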
5206 tmp = tcg_temp_new_i32();
5207 if (!u) {
5208 switch (size) {
5209 case 0:
5210 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5211 break;
5212 case 1:
5213 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5214 break;
5215 case 2:
5216 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5217 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5218 break;
5219 default: abort();
5221 } else {
5222 switch (size) {
5223 case 0:
5224 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5225 break;
5226 case 1:
5227 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5228 break;
5229 case 2:
5230 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5231 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5232 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5233 break;
5234 default: abort();
5237 if (pass == 0) {
5238 tmp3 = tmp;
5239 } else {
5240 neon_store_reg(rd, 0, tmp3);
5241 neon_store_reg(rd, 1, tmp);
5243 } else {
5244 /* Write back the result. */
5245 neon_store_reg64(cpu_V0, rd + pass);
5248 } else {
5249 /* Two registers and a scalar. */
5250 switch (op) {
5251 case 0: /* Integer VMLA scalar */
5252 case 1: /* Float VMLA scalar */
5253 case 4: /* Integer VMLS scalar */
5254 case 5: /* Floating point VMLS scalar */
5255 case 8: /* Integer VMUL scalar */
5256 case 9: /* Floating point VMUL scalar */
5257 case 12: /* VQDMULH scalar */
5258 case 13: /* VQRDMULH scalar */
5259 tmp = neon_get_scalar(size, rm);
5260 neon_store_scratch(0, tmp);
5261 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5262 tmp = neon_load_scratch(0);
5263 tmp2 = neon_load_reg(rn, pass);
5264 if (op == 12) {
5265 if (size == 1) {
5266 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5267 } else {
5268 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5270 } else if (op == 13) {
5271 if (size == 1) {
5272 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5273 } else {
5274 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5276 } else if (op & 1) {
5277 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5278 } else {
5279 switch (size) {
5280 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5281 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5282 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5283 default: return 1;
5286 tcg_temp_free_i32(tmp2);
5287 if (op < 8) {
5288 /* Accumulate. */
5289 tmp2 = neon_load_reg(rd, pass);
5290 switch (op) {
5291 case 0:
5292 gen_neon_add(size, tmp, tmp2);
5293 break;
5294 case 1:
5295 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5296 break;
5297 case 4:
5298 gen_neon_rsb(size, tmp, tmp2);
5299 break;
5300 case 5:
5301 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5302 break;
5303 default:
5304 abort();
5306 tcg_temp_free_i32(tmp2);
5308 neon_store_reg(rd, pass, tmp);
5310 break;
5311 case 2: /* VMLAL scalar */
5312 case 3: /* VQDMLAL scalar */
5313 case 6: /* VMLSL scalar */
5314 case 7: /* VQDMLSL scalar */
5315 case 10: /* VMULL scalar */
5316 case 11: /* VQDMULL scalar */
5317 if (size == 0 && (op == 3 || op == 7 || op == 11))
5318 return 1;
5320 tmp2 = neon_get_scalar(size, rm);
5321 /* We need a copy of tmp2 because gen_neon_mull
5322 * deletes it during pass 0. */
5323 tmp4 = tcg_temp_new_i32();
5324 tcg_gen_mov_i32(tmp4, tmp2);
5325 tmp3 = neon_load_reg(rn, 1);
5327 for (pass = 0; pass < 2; pass++) {
5328 if (pass == 0) {
5329 tmp = neon_load_reg(rn, 0);
5330 } else {
5331 tmp = tmp3;
5332 tmp2 = tmp4;
5334 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5335 if (op != 11) {
5336 neon_load_reg64(cpu_V1, rd + pass);
5338 switch (op) {
5339 case 6:
5340 gen_neon_negl(cpu_V0, size);
5341 /* Fall through */
5342 case 2:
5343 gen_neon_addl(size);
5344 break;
5345 case 3: case 7:
5346 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5347 if (op == 7) {
5348 gen_neon_negl(cpu_V0, size);
5350 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5351 break;
5352 case 10:
5353 /* no-op */
5354 break;
5355 case 11:
5356 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5357 break;
5358 default:
5359 abort();
5361 neon_store_reg64(cpu_V0, rd + pass);
5365 break;
5366 default: /* 14 and 15 are RESERVED */
5367 return 1;
5370 } else { /* size == 3 */
5371 if (!u) {
5372 /* Extract. */
5373 imm = (insn >> 8) & 0xf;
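/* VEXT extracts a contiguous window from the concatenation Vm:Vn
   starting at byte #imm.  For example, VEXT.8 d0, d1, d2, #3 gives
   d0 = { d1[3..7], d2[0..2] }. */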
5375 if (imm > 7 && !q)
5376 return 1;
5378 if (imm == 0) {
5379 neon_load_reg64(cpu_V0, rn);
5380 if (q) {
5381 neon_load_reg64(cpu_V1, rn + 1);
5383 } else if (imm == 8) {
5384 neon_load_reg64(cpu_V0, rn + 1);
5385 if (q) {
5386 neon_load_reg64(cpu_V1, rm);
5388 } else if (q) {
5389 tmp64 = tcg_temp_new_i64();
5390 if (imm < 8) {
5391 neon_load_reg64(cpu_V0, rn);
5392 neon_load_reg64(tmp64, rn + 1);
5393 } else {
5394 neon_load_reg64(cpu_V0, rn + 1);
5395 neon_load_reg64(tmp64, rm);
5397 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5398 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5399 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5400 if (imm < 8) {
5401 neon_load_reg64(cpu_V1, rm);
5402 } else {
5403 neon_load_reg64(cpu_V1, rm + 1);
5404 imm -= 8;
5406 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5407 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5408 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5409 tcg_temp_free_i64(tmp64);
5410 } else {
5411 /* BUGFIX */
5412 neon_load_reg64(cpu_V0, rn);
5413 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5414 neon_load_reg64(cpu_V1, rm);
5415 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5416 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5418 neon_store_reg64(cpu_V0, rd);
5419 if (q) {
5420 neon_store_reg64(cpu_V1, rd + 1);
5422 } else if ((insn & (1 << 11)) == 0) {
5423 /* Two register misc. */
5424 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5425 size = (insn >> 18) & 3;
5426 switch (op) {
5427 case 0: /* VREV64 */
5428 if (size == 3)
5429 return 1;
5430 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5431 tmp = neon_load_reg(rm, pass * 2);
5432 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5433 switch (size) {
5434 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5435 case 1: gen_swap_half(tmp); break;
5436 case 2: /* no-op */ break;
5437 default: abort();
5439 neon_store_reg(rd, pass * 2 + 1, tmp);
5440 if (size == 2) {
5441 neon_store_reg(rd, pass * 2, tmp2);
5442 } else {
5443 switch (size) {
5444 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5445 case 1: gen_swap_half(tmp2); break;
5446 default: abort();
5448 neon_store_reg(rd, pass * 2, tmp2);
5451 break;
5452 case 4: case 5: /* VPADDL */
5453 case 12: case 13: /* VPADAL */
5454 if (size == 3)
5455 return 1;
5456 for (pass = 0; pass < q + 1; pass++) {
5457 tmp = neon_load_reg(rm, pass * 2);
5458 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5459 tmp = neon_load_reg(rm, pass * 2 + 1);
5460 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5461 switch (size) {
5462 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5463 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5464 case 2: tcg_gen_add_i64(CPU_V001); break;
5465 default: abort();
5467 if (op >= 12) {
5468 /* Accumulate. */
5469 neon_load_reg64(cpu_V1, rd + pass);
5470 gen_neon_addl(size);
5472 neon_store_reg64(cpu_V0, rd + pass);
5474 break;
5475 case 33: /* VTRN */
5476 if (size == 2) {
5477 for (n = 0; n < (q ? 4 : 2); n += 2) {
5478 tmp = neon_load_reg(rm, n);
5479 tmp2 = neon_load_reg(rd, n + 1);
5480 neon_store_reg(rm, n, tmp2);
5481 neon_store_reg(rd, n + 1, tmp);
5483 } else {
5484 goto elementwise;
5486 break;
5487 case 34: /* VUZP */
5488 if (gen_neon_unzip(rd, rm, size, q)) {
5489 return 1;
5491 break;
5492 case 35: /* VZIP */
5493 if (gen_neon_zip(rd, rm, size, q)) {
5494 return 1;
5496 break;
5497 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5498 if (size == 3)
5499 return 1;
5500 TCGV_UNUSED(tmp2);
5501 for (pass = 0; pass < 2; pass++) {
5502 neon_load_reg64(cpu_V0, rm + pass);
5503 tmp = tcg_temp_new_i32();
5504 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5505 if (pass == 0) {
5506 tmp2 = tmp;
5507 } else {
5508 neon_store_reg(rd, 0, tmp2);
5509 neon_store_reg(rd, 1, tmp);
5512 break;
5513 case 38: /* VSHLL */
5514 if (q || size == 3)
5515 return 1;
5516 tmp = neon_load_reg(rm, 0);
5517 tmp2 = neon_load_reg(rm, 1);
5518 for (pass = 0; pass < 2; pass++) {
5519 if (pass == 1)
5520 tmp = tmp2;
5521 gen_neon_widen(cpu_V0, tmp, size, 1);
5522 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5523 neon_store_reg64(cpu_V0, rd + pass);
5525 break;
5526 case 44: /* VCVT.F16.F32 */
5527 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5528 return 1;
5529 tmp = tcg_temp_new_i32();
5530 tmp2 = tcg_temp_new_i32();
5531 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5532 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5533 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5534 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5535 tcg_gen_shli_i32(tmp2, tmp2, 16);
5536 tcg_gen_or_i32(tmp2, tmp2, tmp);
5537 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5538 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5539 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5540 neon_store_reg(rd, 0, tmp2);
5541 tmp2 = tcg_temp_new_i32();
5542 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5543 tcg_gen_shli_i32(tmp2, tmp2, 16);
5544 tcg_gen_or_i32(tmp2, tmp2, tmp);
5545 neon_store_reg(rd, 1, tmp2);
5546 tcg_temp_free_i32(tmp);
5547 break;
5548 case 46: /* VCVT.F32.F16 */
5549 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5550 return 1;
5551 tmp3 = tcg_temp_new_i32();
5552 tmp = neon_load_reg(rm, 0);
5553 tmp2 = neon_load_reg(rm, 1);
5554 tcg_gen_ext16u_i32(tmp3, tmp);
5555 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5556 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5557 tcg_gen_shri_i32(tmp3, tmp, 16);
5558 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5559 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5560 tcg_temp_free_i32(tmp);
5561 tcg_gen_ext16u_i32(tmp3, tmp2);
5562 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5563 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5564 tcg_gen_shri_i32(tmp3, tmp2, 16);
5565 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5566 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5567 tcg_temp_free_i32(tmp2);
5568 tcg_temp_free_i32(tmp3);
5569 break;
5570 default:
5571 elementwise:
5572 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5573 if (op == 30 || op == 31 || op >= 58) {
5574 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5575 neon_reg_offset(rm, pass));
5576 TCGV_UNUSED(tmp);
5577 } else {
5578 tmp = neon_load_reg(rm, pass);
5580 switch (op) {
5581 case 1: /* VREV32 */
5582 switch (size) {
5583 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5584 case 1: gen_swap_half(tmp); break;
5585 default: return 1;
5587 break;
5588 case 2: /* VREV16 */
5589 if (size != 0)
5590 return 1;
5591 gen_rev16(tmp);
5592 break;
5593 case 8: /* CLS */
5594 switch (size) {
5595 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5596 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5597 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5598 default: return 1;
5600 break;
5601 case 9: /* CLZ */
5602 switch (size) {
5603 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5604 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5605 case 2: gen_helper_clz(tmp, tmp); break;
5606 default: return 1;
5608 break;
5609 case 10: /* CNT */
5610 if (size != 0)
5611 return 1;
5612 gen_helper_neon_cnt_u8(tmp, tmp);
5613 break;
5614 case 11: /* VNOT */
5615 if (size != 0)
5616 return 1;
5617 tcg_gen_not_i32(tmp, tmp);
5618 break;
5619 case 14: /* VQABS */
5620 switch (size) {
5621 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5622 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5623 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5624 default: return 1;
5626 break;
5627 case 15: /* VQNEG */
5628 switch (size) {
5629 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5630 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5631 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5632 default: return 1;
5634 break;
5635 case 16: case 19: /* VCGT #0, VCLE #0 */
5636 tmp2 = tcg_const_i32(0);
5637 switch(size) {
5638 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5639 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5640 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5641 default: return 1;
5643 tcg_temp_free(tmp2);
5644 if (op == 19)
5645 tcg_gen_not_i32(tmp, tmp);
5646 break;
5647 case 17: case 20: /* VCGE #0, VCLT #0 */
5648 tmp2 = tcg_const_i32(0);
5649 switch(size) {
5650 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5651 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5652 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5653 default: return 1;
5655 tcg_temp_free(tmp2);
5656 if (op == 20)
5657 tcg_gen_not_i32(tmp, tmp);
5658 break;
5659 case 18: /* VCEQ #0 */
5660 tmp2 = tcg_const_i32(0);
5661 switch(size) {
5662 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5663 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5664 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5665 default: return 1;
5667 tcg_temp_free(tmp2);
5668 break;
5669 case 22: /* VABS */
5670 switch(size) {
5671 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5672 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5673 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5674 default: return 1;
5676 break;
5677 case 23: /* VNEG */
5678 if (size == 3)
5679 return 1;
5680 tmp2 = tcg_const_i32(0);
5681 gen_neon_rsb(size, tmp, tmp2);
5682 tcg_temp_free(tmp2);
5683 break;
5684 case 24: /* Float VCGT #0 */
5685 tmp2 = tcg_const_i32(0);
5686 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5687 tcg_temp_free(tmp2);
5688 break;
5689 case 25: /* Float VCGE #0 */
5690 tmp2 = tcg_const_i32(0);
5691 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5692 tcg_temp_free(tmp2);
5693 break;
5694 case 26: /* Float VCEQ #0 */
5695 tmp2 = tcg_const_i32(0);
5696 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5697 tcg_temp_free(tmp2);
5698 break;
5699 case 27: /* Float VCLE #0 */
5700 tmp2 = tcg_const_i32(0);
5701 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5702 tcg_temp_free(tmp2);
5703 break;
5704 case 28: /* Float VCLT #0 */
5705 tmp2 = tcg_const_i32(0);
5706 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5707 tcg_temp_free(tmp2);
5708 break;
5709 case 30: /* Float VABS */
5710 gen_vfp_abs(0);
5711 break;
5712 case 31: /* Float VNEG */
5713 gen_vfp_neg(0);
5714 break;
5715 case 32: /* VSWP */
5716 tmp2 = neon_load_reg(rd, pass);
5717 neon_store_reg(rm, pass, tmp2);
5718 break;
5719 case 33: /* VTRN */
5720 tmp2 = neon_load_reg(rd, pass);
5721 switch (size) {
5722 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5723 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5724 case 2: abort();
5725 default: return 1;
5727 neon_store_reg(rm, pass, tmp2);
5728 break;
5729 case 56: /* Integer VRECPE */
5730 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5731 break;
5732 case 57: /* Integer VRSQRTE */
5733 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5734 break;
5735 case 58: /* Float VRECPE */
5736 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5737 break;
5738 case 59: /* Float VRSQRTE */
5739 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5740 break;
5741 case 60: /* VCVT.F32.S32 */
5742 gen_vfp_sito(0);
5743 break;
5744 case 61: /* VCVT.F32.U32 */
5745 gen_vfp_uito(0);
5746 break;
5747 case 62: /* VCVT.S32.F32 */
5748 gen_vfp_tosiz(0);
5749 break;
5750 case 63: /* VCVT.U32.F32 */
5751 gen_vfp_touiz(0);
5752 break;
5753 default:
5754 /* Reserved: 21, 29, 39-56 */
5755 return 1;
5757 if (op == 30 || op == 31 || op >= 58) {
5758 tcg_gen_st_f32(cpu_F0s, cpu_env,
5759 neon_reg_offset(rd, pass));
5760 } else {
5761 neon_store_reg(rd, pass, tmp);
5764 break;
5766 } else if ((insn & (1 << 10)) == 0) {
5767 /* VTBL, VTBX. */
5768 n = ((insn >> 5) & 0x18) + 8;
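/* n is the table length in bytes: (len + 1) * 8, i.e. one to four D
   registers starting at Dn.  Out-of-range indices yield 0 for VTBL;
   VTBX (bit 6 set) leaves the destination byte unchanged, which is
   why Dd's old value is passed to the helper below. */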
5769 if (insn & (1 << 6)) {
5770 tmp = neon_load_reg(rd, 0);
5771 } else {
5772 tmp = tcg_temp_new_i32();
5773 tcg_gen_movi_i32(tmp, 0);
5775 tmp2 = neon_load_reg(rm, 0);
5776 tmp4 = tcg_const_i32(rn);
5777 tmp5 = tcg_const_i32(n);
5778 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5779 tcg_temp_free_i32(tmp);
5780 if (insn & (1 << 6)) {
5781 tmp = neon_load_reg(rd, 1);
5782 } else {
5783 tmp = tcg_temp_new_i32();
5784 tcg_gen_movi_i32(tmp, 0);
5786 tmp3 = neon_load_reg(rm, 1);
5787 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5788 tcg_temp_free_i32(tmp5);
5789 tcg_temp_free_i32(tmp4);
5790 neon_store_reg(rd, 0, tmp2);
5791 neon_store_reg(rd, 1, tmp3);
5792 tcg_temp_free_i32(tmp);
5793 } else if ((insn & 0x380) == 0) {
5794 /* VDUP */
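/* Scalar reference: imm4 in bits [19:16] gives xxx1 = byte,
   xx10 = halfword, x100 = word.  Bit 19 picks the 32-bit half of Dm
   to load; the remaining index bits select the byte or halfword
   within it. */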
5795 if (insn & (1 << 19)) {
5796 tmp = neon_load_reg(rm, 1);
5797 } else {
5798 tmp = neon_load_reg(rm, 0);
5800 if (insn & (1 << 16)) {
5801 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5802 } else if (insn & (1 << 17)) {
5803 if ((insn >> 18) & 1)
5804 gen_neon_dup_high16(tmp);
5805 else
5806 gen_neon_dup_low16(tmp);
5808 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5809 tmp2 = tcg_temp_new_i32();
5810 tcg_gen_mov_i32(tmp2, tmp);
5811 neon_store_reg(rd, pass, tmp2);
5813 tcg_temp_free_i32(tmp);
5814 } else {
5815 return 1;
5819 return 0;
5822 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5824 int crn = (insn >> 16) & 0xf;
5825 int crm = insn & 0xf;
5826 int op1 = (insn >> 21) & 7;
5827 int op2 = (insn >> 5) & 7;
5828 int rt = (insn >> 12) & 0xf;
5829 TCGv tmp;
5831 /* Minimal set of debug registers, since we don't support debug */
5832 if (op1 == 0 && crn == 0 && op2 == 0) {
5833 switch (crm) {
5834 case 0:
5835 /* DBGDIDR: just RAZ. In particular this means the
5836 * "debug architecture version" bits will read as
5837 * a reserved value, which should cause Linux to
5838 * not try to use the debug hardware.
5839 */
5840 tmp = tcg_const_i32(0);
5841 store_reg(s, rt, tmp);
5842 return 0;
5843 case 1:
5844 case 2:
5845 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
5846 * don't implement memory mapped debug components
5847 */
5848 if (ENABLE_ARCH_7) {
5849 tmp = tcg_const_i32(0);
5850 store_reg(s, rt, tmp);
5851 return 0;
5853 break;
5854 default:
5855 break;
5859 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5860 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5861 /* TEECR */
5862 if (IS_USER(s))
5863 return 1;
5864 tmp = load_cpu_field(teecr);
5865 store_reg(s, rt, tmp);
5866 return 0;
5868 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5869 /* TEEHBR */
5870 if (IS_USER(s) && (env->teecr & 1))
5871 return 1;
5872 tmp = load_cpu_field(teehbr);
5873 store_reg(s, rt, tmp);
5874 return 0;
5877 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5878 op1, crn, crm, op2);
5879 return 1;
5882 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5884 int crn = (insn >> 16) & 0xf;
5885 int crm = insn & 0xf;
5886 int op1 = (insn >> 21) & 7;
5887 int op2 = (insn >> 5) & 7;
5888 int rt = (insn >> 12) & 0xf;
5889 TCGv tmp;
5891 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5892 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5893 /* TEECR */
5894 if (IS_USER(s))
5895 return 1;
5896 tmp = load_reg(s, rt);
5897 gen_helper_set_teecr(cpu_env, tmp);
5898 tcg_temp_free_i32(tmp);
5899 return 0;
5901 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5902 /* TEEHBR */
5903 if (IS_USER(s) && (env->teecr & 1))
5904 return 1;
5905 tmp = load_reg(s, rt);
5906 store_cpu_field(tmp, teehbr);
5907 return 0;
5910 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5911 op1, crn, crm, op2);
5912 return 1;
5915 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5917 int cpnum;
5919 cpnum = (insn >> 8) & 0xf;
5920 if (arm_feature(env, ARM_FEATURE_XSCALE)
5921 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5922 return 1;
5924 switch (cpnum) {
5925 case 0:
5926 case 1:
5927 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5928 return disas_iwmmxt_insn(env, s, insn);
5929 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5930 return disas_dsp_insn(env, s, insn);
5932 return 1;
5933 case 10:
5934 case 11:
5935 return disas_vfp_insn (env, s, insn);
5936 case 14:
5937 /* Coprocessors 7-15 are architecturally reserved by ARM.
5938 Unfortunately Intel decided to ignore this. */
5939 if (arm_feature(env, ARM_FEATURE_XSCALE))
5940 goto board;
5941 if (insn & (1 << 20))
5942 return disas_cp14_read(env, s, insn);
5943 else
5944 return disas_cp14_write(env, s, insn);
5945 case 15:
5946 return disas_cp15_insn (env, s, insn);
5947 default:
5948 board:
5949 /* Unknown coprocessor. See if the board has hooked it. */
5950 return disas_cp_insn (env, s, insn);
5955 /* Store a 64-bit value to a register pair. Clobbers val. */
5956 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5958 TCGv tmp;
5959 tmp = tcg_temp_new_i32();
5960 tcg_gen_trunc_i64_i32(tmp, val);
5961 store_reg(s, rlow, tmp);
5962 tmp = tcg_temp_new_i32();
5963 tcg_gen_shri_i64(val, val, 32);
5964 tcg_gen_trunc_i64_i32(tmp, val);
5965 store_reg(s, rhigh, tmp);
5968 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5969 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5971 TCGv_i64 tmp;
5972 TCGv tmp2;
5974 /* Load value and extend to 64 bits. */
5975 tmp = tcg_temp_new_i64();
5976 tmp2 = load_reg(s, rlow);
5977 tcg_gen_extu_i32_i64(tmp, tmp2);
5978 tcg_temp_free_i32(tmp2);
5979 tcg_gen_add_i64(val, val, tmp);
5980 tcg_temp_free_i64(tmp);
5983 /* load and add a 64-bit value from a register pair. */
5984 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5986 TCGv_i64 tmp;
5987 TCGv tmpl;
5988 TCGv tmph;
5990 /* Load 64-bit value rd:rn. */
5991 tmpl = load_reg(s, rlow);
5992 tmph = load_reg(s, rhigh);
5993 tmp = tcg_temp_new_i64();
5994 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5995 tcg_temp_free_i32(tmpl);
5996 tcg_temp_free_i32(tmph);
5997 tcg_gen_add_i64(val, val, tmp);
5998 tcg_temp_free_i64(tmp);
6001 /* Set N and Z flags from a 64-bit value. */
6002 static void gen_logicq_cc(TCGv_i64 val)
6004 TCGv tmp = tcg_temp_new_i32();
6005 gen_helper_logicq_cc(tmp, val);
6006 gen_logic_CC(tmp);
6007 tcg_temp_free_i32(tmp);
6010 /* Load/Store exclusive instructions are implemented by remembering
6011 the value/address loaded, and seeing if these are the same
6013 when the store is performed. This should be sufficient to implement
6013 the architecturally mandated semantics, and avoids having to monitor
6014 regular stores.
6016 In system emulation mode only one CPU will be running at once, so
6017 this sequence is effectively atomic. In user emulation mode we
6018 throw an exception and handle the atomic operation elsewhere. */
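/* For example (illustrative guest code), an atomic increment:
       retry: ldrex  r1, [r0]
              add    r1, r1, #1
              strex  r2, r1, [r0]
              cmp    r2, #0
              bne    retry
   Here the strex succeeds (r2 == 0) only if the address and the
   loaded value still match what the ldrex recorded. */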
6019 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6020 TCGv addr, int size)
6022 TCGv tmp;
6024 switch (size) {
6025 case 0:
6026 tmp = gen_ld8u(addr, IS_USER(s));
6027 break;
6028 case 1:
6029 tmp = gen_ld16u(addr, IS_USER(s));
6030 break;
6031 case 2:
6032 case 3:
6033 tmp = gen_ld32(addr, IS_USER(s));
6034 break;
6035 default:
6036 abort();
6038 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6039 store_reg(s, rt, tmp);
6040 if (size == 3) {
6041 TCGv tmp2 = tcg_temp_new_i32();
6042 tcg_gen_addi_i32(tmp2, addr, 4);
6043 tmp = gen_ld32(tmp2, IS_USER(s));
6044 tcg_temp_free_i32(tmp2);
6045 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6046 store_reg(s, rt2, tmp);
6048 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6051 static void gen_clrex(DisasContext *s)
6053 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6056 #ifdef CONFIG_USER_ONLY
6057 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6058 TCGv addr, int size)
6060 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6061 tcg_gen_movi_i32(cpu_exclusive_info,
6062 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6063 gen_exception_insn(s, 4, EXCP_STREX);
6065 #else
6066 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6067 TCGv addr, int size)
6069 TCGv tmp;
6070 int done_label;
6071 int fail_label;
6073 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6074 [addr] = {Rt};
6075 {Rd} = 0;
6076 } else {
6077 {Rd} = 1;
6078 } */
6079 fail_label = gen_new_label();
6080 done_label = gen_new_label();
6081 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6082 switch (size) {
6083 case 0:
6084 tmp = gen_ld8u(addr, IS_USER(s));
6085 break;
6086 case 1:
6087 tmp = gen_ld16u(addr, IS_USER(s));
6088 break;
6089 case 2:
6090 case 3:
6091 tmp = gen_ld32(addr, IS_USER(s));
6092 break;
6093 default:
6094 abort();
6096 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6097 tcg_temp_free_i32(tmp);
6098 if (size == 3) {
6099 TCGv tmp2 = tcg_temp_new_i32();
6100 tcg_gen_addi_i32(tmp2, addr, 4);
6101 tmp = gen_ld32(tmp2, IS_USER(s));
6102 tcg_temp_free_i32(tmp2);
6103 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6104 tcg_temp_free_i32(tmp);
6106 tmp = load_reg(s, rt);
6107 switch (size) {
6108 case 0:
6109 gen_st8(tmp, addr, IS_USER(s));
6110 break;
6111 case 1:
6112 gen_st16(tmp, addr, IS_USER(s));
6113 break;
6114 case 2:
6115 case 3:
6116 gen_st32(tmp, addr, IS_USER(s));
6117 break;
6118 default:
6119 abort();
6121 if (size == 3) {
6122 tcg_gen_addi_i32(addr, addr, 4);
6123 tmp = load_reg(s, rt2);
6124 gen_st32(tmp, addr, IS_USER(s));
6126 tcg_gen_movi_i32(cpu_R[rd], 0);
6127 tcg_gen_br(done_label);
6128 gen_set_label(fail_label);
6129 tcg_gen_movi_i32(cpu_R[rd], 1);
6130 gen_set_label(done_label);
6131 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6133 #endif
6135 static void disas_arm_insn(CPUState * env, DisasContext *s)
6137 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6138 TCGv tmp;
6139 TCGv tmp2;
6140 TCGv tmp3;
6141 TCGv addr;
6142 TCGv_i64 tmp64;
6144 insn = ldl_code(s->pc);
6145 s->pc += 4;
6147 /* M variants do not implement ARM mode. */
6148 if (IS_M(env))
6149 goto illegal_op;
6150 cond = insn >> 28;
6151 if (cond == 0xf){
6152 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6153 * choose to UNDEF. In ARMv5 and above the space is used
6154 * for miscellaneous unconditional instructions.
6155 */
6156 ARCH(5);
6158 /* Unconditional instructions. */
6159 if (((insn >> 25) & 7) == 1) {
6160 /* NEON Data processing. */
6161 if (!arm_feature(env, ARM_FEATURE_NEON))
6162 goto illegal_op;
6164 if (disas_neon_data_insn(env, s, insn))
6165 goto illegal_op;
6166 return;
6168 if ((insn & 0x0f100000) == 0x04000000) {
6169 /* NEON load/store. */
6170 if (!arm_feature(env, ARM_FEATURE_NEON))
6171 goto illegal_op;
6173 if (disas_neon_ls_insn(env, s, insn))
6174 goto illegal_op;
6175 return;
6177 if (((insn & 0x0f30f000) == 0x0510f000) ||
6178 ((insn & 0x0f30f010) == 0x0710f000)) {
6179 if ((insn & (1 << 22)) == 0) {
6180 /* PLDW; v7MP */
6181 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6182 goto illegal_op;
6185 /* Otherwise PLD; v5TE+ */
6186 ARCH(5TE);
6187 return;
6189 if (((insn & 0x0f70f000) == 0x0450f000) ||
6190 ((insn & 0x0f70f010) == 0x0650f000)) {
6191 ARCH(7);
6192 return; /* PLI; V7 */
6194 if (((insn & 0x0f700000) == 0x04100000) ||
6195 ((insn & 0x0f700010) == 0x06100000)) {
6196 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6197 goto illegal_op;
6199 return; /* v7MP: Unallocated memory hint: must NOP */
6202 if ((insn & 0x0ffffdff) == 0x01010000) {
6203 ARCH(6);
6204 /* setend */
6205 if (insn & (1 << 9)) {
6206 /* BE8 mode not implemented. */
6207 goto illegal_op;
6209 return;
6210 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6211 switch ((insn >> 4) & 0xf) {
6212 case 1: /* clrex */
6213 ARCH(6K);
6214 gen_clrex(s);
6215 return;
6216 case 4: /* dsb */
6217 case 5: /* dmb */
6218 case 6: /* isb */
6219 ARCH(7);
6220 /* We don't emulate caches so these are a no-op. */
6221 return;
6222 default:
6223 goto illegal_op;
6225 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6226 /* srs */
6227 int32_t offset;
6228 if (IS_USER(s))
6229 goto illegal_op;
6230 ARCH(6);
6231 op1 = (insn & 0x1f);
6232 addr = tcg_temp_new_i32();
6233 tmp = tcg_const_i32(op1);
6234 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6235 tcg_temp_free_i32(tmp);
6236 i = (insn >> 23) & 3;
6237 switch (i) {
6238 case 0: offset = -4; break; /* DA */
6239 case 1: offset = 0; break; /* IA */
6240 case 2: offset = -8; break; /* DB */
6241 case 3: offset = 4; break; /* IB */
6242 default: abort();
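/* This first offset positions addr at the lower of the two words to
   be stored.  For example, DB (i == 2) stores R14 at base - 8 and
   SPSR at base - 4; the writeback adjustment below then leaves
   base - 8 in the banked SP. */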
6244 if (offset)
6245 tcg_gen_addi_i32(addr, addr, offset);
6246 tmp = load_reg(s, 14);
6247 gen_st32(tmp, addr, 0);
6248 tmp = load_cpu_field(spsr);
6249 tcg_gen_addi_i32(addr, addr, 4);
6250 gen_st32(tmp, addr, 0);
6251 if (insn & (1 << 21)) {
6252 /* Base writeback. */
6253 switch (i) {
6254 case 0: offset = -8; break;
6255 case 1: offset = 4; break;
6256 case 2: offset = -4; break;
6257 case 3: offset = 0; break;
6258 default: abort();
6260 if (offset)
6261 tcg_gen_addi_i32(addr, addr, offset);
6262 tmp = tcg_const_i32(op1);
6263 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6264 tcg_temp_free_i32(tmp);
6265 tcg_temp_free_i32(addr);
6266 } else {
6267 tcg_temp_free_i32(addr);
6269 return;
6270 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6271 /* rfe */
6272 int32_t offset;
6273 if (IS_USER(s))
6274 goto illegal_op;
6275 ARCH(6);
6276 rn = (insn >> 16) & 0xf;
6277 addr = load_reg(s, rn);
6278 i = (insn >> 23) & 3;
6279 switch (i) {
6280 case 0: offset = -4; break; /* DA */
6281 case 1: offset = 0; break; /* IA */
6282 case 2: offset = -8; break; /* DB */
6283 case 3: offset = 4; break; /* IB */
6284 default: abort();
6286 if (offset)
6287 tcg_gen_addi_i32(addr, addr, offset);
6288 /* Load PC into tmp and CPSR into tmp2. */
6289 tmp = gen_ld32(addr, 0);
6290 tcg_gen_addi_i32(addr, addr, 4);
6291 tmp2 = gen_ld32(addr, 0);
6292 if (insn & (1 << 21)) {
6293 /* Base writeback. */
6294 switch (i) {
6295 case 0: offset = -8; break;
6296 case 1: offset = 4; break;
6297 case 2: offset = -4; break;
6298 case 3: offset = 0; break;
6299 default: abort();
6301 if (offset)
6302 tcg_gen_addi_i32(addr, addr, offset);
6303 store_reg(s, rn, addr);
6304 } else {
6305 tcg_temp_free_i32(addr);
6307 gen_rfe(s, tmp, tmp2);
6308 return;
6309 } else if ((insn & 0x0e000000) == 0x0a000000) {
6310 /* branch link and change to thumb (blx <offset>) */
6311 int32_t offset;
6313 val = (uint32_t)s->pc;
6314 tmp = tcg_temp_new_i32();
6315 tcg_gen_movi_i32(tmp, val);
6316 store_reg(s, 14, tmp);
6317 /* Sign-extend the 24-bit offset */
6318 offset = (((int32_t)insn) << 8) >> 8;
6319 /* offset * 4 + bit24 * 2 + (thumb bit) */
6320 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6321 /* pipeline offset */
6322 val += 4;
6323 /* protected by ARCH(5); above, near the start of uncond block */
6324 gen_bx_im(s, val);
6325 return;
6326 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6327 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6328 /* iWMMXt register transfer. */
6329 if (env->cp15.c15_cpar & (1 << 1))
6330 if (!disas_iwmmxt_insn(env, s, insn))
6331 return;
6333 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6334 /* Coprocessor double register transfer. */
6335 ARCH(5TE);
6336 } else if ((insn & 0x0f000010) == 0x0e000010) {
6337 /* Additional coprocessor register transfer. */
6338 } else if ((insn & 0x0ff10020) == 0x01000000) {
6339 uint32_t mask;
6340 uint32_t val;
6341 /* cps (privileged) */
6342 if (IS_USER(s))
6343 return;
6344 mask = val = 0;
6345 if (insn & (1 << 19)) {
6346 if (insn & (1 << 8))
6347 mask |= CPSR_A;
6348 if (insn & (1 << 7))
6349 mask |= CPSR_I;
6350 if (insn & (1 << 6))
6351 mask |= CPSR_F;
6352 if (insn & (1 << 18))
6353 val |= mask;
6355 if (insn & (1 << 17)) {
6356 mask |= CPSR_M;
6357 val |= (insn & 0x1f);
6359 if (mask) {
6360 gen_set_psr_im(s, mask, 0, val);
6362 return;
6364 goto illegal_op;
6366 if (cond != 0xe) {
6367 /* if not always execute, we generate a conditional jump to
6368 next instruction */
6369 s->condlabel = gen_new_label();
6370 gen_test_cc(cond ^ 1, s->condlabel);
6371 s->condjmp = 1;
6373 if ((insn & 0x0f900000) == 0x03000000) {
6374 if ((insn & (1 << 21)) == 0) {
6375 ARCH(6T2);
6376 rd = (insn >> 12) & 0xf;
6377 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
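/* imm16 is split in the encoding as imm4:imm12 (bits [19:16] and
   [11:0]).  For example, MOVW r0, #0x1234 followed by
   MOVT r0, #0x5678 leaves 0x56781234 in r0. */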
6378 if ((insn & (1 << 22)) == 0) {
6379 /* MOVW */
6380 tmp = tcg_temp_new_i32();
6381 tcg_gen_movi_i32(tmp, val);
6382 } else {
6383 /* MOVT */
6384 tmp = load_reg(s, rd);
6385 tcg_gen_ext16u_i32(tmp, tmp);
6386 tcg_gen_ori_i32(tmp, tmp, val << 16);
6388 store_reg(s, rd, tmp);
6389 } else {
6390 if (((insn >> 12) & 0xf) != 0xf)
6391 goto illegal_op;
6392 if (((insn >> 16) & 0xf) == 0) {
6393 gen_nop_hint(s, insn & 0xff);
6394 } else {
6395 /* CPSR = immediate */
6396 val = insn & 0xff;
6397 shift = ((insn >> 8) & 0xf) * 2;
6398 if (shift)
6399 val = (val >> shift) | (val << (32 - shift));
6400 i = ((insn & (1 << 22)) != 0);
6401 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6402 goto illegal_op;
6405 } else if ((insn & 0x0f900000) == 0x01000000
6406 && (insn & 0x00000090) != 0x00000090) {
6407 /* miscellaneous instructions */
6408 op1 = (insn >> 21) & 3;
6409 sh = (insn >> 4) & 0xf;
6410 rm = insn & 0xf;
6411 switch (sh) {
6412 case 0x0: /* move program status register */
6413 if (op1 & 1) {
6414 /* PSR = reg */
6415 tmp = load_reg(s, rm);
6416 i = ((op1 & 2) != 0);
6417 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6418 goto illegal_op;
6419 } else {
6420 /* reg = PSR */
6421 rd = (insn >> 12) & 0xf;
6422 if (op1 & 2) {
6423 if (IS_USER(s))
6424 goto illegal_op;
6425 tmp = load_cpu_field(spsr);
6426 } else {
6427 tmp = tcg_temp_new_i32();
6428 gen_helper_cpsr_read(tmp);
6430 store_reg(s, rd, tmp);
6432 break;
6433 case 0x1:
6434 if (op1 == 1) {
6435 /* branch/exchange thumb (bx). */
6436 ARCH(4T);
6437 tmp = load_reg(s, rm);
6438 gen_bx(s, tmp);
6439 } else if (op1 == 3) {
6440 /* clz */
6441 ARCH(5);
6442 rd = (insn >> 12) & 0xf;
6443 tmp = load_reg(s, rm);
6444 gen_helper_clz(tmp, tmp);
6445 store_reg(s, rd, tmp);
6446 } else {
6447 goto illegal_op;
6449 break;
6450 case 0x2:
6451 if (op1 == 1) {
6452 ARCH(5J); /* bxj */
6453 /* Trivial implementation equivalent to bx. */
6454 tmp = load_reg(s, rm);
6455 gen_bx(s, tmp);
6456 } else {
6457 goto illegal_op;
6459 break;
6460 case 0x3:
6461 if (op1 != 1)
6462 goto illegal_op;
6464 ARCH(5);
6465 /* branch link/exchange thumb (blx) */
6466 tmp = load_reg(s, rm);
6467 tmp2 = tcg_temp_new_i32();
6468 tcg_gen_movi_i32(tmp2, s->pc);
6469 store_reg(s, 14, tmp2);
6470 gen_bx(s, tmp);
6471 break;
6472 case 0x5: /* saturating add/subtract */
6473 ARCH(5TE);
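/* op1 (bits [22:21]) selects QADD (0), QSUB (1), QDADD (2) or
   QDSUB (3); the doubling forms saturate 2 * Rn before the final
   saturating add/subtract, e.g. QDADD rd, rm, rn computes
   sat(rm + sat(2 * rn)). */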
6474 rd = (insn >> 12) & 0xf;
6475 rn = (insn >> 16) & 0xf;
6476 tmp = load_reg(s, rm);
6477 tmp2 = load_reg(s, rn);
6478 if (op1 & 2)
6479 gen_helper_double_saturate(tmp2, tmp2);
6480 if (op1 & 1)
6481 gen_helper_sub_saturate(tmp, tmp, tmp2);
6482 else
6483 gen_helper_add_saturate(tmp, tmp, tmp2);
6484 tcg_temp_free_i32(tmp2);
6485 store_reg(s, rd, tmp);
6486 break;
6487 case 7:
6488 /* SMC instruction (op1 == 3)
6489 and undefined instructions (op1 == 0 || op1 == 2)
6490 will trap */
6491 if (op1 != 1) {
6492 goto illegal_op;
6494 /* bkpt */
6495 ARCH(5);
6496 gen_exception_insn(s, 4, EXCP_BKPT);
6497 break;
6498 case 0x8: /* signed multiply */
6499 case 0xa:
6500 case 0xc:
6501 case 0xe:
6502 ARCH(5TE);
6503 rs = (insn >> 8) & 0xf;
6504 rn = (insn >> 12) & 0xf;
6505 rd = (insn >> 16) & 0xf;
6506 if (op1 == 1) {
6507 /* (32 * 16) >> 16 */
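/* SMULWy/SMLAWy: only the top 32 bits of the 48-bit product are
   kept, i.e. rd = (rm * sign_extend(y half of rs)) >> 16; when bit 1
   of sh is clear the SMLAW form also adds rn, setting the Q flag
   on overflow. */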
6508 tmp = load_reg(s, rm);
6509 tmp2 = load_reg(s, rs);
6510 if (sh & 4)
6511 tcg_gen_sari_i32(tmp2, tmp2, 16);
6512 else
6513 gen_sxth(tmp2);
6514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6515 tcg_gen_shri_i64(tmp64, tmp64, 16);
6516 tmp = tcg_temp_new_i32();
6517 tcg_gen_trunc_i64_i32(tmp, tmp64);
6518 tcg_temp_free_i64(tmp64);
6519 if ((sh & 2) == 0) {
6520 tmp2 = load_reg(s, rn);
6521 gen_helper_add_setq(tmp, tmp, tmp2);
6522 tcg_temp_free_i32(tmp2);
6524 store_reg(s, rd, tmp);
6525 } else {
6526 /* 16 * 16 */
6527 tmp = load_reg(s, rm);
6528 tmp2 = load_reg(s, rs);
6529 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6530 tcg_temp_free_i32(tmp2);
6531 if (op1 == 2) {
6532 tmp64 = tcg_temp_new_i64();
6533 tcg_gen_ext_i32_i64(tmp64, tmp);
6534 tcg_temp_free_i32(tmp);
6535 gen_addq(s, tmp64, rn, rd);
6536 gen_storeq_reg(s, rn, rd, tmp64);
6537 tcg_temp_free_i64(tmp64);
6538 } else {
6539 if (op1 == 0) {
6540 tmp2 = load_reg(s, rn);
6541 gen_helper_add_setq(tmp, tmp, tmp2);
6542 tcg_temp_free_i32(tmp2);
6544 store_reg(s, rd, tmp);
6547 break;
6548 default:
6549 goto illegal_op;
6551 } else if (((insn & 0x0e000000) == 0 &&
6552 (insn & 0x00000090) != 0x90) ||
6553 ((insn & 0x0e000000) == (1 << 25))) {
6554 int set_cc, logic_cc, shiftop;
6556 op1 = (insn >> 21) & 0xf;
6557 set_cc = (insn >> 20) & 1;
6558 logic_cc = table_logic_cc[op1] & set_cc;
6560 /* data processing instruction */
6561 if (insn & (1 << 25)) {
6562 /* immediate operand */
6563 val = insn & 0xff;
6564 shift = ((insn >> 8) & 0xf) * 2;
6565 if (shift) {
6566 val = (val >> shift) | (val << (32 - shift));
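/* For example, the 12-bit immediate 0x4ff encodes 0xff rotated
   right by 2 * 4 = 8, i.e. 0xff000000. */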
6568 tmp2 = tcg_temp_new_i32();
6569 tcg_gen_movi_i32(tmp2, val);
6570 if (logic_cc && shift) {
6571 gen_set_CF_bit31(tmp2);
6573 } else {
6574 /* register */
6575 rm = (insn) & 0xf;
6576 tmp2 = load_reg(s, rm);
6577 shiftop = (insn >> 5) & 3;
6578 if (!(insn & (1 << 4))) {
6579 shift = (insn >> 7) & 0x1f;
6580 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6581 } else {
6582 rs = (insn >> 8) & 0xf;
6583 tmp = load_reg(s, rs);
6584                    gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6585                }
6587 if (op1 != 0x0f && op1 != 0x0d) {
6588 rn = (insn >> 16) & 0xf;
6589 tmp = load_reg(s, rn);
6590 } else {
6591                TCGV_UNUSED(tmp);
6592            }
6593 rd = (insn >> 12) & 0xf;
6594 switch(op1) {
6595 case 0x00:
6596 tcg_gen_and_i32(tmp, tmp, tmp2);
6597 if (logic_cc) {
6598 gen_logic_CC(tmp);
6600 store_reg_bx(env, s, rd, tmp);
6601 break;
6602 case 0x01:
6603 tcg_gen_xor_i32(tmp, tmp, tmp2);
6604 if (logic_cc) {
6605 gen_logic_CC(tmp);
6607 store_reg_bx(env, s, rd, tmp);
6608 break;
6609 case 0x02:
6610 if (set_cc && rd == 15) {
6611 /* SUBS r15, ... is used for exception return. */
6612 if (IS_USER(s)) {
6613                        goto illegal_op;
6614                    }
6615 gen_helper_sub_cc(tmp, tmp, tmp2);
6616 gen_exception_return(s, tmp);
6617 } else {
6618 if (set_cc) {
6619 gen_helper_sub_cc(tmp, tmp, tmp2);
6620 } else {
6621 tcg_gen_sub_i32(tmp, tmp, tmp2);
6623 store_reg_bx(env, s, rd, tmp);
6625 break;
6626 case 0x03:
6627 if (set_cc) {
6628 gen_helper_sub_cc(tmp, tmp2, tmp);
6629 } else {
6630 tcg_gen_sub_i32(tmp, tmp2, tmp);
6632 store_reg_bx(env, s, rd, tmp);
6633 break;
6634 case 0x04:
6635 if (set_cc) {
6636 gen_helper_add_cc(tmp, tmp, tmp2);
6637 } else {
6638 tcg_gen_add_i32(tmp, tmp, tmp2);
6640 store_reg_bx(env, s, rd, tmp);
6641 break;
6642 case 0x05:
6643 if (set_cc) {
6644 gen_helper_adc_cc(tmp, tmp, tmp2);
6645 } else {
6646 gen_add_carry(tmp, tmp, tmp2);
6648 store_reg_bx(env, s, rd, tmp);
6649 break;
6650 case 0x06:
6651 if (set_cc) {
6652 gen_helper_sbc_cc(tmp, tmp, tmp2);
6653 } else {
6654 gen_sub_carry(tmp, tmp, tmp2);
6656 store_reg_bx(env, s, rd, tmp);
6657 break;
6658 case 0x07:
6659 if (set_cc) {
6660 gen_helper_sbc_cc(tmp, tmp2, tmp);
6661 } else {
6662 gen_sub_carry(tmp, tmp2, tmp);
6664 store_reg_bx(env, s, rd, tmp);
6665 break;
6666 case 0x08:
6667 if (set_cc) {
6668 tcg_gen_and_i32(tmp, tmp, tmp2);
6669 gen_logic_CC(tmp);
6671 tcg_temp_free_i32(tmp);
6672 break;
6673 case 0x09:
6674 if (set_cc) {
6675 tcg_gen_xor_i32(tmp, tmp, tmp2);
6676 gen_logic_CC(tmp);
6678 tcg_temp_free_i32(tmp);
6679 break;
6680 case 0x0a:
6681 if (set_cc) {
6682 gen_helper_sub_cc(tmp, tmp, tmp2);
6684 tcg_temp_free_i32(tmp);
6685 break;
6686 case 0x0b:
6687 if (set_cc) {
6688 gen_helper_add_cc(tmp, tmp, tmp2);
6690 tcg_temp_free_i32(tmp);
6691 break;
6692 case 0x0c:
6693 tcg_gen_or_i32(tmp, tmp, tmp2);
6694 if (logic_cc) {
6695 gen_logic_CC(tmp);
6697 store_reg_bx(env, s, rd, tmp);
6698 break;
6699 case 0x0d:
6700 if (logic_cc && rd == 15) {
6701 /* MOVS r15, ... is used for exception return. */
6702 if (IS_USER(s)) {
6703                        goto illegal_op;
6704                    }
6705 gen_exception_return(s, tmp2);
6706 } else {
6707 if (logic_cc) {
6708 gen_logic_CC(tmp2);
6710 store_reg_bx(env, s, rd, tmp2);
6712 break;
6713 case 0x0e:
6714 tcg_gen_andc_i32(tmp, tmp, tmp2);
6715 if (logic_cc) {
6716 gen_logic_CC(tmp);
6718 store_reg_bx(env, s, rd, tmp);
6719 break;
6720 default:
6721 case 0x0f:
6722 tcg_gen_not_i32(tmp2, tmp2);
6723 if (logic_cc) {
6724 gen_logic_CC(tmp2);
6726 store_reg_bx(env, s, rd, tmp2);
6727                break;
6728            }
6729 if (op1 != 0x0f && op1 != 0x0d) {
6730 tcg_temp_free_i32(tmp2);
6732 } else {
6733 /* other instructions */
6734 op1 = (insn >> 24) & 0xf;
6735 switch(op1) {
6736 case 0x0:
6737 case 0x1:
6738 /* multiplies, extra load/stores */
6739 sh = (insn >> 5) & 3;
6740 if (sh == 0) {
6741 if (op1 == 0x0) {
6742 rd = (insn >> 16) & 0xf;
6743 rn = (insn >> 12) & 0xf;
6744 rs = (insn >> 8) & 0xf;
6745 rm = (insn) & 0xf;
6746 op1 = (insn >> 20) & 0xf;
6747 switch (op1) {
6748 case 0: case 1: case 2: case 3: case 6:
6749 /* 32 bit mul */
6750 tmp = load_reg(s, rs);
6751 tmp2 = load_reg(s, rm);
6752 tcg_gen_mul_i32(tmp, tmp, tmp2);
6753 tcg_temp_free_i32(tmp2);
6754 if (insn & (1 << 22)) {
6755 /* Subtract (mls) */
6756 ARCH(6T2);
6757 tmp2 = load_reg(s, rn);
6758 tcg_gen_sub_i32(tmp, tmp2, tmp);
6759 tcg_temp_free_i32(tmp2);
6760 } else if (insn & (1 << 21)) {
6761 /* Add */
6762 tmp2 = load_reg(s, rn);
6763 tcg_gen_add_i32(tmp, tmp, tmp2);
6764 tcg_temp_free_i32(tmp2);
6766 if (insn & (1 << 20))
6767 gen_logic_CC(tmp);
6768 store_reg(s, rd, tmp);
6769 break;
6770 case 4:
6771 /* 64 bit mul double accumulate (UMAAL) */
6772 ARCH(6);
6773 tmp = load_reg(s, rs);
6774 tmp2 = load_reg(s, rm);
6775 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6776 gen_addq_lo(s, tmp64, rn);
6777 gen_addq_lo(s, tmp64, rd);
6778 gen_storeq_reg(s, rn, rd, tmp64);
6779 tcg_temp_free_i64(tmp64);
6780 break;
6781 case 8: case 9: case 10: case 11:
6782 case 12: case 13: case 14: case 15:
6783 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6784 tmp = load_reg(s, rs);
6785 tmp2 = load_reg(s, rm);
6786 if (insn & (1 << 22)) {
6787 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6788 } else {
6789 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6791 if (insn & (1 << 21)) { /* mult accumulate */
6792 gen_addq(s, tmp64, rn, rd);
6794 if (insn & (1 << 20)) {
6795 gen_logicq_cc(tmp64);
6797 gen_storeq_reg(s, rn, rd, tmp64);
6798 tcg_temp_free_i64(tmp64);
6799 break;
6800 default:
6801 goto illegal_op;
6803 } else {
6804 rn = (insn >> 16) & 0xf;
6805 rd = (insn >> 12) & 0xf;
6806 if (insn & (1 << 23)) {
6807 /* load/store exclusive */
6808 op1 = (insn >> 21) & 0x3;
6809 if (op1)
6810 ARCH(6K);
6811 else
6812 ARCH(6);
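                        /* op1 selects the access size: 0 = word (ldrex/strex,
                           ARMv6), 1 = doubleword, 2 = byte, 3 = halfword
                           (ARMv6K).  The last argument passed to the
                           gen_{load,store}_exclusive helpers below is the
                           log2 of that size.  */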
6813 addr = tcg_temp_local_new_i32();
6814 load_reg_var(s, addr, rn);
6815 if (insn & (1 << 20)) {
6816 switch (op1) {
6817 case 0: /* ldrex */
6818 gen_load_exclusive(s, rd, 15, addr, 2);
6819 break;
6820 case 1: /* ldrexd */
6821 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6822 break;
6823 case 2: /* ldrexb */
6824 gen_load_exclusive(s, rd, 15, addr, 0);
6825 break;
6826 case 3: /* ldrexh */
6827 gen_load_exclusive(s, rd, 15, addr, 1);
6828 break;
6829 default:
6830                            abort();
6831                    }
6832 } else {
6833 rm = insn & 0xf;
6834 switch (op1) {
6835 case 0: /* strex */
6836 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6837 break;
6838 case 1: /* strexd */
6839 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6840 break;
6841 case 2: /* strexb */
6842 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6843 break;
6844 case 3: /* strexh */
6845 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6846 break;
6847 default:
6848                            abort();
6849                    }
6850                }
6851                tcg_temp_free(addr);
6852 } else {
6853 /* SWP instruction */
6854 rm = (insn) & 0xf;
6856 /* ??? This is not really atomic. However we know
6857 we never have multiple CPUs running in parallel,
6858 so it is good enough. */
6859 addr = load_reg(s, rn);
6860 tmp = load_reg(s, rm);
6861 if (insn & (1 << 22)) {
6862 tmp2 = gen_ld8u(addr, IS_USER(s));
6863 gen_st8(tmp, addr, IS_USER(s));
6864 } else {
6865 tmp2 = gen_ld32(addr, IS_USER(s));
6866 gen_st32(tmp, addr, IS_USER(s));
6868 tcg_temp_free_i32(addr);
6869 store_reg(s, rd, tmp2);
6872 } else {
6873 int address_offset;
6874 int load;
6875 /* Misc load/store */
6876 rn = (insn >> 16) & 0xf;
6877 rd = (insn >> 12) & 0xf;
6878 addr = load_reg(s, rn);
6879 if (insn & (1 << 24))
6880 gen_add_datah_offset(s, insn, 0, addr);
6881 address_offset = 0;
6882 if (insn & (1 << 20)) {
6883 /* load */
6884 switch(sh) {
6885 case 1:
6886 tmp = gen_ld16u(addr, IS_USER(s));
6887 break;
6888 case 2:
6889 tmp = gen_ld8s(addr, IS_USER(s));
6890 break;
6891 default:
6892 case 3:
6893 tmp = gen_ld16s(addr, IS_USER(s));
6894 break;
6896 load = 1;
6897 } else if (sh & 2) {
6898 ARCH(5TE);
6899 /* doubleword */
6900 if (sh & 1) {
6901 /* store */
6902 tmp = load_reg(s, rd);
6903 gen_st32(tmp, addr, IS_USER(s));
6904 tcg_gen_addi_i32(addr, addr, 4);
6905 tmp = load_reg(s, rd + 1);
6906 gen_st32(tmp, addr, IS_USER(s));
6907 load = 0;
6908 } else {
6909 /* load */
6910 tmp = gen_ld32(addr, IS_USER(s));
6911 store_reg(s, rd, tmp);
6912 tcg_gen_addi_i32(addr, addr, 4);
6913 tmp = gen_ld32(addr, IS_USER(s));
6914 rd++;
6915 load = 1;
6917 address_offset = -4;
6918 } else {
6919 /* store */
6920 tmp = load_reg(s, rd);
6921 gen_st16(tmp, addr, IS_USER(s));
6922 load = 0;
6924 /* Perform base writeback before the loaded value to
6925 ensure correct behavior with overlapping index registers.
6926                       ldrd with base writeback is undefined if the
6927 destination and index registers overlap. */
6928 if (!(insn & (1 << 24))) {
6929 gen_add_datah_offset(s, insn, address_offset, addr);
6930 store_reg(s, rn, addr);
6931 } else if (insn & (1 << 21)) {
6932 if (address_offset)
6933 tcg_gen_addi_i32(addr, addr, address_offset);
6934 store_reg(s, rn, addr);
6935 } else {
6936 tcg_temp_free_i32(addr);
6938 if (load) {
6939 /* Complete the load. */
6940 store_reg(s, rd, tmp);
6943 break;
6944 case 0x4:
6945 case 0x5:
6946 goto do_ldst;
6947 case 0x6:
6948 case 0x7:
6949 if (insn & (1 << 4)) {
6950 ARCH(6);
6951                 /* ARMv6 media instructions. */
6952 rm = insn & 0xf;
6953 rn = (insn >> 16) & 0xf;
6954 rd = (insn >> 12) & 0xf;
6955 rs = (insn >> 8) & 0xf;
6956 switch ((insn >> 23) & 3) {
6957 case 0: /* Parallel add/subtract. */
6958 op1 = (insn >> 20) & 7;
6959 tmp = load_reg(s, rn);
6960 tmp2 = load_reg(s, rm);
6961 sh = (insn >> 5) & 7;
6962 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6963 goto illegal_op;
6964 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6965 tcg_temp_free_i32(tmp2);
6966 store_reg(s, rd, tmp);
6967 break;
6968 case 1:
6969 if ((insn & 0x00700020) == 0) {
6970 /* Halfword pack. */
6971 tmp = load_reg(s, rn);
6972 tmp2 = load_reg(s, rm);
6973 shift = (insn >> 7) & 0x1f;
6974 if (insn & (1 << 6)) {
6975 /* pkhtb */
6976 if (shift == 0)
6977 shift = 31;
6978 tcg_gen_sari_i32(tmp2, tmp2, shift);
6979 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6980 tcg_gen_ext16u_i32(tmp2, tmp2);
6981 } else {
6982 /* pkhbt */
6983 if (shift)
6984 tcg_gen_shli_i32(tmp2, tmp2, shift);
6985 tcg_gen_ext16u_i32(tmp, tmp);
6986 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6988 tcg_gen_or_i32(tmp, tmp, tmp2);
6989 tcg_temp_free_i32(tmp2);
6990 store_reg(s, rd, tmp);
6991 } else if ((insn & 0x00200020) == 0x00200000) {
6992 /* [us]sat */
6993 tmp = load_reg(s, rm);
6994 shift = (insn >> 7) & 0x1f;
6995 if (insn & (1 << 6)) {
6996 if (shift == 0)
6997 shift = 31;
6998 tcg_gen_sari_i32(tmp, tmp, shift);
6999 } else {
7000 tcg_gen_shli_i32(tmp, tmp, shift);
7002 sh = (insn >> 16) & 0x1f;
7003 tmp2 = tcg_const_i32(sh);
7004 if (insn & (1 << 22))
7005 gen_helper_usat(tmp, tmp, tmp2);
7006 else
7007 gen_helper_ssat(tmp, tmp, tmp2);
7008 tcg_temp_free_i32(tmp2);
7009 store_reg(s, rd, tmp);
7010 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7011 /* [us]sat16 */
7012 tmp = load_reg(s, rm);
7013 sh = (insn >> 16) & 0x1f;
7014 tmp2 = tcg_const_i32(sh);
7015 if (insn & (1 << 22))
7016 gen_helper_usat16(tmp, tmp, tmp2);
7017 else
7018 gen_helper_ssat16(tmp, tmp, tmp2);
7019 tcg_temp_free_i32(tmp2);
7020 store_reg(s, rd, tmp);
7021 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7022 /* Select bytes. */
7023 tmp = load_reg(s, rn);
7024 tmp2 = load_reg(s, rm);
7025 tmp3 = tcg_temp_new_i32();
7026 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7027 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7028 tcg_temp_free_i32(tmp3);
7029 tcg_temp_free_i32(tmp2);
7030 store_reg(s, rd, tmp);
7031 } else if ((insn & 0x000003e0) == 0x00000060) {
7032 tmp = load_reg(s, rm);
7033 shift = (insn >> 10) & 3;
7034                     /* ??? In many cases it's not necessary to do a
7035 rotate, a shift is sufficient. */
7036 if (shift != 0)
7037 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7038 op1 = (insn >> 20) & 7;
7039 switch (op1) {
7040 case 0: gen_sxtb16(tmp); break;
7041 case 2: gen_sxtb(tmp); break;
7042 case 3: gen_sxth(tmp); break;
7043 case 4: gen_uxtb16(tmp); break;
7044 case 6: gen_uxtb(tmp); break;
7045 case 7: gen_uxth(tmp); break;
7046 default: goto illegal_op;
7048 if (rn != 15) {
7049 tmp2 = load_reg(s, rn);
7050 if ((op1 & 3) == 0) {
7051 gen_add16(tmp, tmp2);
7052 } else {
7053 tcg_gen_add_i32(tmp, tmp, tmp2);
7054 tcg_temp_free_i32(tmp2);
7057 store_reg(s, rd, tmp);
7058 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7059 /* rev */
7060 tmp = load_reg(s, rm);
7061 if (insn & (1 << 22)) {
7062 if (insn & (1 << 7)) {
7063 gen_revsh(tmp);
7064 } else {
7065 ARCH(6T2);
7066 gen_helper_rbit(tmp, tmp);
7068 } else {
7069 if (insn & (1 << 7))
7070 gen_rev16(tmp);
7071 else
7072 tcg_gen_bswap32_i32(tmp, tmp);
7074 store_reg(s, rd, tmp);
7075 } else {
7076 goto illegal_op;
7078 break;
7079 case 2: /* Multiplies (Type 3). */
7080 tmp = load_reg(s, rm);
7081 tmp2 = load_reg(s, rs);
7082 if (insn & (1 << 20)) {
7083 /* Signed multiply most significant [accumulate].
7084 (SMMUL, SMMLA, SMMLS) */
7085 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7087 if (rd != 15) {
7088 tmp = load_reg(s, rd);
7089 if (insn & (1 << 6)) {
7090 tmp64 = gen_subq_msw(tmp64, tmp);
7091 } else {
7092 tmp64 = gen_addq_msw(tmp64, tmp);
7095 if (insn & (1 << 5)) {
7096 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7098 tcg_gen_shri_i64(tmp64, tmp64, 32);
7099 tmp = tcg_temp_new_i32();
7100 tcg_gen_trunc_i64_i32(tmp, tmp64);
7101 tcg_temp_free_i64(tmp64);
7102 store_reg(s, rn, tmp);
7103 } else {
7104 if (insn & (1 << 5))
7105 gen_swap_half(tmp2);
7106 gen_smul_dual(tmp, tmp2);
7107 if (insn & (1 << 6)) {
7108 /* This subtraction cannot overflow. */
7109 tcg_gen_sub_i32(tmp, tmp, tmp2);
7110 } else {
7111 /* This addition cannot overflow 32 bits;
7112 * however it may overflow considered as a signed
7113             * operation, in which case we must set the Q flag.
7114             */
7115 gen_helper_add_setq(tmp, tmp, tmp2);
7117 tcg_temp_free_i32(tmp2);
7118 if (insn & (1 << 22)) {
7119 /* smlald, smlsld */
7120 tmp64 = tcg_temp_new_i64();
7121 tcg_gen_ext_i32_i64(tmp64, tmp);
7122 tcg_temp_free_i32(tmp);
7123 gen_addq(s, tmp64, rd, rn);
7124 gen_storeq_reg(s, rd, rn, tmp64);
7125 tcg_temp_free_i64(tmp64);
7126 } else {
7127 /* smuad, smusd, smlad, smlsd */
7128                        if (rd != 15)
7129                          {
7130                            tmp2 = load_reg(s, rd);
7131                            gen_helper_add_setq(tmp, tmp, tmp2);
7132                            tcg_temp_free_i32(tmp2);
7133                          }
7134 store_reg(s, rn, tmp);
7137 break;
7138 case 3:
7139 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7140 switch (op1) {
7141 case 0: /* Unsigned sum of absolute differences. */
7142 ARCH(6);
7143 tmp = load_reg(s, rm);
7144 tmp2 = load_reg(s, rs);
7145 gen_helper_usad8(tmp, tmp, tmp2);
7146 tcg_temp_free_i32(tmp2);
7147 if (rd != 15) {
7148 tmp2 = load_reg(s, rd);
7149 tcg_gen_add_i32(tmp, tmp, tmp2);
7150 tcg_temp_free_i32(tmp2);
7152 store_reg(s, rn, tmp);
7153 break;
7154 case 0x20: case 0x24: case 0x28: case 0x2c:
7155 /* Bitfield insert/clear. */
7156 ARCH(6T2);
7157 shift = (insn >> 7) & 0x1f;
7158 i = (insn >> 16) & 0x1f;
7159 i = i + 1 - shift;
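                /* i is now the field width in bits; a width of 32 covers
                   the whole register, so no insert mask is needed below.  */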
7160 if (rm == 15) {
7161 tmp = tcg_temp_new_i32();
7162 tcg_gen_movi_i32(tmp, 0);
7163 } else {
7164 tmp = load_reg(s, rm);
7166 if (i != 32) {
7167 tmp2 = load_reg(s, rd);
7168 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7169 tcg_temp_free_i32(tmp2);
7171 store_reg(s, rd, tmp);
7172 break;
7173 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7174 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7175 ARCH(6T2);
7176 tmp = load_reg(s, rm);
7177 shift = (insn >> 7) & 0x1f;
7178 i = ((insn >> 16) & 0x1f) + 1;
7179 if (shift + i > 32)
7180 goto illegal_op;
7181 if (i < 32) {
7182 if (op1 & 0x20) {
7183 gen_ubfx(tmp, shift, (1u << i) - 1);
7184 } else {
7185 gen_sbfx(tmp, shift, i);
7188 store_reg(s, rd, tmp);
7189 break;
7190 default:
7191 goto illegal_op;
7193 break;
7195 break;
7197 do_ldst:
7198 /* Check for undefined extension instructions
7199 * per the ARM Bible IE:
7200         * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
7201         */
7202 sh = (0xf << 20) | (0xf << 4);
7203        if (op1 == 0x7 && ((insn & sh) == sh))
7204        {
7205            goto illegal_op;
7206        }
7207 /* load/store byte/word */
7208 rn = (insn >> 16) & 0xf;
7209 rd = (insn >> 12) & 0xf;
7210 tmp2 = load_reg(s, rn);
7211 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7212 if (insn & (1 << 24))
7213 gen_add_data_offset(s, insn, tmp2);
7214 if (insn & (1 << 20)) {
7215 /* load */
7216 if (insn & (1 << 22)) {
7217 tmp = gen_ld8u(tmp2, i);
7218 } else {
7219 tmp = gen_ld32(tmp2, i);
7221 } else {
7222 /* store */
7223 tmp = load_reg(s, rd);
7224 if (insn & (1 << 22))
7225 gen_st8(tmp, tmp2, i);
7226 else
7227 gen_st32(tmp, tmp2, i);
7229 if (!(insn & (1 << 24))) {
7230 gen_add_data_offset(s, insn, tmp2);
7231 store_reg(s, rn, tmp2);
7232 } else if (insn & (1 << 21)) {
7233 store_reg(s, rn, tmp2);
7234 } else {
7235 tcg_temp_free_i32(tmp2);
7237 if (insn & (1 << 20)) {
7238 /* Complete the load. */
7239 store_reg_from_load(env, s, rd, tmp);
7241 break;
7242 case 0x08:
7243    case 0x09:
7244        {
7245 int j, n, user, loaded_base;
7246 TCGv loaded_var;
7247 /* load/store multiple words */
7248 /* XXX: store correct base if write back */
7249 user = 0;
7250 if (insn & (1 << 22)) {
7251 if (IS_USER(s))
7252 goto illegal_op; /* only usable in supervisor mode */
7254 if ((insn & (1 << 15)) == 0)
7255                user = 1;
7256            }
7257 rn = (insn >> 16) & 0xf;
7258 addr = load_reg(s, rn);
7260 /* compute total size */
7261 loaded_base = 0;
7262 TCGV_UNUSED(loaded_var);
7263 n = 0;
7264 for(i=0;i<16;i++) {
7265 if (insn & (1 << i))
7266 n++;
7268 /* XXX: test invalid n == 0 case ? */
7269 if (insn & (1 << 23)) {
7270 if (insn & (1 << 24)) {
7271 /* pre increment */
7272 tcg_gen_addi_i32(addr, addr, 4);
7273 } else {
7274 /* post increment */
7276 } else {
7277 if (insn & (1 << 24)) {
7278 /* pre decrement */
7279 tcg_gen_addi_i32(addr, addr, -(n * 4));
7280 } else {
7281 /* post decrement */
7282 if (n != 1)
7283 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
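                /* The adjustments above rewrite all four addressing modes
                   (increment/decrement, before/after) so that the transfer
                   loop below can always step upward by 4 between words.  */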
7286 j = 0;
7287 for(i=0;i<16;i++) {
7288 if (insn & (1 << i)) {
7289 if (insn & (1 << 20)) {
7290 /* load */
7291 tmp = gen_ld32(addr, IS_USER(s));
7292 if (user) {
7293 tmp2 = tcg_const_i32(i);
7294 gen_helper_set_user_reg(tmp2, tmp);
7295 tcg_temp_free_i32(tmp2);
7296 tcg_temp_free_i32(tmp);
7297 } else if (i == rn) {
7298 loaded_var = tmp;
7299 loaded_base = 1;
7300 } else {
7301 store_reg_from_load(env, s, i, tmp);
7303 } else {
7304 /* store */
7305 if (i == 15) {
7306 /* special case: r15 = PC + 8 */
7307 val = (long)s->pc + 4;
7308 tmp = tcg_temp_new_i32();
7309 tcg_gen_movi_i32(tmp, val);
7310 } else if (user) {
7311 tmp = tcg_temp_new_i32();
7312 tmp2 = tcg_const_i32(i);
7313 gen_helper_get_user_reg(tmp, tmp2);
7314 tcg_temp_free_i32(tmp2);
7315 } else {
7316 tmp = load_reg(s, i);
7318 gen_st32(tmp, addr, IS_USER(s));
7320 j++;
7321 /* no need to add after the last transfer */
7322 if (j != n)
7323 tcg_gen_addi_i32(addr, addr, 4);
7326 if (insn & (1 << 21)) {
7327 /* write back */
7328 if (insn & (1 << 23)) {
7329 if (insn & (1 << 24)) {
7330 /* pre increment */
7331 } else {
7332 /* post increment */
7333 tcg_gen_addi_i32(addr, addr, 4);
7335 } else {
7336 if (insn & (1 << 24)) {
7337 /* pre decrement */
7338 if (n != 1)
7339 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7340 } else {
7341 /* post decrement */
7342 tcg_gen_addi_i32(addr, addr, -(n * 4));
7345 store_reg(s, rn, addr);
7346 } else {
7347 tcg_temp_free_i32(addr);
7349 if (loaded_base) {
7350 store_reg(s, rn, loaded_var);
7352 if ((insn & (1 << 22)) && !user) {
7353 /* Restore CPSR from SPSR. */
7354 tmp = load_cpu_field(spsr);
7355 gen_set_cpsr(tmp, 0xffffffff);
7356 tcg_temp_free_i32(tmp);
7357 s->is_jmp = DISAS_UPDATE;
7360 break;
7361 case 0xa:
7362    case 0xb:
7363        {
7364 int32_t offset;
7366 /* branch (and link) */
7367 val = (int32_t)s->pc;
7368 if (insn & (1 << 24)) {
7369 tmp = tcg_temp_new_i32();
7370 tcg_gen_movi_i32(tmp, val);
7371 store_reg(s, 14, tmp);
7373 offset = (((int32_t)insn << 8) >> 8);
7374 val += (offset << 2) + 4;
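            /* The <<8 then >>8 sign-extends the 24-bit immediate; <<2
               scales the word offset to bytes, and the extra +4 on top of
               s->pc (already 4 past this insn) gives the architectural
               PC+8 base used by ARM-mode branches.  */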
7375 gen_jmp(s, val);
7377 break;
7378 case 0xc:
7379 case 0xd:
7380 case 0xe:
7381 /* Coprocessor. */
7382 if (disas_coproc_insn(env, s, insn))
7383 goto illegal_op;
7384 break;
7385 case 0xf:
7386 /* swi */
7387 gen_set_pc_im(s->pc);
7388 s->is_jmp = DISAS_SWI;
7389 break;
7390 default:
7391 illegal_op:
7392 gen_exception_insn(s, 4, EXCP_UDEF);
7393 break;
7398 /* Return true if this is a Thumb-2 logical op. */
7399 static int
7400 thumb2_logic_op(int op)
7402 return (op < 8);
7405 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7406 then set condition code flags based on the result of the operation.
7407 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7408 to the high bit of T1.
7409 Returns zero if the opcode is valid. */
7411 static int
7412 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7414 int logic_cc;
7416 logic_cc = 0;
7417 switch (op) {
7418 case 0: /* and */
7419 tcg_gen_and_i32(t0, t0, t1);
7420 logic_cc = conds;
7421 break;
7422 case 1: /* bic */
7423 tcg_gen_andc_i32(t0, t0, t1);
7424 logic_cc = conds;
7425 break;
7426 case 2: /* orr */
7427 tcg_gen_or_i32(t0, t0, t1);
7428 logic_cc = conds;
7429 break;
7430 case 3: /* orn */
7431 tcg_gen_orc_i32(t0, t0, t1);
7432 logic_cc = conds;
7433 break;
7434 case 4: /* eor */
7435 tcg_gen_xor_i32(t0, t0, t1);
7436 logic_cc = conds;
7437 break;
7438 case 8: /* add */
7439 if (conds)
7440 gen_helper_add_cc(t0, t0, t1);
7441 else
7442 tcg_gen_add_i32(t0, t0, t1);
7443 break;
7444 case 10: /* adc */
7445 if (conds)
7446 gen_helper_adc_cc(t0, t0, t1);
7447 else
7448 gen_adc(t0, t1);
7449 break;
7450 case 11: /* sbc */
7451 if (conds)
7452 gen_helper_sbc_cc(t0, t0, t1);
7453 else
7454 gen_sub_carry(t0, t0, t1);
7455 break;
7456 case 13: /* sub */
7457 if (conds)
7458 gen_helper_sub_cc(t0, t0, t1);
7459 else
7460 tcg_gen_sub_i32(t0, t0, t1);
7461 break;
7462 case 14: /* rsb */
7463 if (conds)
7464 gen_helper_sub_cc(t0, t1, t0);
7465 else
7466 tcg_gen_sub_i32(t0, t1, t0);
7467 break;
7468 default: /* 5, 6, 7, 9, 12, 15. */
7469 return 1;
7471 if (logic_cc) {
7472 gen_logic_CC(t0);
7473 if (shifter_out)
7474 gen_set_CF_bit31(t1);
7476 return 0;
7479 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7480 is not legal. */
7481 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7483 uint32_t insn, imm, shift, offset;
7484 uint32_t rd, rn, rm, rs;
7485 TCGv tmp;
7486 TCGv tmp2;
7487 TCGv tmp3;
7488 TCGv addr;
7489 TCGv_i64 tmp64;
7490 int op;
7491 int shiftop;
7492 int conds;
7493 int logic_cc;
7495 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7496 || arm_feature (env, ARM_FEATURE_M))) {
7497 /* Thumb-1 cores may need to treat bl and blx as a pair of
7498 16-bit instructions to get correct prefetch abort behavior. */
7499 insn = insn_hw1;
7500 if ((insn & (1 << 12)) == 0) {
7501 ARCH(5);
7502 /* Second half of blx. */
7503 offset = ((insn & 0x7ff) << 1);
7504 tmp = load_reg(s, 14);
7505 tcg_gen_addi_i32(tmp, tmp, offset);
7506 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
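            /* blx switches to ARM state, so force a word-aligned target
               by clearing bits [1:0] of the destination.  */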
7508 tmp2 = tcg_temp_new_i32();
7509 tcg_gen_movi_i32(tmp2, s->pc | 1);
7510 store_reg(s, 14, tmp2);
7511 gen_bx(s, tmp);
7512 return 0;
7514 if (insn & (1 << 11)) {
7515 /* Second half of bl. */
7516 offset = ((insn & 0x7ff) << 1) | 1;
7517 tmp = load_reg(s, 14);
7518 tcg_gen_addi_i32(tmp, tmp, offset);
7520 tmp2 = tcg_temp_new_i32();
7521 tcg_gen_movi_i32(tmp2, s->pc | 1);
7522 store_reg(s, 14, tmp2);
7523 gen_bx(s, tmp);
7524 return 0;
7526 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7527 /* Instruction spans a page boundary. Implement it as two
7528               16-bit instructions in case the second half causes a
7529 prefetch abort. */
7530 offset = ((int32_t)insn << 21) >> 9;
7531 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7532 return 0;
7534 /* Fall through to 32-bit decode. */
7537 insn = lduw_code(s->pc);
7538 s->pc += 2;
7539 insn |= (uint32_t)insn_hw1 << 16;
7541 if ((insn & 0xf800e800) != 0xf000e800) {
7542 ARCH(6T2);
7545 rn = (insn >> 16) & 0xf;
7546 rs = (insn >> 12) & 0xf;
7547 rd = (insn >> 8) & 0xf;
7548 rm = insn & 0xf;
7549 switch ((insn >> 25) & 0xf) {
7550 case 0: case 1: case 2: case 3:
7551 /* 16-bit instructions. Should never happen. */
7552 abort();
7553 case 4:
7554 if (insn & (1 << 22)) {
7555 /* Other load/store, table branch. */
7556 if (insn & 0x01200000) {
7557 /* Load/store doubleword. */
7558 if (rn == 15) {
7559 addr = tcg_temp_new_i32();
7560 tcg_gen_movi_i32(addr, s->pc & ~3);
7561 } else {
7562 addr = load_reg(s, rn);
7564 offset = (insn & 0xff) * 4;
7565 if ((insn & (1 << 23)) == 0)
7566 offset = -offset;
7567 if (insn & (1 << 24)) {
7568 tcg_gen_addi_i32(addr, addr, offset);
7569 offset = 0;
7571 if (insn & (1 << 20)) {
7572 /* ldrd */
7573 tmp = gen_ld32(addr, IS_USER(s));
7574 store_reg(s, rs, tmp);
7575 tcg_gen_addi_i32(addr, addr, 4);
7576 tmp = gen_ld32(addr, IS_USER(s));
7577 store_reg(s, rd, tmp);
7578 } else {
7579 /* strd */
7580 tmp = load_reg(s, rs);
7581 gen_st32(tmp, addr, IS_USER(s));
7582 tcg_gen_addi_i32(addr, addr, 4);
7583 tmp = load_reg(s, rd);
7584 gen_st32(tmp, addr, IS_USER(s));
7586 if (insn & (1 << 21)) {
7587 /* Base writeback. */
7588 if (rn == 15)
7589 goto illegal_op;
7590 tcg_gen_addi_i32(addr, addr, offset - 4);
7591 store_reg(s, rn, addr);
7592 } else {
7593 tcg_temp_free_i32(addr);
7595 } else if ((insn & (1 << 23)) == 0) {
7596 /* Load/store exclusive word. */
7597 addr = tcg_temp_local_new();
7598 load_reg_var(s, addr, rn);
7599 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7600 if (insn & (1 << 20)) {
7601 gen_load_exclusive(s, rs, 15, addr, 2);
7602 } else {
7603 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7605 tcg_temp_free(addr);
7606 } else if ((insn & (1 << 6)) == 0) {
7607 /* Table Branch. */
7608 if (rn == 15) {
7609 addr = tcg_temp_new_i32();
7610 tcg_gen_movi_i32(addr, s->pc);
7611 } else {
7612 addr = load_reg(s, rn);
7614 tmp = load_reg(s, rm);
7615 tcg_gen_add_i32(addr, addr, tmp);
7616 if (insn & (1 << 4)) {
7617 /* tbh */
7618 tcg_gen_add_i32(addr, addr, tmp);
7619 tcg_temp_free_i32(tmp);
7620 tmp = gen_ld16u(addr, IS_USER(s));
7621 } else { /* tbb */
7622 tcg_temp_free_i32(tmp);
7623 tmp = gen_ld8u(addr, IS_USER(s));
7625 tcg_temp_free_i32(addr);
7626 tcg_gen_shli_i32(tmp, tmp, 1);
7627 tcg_gen_addi_i32(tmp, tmp, s->pc);
7628 store_reg(s, 15, tmp);
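            /* tbb/tbh load a byte/halfword entry from the table at
               rn + rm (rm doubled for tbh), then branch forward by twice
               that entry relative to the PC.  */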
7629 } else {
7630 /* Load/store exclusive byte/halfword/doubleword. */
7631 ARCH(7);
7632 op = (insn >> 4) & 0x3;
7633 if (op == 2) {
7634 goto illegal_op;
7636 addr = tcg_temp_local_new();
7637 load_reg_var(s, addr, rn);
7638 if (insn & (1 << 20)) {
7639 gen_load_exclusive(s, rs, rd, addr, op);
7640 } else {
7641 gen_store_exclusive(s, rm, rs, rd, addr, op);
7643 tcg_temp_free(addr);
7645 } else {
7646 /* Load/store multiple, RFE, SRS. */
7647 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7648 /* Not available in user mode. */
7649 if (IS_USER(s))
7650 goto illegal_op;
7651 if (insn & (1 << 20)) {
7652 /* rfe */
7653 addr = load_reg(s, rn);
7654 if ((insn & (1 << 24)) == 0)
7655 tcg_gen_addi_i32(addr, addr, -8);
7656 /* Load PC into tmp and CPSR into tmp2. */
7657 tmp = gen_ld32(addr, 0);
7658 tcg_gen_addi_i32(addr, addr, 4);
7659 tmp2 = gen_ld32(addr, 0);
7660 if (insn & (1 << 21)) {
7661 /* Base writeback. */
7662 if (insn & (1 << 24)) {
7663 tcg_gen_addi_i32(addr, addr, 4);
7664 } else {
7665 tcg_gen_addi_i32(addr, addr, -4);
7667 store_reg(s, rn, addr);
7668 } else {
7669 tcg_temp_free_i32(addr);
7671 gen_rfe(s, tmp, tmp2);
7672 } else {
7673 /* srs */
7674 op = (insn & 0x1f);
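                    /* srs: op is the target mode number; the helper fetches
                       that mode's banked r13, through which lr and the
                       current cpsr are then stored.  */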
7675 addr = tcg_temp_new_i32();
7676 tmp = tcg_const_i32(op);
7677 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7678 tcg_temp_free_i32(tmp);
7679 if ((insn & (1 << 24)) == 0) {
7680 tcg_gen_addi_i32(addr, addr, -8);
7682 tmp = load_reg(s, 14);
7683 gen_st32(tmp, addr, 0);
7684 tcg_gen_addi_i32(addr, addr, 4);
7685 tmp = tcg_temp_new_i32();
7686 gen_helper_cpsr_read(tmp);
7687 gen_st32(tmp, addr, 0);
7688 if (insn & (1 << 21)) {
7689 if ((insn & (1 << 24)) == 0) {
7690 tcg_gen_addi_i32(addr, addr, -4);
7691 } else {
7692 tcg_gen_addi_i32(addr, addr, 4);
7694 tmp = tcg_const_i32(op);
7695 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7696 tcg_temp_free_i32(tmp);
7697 } else {
7698 tcg_temp_free_i32(addr);
7701 } else {
7702 int i;
7703 /* Load/store multiple. */
7704 addr = load_reg(s, rn);
7705 offset = 0;
7706 for (i = 0; i < 16; i++) {
7707 if (insn & (1 << i))
7708 offset += 4;
7710 if (insn & (1 << 24)) {
7711 tcg_gen_addi_i32(addr, addr, -offset);
7714 for (i = 0; i < 16; i++) {
7715 if ((insn & (1 << i)) == 0)
7716 continue;
7717 if (insn & (1 << 20)) {
7718 /* Load. */
7719 tmp = gen_ld32(addr, IS_USER(s));
7720 if (i == 15) {
7721 gen_bx(s, tmp);
7722 } else {
7723 store_reg(s, i, tmp);
7725 } else {
7726 /* Store. */
7727 tmp = load_reg(s, i);
7728 gen_st32(tmp, addr, IS_USER(s));
7730 tcg_gen_addi_i32(addr, addr, 4);
7732 if (insn & (1 << 21)) {
7733 /* Base register writeback. */
7734 if (insn & (1 << 24)) {
7735 tcg_gen_addi_i32(addr, addr, -offset);
7737 /* Fault if writeback register is in register list. */
7738 if (insn & (1 << rn))
7739 goto illegal_op;
7740 store_reg(s, rn, addr);
7741 } else {
7742 tcg_temp_free_i32(addr);
7746 break;
7747 case 5:
7749 op = (insn >> 21) & 0xf;
7750 if (op == 6) {
7751 /* Halfword pack. */
7752 tmp = load_reg(s, rn);
7753 tmp2 = load_reg(s, rm);
7754 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7755 if (insn & (1 << 5)) {
7756 /* pkhtb */
7757 if (shift == 0)
7758 shift = 31;
7759 tcg_gen_sari_i32(tmp2, tmp2, shift);
7760 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7761 tcg_gen_ext16u_i32(tmp2, tmp2);
7762 } else {
7763 /* pkhbt */
7764 if (shift)
7765 tcg_gen_shli_i32(tmp2, tmp2, shift);
7766 tcg_gen_ext16u_i32(tmp, tmp);
7767 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7769 tcg_gen_or_i32(tmp, tmp, tmp2);
7770 tcg_temp_free_i32(tmp2);
7771 store_reg(s, rd, tmp);
7772 } else {
7773 /* Data processing register constant shift. */
7774 if (rn == 15) {
7775 tmp = tcg_temp_new_i32();
7776 tcg_gen_movi_i32(tmp, 0);
7777 } else {
7778 tmp = load_reg(s, rn);
7780 tmp2 = load_reg(s, rm);
7782 shiftop = (insn >> 4) & 3;
7783 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7784 conds = (insn & (1 << 20)) != 0;
7785 logic_cc = (conds && thumb2_logic_op(op));
7786 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7787 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7788 goto illegal_op;
7789 tcg_temp_free_i32(tmp2);
7790 if (rd != 15) {
7791 store_reg(s, rd, tmp);
7792 } else {
7793 tcg_temp_free_i32(tmp);
7796 break;
7797 case 13: /* Misc data processing. */
7798 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7799 if (op < 4 && (insn & 0xf000) != 0xf000)
7800 goto illegal_op;
7801 switch (op) {
7802 case 0: /* Register controlled shift. */
7803 tmp = load_reg(s, rn);
7804 tmp2 = load_reg(s, rm);
7805 if ((insn & 0x70) != 0)
7806 goto illegal_op;
7807 op = (insn >> 21) & 3;
7808 logic_cc = (insn & (1 << 20)) != 0;
7809 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7810 if (logic_cc)
7811 gen_logic_CC(tmp);
7812 store_reg_bx(env, s, rd, tmp);
7813 break;
7814 case 1: /* Sign/zero extend. */
7815 tmp = load_reg(s, rm);
7816 shift = (insn >> 4) & 3;
7817                /* ??? In many cases it's not necessary to do a
7818 rotate, a shift is sufficient. */
7819 if (shift != 0)
7820 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7821 op = (insn >> 20) & 7;
7822 switch (op) {
7823 case 0: gen_sxth(tmp); break;
7824 case 1: gen_uxth(tmp); break;
7825 case 2: gen_sxtb16(tmp); break;
7826 case 3: gen_uxtb16(tmp); break;
7827 case 4: gen_sxtb(tmp); break;
7828 case 5: gen_uxtb(tmp); break;
7829 default: goto illegal_op;
7831 if (rn != 15) {
7832 tmp2 = load_reg(s, rn);
7833 if ((op >> 1) == 1) {
7834 gen_add16(tmp, tmp2);
7835 } else {
7836 tcg_gen_add_i32(tmp, tmp, tmp2);
7837 tcg_temp_free_i32(tmp2);
7840 store_reg(s, rd, tmp);
7841 break;
7842 case 2: /* SIMD add/subtract. */
7843 op = (insn >> 20) & 7;
7844 shift = (insn >> 4) & 7;
7845 if ((op & 3) == 3 || (shift & 3) == 3)
7846 goto illegal_op;
7847 tmp = load_reg(s, rn);
7848 tmp2 = load_reg(s, rm);
7849 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7850 tcg_temp_free_i32(tmp2);
7851 store_reg(s, rd, tmp);
7852 break;
7853 case 3: /* Other data processing. */
7854 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7855 if (op < 4) {
7856 /* Saturating add/subtract. */
7857 tmp = load_reg(s, rn);
7858 tmp2 = load_reg(s, rm);
7859 if (op & 1)
7860 gen_helper_double_saturate(tmp, tmp);
7861 if (op & 2)
7862 gen_helper_sub_saturate(tmp, tmp2, tmp);
7863 else
7864 gen_helper_add_saturate(tmp, tmp, tmp2);
7865 tcg_temp_free_i32(tmp2);
7866 } else {
7867 tmp = load_reg(s, rn);
7868 switch (op) {
7869 case 0x0a: /* rbit */
7870 gen_helper_rbit(tmp, tmp);
7871 break;
7872 case 0x08: /* rev */
7873 tcg_gen_bswap32_i32(tmp, tmp);
7874 break;
7875 case 0x09: /* rev16 */
7876 gen_rev16(tmp);
7877 break;
7878 case 0x0b: /* revsh */
7879 gen_revsh(tmp);
7880 break;
7881 case 0x10: /* sel */
7882 tmp2 = load_reg(s, rm);
7883 tmp3 = tcg_temp_new_i32();
7884 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7885 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7886 tcg_temp_free_i32(tmp3);
7887 tcg_temp_free_i32(tmp2);
7888 break;
7889 case 0x18: /* clz */
7890 gen_helper_clz(tmp, tmp);
7891 break;
7892 default:
7893 goto illegal_op;
7896 store_reg(s, rd, tmp);
7897 break;
7898 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7899 op = (insn >> 4) & 0xf;
7900 tmp = load_reg(s, rn);
7901 tmp2 = load_reg(s, rm);
7902 switch ((insn >> 20) & 7) {
7903 case 0: /* 32 x 32 -> 32 */
7904 tcg_gen_mul_i32(tmp, tmp, tmp2);
7905 tcg_temp_free_i32(tmp2);
7906 if (rs != 15) {
7907 tmp2 = load_reg(s, rs);
7908 if (op)
7909 tcg_gen_sub_i32(tmp, tmp2, tmp);
7910 else
7911 tcg_gen_add_i32(tmp, tmp, tmp2);
7912 tcg_temp_free_i32(tmp2);
7914 break;
7915 case 1: /* 16 x 16 -> 32 */
7916 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7917 tcg_temp_free_i32(tmp2);
7918 if (rs != 15) {
7919 tmp2 = load_reg(s, rs);
7920 gen_helper_add_setq(tmp, tmp, tmp2);
7921 tcg_temp_free_i32(tmp2);
7923 break;
7924 case 2: /* Dual multiply add. */
7925 case 4: /* Dual multiply subtract. */
7926 if (op)
7927 gen_swap_half(tmp2);
7928 gen_smul_dual(tmp, tmp2);
7929 if (insn & (1 << 22)) {
7930 /* This subtraction cannot overflow. */
7931 tcg_gen_sub_i32(tmp, tmp, tmp2);
7932 } else {
7933 /* This addition cannot overflow 32 bits;
7934 * however it may overflow considered as a signed
7935                 * operation, in which case we must set the Q flag.
7936                 */
7937 gen_helper_add_setq(tmp, tmp, tmp2);
7939 tcg_temp_free_i32(tmp2);
7940                if (rs != 15)
7941                  {
7942                    tmp2 = load_reg(s, rs);
7943                    gen_helper_add_setq(tmp, tmp, tmp2);
7944                    tcg_temp_free_i32(tmp2);
7945                  }
7946 break;
7947 case 3: /* 32 * 16 -> 32msb */
7948 if (op)
7949 tcg_gen_sari_i32(tmp2, tmp2, 16);
7950 else
7951 gen_sxth(tmp2);
7952 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7953 tcg_gen_shri_i64(tmp64, tmp64, 16);
7954 tmp = tcg_temp_new_i32();
7955 tcg_gen_trunc_i64_i32(tmp, tmp64);
7956 tcg_temp_free_i64(tmp64);
7957                if (rs != 15)
7958                  {
7959                    tmp2 = load_reg(s, rs);
7960                    gen_helper_add_setq(tmp, tmp, tmp2);
7961                    tcg_temp_free_i32(tmp2);
7962                  }
7963 break;
7964 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7965 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7966 if (rs != 15) {
7967 tmp = load_reg(s, rs);
7968 if (insn & (1 << 20)) {
7969 tmp64 = gen_addq_msw(tmp64, tmp);
7970 } else {
7971 tmp64 = gen_subq_msw(tmp64, tmp);
7974 if (insn & (1 << 4)) {
7975 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7977 tcg_gen_shri_i64(tmp64, tmp64, 32);
7978 tmp = tcg_temp_new_i32();
7979 tcg_gen_trunc_i64_i32(tmp, tmp64);
7980 tcg_temp_free_i64(tmp64);
7981 break;
7982 case 7: /* Unsigned sum of absolute differences. */
7983 gen_helper_usad8(tmp, tmp, tmp2);
7984 tcg_temp_free_i32(tmp2);
7985 if (rs != 15) {
7986 tmp2 = load_reg(s, rs);
7987 tcg_gen_add_i32(tmp, tmp, tmp2);
7988 tcg_temp_free_i32(tmp2);
7990 break;
7992 store_reg(s, rd, tmp);
7993 break;
7994 case 6: case 7: /* 64-bit multiply, Divide. */
7995 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7996 tmp = load_reg(s, rn);
7997 tmp2 = load_reg(s, rm);
7998 if ((op & 0x50) == 0x10) {
7999 /* sdiv, udiv */
8000 if (!arm_feature(env, ARM_FEATURE_DIV))
8001 goto illegal_op;
8002 if (op & 0x20)
8003 gen_helper_udiv(tmp, tmp, tmp2);
8004 else
8005 gen_helper_sdiv(tmp, tmp, tmp2);
8006 tcg_temp_free_i32(tmp2);
8007 store_reg(s, rd, tmp);
8008 } else if ((op & 0xe) == 0xc) {
8009 /* Dual multiply accumulate long. */
8010 if (op & 1)
8011 gen_swap_half(tmp2);
8012 gen_smul_dual(tmp, tmp2);
8013 if (op & 0x10) {
8014 tcg_gen_sub_i32(tmp, tmp, tmp2);
8015 } else {
8016 tcg_gen_add_i32(tmp, tmp, tmp2);
8018 tcg_temp_free_i32(tmp2);
8019 /* BUGFIX */
8020 tmp64 = tcg_temp_new_i64();
8021 tcg_gen_ext_i32_i64(tmp64, tmp);
8022 tcg_temp_free_i32(tmp);
8023 gen_addq(s, tmp64, rs, rd);
8024 gen_storeq_reg(s, rs, rd, tmp64);
8025 tcg_temp_free_i64(tmp64);
8026 } else {
8027 if (op & 0x20) {
8028 /* Unsigned 64-bit multiply */
8029 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8030 } else {
8031 if (op & 8) {
8032 /* smlalxy */
8033 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8034 tcg_temp_free_i32(tmp2);
8035 tmp64 = tcg_temp_new_i64();
8036 tcg_gen_ext_i32_i64(tmp64, tmp);
8037 tcg_temp_free_i32(tmp);
8038 } else {
8039 /* Signed 64-bit multiply */
8040 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8043 if (op & 4) {
8044 /* umaal */
8045 gen_addq_lo(s, tmp64, rs);
8046 gen_addq_lo(s, tmp64, rd);
8047 } else if (op & 0x40) {
8048 /* 64-bit accumulate. */
8049 gen_addq(s, tmp64, rs, rd);
8051 gen_storeq_reg(s, rs, rd, tmp64);
8052 tcg_temp_free_i64(tmp64);
8054            break;
8055        }
8056 break;
8057 case 6: case 7: case 14: case 15:
8058 /* Coprocessor. */
8059 if (((insn >> 24) & 3) == 3) {
8060 /* Translate into the equivalent ARM encoding. */
8061 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8062 if (disas_neon_data_insn(env, s, insn))
8063 goto illegal_op;
8064 } else {
8065 if (insn & (1 << 28))
8066 goto illegal_op;
8067 if (disas_coproc_insn (env, s, insn))
8068 goto illegal_op;
8070 break;
8071 case 8: case 9: case 10: case 11:
8072 if (insn & (1 << 15)) {
8073 /* Branches, misc control. */
8074 if (insn & 0x5000) {
8075 /* Unconditional branch. */
8076 /* signextend(hw1[10:0]) -> offset[:12]. */
8077 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8078 /* hw1[10:0] -> offset[11:1]. */
8079 offset |= (insn & 0x7ff) << 1;
8080 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8081 offset[24:22] already have the same value because of the
8082 sign extension above. */
8083 offset ^= ((~insn) & (1 << 13)) << 10;
8084 offset ^= ((~insn) & (1 << 11)) << 11;
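            /* I.e. I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S): the sign
               extension already put S into bits 23:22, so XORing with the
               inverted J1/J2 bits yields the final offset bits.  */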
8086 if (insn & (1 << 14)) {
8087 /* Branch and link. */
8088 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8091 offset += s->pc;
8092 if (insn & (1 << 12)) {
8093 /* b/bl */
8094 gen_jmp(s, offset);
8095 } else {
8096 /* blx */
8097 offset &= ~(uint32_t)2;
8098 /* thumb2 bx, no need to check */
8099 gen_bx_im(s, offset);
8101 } else if (((insn >> 23) & 7) == 7) {
8102 /* Misc control */
8103 if (insn & (1 << 13))
8104 goto illegal_op;
8106 if (insn & (1 << 26)) {
8107 /* Secure monitor call (v6Z) */
8108 goto illegal_op; /* not implemented. */
8109 } else {
8110 op = (insn >> 20) & 7;
8111 switch (op) {
8112 case 0: /* msr cpsr. */
8113 if (IS_M(env)) {
8114 tmp = load_reg(s, rn);
8115 addr = tcg_const_i32(insn & 0xff);
8116 gen_helper_v7m_msr(cpu_env, addr, tmp);
8117 tcg_temp_free_i32(addr);
8118 tcg_temp_free_i32(tmp);
8119 gen_lookup_tb(s);
8120 break;
8122 /* fall through */
8123 case 1: /* msr spsr. */
8124 if (IS_M(env))
8125 goto illegal_op;
8126 tmp = load_reg(s, rn);
8127 if (gen_set_psr(s,
8128 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8129 op == 1, tmp))
8130 goto illegal_op;
8131 break;
8132 case 2: /* cps, nop-hint. */
8133 if (((insn >> 8) & 7) == 0) {
8134 gen_nop_hint(s, insn & 0xff);
8136 /* Implemented as NOP in user mode. */
8137 if (IS_USER(s))
8138 break;
8139 offset = 0;
8140 imm = 0;
8141 if (insn & (1 << 10)) {
8142 if (insn & (1 << 7))
8143 offset |= CPSR_A;
8144 if (insn & (1 << 6))
8145 offset |= CPSR_I;
8146 if (insn & (1 << 5))
8147 offset |= CPSR_F;
8148 if (insn & (1 << 9))
8149 imm = CPSR_A | CPSR_I | CPSR_F;
8151 if (insn & (1 << 8)) {
8152 offset |= 0x1f;
8153 imm |= (insn & 0x1f);
8155 if (offset) {
8156 gen_set_psr_im(s, offset, 0, imm);
8158 break;
8159 case 3: /* Special control operations. */
8160 ARCH(7);
8161 op = (insn >> 4) & 0xf;
8162 switch (op) {
8163 case 2: /* clrex */
8164 gen_clrex(s);
8165 break;
8166 case 4: /* dsb */
8167 case 5: /* dmb */
8168 case 6: /* isb */
8169 /* These execute as NOPs. */
8170 break;
8171 default:
8172 goto illegal_op;
8174 break;
8175 case 4: /* bxj */
8176 /* Trivial implementation equivalent to bx. */
8177 tmp = load_reg(s, rn);
8178 gen_bx(s, tmp);
8179 break;
8180 case 5: /* Exception return. */
8181 if (IS_USER(s)) {
8182                            goto illegal_op;
8183                        }
8184 if (rn != 14 || rd != 15) {
8185                            goto illegal_op;
8186                        }
8187 tmp = load_reg(s, rn);
8188 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8189 gen_exception_return(s, tmp);
8190 break;
8191 case 6: /* mrs cpsr. */
8192 tmp = tcg_temp_new_i32();
8193 if (IS_M(env)) {
8194 addr = tcg_const_i32(insn & 0xff);
8195 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8196 tcg_temp_free_i32(addr);
8197 } else {
8198 gen_helper_cpsr_read(tmp);
8200 store_reg(s, rd, tmp);
8201 break;
8202 case 7: /* mrs spsr. */
8203 /* Not accessible in user mode. */
8204 if (IS_USER(s) || IS_M(env))
8205 goto illegal_op;
8206 tmp = load_cpu_field(spsr);
8207 store_reg(s, rd, tmp);
8208 break;
8211 } else {
8212 /* Conditional branch. */
8213 op = (insn >> 22) & 0xf;
8214 /* Generate a conditional jump to next instruction. */
8215 s->condlabel = gen_new_label();
8216 gen_test_cc(op ^ 1, s->condlabel);
8217 s->condjmp = 1;
8219 /* offset[11:1] = insn[10:0] */
8220 offset = (insn & 0x7ff) << 1;
8221 /* offset[17:12] = insn[21:16]. */
8222 offset |= (insn & 0x003f0000) >> 4;
8223 /* offset[31:20] = insn[26]. */
8224 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8225 /* offset[18] = insn[13]. */
8226 offset |= (insn & (1 << 13)) << 5;
8227 /* offset[19] = insn[11]. */
8228 offset |= (insn & (1 << 11)) << 8;
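            /* The fields above assemble the sign-extended, halfword-aligned
               offset of a Thumb-2 conditional branch.  */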
8230 /* jump to the offset */
8231 gen_jmp(s, s->pc + offset);
8233 } else {
8234 /* Data processing immediate. */
8235 if (insn & (1 << 25)) {
8236 if (insn & (1 << 24)) {
8237 if (insn & (1 << 20))
8238 goto illegal_op;
8239 /* Bitfield/Saturate. */
8240 op = (insn >> 21) & 7;
8241 imm = insn & 0x1f;
8242 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8243 if (rn == 15) {
8244 tmp = tcg_temp_new_i32();
8245 tcg_gen_movi_i32(tmp, 0);
8246 } else {
8247 tmp = load_reg(s, rn);
8249 switch (op) {
8250 case 2: /* Signed bitfield extract. */
8251 imm++;
8252 if (shift + imm > 32)
8253 goto illegal_op;
8254 if (imm < 32)
8255 gen_sbfx(tmp, shift, imm);
8256 break;
8257 case 6: /* Unsigned bitfield extract. */
8258 imm++;
8259 if (shift + imm > 32)
8260 goto illegal_op;
8261 if (imm < 32)
8262 gen_ubfx(tmp, shift, (1u << imm) - 1);
8263 break;
8264 case 3: /* Bitfield insert/clear. */
8265 if (imm < shift)
8266 goto illegal_op;
8267 imm = imm + 1 - shift;
8268 if (imm != 32) {
8269 tmp2 = load_reg(s, rd);
8270 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8271 tcg_temp_free_i32(tmp2);
8273 break;
8274 case 7:
8275 goto illegal_op;
8276 default: /* Saturate. */
8277 if (shift) {
8278 if (op & 1)
8279 tcg_gen_sari_i32(tmp, tmp, shift);
8280 else
8281 tcg_gen_shli_i32(tmp, tmp, shift);
8283 tmp2 = tcg_const_i32(imm);
8284 if (op & 4) {
8285 /* Unsigned. */
8286 if ((op & 1) && shift == 0)
8287 gen_helper_usat16(tmp, tmp, tmp2);
8288 else
8289 gen_helper_usat(tmp, tmp, tmp2);
8290 } else {
8291 /* Signed. */
8292 if ((op & 1) && shift == 0)
8293 gen_helper_ssat16(tmp, tmp, tmp2);
8294 else
8295 gen_helper_ssat(tmp, tmp, tmp2);
8297 tcg_temp_free_i32(tmp2);
8298 break;
8300 store_reg(s, rd, tmp);
8301 } else {
8302 imm = ((insn & 0x04000000) >> 15)
8303 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8304 if (insn & (1 << 22)) {
8305 /* 16-bit immediate. */
8306 imm |= (insn >> 4) & 0xf000;
8307 if (insn & (1 << 23)) {
8308 /* movt */
8309 tmp = load_reg(s, rd);
8310 tcg_gen_ext16u_i32(tmp, tmp);
8311 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8312 } else {
8313 /* movw */
8314 tmp = tcg_temp_new_i32();
8315 tcg_gen_movi_i32(tmp, imm);
8317 } else {
8318 /* Add/sub 12-bit immediate. */
8319 if (rn == 15) {
8320 offset = s->pc & ~(uint32_t)3;
8321 if (insn & (1 << 23))
8322 offset -= imm;
8323 else
8324 offset += imm;
8325 tmp = tcg_temp_new_i32();
8326 tcg_gen_movi_i32(tmp, offset);
8327 } else {
8328 tmp = load_reg(s, rn);
8329 if (insn & (1 << 23))
8330 tcg_gen_subi_i32(tmp, tmp, imm);
8331 else
8332 tcg_gen_addi_i32(tmp, tmp, imm);
8335 store_reg(s, rd, tmp);
8337 } else {
8338 int shifter_out = 0;
8339 /* modified 12-bit immediate. */
8340 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8341 imm = (insn & 0xff);
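                /* Thumb-2 "modified immediate": shift values 0-3 replicate
                   the 8-bit value across the word in fixed patterns; larger
                   values encode a rotated constant with bit 7 forced to 1.
                   shifter_out mirrors the ARM carry-out behaviour of the
                   rotation.  */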
8342 switch (shift) {
8343 case 0: /* XY */
8344 /* Nothing to do. */
8345 break;
8346 case 1: /* 00XY00XY */
8347 imm |= imm << 16;
8348 break;
8349 case 2: /* XY00XY00 */
8350 imm |= imm << 16;
8351 imm <<= 8;
8352 break;
8353 case 3: /* XYXYXYXY */
8354 imm |= imm << 16;
8355 imm |= imm << 8;
8356 break;
8357 default: /* Rotated constant. */
8358 shift = (shift << 1) | (imm >> 7);
8359 imm |= 0x80;
8360 imm = imm << (32 - shift);
8361 shifter_out = 1;
8362 break;
8364 tmp2 = tcg_temp_new_i32();
8365 tcg_gen_movi_i32(tmp2, imm);
8366 rn = (insn >> 16) & 0xf;
8367 if (rn == 15) {
8368 tmp = tcg_temp_new_i32();
8369 tcg_gen_movi_i32(tmp, 0);
8370 } else {
8371 tmp = load_reg(s, rn);
8373 op = (insn >> 21) & 0xf;
8374 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8375 shifter_out, tmp, tmp2))
8376 goto illegal_op;
8377 tcg_temp_free_i32(tmp2);
8378 rd = (insn >> 8) & 0xf;
8379 if (rd != 15) {
8380 store_reg(s, rd, tmp);
8381 } else {
8382 tcg_temp_free_i32(tmp);
8386 break;
8387    case 12: /* Load/store single data item.  */
8388        {
8389 int postinc = 0;
8390 int writeback = 0;
8391 int user;
8392 if ((insn & 0x01100000) == 0x01000000) {
8393 if (disas_neon_ls_insn(env, s, insn))
8394 goto illegal_op;
8395 break;
8397 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8398 if (rs == 15) {
8399 if (!(insn & (1 << 20))) {
8400 goto illegal_op;
8402 if (op != 2) {
8403                /* Byte or halfword load space with dest == r15: memory hints.
8404 * Catch them early so we don't emit pointless addressing code.
8405 * This space is a mix of:
8406 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8407 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8408 * cores)
8409 * unallocated hints, which must be treated as NOPs
8410 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8411 * which is easiest for the decoding logic
8412             * Some space which must UNDEF
8413             */
8414 int op1 = (insn >> 23) & 3;
8415 int op2 = (insn >> 6) & 0x3f;
8416 if (op & 2) {
8417 goto illegal_op;
8419 if (rn == 15) {
8420 /* UNPREDICTABLE or unallocated hint */
8421 return 0;
8423 if (op1 & 1) {
8424 return 0; /* PLD* or unallocated hint */
8426 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8427 return 0; /* PLD* or unallocated hint */
8429 /* UNDEF space, or an UNPREDICTABLE */
8430 return 1;
8433 user = IS_USER(s);
8434 if (rn == 15) {
8435 addr = tcg_temp_new_i32();
8436 /* PC relative. */
8437 /* s->pc has already been incremented by 4. */
8438 imm = s->pc & 0xfffffffc;
8439 if (insn & (1 << 23))
8440 imm += insn & 0xfff;
8441 else
8442 imm -= insn & 0xfff;
8443 tcg_gen_movi_i32(addr, imm);
8444 } else {
8445 addr = load_reg(s, rn);
8446 if (insn & (1 << 23)) {
8447 /* Positive offset. */
8448 imm = insn & 0xfff;
8449 tcg_gen_addi_i32(addr, addr, imm);
8450 } else {
8451 imm = insn & 0xff;
8452 switch ((insn >> 8) & 0xf) {
8453 case 0x0: /* Shifted Register. */
8454 shift = (insn >> 4) & 0xf;
8455 if (shift > 3) {
8456 tcg_temp_free_i32(addr);
8457 goto illegal_op;
8459 tmp = load_reg(s, rm);
8460 if (shift)
8461 tcg_gen_shli_i32(tmp, tmp, shift);
8462 tcg_gen_add_i32(addr, addr, tmp);
8463 tcg_temp_free_i32(tmp);
8464 break;
8465 case 0xc: /* Negative offset. */
8466 tcg_gen_addi_i32(addr, addr, -imm);
8467 break;
8468 case 0xe: /* User privilege. */
8469 tcg_gen_addi_i32(addr, addr, imm);
8470 user = 1;
8471 break;
8472 case 0x9: /* Post-decrement. */
8473 imm = -imm;
8474 /* Fall through. */
8475 case 0xb: /* Post-increment. */
8476 postinc = 1;
8477 writeback = 1;
8478 break;
8479 case 0xd: /* Pre-decrement. */
8480 imm = -imm;
8481 /* Fall through. */
8482 case 0xf: /* Pre-increment. */
8483 tcg_gen_addi_i32(addr, addr, imm);
8484 writeback = 1;
8485 break;
8486 default:
8487 tcg_temp_free_i32(addr);
8488 goto illegal_op;
8492 if (insn & (1 << 20)) {
8493 /* Load. */
8494 switch (op) {
8495 case 0: tmp = gen_ld8u(addr, user); break;
8496 case 4: tmp = gen_ld8s(addr, user); break;
8497 case 1: tmp = gen_ld16u(addr, user); break;
8498 case 5: tmp = gen_ld16s(addr, user); break;
8499 case 2: tmp = gen_ld32(addr, user); break;
8500 default:
8501 tcg_temp_free_i32(addr);
8502 goto illegal_op;
8504 if (rs == 15) {
8505 gen_bx(s, tmp);
8506 } else {
8507 store_reg(s, rs, tmp);
8509 } else {
8510 /* Store. */
8511 tmp = load_reg(s, rs);
8512 switch (op) {
8513 case 0: gen_st8(tmp, addr, user); break;
8514 case 1: gen_st16(tmp, addr, user); break;
8515 case 2: gen_st32(tmp, addr, user); break;
8516 default:
8517 tcg_temp_free_i32(addr);
8518 goto illegal_op;
8521 if (postinc)
8522 tcg_gen_addi_i32(addr, addr, imm);
8523 if (writeback) {
8524 store_reg(s, rn, addr);
8525 } else {
8526 tcg_temp_free_i32(addr);
8529 break;
8530 default:
8531 goto illegal_op;
8533 return 0;
8534 illegal_op:
8535 return 1;
8538 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8540 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8541 int32_t offset;
8542 int i;
8543 TCGv tmp;
8544 TCGv tmp2;
8545 TCGv addr;
8547 if (s->condexec_mask) {
8548 cond = s->condexec_cond;
8549 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8550 s->condlabel = gen_new_label();
8551 gen_test_cc(cond ^ 1, s->condlabel);
8552 s->condjmp = 1;
8556 insn = lduw_code(s->pc);
8557 s->pc += 2;
8559 switch (insn >> 12) {
8560 case 0: case 1:
8562 rd = insn & 7;
8563 op = (insn >> 11) & 3;
8564 if (op == 3) {
8565 /* add/subtract */
8566 rn = (insn >> 3) & 7;
8567 tmp = load_reg(s, rn);
8568 if (insn & (1 << 10)) {
8569 /* immediate */
8570 tmp2 = tcg_temp_new_i32();
8571 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8572 } else {
8573 /* reg */
8574 rm = (insn >> 6) & 7;
8575 tmp2 = load_reg(s, rm);
8577 if (insn & (1 << 9)) {
8578 if (s->condexec_mask)
8579 tcg_gen_sub_i32(tmp, tmp, tmp2);
8580 else
8581 gen_helper_sub_cc(tmp, tmp, tmp2);
8582 } else {
8583 if (s->condexec_mask)
8584 tcg_gen_add_i32(tmp, tmp, tmp2);
8585 else
8586 gen_helper_add_cc(tmp, tmp, tmp2);
8588 tcg_temp_free_i32(tmp2);
8589 store_reg(s, rd, tmp);
8590 } else {
8591 /* shift immediate */
8592 rm = (insn >> 3) & 7;
8593 shift = (insn >> 6) & 0x1f;
8594 tmp = load_reg(s, rm);
8595 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8596 if (!s->condexec_mask)
8597 gen_logic_CC(tmp);
8598 store_reg(s, rd, tmp);
8600 break;
8601 case 2: case 3:
8602 /* arithmetic large immediate */
8603 op = (insn >> 11) & 3;
8604 rd = (insn >> 8) & 0x7;
8605 if (op == 0) { /* mov */
8606 tmp = tcg_temp_new_i32();
8607 tcg_gen_movi_i32(tmp, insn & 0xff);
8608 if (!s->condexec_mask)
8609 gen_logic_CC(tmp);
8610 store_reg(s, rd, tmp);
8611 } else {
8612 tmp = load_reg(s, rd);
8613 tmp2 = tcg_temp_new_i32();
8614 tcg_gen_movi_i32(tmp2, insn & 0xff);
8615 switch (op) {
8616 case 1: /* cmp */
8617 gen_helper_sub_cc(tmp, tmp, tmp2);
8618 tcg_temp_free_i32(tmp);
8619 tcg_temp_free_i32(tmp2);
8620 break;
8621 case 2: /* add */
8622 if (s->condexec_mask)
8623 tcg_gen_add_i32(tmp, tmp, tmp2);
8624 else
8625 gen_helper_add_cc(tmp, tmp, tmp2);
8626 tcg_temp_free_i32(tmp2);
8627 store_reg(s, rd, tmp);
8628 break;
8629 case 3: /* sub */
8630 if (s->condexec_mask)
8631 tcg_gen_sub_i32(tmp, tmp, tmp2);
8632 else
8633 gen_helper_sub_cc(tmp, tmp, tmp2);
8634 tcg_temp_free_i32(tmp2);
8635 store_reg(s, rd, tmp);
8636 break;
8639 break;
8640 case 4:
8641 if (insn & (1 << 11)) {
8642 rd = (insn >> 8) & 7;
8643 /* load pc-relative. Bit 1 of PC is ignored. */
8644 val = s->pc + 2 + ((insn & 0xff) * 4);
8645 val &= ~(uint32_t)2;
8646 addr = tcg_temp_new_i32();
8647 tcg_gen_movi_i32(addr, val);
8648 tmp = gen_ld32(addr, IS_USER(s));
8649 tcg_temp_free_i32(addr);
8650 store_reg(s, rd, tmp);
8651 break;
8653 if (insn & (1 << 10)) {
8654 /* data processing extended or blx */
8655 rd = (insn & 7) | ((insn >> 4) & 8);
8656 rm = (insn >> 3) & 0xf;
8657 op = (insn >> 8) & 3;
8658 switch (op) {
8659 case 0: /* add */
8660 tmp = load_reg(s, rd);
8661 tmp2 = load_reg(s, rm);
8662 tcg_gen_add_i32(tmp, tmp, tmp2);
8663 tcg_temp_free_i32(tmp2);
8664 store_reg(s, rd, tmp);
8665 break;
8666 case 1: /* cmp */
8667 tmp = load_reg(s, rd);
8668 tmp2 = load_reg(s, rm);
8669 gen_helper_sub_cc(tmp, tmp, tmp2);
8670 tcg_temp_free_i32(tmp2);
8671 tcg_temp_free_i32(tmp);
8672 break;
8673 case 2: /* mov/cpy */
8674 tmp = load_reg(s, rm);
8675 store_reg(s, rd, tmp);
8676 break;
8677 case 3:/* branch [and link] exchange thumb register */
8678 tmp = load_reg(s, rm);
8679 if (insn & (1 << 7)) {
8680 ARCH(5);
8681 val = (uint32_t)s->pc | 1;
8682 tmp2 = tcg_temp_new_i32();
8683 tcg_gen_movi_i32(tmp2, val);
8684 store_reg(s, 14, tmp2);
8686 /* already thumb, no need to check */
8687 gen_bx(s, tmp);
8688 break;
8690 break;
8693 /* data processing register */
8694 rd = insn & 7;
8695 rm = (insn >> 3) & 7;
8696 op = (insn >> 6) & 0xf;
8697 if (op == 2 || op == 3 || op == 4 || op == 7) {
8698 /* the shift/rotate ops want the operands backwards */
8699 val = rm;
8700 rm = rd;
8701 rd = val;
8702 val = 1;
8703 } else {
8704 val = 0;
8707 if (op == 9) { /* neg */
8708 tmp = tcg_temp_new_i32();
8709 tcg_gen_movi_i32(tmp, 0);
8710 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8711 tmp = load_reg(s, rd);
8712 } else {
8713 TCGV_UNUSED(tmp);
8716 tmp2 = load_reg(s, rm);
8717 switch (op) {
8718 case 0x0: /* and */
8719 tcg_gen_and_i32(tmp, tmp, tmp2);
8720 if (!s->condexec_mask)
8721 gen_logic_CC(tmp);
8722 break;
8723 case 0x1: /* eor */
8724 tcg_gen_xor_i32(tmp, tmp, tmp2);
8725 if (!s->condexec_mask)
8726 gen_logic_CC(tmp);
8727 break;
8728 case 0x2: /* lsl */
8729 if (s->condexec_mask) {
8730 gen_helper_shl(tmp2, tmp2, tmp);
8731 } else {
8732 gen_helper_shl_cc(tmp2, tmp2, tmp);
8733 gen_logic_CC(tmp2);
8735 break;
8736 case 0x3: /* lsr */
8737 if (s->condexec_mask) {
8738 gen_helper_shr(tmp2, tmp2, tmp);
8739 } else {
8740 gen_helper_shr_cc(tmp2, tmp2, tmp);
8741 gen_logic_CC(tmp2);
8743 break;
8744 case 0x4: /* asr */
8745 if (s->condexec_mask) {
8746 gen_helper_sar(tmp2, tmp2, tmp);
8747 } else {
8748 gen_helper_sar_cc(tmp2, tmp2, tmp);
8749 gen_logic_CC(tmp2);
8751 break;
8752 case 0x5: /* adc */
8753 if (s->condexec_mask)
8754 gen_adc(tmp, tmp2);
8755 else
8756 gen_helper_adc_cc(tmp, tmp, tmp2);
8757 break;
8758 case 0x6: /* sbc */
8759 if (s->condexec_mask)
8760 gen_sub_carry(tmp, tmp, tmp2);
8761 else
8762 gen_helper_sbc_cc(tmp, tmp, tmp2);
8763 break;
8764 case 0x7: /* ror */
8765 if (s->condexec_mask) {
8766 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8767 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8768 } else {
8769 gen_helper_ror_cc(tmp2, tmp2, tmp);
8770 gen_logic_CC(tmp2);
8772 break;
8773 case 0x8: /* tst */
8774 tcg_gen_and_i32(tmp, tmp, tmp2);
8775 gen_logic_CC(tmp);
8776 rd = 16;
8777 break;
8778 case 0x9: /* neg */
8779 if (s->condexec_mask)
8780 tcg_gen_neg_i32(tmp, tmp2);
8781 else
8782 gen_helper_sub_cc(tmp, tmp, tmp2);
8783 break;
8784 case 0xa: /* cmp */
8785 gen_helper_sub_cc(tmp, tmp, tmp2);
8786 rd = 16;
8787 break;
8788 case 0xb: /* cmn */
8789 gen_helper_add_cc(tmp, tmp, tmp2);
8790 rd = 16;
8791 break;
8792 case 0xc: /* orr */
8793 tcg_gen_or_i32(tmp, tmp, tmp2);
8794 if (!s->condexec_mask)
8795 gen_logic_CC(tmp);
8796 break;
8797 case 0xd: /* mul */
8798 tcg_gen_mul_i32(tmp, tmp, tmp2);
8799 if (!s->condexec_mask)
8800 gen_logic_CC(tmp);
8801 break;
8802 case 0xe: /* bic */
8803 tcg_gen_andc_i32(tmp, tmp, tmp2);
8804 if (!s->condexec_mask)
8805 gen_logic_CC(tmp);
8806 break;
8807 case 0xf: /* mvn */
8808 tcg_gen_not_i32(tmp2, tmp2);
8809 if (!s->condexec_mask)
8810 gen_logic_CC(tmp2);
8811 val = 1;
8812 rm = rd;
8813 break;
8815 if (rd != 16) {
8816 if (val) {
8817 store_reg(s, rm, tmp2);
8818 if (op != 0xf)
8819 tcg_temp_free_i32(tmp);
8820 } else {
8821 store_reg(s, rd, tmp);
8822 tcg_temp_free_i32(tmp2);
8824 } else {
8825 tcg_temp_free_i32(tmp);
8826 tcg_temp_free_i32(tmp2);
8828 break;
8830 case 5:
8831 /* load/store register offset. */
8832 rd = insn & 7;
8833 rn = (insn >> 3) & 7;
8834 rm = (insn >> 6) & 7;
8835 op = (insn >> 9) & 7;
8836 addr = load_reg(s, rn);
8837 tmp = load_reg(s, rm);
8838 tcg_gen_add_i32(addr, addr, tmp);
8839 tcg_temp_free_i32(tmp);
        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
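        /* The 5-bit immediate lives in bits [10:6] and is scaled by the
           access size, so (insn >> 4) & 0x7c below extracts imm5 * 4 in
           a single shift-and-mask for these word accesses. */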
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
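        /* Same trick as the word form: (insn >> 5) & 0x3e below extracts
           imm5 * 2, the halfword-scaled offset, in one shift-and-mask. */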
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC.  Bit 1 is ignored.  s->pc has already been advanced
               past this insn, so +2 yields the architectural PC (insn
               address + 4) before word alignment. */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
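        /* Dispatch on bits [11:8] of the misc encoding: 0 adjusts SP,
           2 is sign/zero extend, 4/5/0xc/0xd are push/pop, 1/3/9/0xb are
           cbz/cbnz, 0xe is bkpt, 0xf is IT or a nop-hint, 0xa is the rev
           group, 6 is cps; everything else is undefined here. */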
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
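            /* Bits [7:0] give the low-register list and bit 8 adds lr
               (push) or pc (pop).  The list is walked once up front to
               size the transfer, so a push can pre-decrement SP and both
               directions can then step upwards through memory. */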
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
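            /* Assemble the branch offset: imm5 sits in bits [7:3] and the
               extra i bit in bit 9, so the expression below computes
               (i:imm5) * 2.  The base is the architectural PC (insn
               address + 4); s->pc already points past this 2-byte insn,
               hence the extra + 2. */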
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
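            /* Only the top three bits of the first condition are kept
               here; its low bit goes into condexec_mask together with the
               4-bit mask, so the T/E choice for each following insn falls
               out as the mask is shifted left by the advance logic in
               gen_intermediate_code_internal(). */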
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback (skipped if the base register is also
           in the transfer list).  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
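        /* gen_test_cc branches to condlabel on the inverted condition,
           so the code below runs only on the taken path.  The 8-bit
           offset is sign-extended and scaled by 2, relative to the
           architectural PC (insn address + 4), i.e. s->pc + 2 since
           s->pc has already been advanced past this insn. */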
        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction.  */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[].  gen_pc_load() then uses this to restore
     * the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4)
                                        | (dc->condexec_mask >> 1);
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
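            /* Advance the IT state machine: shift the next T/E bit out of
               the mask into the low bit of the condition, and clear the
               condition once the mask is exhausted.  This mirrors the
               packing chosen when the IT insn itself was decoded. */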
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
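    /* Flag characters below: N/Z/C/V from the top four PSR bits, 'T' for
       Thumb state ('A' for ARM), then the mode name and 32 or 26
       depending on whether a 32-bit mode is in use. */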
9527 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9528 psr,
9529 psr & (1 << 31) ? 'N' : '-',
9530 psr & (1 << 30) ? 'Z' : '-',
9531 psr & (1 << 29) ? 'C' : '-',
9532 psr & (1 << 28) ? 'V' : '-',
9533 psr & CPSR_T ? 'T' : 'A',
9534 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9536 #if 0
9537 for (i = 0; i < 16; i++) {
9538 d.d = env->vfp.regs[i];
9539 s0.i = d.l.lower;
9540 s1.i = d.l.upper;
9541 d0.f64 = d.d;
9542 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9543 i * 2, (int)s0.i, s0.s,
9544 i * 2 + 1, (int)s1.i, s1.s,
9545 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9546 d0.d);
9548 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9549 #endif
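/* Restore the guest PC and (per the condexec note above) the IT bits for
   the instruction containing searched_pc, using the per-insn tables that
   were filled in during the search_pc translation pass. */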
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}