target-arm: Handle UNDEFs for Neon single element load/stores
[qemu.git] / target-arm / translate.c
blob 80b25ac5c469265a11fdc0fdcbcab05f75cc8a0f
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
33 #include "helper.h"
34 #define GEN_HELPER 1
35 #include "helper.h"
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
47 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
49 /* internal defines */
50 typedef struct DisasContext {
51 target_ulong pc;
52 int is_jmp;
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
57 /* Thumb-2 conditional execution bits. */
58 int condexec_mask;
59 int condexec_cond;
60 struct TranslationBlock *tb;
61 int singlestep_enabled;
62 int thumb;
63 #if !defined(CONFIG_USER_ONLY)
64 int user;
65 #endif
66 int vfp_enabled;
67 int vec_len;
68 int vec_stride;
69 } DisasContext;
71 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73 #if defined(CONFIG_USER_ONLY)
74 #define IS_USER(s) 1
75 #else
76 #define IS_USER(s) (s->user)
77 #endif
79 /* These instructions trap after executing, so defer them until after the
80 conditional execution state has been updated. */
81 #define DISAS_WFI 4
82 #define DISAS_SWI 5
84 static TCGv_ptr cpu_env;
85 /* We reuse the same 64-bit temporaries for efficiency. */
86 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
87 static TCGv_i32 cpu_R[16];
88 static TCGv_i32 cpu_exclusive_addr;
89 static TCGv_i32 cpu_exclusive_val;
90 static TCGv_i32 cpu_exclusive_high;
91 #ifdef CONFIG_USER_ONLY
92 static TCGv_i32 cpu_exclusive_test;
93 static TCGv_i32 cpu_exclusive_info;
94 #endif
96 /* FIXME: These should be removed. */
97 static TCGv cpu_F0s, cpu_F1s;
98 static TCGv_i64 cpu_F0d, cpu_F1d;
100 #include "gen-icount.h"
102 static const char *regnames[] =
103 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106 /* initialize TCG globals. */
107 void arm_translate_init(void)
109 int i;
111 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113 for (i = 0; i < 16; i++) {
114 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, regs[i]),
116 regnames[i]);
118 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_addr), "exclusive_addr");
120 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, exclusive_val), "exclusive_val");
122 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, exclusive_high), "exclusive_high");
124 #ifdef CONFIG_USER_ONLY
125 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, exclusive_test), "exclusive_test");
127 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, exclusive_info), "exclusive_info");
129 #endif
131 #define GEN_HELPER 2
132 #include "helper.h"
135 static inline TCGv load_cpu_offset(int offset)
137 TCGv tmp = tcg_temp_new_i32();
138 tcg_gen_ld_i32(tmp, cpu_env, offset);
139 return tmp;
142 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
144 static inline void store_cpu_offset(TCGv var, int offset)
146 tcg_gen_st_i32(var, cpu_env, offset);
147 tcg_temp_free_i32(var);
150 #define store_cpu_field(var, name) \
151 store_cpu_offset(var, offsetof(CPUState, name))
153 /* Set a variable to the value of a CPU register. */
154 static void load_reg_var(DisasContext *s, TCGv var, int reg)
156 if (reg == 15) {
157 uint32_t addr;
158 /* normally, since we updated PC, we only need to add one insn */
159 if (s->thumb)
160 addr = (long)s->pc + 2;
161 else
162 addr = (long)s->pc + 4;
163 tcg_gen_movi_i32(var, addr);
164 } else {
165 tcg_gen_mov_i32(var, cpu_R[reg]);
169 /* Create a new temporary and set it to the value of a CPU register. */
170 static inline TCGv load_reg(DisasContext *s, int reg)
172 TCGv tmp = tcg_temp_new_i32();
173 load_reg_var(s, tmp, reg);
174 return tmp;
177 /* Set a CPU register. The source must be a temporary and will be
178 marked as dead. */
179 static void store_reg(DisasContext *s, int reg, TCGv var)
181 if (reg == 15) {
182 tcg_gen_andi_i32(var, var, ~1);
183 s->is_jmp = DISAS_JUMP;
185 tcg_gen_mov_i32(cpu_R[reg], var);
186 tcg_temp_free_i32(var);
189 /* Value extensions. */
190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
191 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
192 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
193 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
196 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
199 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
201 TCGv tmp_mask = tcg_const_i32(mask);
202 gen_helper_cpsr_write(var, tmp_mask);
203 tcg_temp_free_i32(tmp_mask);
205 /* Set NZCV flags from the high 4 bits of var. */
206 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
208 static void gen_exception(int excp)
210 TCGv tmp = tcg_temp_new_i32();
211 tcg_gen_movi_i32(tmp, excp);
212 gen_helper_exception(tmp);
213 tcg_temp_free_i32(tmp);
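/* Dual signed 16x16->32 multiply: a = lo(a)*lo(b), b = hi(a)*hi(b). Both inputs are clobbered. */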
216 static void gen_smul_dual(TCGv a, TCGv b)
218 TCGv tmp1 = tcg_temp_new_i32();
219 TCGv tmp2 = tcg_temp_new_i32();
220 tcg_gen_ext16s_i32(tmp1, a);
221 tcg_gen_ext16s_i32(tmp2, b);
222 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
223 tcg_temp_free_i32(tmp2);
224 tcg_gen_sari_i32(a, a, 16);
225 tcg_gen_sari_i32(b, b, 16);
226 tcg_gen_mul_i32(b, b, a);
227 tcg_gen_mov_i32(a, tmp1);
228 tcg_temp_free_i32(tmp1);
231 /* Byteswap each halfword. */
232 static void gen_rev16(TCGv var)
234 TCGv tmp = tcg_temp_new_i32();
235 tcg_gen_shri_i32(tmp, var, 8);
236 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
237 tcg_gen_shli_i32(var, var, 8);
238 tcg_gen_andi_i32(var, var, 0xff00ff00);
239 tcg_gen_or_i32(var, var, tmp);
240 tcg_temp_free_i32(tmp);
243 /* Byteswap low halfword and sign extend. */
244 static void gen_revsh(TCGv var)
246 tcg_gen_ext16u_i32(var, var);
247 tcg_gen_bswap16_i32(var, var);
248 tcg_gen_ext16s_i32(var, var);
251 /* Unsigned bitfield extract. */
252 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
254 if (shift)
255 tcg_gen_shri_i32(var, var, shift);
256 tcg_gen_andi_i32(var, var, mask);
259 /* Signed bitfield extract. */
260 static void gen_sbfx(TCGv var, int shift, int width)
262 uint32_t signbit;
264 if (shift)
265 tcg_gen_sari_i32(var, var, shift);
266 if (shift + width < 32) {
267 signbit = 1u << (width - 1);
268 tcg_gen_andi_i32(var, var, (1u << width) - 1);
269 tcg_gen_xori_i32(var, var, signbit);
270 tcg_gen_subi_i32(var, var, signbit);
274 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
275 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
277 tcg_gen_andi_i32(val, val, mask);
278 tcg_gen_shli_i32(val, val, shift);
279 tcg_gen_andi_i32(base, base, ~(mask << shift));
280 tcg_gen_or_i32(dest, base, val);
283 /* Return (b << 32) + a. Mark inputs as dead. */
284 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
286 TCGv_i64 tmp64 = tcg_temp_new_i64();
288 tcg_gen_extu_i32_i64(tmp64, b);
289 tcg_temp_free_i32(b);
290 tcg_gen_shli_i64(tmp64, tmp64, 32);
291 tcg_gen_add_i64(a, tmp64, a);
293 tcg_temp_free_i64(tmp64);
294 return a;
297 /* Return (b << 32) - a. Mark inputs as dead. */
298 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
300 TCGv_i64 tmp64 = tcg_temp_new_i64();
302 tcg_gen_extu_i32_i64(tmp64, b);
303 tcg_temp_free_i32(b);
304 tcg_gen_shli_i64(tmp64, tmp64, 32);
305 tcg_gen_sub_i64(a, tmp64, a);
307 tcg_temp_free_i64(tmp64);
308 return a;
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 tcg_temp_free_i32(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 tcg_temp_free_i64(tmp2);
325 return tmp1;
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 tcg_temp_free_i32(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 tcg_temp_free_i64(tmp2);
339 return tmp1;
342 /* Swap low and high halfwords. */
343 static void gen_swap_half(TCGv var)
345 TCGv tmp = tcg_temp_new_i32();
346 tcg_gen_shri_i32(tmp, var, 16);
347 tcg_gen_shli_i32(var, var, 16);
348 tcg_gen_or_i32(var, var, tmp);
349 tcg_temp_free_i32(tmp);
352 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
353 tmp = (t0 ^ t1) & 0x8000;
354 t0 &= ~0x8000;
355 t1 &= ~0x8000;
356 t0 = (t0 + t1) ^ tmp; */
359 static void gen_add16(TCGv t0, TCGv t1)
361 TCGv tmp = tcg_temp_new_i32();
362 tcg_gen_xor_i32(tmp, t0, t1);
363 tcg_gen_andi_i32(tmp, tmp, 0x8000);
364 tcg_gen_andi_i32(t0, t0, ~0x8000);
365 tcg_gen_andi_i32(t1, t1, ~0x8000);
366 tcg_gen_add_i32(t0, t0, t1);
367 tcg_gen_xor_i32(t0, t0, tmp);
368 tcg_temp_free_i32(tmp);
369 tcg_temp_free_i32(t1);
372 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
374 /* Set CF to the top bit of var. */
375 static void gen_set_CF_bit31(TCGv var)
377 TCGv tmp = tcg_temp_new_i32();
378 tcg_gen_shri_i32(tmp, var, 31);
379 gen_set_CF(tmp);
380 tcg_temp_free_i32(tmp);
383 /* Set N and Z flags from var. */
384 static inline void gen_logic_CC(TCGv var)
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
390 /* T0 += T1 + CF. */
391 static void gen_adc(TCGv t0, TCGv t1)
393 TCGv tmp;
394 tcg_gen_add_i32(t0, t0, t1);
395 tmp = load_cpu_field(CF);
396 tcg_gen_add_i32(t0, t0, tmp);
397 tcg_temp_free_i32(tmp);
400 /* dest = T0 + T1 + CF. */
401 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
403 TCGv tmp;
404 tcg_gen_add_i32(dest, t0, t1);
405 tmp = load_cpu_field(CF);
406 tcg_gen_add_i32(dest, dest, tmp);
407 tcg_temp_free_i32(tmp);
410 /* dest = T0 - T1 + CF - 1. */
411 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
413 TCGv tmp;
414 tcg_gen_sub_i32(dest, t0, t1);
415 tmp = load_cpu_field(CF);
416 tcg_gen_add_i32(dest, dest, tmp);
417 tcg_gen_subi_i32(dest, dest, 1);
418 tcg_temp_free_i32(tmp);
421 /* FIXME: Implement this natively. */
422 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
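/* Set CF to bit 'shift' of var (the shifter carry out for an immediate shift). */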
424 static void shifter_out_im(TCGv var, int shift)
426 TCGv tmp = tcg_temp_new_i32();
427 if (shift == 0) {
428 tcg_gen_andi_i32(tmp, var, 1);
429 } else {
430 tcg_gen_shri_i32(tmp, var, shift);
431 if (shift != 31)
432 tcg_gen_andi_i32(tmp, tmp, 1);
434 gen_set_CF(tmp);
435 tcg_temp_free_i32(tmp);
438 /* Shift by immediate. Includes special handling for shift == 0. */
439 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
441 switch (shiftop) {
442 case 0: /* LSL */
443 if (shift != 0) {
444 if (flags)
445 shifter_out_im(var, 32 - shift);
446 tcg_gen_shli_i32(var, var, shift);
448 break;
449 case 1: /* LSR */
450 if (shift == 0) {
451 if (flags) {
452 tcg_gen_shri_i32(var, var, 31);
453 gen_set_CF(var);
455 tcg_gen_movi_i32(var, 0);
456 } else {
457 if (flags)
458 shifter_out_im(var, shift - 1);
459 tcg_gen_shri_i32(var, var, shift);
461 break;
462 case 2: /* ASR */
463 if (shift == 0)
464 shift = 32;
465 if (flags)
466 shifter_out_im(var, shift - 1);
467 if (shift == 32)
468 shift = 31;
469 tcg_gen_sari_i32(var, var, shift);
470 break;
471 case 3: /* ROR/RRX */
472 if (shift != 0) {
473 if (flags)
474 shifter_out_im(var, shift - 1);
475 tcg_gen_rotri_i32(var, var, shift); break;
476 } else {
477 TCGv tmp = load_cpu_field(CF);
478 if (flags)
479 shifter_out_im(var, 0);
480 tcg_gen_shri_i32(var, var, 1);
481 tcg_gen_shli_i32(tmp, tmp, 31);
482 tcg_gen_or_i32(var, var, tmp);
483 tcg_temp_free_i32(tmp);
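/* Shift var by a register amount; the shift temporary is freed. Uses the flag-setting helpers when flags is nonzero. */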
488 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
489 TCGv shift, int flags)
491 if (flags) {
492 switch (shiftop) {
493 case 0: gen_helper_shl_cc(var, var, shift); break;
494 case 1: gen_helper_shr_cc(var, var, shift); break;
495 case 2: gen_helper_sar_cc(var, var, shift); break;
496 case 3: gen_helper_ror_cc(var, var, shift); break;
498 } else {
499 switch (shiftop) {
500 case 0: gen_helper_shl(var, var, shift); break;
501 case 1: gen_helper_shr(var, var, shift); break;
502 case 2: gen_helper_sar(var, var, shift); break;
503 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
504 tcg_gen_rotr_i32(var, var, shift); break;
507 tcg_temp_free_i32(shift);
510 #define PAS_OP(pfx) \
511 switch (op2) { \
512 case 0: gen_pas_helper(glue(pfx,add16)); break; \
513 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
514 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
515 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
516 case 4: gen_pas_helper(glue(pfx,add8)); break; \
517 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
519 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
521 TCGv_ptr tmp;
523 switch (op1) {
524 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
525 case 1:
526 tmp = tcg_temp_new_ptr();
527 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
528 PAS_OP(s)
529 tcg_temp_free_ptr(tmp);
530 break;
531 case 5:
532 tmp = tcg_temp_new_ptr();
533 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
534 PAS_OP(u)
535 tcg_temp_free_ptr(tmp);
536 break;
537 #undef gen_pas_helper
538 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
539 case 2:
540 PAS_OP(q);
541 break;
542 case 3:
543 PAS_OP(sh);
544 break;
545 case 6:
546 PAS_OP(uq);
547 break;
548 case 7:
549 PAS_OP(uh);
550 break;
551 #undef gen_pas_helper
554 #undef PAS_OP
556 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
557 #define PAS_OP(pfx) \
558 switch (op1) { \
559 case 0: gen_pas_helper(glue(pfx,add8)); break; \
560 case 1: gen_pas_helper(glue(pfx,add16)); break; \
561 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
562 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
563 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
564 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
566 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
568 TCGv_ptr tmp;
570 switch (op2) {
571 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
572 case 0:
573 tmp = tcg_temp_new_ptr();
574 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
575 PAS_OP(s)
576 tcg_temp_free_ptr(tmp);
577 break;
578 case 4:
579 tmp = tcg_temp_new_ptr();
580 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
581 PAS_OP(u)
582 tcg_temp_free_ptr(tmp);
583 break;
584 #undef gen_pas_helper
585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
586 case 1:
587 PAS_OP(q);
588 break;
589 case 2:
590 PAS_OP(sh);
591 break;
592 case 5:
593 PAS_OP(uq);
594 break;
595 case 6:
596 PAS_OP(uh);
597 break;
598 #undef gen_pas_helper
601 #undef PAS_OP
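/* Generate a branch to 'label' if condition code 'cc' holds. */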
603 static void gen_test_cc(int cc, int label)
605 TCGv tmp;
606 TCGv tmp2;
607 int inv;
609 switch (cc) {
610 case 0: /* eq: Z */
611 tmp = load_cpu_field(ZF);
612 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
613 break;
614 case 1: /* ne: !Z */
615 tmp = load_cpu_field(ZF);
616 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
617 break;
618 case 2: /* cs: C */
619 tmp = load_cpu_field(CF);
620 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
621 break;
622 case 3: /* cc: !C */
623 tmp = load_cpu_field(CF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 4: /* mi: N */
627 tmp = load_cpu_field(NF);
628 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
629 break;
630 case 5: /* pl: !N */
631 tmp = load_cpu_field(NF);
632 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
633 break;
634 case 6: /* vs: V */
635 tmp = load_cpu_field(VF);
636 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
637 break;
638 case 7: /* vc: !V */
639 tmp = load_cpu_field(VF);
640 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
641 break;
642 case 8: /* hi: C && !Z */
643 inv = gen_new_label();
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
646 tcg_temp_free_i32(tmp);
647 tmp = load_cpu_field(ZF);
648 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
649 gen_set_label(inv);
650 break;
651 case 9: /* ls: !C || Z */
652 tmp = load_cpu_field(CF);
653 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
654 tcg_temp_free_i32(tmp);
655 tmp = load_cpu_field(ZF);
656 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
657 break;
658 case 10: /* ge: N == V -> N ^ V == 0 */
659 tmp = load_cpu_field(VF);
660 tmp2 = load_cpu_field(NF);
661 tcg_gen_xor_i32(tmp, tmp, tmp2);
662 tcg_temp_free_i32(tmp2);
663 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
664 break;
665 case 11: /* lt: N != V -> N ^ V != 0 */
666 tmp = load_cpu_field(VF);
667 tmp2 = load_cpu_field(NF);
668 tcg_gen_xor_i32(tmp, tmp, tmp2);
669 tcg_temp_free_i32(tmp2);
670 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
671 break;
672 case 12: /* gt: !Z && N == V */
673 inv = gen_new_label();
674 tmp = load_cpu_field(ZF);
675 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
676 tcg_temp_free_i32(tmp);
677 tmp = load_cpu_field(VF);
678 tmp2 = load_cpu_field(NF);
679 tcg_gen_xor_i32(tmp, tmp, tmp2);
680 tcg_temp_free_i32(tmp2);
681 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
682 gen_set_label(inv);
683 break;
684 case 13: /* le: Z || N != V */
685 tmp = load_cpu_field(ZF);
686 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
687 tcg_temp_free_i32(tmp);
688 tmp = load_cpu_field(VF);
689 tmp2 = load_cpu_field(NF);
690 tcg_gen_xor_i32(tmp, tmp, tmp2);
691 tcg_temp_free_i32(tmp2);
692 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
693 break;
694 default:
695 fprintf(stderr, "Bad condition code 0x%x\n", cc);
696 abort();
698 tcg_temp_free_i32(tmp);
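/* Nonzero for data processing ops whose flags are set via gen_logic_CC (logical ops), zero for arithmetic ops. */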
701 static const uint8_t table_logic_cc[16] = {
702 1, /* and */
703 1, /* xor */
704 0, /* sub */
705 0, /* rsb */
706 0, /* add */
707 0, /* adc */
708 0, /* sbc */
709 0, /* rsc */
710 1, /* andl */
711 1, /* xorl */
712 0, /* cmp */
713 0, /* cmn */
714 1, /* orr */
715 1, /* mov */
716 1, /* bic */
717 1, /* mvn */
720 /* Set PC and Thumb state from an immediate address. */
721 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
723 TCGv tmp;
725 s->is_jmp = DISAS_UPDATE;
726 if (s->thumb != (addr & 1)) {
727 tmp = tcg_temp_new_i32();
728 tcg_gen_movi_i32(tmp, addr & 1);
729 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
730 tcg_temp_free_i32(tmp);
732 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
735 /* Set PC and Thumb state from var. var is marked as dead. */
736 static inline void gen_bx(DisasContext *s, TCGv var)
738 s->is_jmp = DISAS_UPDATE;
739 tcg_gen_andi_i32(cpu_R[15], var, ~1);
740 tcg_gen_andi_i32(var, var, 1);
741 store_cpu_field(var, thumb);
744 /* Variant of store_reg which uses branch&exchange logic when storing
745 to r15 in ARM architecture v7 and above. The source must be a temporary
746 and will be marked as dead. */
747 static inline void store_reg_bx(CPUState *env, DisasContext *s,
748 int reg, TCGv var)
750 if (reg == 15 && ENABLE_ARCH_7) {
751 gen_bx(s, var);
752 } else {
753 store_reg(s, reg, var);
757 /* Variant of store_reg which uses branch&exchange logic when storing
758 * to r15 in ARM architecture v5T and above. This is used for storing
759 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
760 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
761 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
762 int reg, TCGv var)
764 if (reg == 15 && ENABLE_ARCH_5) {
765 gen_bx(s, var);
766 } else {
767 store_reg(s, reg, var);
771 static inline TCGv gen_ld8s(TCGv addr, int index)
773 TCGv tmp = tcg_temp_new_i32();
774 tcg_gen_qemu_ld8s(tmp, addr, index);
775 return tmp;
777 static inline TCGv gen_ld8u(TCGv addr, int index)
779 TCGv tmp = tcg_temp_new_i32();
780 tcg_gen_qemu_ld8u(tmp, addr, index);
781 return tmp;
783 static inline TCGv gen_ld16s(TCGv addr, int index)
785 TCGv tmp = tcg_temp_new_i32();
786 tcg_gen_qemu_ld16s(tmp, addr, index);
787 return tmp;
789 static inline TCGv gen_ld16u(TCGv addr, int index)
791 TCGv tmp = tcg_temp_new_i32();
792 tcg_gen_qemu_ld16u(tmp, addr, index);
793 return tmp;
795 static inline TCGv gen_ld32(TCGv addr, int index)
797 TCGv tmp = tcg_temp_new_i32();
798 tcg_gen_qemu_ld32u(tmp, addr, index);
799 return tmp;
801 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
803 TCGv_i64 tmp = tcg_temp_new_i64();
804 tcg_gen_qemu_ld64(tmp, addr, index);
805 return tmp;
807 static inline void gen_st8(TCGv val, TCGv addr, int index)
809 tcg_gen_qemu_st8(val, addr, index);
810 tcg_temp_free_i32(val);
812 static inline void gen_st16(TCGv val, TCGv addr, int index)
814 tcg_gen_qemu_st16(val, addr, index);
815 tcg_temp_free_i32(val);
817 static inline void gen_st32(TCGv val, TCGv addr, int index)
819 tcg_gen_qemu_st32(val, addr, index);
820 tcg_temp_free_i32(val);
822 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
824 tcg_gen_qemu_st64(val, addr, index);
825 tcg_temp_free_i64(val);
828 static inline void gen_set_pc_im(uint32_t val)
830 tcg_gen_movi_i32(cpu_R[15], val);
833 /* Force a TB lookup after an instruction that changes the CPU state. */
834 static inline void gen_lookup_tb(DisasContext *s)
836 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
837 s->is_jmp = DISAS_UPDATE;
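/* Add the addressing-mode offset of a load/store word/unsigned byte instruction to var: immediate or shifted register, added or subtracted according to the U bit. */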
840 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
841 TCGv var)
843 int val, rm, shift, shiftop;
844 TCGv offset;
846 if (!(insn & (1 << 25))) {
847 /* immediate */
848 val = insn & 0xfff;
849 if (!(insn & (1 << 23)))
850 val = -val;
851 if (val != 0)
852 tcg_gen_addi_i32(var, var, val);
853 } else {
854 /* shift/register */
855 rm = (insn) & 0xf;
856 shift = (insn >> 7) & 0x1f;
857 shiftop = (insn >> 5) & 3;
858 offset = load_reg(s, rm);
859 gen_arm_shift_im(offset, shiftop, shift, 0);
860 if (!(insn & (1 << 23)))
861 tcg_gen_sub_i32(var, var, offset);
862 else
863 tcg_gen_add_i32(var, var, offset);
864 tcg_temp_free_i32(offset);
868 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
869 int extra, TCGv var)
871 int val, rm;
872 TCGv offset;
874 if (insn & (1 << 22)) {
875 /* immediate */
876 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
877 if (!(insn & (1 << 23)))
878 val = -val;
879 val += extra;
880 if (val != 0)
881 tcg_gen_addi_i32(var, var, val);
882 } else {
883 /* register */
884 if (extra)
885 tcg_gen_addi_i32(var, var, extra);
886 rm = (insn) & 0xf;
887 offset = load_reg(s, rm);
888 if (!(insn & (1 << 23)))
889 tcg_gen_sub_i32(var, var, offset);
890 else
891 tcg_gen_add_i32(var, var, offset);
892 tcg_temp_free_i32(offset);
896 #define VFP_OP2(name) \
897 static inline void gen_vfp_##name(int dp) \
899 if (dp) \
900 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
901 else \
902 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
905 VFP_OP2(add)
906 VFP_OP2(sub)
907 VFP_OP2(mul)
908 VFP_OP2(div)
910 #undef VFP_OP2
912 static inline void gen_vfp_abs(int dp)
914 if (dp)
915 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
916 else
917 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
920 static inline void gen_vfp_neg(int dp)
922 if (dp)
923 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
924 else
925 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
928 static inline void gen_vfp_sqrt(int dp)
930 if (dp)
931 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
932 else
933 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
936 static inline void gen_vfp_cmp(int dp)
938 if (dp)
939 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
940 else
941 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
944 static inline void gen_vfp_cmpe(int dp)
946 if (dp)
947 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
948 else
949 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
952 static inline void gen_vfp_F1_ld0(int dp)
954 if (dp)
955 tcg_gen_movi_i64(cpu_F1d, 0);
956 else
957 tcg_gen_movi_i32(cpu_F1s, 0);
960 static inline void gen_vfp_uito(int dp)
962 if (dp)
963 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
964 else
965 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
968 static inline void gen_vfp_sito(int dp)
970 if (dp)
971 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
972 else
973 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
976 static inline void gen_vfp_toui(int dp)
978 if (dp)
979 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
980 else
981 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
984 static inline void gen_vfp_touiz(int dp)
986 if (dp)
987 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
988 else
989 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
992 static inline void gen_vfp_tosi(int dp)
994 if (dp)
995 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
996 else
997 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1000 static inline void gen_vfp_tosiz(int dp)
1002 if (dp)
1003 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1004 else
1005 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1008 #define VFP_GEN_FIX(name) \
1009 static inline void gen_vfp_##name(int dp, int shift) \
1011 TCGv tmp_shift = tcg_const_i32(shift); \
1012 if (dp) \
1013 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1014 else \
1015 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1016 tcg_temp_free_i32(tmp_shift); \
1018 VFP_GEN_FIX(tosh)
1019 VFP_GEN_FIX(tosl)
1020 VFP_GEN_FIX(touh)
1021 VFP_GEN_FIX(toul)
1022 VFP_GEN_FIX(shto)
1023 VFP_GEN_FIX(slto)
1024 VFP_GEN_FIX(uhto)
1025 VFP_GEN_FIX(ulto)
1026 #undef VFP_GEN_FIX
1028 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1030 if (dp)
1031 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1032 else
1033 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1036 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1038 if (dp)
1039 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1040 else
1041 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
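/* Byte offset of VFP register 'reg' within CPUARMState: a full double for dp, otherwise one 32-bit half of the containing double. */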
1044 static inline long
1045 vfp_reg_offset (int dp, int reg)
1047 if (dp)
1048 return offsetof(CPUARMState, vfp.regs[reg]);
1049 else if (reg & 1) {
1050 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1051 + offsetof(CPU_DoubleU, l.upper);
1052 } else {
1053 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1054 + offsetof(CPU_DoubleU, l.lower);
1058 /* Return the offset of a 32-bit piece of a NEON register.
1059 zero is the least significant end of the register. */
1060 static inline long
1061 neon_reg_offset (int reg, int n)
1063 int sreg;
1064 sreg = reg * 2 + n;
1065 return vfp_reg_offset(0, sreg);
1068 static TCGv neon_load_reg(int reg, int pass)
1070 TCGv tmp = tcg_temp_new_i32();
1071 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1072 return tmp;
1075 static void neon_store_reg(int reg, int pass, TCGv var)
1077 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1078 tcg_temp_free_i32(var);
1081 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1083 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1086 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1088 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1091 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1092 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1093 #define tcg_gen_st_f32 tcg_gen_st_i32
1094 #define tcg_gen_st_f64 tcg_gen_st_i64
1096 static inline void gen_mov_F0_vreg(int dp, int reg)
1098 if (dp)
1099 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1100 else
1101 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1104 static inline void gen_mov_F1_vreg(int dp, int reg)
1106 if (dp)
1107 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1108 else
1109 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1112 static inline void gen_mov_vreg_F0(int dp, int reg)
1114 if (dp)
1115 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1116 else
1117 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1120 #define ARM_CP_RW_BIT (1 << 20)
1122 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1124 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1127 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1129 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1132 static inline TCGv iwmmxt_load_creg(int reg)
1134 TCGv var = tcg_temp_new_i32();
1135 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1136 return var;
1139 static inline void iwmmxt_store_creg(int reg, TCGv var)
1141 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1142 tcg_temp_free_i32(var);
1145 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1147 iwmmxt_store_reg(cpu_M0, rn);
1150 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1152 iwmmxt_load_reg(cpu_M0, rn);
1155 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1157 iwmmxt_load_reg(cpu_V1, rn);
1158 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1161 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1163 iwmmxt_load_reg(cpu_V1, rn);
1164 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1167 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1169 iwmmxt_load_reg(cpu_V1, rn);
1170 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1173 #define IWMMXT_OP(name) \
1174 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1176 iwmmxt_load_reg(cpu_V1, rn); \
1177 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1180 #define IWMMXT_OP_SIZE(name) \
1181 IWMMXT_OP(name##b) \
1182 IWMMXT_OP(name##w) \
1183 IWMMXT_OP(name##l)
1185 #define IWMMXT_OP_1(name) \
1186 static inline void gen_op_iwmmxt_##name##_M0(void) \
1188 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
1191 IWMMXT_OP(maddsq)
1192 IWMMXT_OP(madduq)
1193 IWMMXT_OP(sadb)
1194 IWMMXT_OP(sadw)
1195 IWMMXT_OP(mulslw)
1196 IWMMXT_OP(mulshw)
1197 IWMMXT_OP(mululw)
1198 IWMMXT_OP(muluhw)
1199 IWMMXT_OP(macsw)
1200 IWMMXT_OP(macuw)
1202 IWMMXT_OP_SIZE(unpackl)
1203 IWMMXT_OP_SIZE(unpackh)
1205 IWMMXT_OP_1(unpacklub)
1206 IWMMXT_OP_1(unpackluw)
1207 IWMMXT_OP_1(unpacklul)
1208 IWMMXT_OP_1(unpackhub)
1209 IWMMXT_OP_1(unpackhuw)
1210 IWMMXT_OP_1(unpackhul)
1211 IWMMXT_OP_1(unpacklsb)
1212 IWMMXT_OP_1(unpacklsw)
1213 IWMMXT_OP_1(unpacklsl)
1214 IWMMXT_OP_1(unpackhsb)
1215 IWMMXT_OP_1(unpackhsw)
1216 IWMMXT_OP_1(unpackhsl)
1218 IWMMXT_OP_SIZE(cmpeq)
1219 IWMMXT_OP_SIZE(cmpgtu)
1220 IWMMXT_OP_SIZE(cmpgts)
1222 IWMMXT_OP_SIZE(mins)
1223 IWMMXT_OP_SIZE(minu)
1224 IWMMXT_OP_SIZE(maxs)
1225 IWMMXT_OP_SIZE(maxu)
1227 IWMMXT_OP_SIZE(subn)
1228 IWMMXT_OP_SIZE(addn)
1229 IWMMXT_OP_SIZE(subu)
1230 IWMMXT_OP_SIZE(addu)
1231 IWMMXT_OP_SIZE(subs)
1232 IWMMXT_OP_SIZE(adds)
1234 IWMMXT_OP(avgb0)
1235 IWMMXT_OP(avgb1)
1236 IWMMXT_OP(avgw0)
1237 IWMMXT_OP(avgw1)
1239 IWMMXT_OP(msadb)
1241 IWMMXT_OP(packuw)
1242 IWMMXT_OP(packul)
1243 IWMMXT_OP(packuq)
1244 IWMMXT_OP(packsw)
1245 IWMMXT_OP(packsl)
1246 IWMMXT_OP(packsq)
1248 static void gen_op_iwmmxt_set_mup(void)
1250 TCGv tmp;
1251 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1252 tcg_gen_ori_i32(tmp, tmp, 2);
1253 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1256 static void gen_op_iwmmxt_set_cup(void)
1258 TCGv tmp;
1259 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1260 tcg_gen_ori_i32(tmp, tmp, 1);
1261 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1264 static void gen_op_iwmmxt_setpsr_nz(void)
1266 TCGv tmp = tcg_temp_new_i32();
1267 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1268 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1271 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1273 iwmmxt_load_reg(cpu_V1, rn);
1274 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1275 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
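/* Compute the address for an iwMMXt load/store into dest, handling pre/post indexing and base register writeback. Returns nonzero for an invalid addressing mode. */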
1278 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1280 int rd;
1281 uint32_t offset;
1282 TCGv tmp;
1284 rd = (insn >> 16) & 0xf;
1285 tmp = load_reg(s, rd);
1287 offset = (insn & 0xff) << ((insn >> 7) & 2);
1288 if (insn & (1 << 24)) {
1289 /* Pre indexed */
1290 if (insn & (1 << 23))
1291 tcg_gen_addi_i32(tmp, tmp, offset);
1292 else
1293 tcg_gen_addi_i32(tmp, tmp, -offset);
1294 tcg_gen_mov_i32(dest, tmp);
1295 if (insn & (1 << 21))
1296 store_reg(s, rd, tmp);
1297 else
1298 tcg_temp_free_i32(tmp);
1299 } else if (insn & (1 << 21)) {
1300 /* Post indexed */
1301 tcg_gen_mov_i32(dest, tmp);
1302 if (insn & (1 << 23))
1303 tcg_gen_addi_i32(tmp, tmp, offset);
1304 else
1305 tcg_gen_addi_i32(tmp, tmp, -offset);
1306 store_reg(s, rd, tmp);
1307 } else if (!(insn & (1 << 23)))
1308 return 1;
1309 return 0;
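/* Fetch an iwMMXt shift amount, either from control register wCGR0..wCGR3 or from the low 32 bits of wRd, mask it and place it in dest. Returns nonzero for an invalid control register. */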
1312 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1314 int rd = (insn >> 0) & 0xf;
1315 TCGv tmp;
1317 if (insn & (1 << 8)) {
1318 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1319 return 1;
1320 } else {
1321 tmp = iwmmxt_load_creg(rd);
1323 } else {
1324 tmp = tcg_temp_new_i32();
1325 iwmmxt_load_reg(cpu_V0, rd);
1326 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1328 tcg_gen_andi_i32(tmp, tmp, mask);
1329 tcg_gen_mov_i32(dest, tmp);
1330 tcg_temp_free_i32(tmp);
1331 return 0;
1334 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1335 (i.e. an undefined instruction). */
1336 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1338 int rd, wrd;
1339 int rdhi, rdlo, rd0, rd1, i;
1340 TCGv addr;
1341 TCGv tmp, tmp2, tmp3;
1343 if ((insn & 0x0e000e00) == 0x0c000000) {
1344 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1345 wrd = insn & 0xf;
1346 rdlo = (insn >> 12) & 0xf;
1347 rdhi = (insn >> 16) & 0xf;
1348 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1349 iwmmxt_load_reg(cpu_V0, wrd);
1350 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1351 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1352 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1353 } else { /* TMCRR */
1354 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1355 iwmmxt_store_reg(cpu_V0, wrd);
1356 gen_op_iwmmxt_set_mup();
1358 return 0;
1361 wrd = (insn >> 12) & 0xf;
1362 addr = tcg_temp_new_i32();
1363 if (gen_iwmmxt_address(s, insn, addr)) {
1364 tcg_temp_free_i32(addr);
1365 return 1;
1367 if (insn & ARM_CP_RW_BIT) {
1368 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1369 tmp = tcg_temp_new_i32();
1370 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1371 iwmmxt_store_creg(wrd, tmp);
1372 } else {
1373 i = 1;
1374 if (insn & (1 << 8)) {
1375 if (insn & (1 << 22)) { /* WLDRD */
1376 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1377 i = 0;
1378 } else { /* WLDRW wRd */
1379 tmp = gen_ld32(addr, IS_USER(s));
1381 } else {
1382 if (insn & (1 << 22)) { /* WLDRH */
1383 tmp = gen_ld16u(addr, IS_USER(s));
1384 } else { /* WLDRB */
1385 tmp = gen_ld8u(addr, IS_USER(s));
1388 if (i) {
1389 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1390 tcg_temp_free_i32(tmp);
1392 gen_op_iwmmxt_movq_wRn_M0(wrd);
1394 } else {
1395 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1396 tmp = iwmmxt_load_creg(wrd);
1397 gen_st32(tmp, addr, IS_USER(s));
1398 } else {
1399 gen_op_iwmmxt_movq_M0_wRn(wrd);
1400 tmp = tcg_temp_new_i32();
1401 if (insn & (1 << 8)) {
1402 if (insn & (1 << 22)) { /* WSTRD */
1403 tcg_temp_free_i32(tmp);
1404 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1405 } else { /* WSTRW wRd */
1406 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1407 gen_st32(tmp, addr, IS_USER(s));
1409 } else {
1410 if (insn & (1 << 22)) { /* WSTRH */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st16(tmp, addr, IS_USER(s));
1413 } else { /* WSTRB */
1414 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1415 gen_st8(tmp, addr, IS_USER(s));
1420 tcg_temp_free_i32(addr);
1421 return 0;
1424 if ((insn & 0x0f000000) != 0x0e000000)
1425 return 1;
1427 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1428 case 0x000: /* WOR */
1429 wrd = (insn >> 12) & 0xf;
1430 rd0 = (insn >> 0) & 0xf;
1431 rd1 = (insn >> 16) & 0xf;
1432 gen_op_iwmmxt_movq_M0_wRn(rd0);
1433 gen_op_iwmmxt_orq_M0_wRn(rd1);
1434 gen_op_iwmmxt_setpsr_nz();
1435 gen_op_iwmmxt_movq_wRn_M0(wrd);
1436 gen_op_iwmmxt_set_mup();
1437 gen_op_iwmmxt_set_cup();
1438 break;
1439 case 0x011: /* TMCR */
1440 if (insn & 0xf)
1441 return 1;
1442 rd = (insn >> 12) & 0xf;
1443 wrd = (insn >> 16) & 0xf;
1444 switch (wrd) {
1445 case ARM_IWMMXT_wCID:
1446 case ARM_IWMMXT_wCASF:
1447 break;
1448 case ARM_IWMMXT_wCon:
1449 gen_op_iwmmxt_set_cup();
1450 /* Fall through. */
1451 case ARM_IWMMXT_wCSSF:
1452 tmp = iwmmxt_load_creg(wrd);
1453 tmp2 = load_reg(s, rd);
1454 tcg_gen_andc_i32(tmp, tmp, tmp2);
1455 tcg_temp_free_i32(tmp2);
1456 iwmmxt_store_creg(wrd, tmp);
1457 break;
1458 case ARM_IWMMXT_wCGR0:
1459 case ARM_IWMMXT_wCGR1:
1460 case ARM_IWMMXT_wCGR2:
1461 case ARM_IWMMXT_wCGR3:
1462 gen_op_iwmmxt_set_cup();
1463 tmp = load_reg(s, rd);
1464 iwmmxt_store_creg(wrd, tmp);
1465 break;
1466 default:
1467 return 1;
1469 break;
1470 case 0x100: /* WXOR */
1471 wrd = (insn >> 12) & 0xf;
1472 rd0 = (insn >> 0) & 0xf;
1473 rd1 = (insn >> 16) & 0xf;
1474 gen_op_iwmmxt_movq_M0_wRn(rd0);
1475 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1476 gen_op_iwmmxt_setpsr_nz();
1477 gen_op_iwmmxt_movq_wRn_M0(wrd);
1478 gen_op_iwmmxt_set_mup();
1479 gen_op_iwmmxt_set_cup();
1480 break;
1481 case 0x111: /* TMRC */
1482 if (insn & 0xf)
1483 return 1;
1484 rd = (insn >> 12) & 0xf;
1485 wrd = (insn >> 16) & 0xf;
1486 tmp = iwmmxt_load_creg(wrd);
1487 store_reg(s, rd, tmp);
1488 break;
1489 case 0x300: /* WANDN */
1490 wrd = (insn >> 12) & 0xf;
1491 rd0 = (insn >> 0) & 0xf;
1492 rd1 = (insn >> 16) & 0xf;
1493 gen_op_iwmmxt_movq_M0_wRn(rd0);
1494 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1495 gen_op_iwmmxt_andq_M0_wRn(rd1);
1496 gen_op_iwmmxt_setpsr_nz();
1497 gen_op_iwmmxt_movq_wRn_M0(wrd);
1498 gen_op_iwmmxt_set_mup();
1499 gen_op_iwmmxt_set_cup();
1500 break;
1501 case 0x200: /* WAND */
1502 wrd = (insn >> 12) & 0xf;
1503 rd0 = (insn >> 0) & 0xf;
1504 rd1 = (insn >> 16) & 0xf;
1505 gen_op_iwmmxt_movq_M0_wRn(rd0);
1506 gen_op_iwmmxt_andq_M0_wRn(rd1);
1507 gen_op_iwmmxt_setpsr_nz();
1508 gen_op_iwmmxt_movq_wRn_M0(wrd);
1509 gen_op_iwmmxt_set_mup();
1510 gen_op_iwmmxt_set_cup();
1511 break;
1512 case 0x810: case 0xa10: /* WMADD */
1513 wrd = (insn >> 12) & 0xf;
1514 rd0 = (insn >> 0) & 0xf;
1515 rd1 = (insn >> 16) & 0xf;
1516 gen_op_iwmmxt_movq_M0_wRn(rd0);
1517 if (insn & (1 << 21))
1518 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1519 else
1520 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1521 gen_op_iwmmxt_movq_wRn_M0(wrd);
1522 gen_op_iwmmxt_set_mup();
1523 break;
1524 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 16) & 0xf;
1527 rd1 = (insn >> 0) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 switch ((insn >> 22) & 3) {
1530 case 0:
1531 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1532 break;
1533 case 1:
1534 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1535 break;
1536 case 2:
1537 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1538 break;
1539 case 3:
1540 return 1;
1542 gen_op_iwmmxt_movq_wRn_M0(wrd);
1543 gen_op_iwmmxt_set_mup();
1544 gen_op_iwmmxt_set_cup();
1545 break;
1546 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1547 wrd = (insn >> 12) & 0xf;
1548 rd0 = (insn >> 16) & 0xf;
1549 rd1 = (insn >> 0) & 0xf;
1550 gen_op_iwmmxt_movq_M0_wRn(rd0);
1551 switch ((insn >> 22) & 3) {
1552 case 0:
1553 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1554 break;
1555 case 1:
1556 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1557 break;
1558 case 2:
1559 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1560 break;
1561 case 3:
1562 return 1;
1564 gen_op_iwmmxt_movq_wRn_M0(wrd);
1565 gen_op_iwmmxt_set_mup();
1566 gen_op_iwmmxt_set_cup();
1567 break;
1568 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1569 wrd = (insn >> 12) & 0xf;
1570 rd0 = (insn >> 16) & 0xf;
1571 rd1 = (insn >> 0) & 0xf;
1572 gen_op_iwmmxt_movq_M0_wRn(rd0);
1573 if (insn & (1 << 22))
1574 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1575 else
1576 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1577 if (!(insn & (1 << 20)))
1578 gen_op_iwmmxt_addl_M0_wRn(wrd);
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 gen_op_iwmmxt_set_mup();
1581 break;
1582 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1583 wrd = (insn >> 12) & 0xf;
1584 rd0 = (insn >> 16) & 0xf;
1585 rd1 = (insn >> 0) & 0xf;
1586 gen_op_iwmmxt_movq_M0_wRn(rd0);
1587 if (insn & (1 << 21)) {
1588 if (insn & (1 << 20))
1589 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1590 else
1591 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1592 } else {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1598 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 gen_op_iwmmxt_set_mup();
1600 break;
1601 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 21))
1607 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1610 if (!(insn & (1 << 20))) {
1611 iwmmxt_load_reg(cpu_V1, wrd);
1612 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 break;
1617 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 16) & 0xf;
1620 rd1 = (insn >> 0) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 switch ((insn >> 22) & 3) {
1623 case 0:
1624 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1625 break;
1626 case 1:
1627 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1628 break;
1629 case 2:
1630 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1631 break;
1632 case 3:
1633 return 1;
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1640 wrd = (insn >> 12) & 0xf;
1641 rd0 = (insn >> 16) & 0xf;
1642 rd1 = (insn >> 0) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 if (insn & (1 << 22)) {
1645 if (insn & (1 << 20))
1646 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1647 else
1648 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1649 } else {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 16) & 0xf;
1662 rd1 = (insn >> 0) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1665 tcg_gen_andi_i32(tmp, tmp, 7);
1666 iwmmxt_load_reg(cpu_V1, rd1);
1667 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1668 tcg_temp_free_i32(tmp);
1669 gen_op_iwmmxt_movq_wRn_M0(wrd);
1670 gen_op_iwmmxt_set_mup();
1671 break;
1672 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1673 if (((insn >> 6) & 3) == 3)
1674 return 1;
1675 rd = (insn >> 12) & 0xf;
1676 wrd = (insn >> 16) & 0xf;
1677 tmp = load_reg(s, rd);
1678 gen_op_iwmmxt_movq_M0_wRn(wrd);
1679 switch ((insn >> 6) & 3) {
1680 case 0:
1681 tmp2 = tcg_const_i32(0xff);
1682 tmp3 = tcg_const_i32((insn & 7) << 3);
1683 break;
1684 case 1:
1685 tmp2 = tcg_const_i32(0xffff);
1686 tmp3 = tcg_const_i32((insn & 3) << 4);
1687 break;
1688 case 2:
1689 tmp2 = tcg_const_i32(0xffffffff);
1690 tmp3 = tcg_const_i32((insn & 1) << 5);
1691 break;
1692 default:
1693 TCGV_UNUSED(tmp2);
1694 TCGV_UNUSED(tmp3);
1696 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1697 tcg_temp_free(tmp3);
1698 tcg_temp_free(tmp2);
1699 tcg_temp_free_i32(tmp);
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 break;
1703 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1704 rd = (insn >> 12) & 0xf;
1705 wrd = (insn >> 16) & 0xf;
1706 if (rd == 15 || ((insn >> 22) & 3) == 3)
1707 return 1;
1708 gen_op_iwmmxt_movq_M0_wRn(wrd);
1709 tmp = tcg_temp_new_i32();
1710 switch ((insn >> 22) & 3) {
1711 case 0:
1712 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1713 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1714 if (insn & 8) {
1715 tcg_gen_ext8s_i32(tmp, tmp);
1716 } else {
1717 tcg_gen_andi_i32(tmp, tmp, 0xff);
1719 break;
1720 case 1:
1721 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1722 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1723 if (insn & 8) {
1724 tcg_gen_ext16s_i32(tmp, tmp);
1725 } else {
1726 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1728 break;
1729 case 2:
1730 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1731 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1732 break;
1734 store_reg(s, rd, tmp);
1735 break;
1736 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1737 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1738 return 1;
1739 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1740 switch ((insn >> 22) & 3) {
1741 case 0:
1742 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1743 break;
1744 case 1:
1745 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1746 break;
1747 case 2:
1748 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1749 break;
1751 tcg_gen_shli_i32(tmp, tmp, 28);
1752 gen_set_nzcv(tmp);
1753 tcg_temp_free_i32(tmp);
1754 break;
1755 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1756 if (((insn >> 6) & 3) == 3)
1757 return 1;
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 tmp = load_reg(s, rd);
1761 switch ((insn >> 6) & 3) {
1762 case 0:
1763 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1764 break;
1765 case 1:
1766 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1767 break;
1768 case 2:
1769 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1770 break;
1772 tcg_temp_free_i32(tmp);
1773 gen_op_iwmmxt_movq_wRn_M0(wrd);
1774 gen_op_iwmmxt_set_mup();
1775 break;
1776 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1777 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1778 return 1;
1779 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1780 tmp2 = tcg_temp_new_i32();
1781 tcg_gen_mov_i32(tmp2, tmp);
1782 switch ((insn >> 22) & 3) {
1783 case 0:
1784 for (i = 0; i < 7; i ++) {
1785 tcg_gen_shli_i32(tmp2, tmp2, 4);
1786 tcg_gen_and_i32(tmp, tmp, tmp2);
1788 break;
1789 case 1:
1790 for (i = 0; i < 3; i ++) {
1791 tcg_gen_shli_i32(tmp2, tmp2, 8);
1792 tcg_gen_and_i32(tmp, tmp, tmp2);
1794 break;
1795 case 2:
1796 tcg_gen_shli_i32(tmp2, tmp2, 16);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1798 break;
1800 gen_set_nzcv(tmp);
1801 tcg_temp_free_i32(tmp2);
1802 tcg_temp_free_i32(tmp);
1803 break;
1804 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 switch ((insn >> 22) & 3) {
1809 case 0:
1810 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1811 break;
1812 case 1:
1813 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1814 break;
1815 case 2:
1816 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1817 break;
1818 case 3:
1819 return 1;
1821 gen_op_iwmmxt_movq_wRn_M0(wrd);
1822 gen_op_iwmmxt_set_mup();
1823 break;
1824 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1825 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1826 return 1;
1827 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1828 tmp2 = tcg_temp_new_i32();
1829 tcg_gen_mov_i32(tmp2, tmp);
1830 switch ((insn >> 22) & 3) {
1831 case 0:
1832 for (i = 0; i < 7; i ++) {
1833 tcg_gen_shli_i32(tmp2, tmp2, 4);
1834 tcg_gen_or_i32(tmp, tmp, tmp2);
1836 break;
1837 case 1:
1838 for (i = 0; i < 3; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 8);
1840 tcg_gen_or_i32(tmp, tmp, tmp2);
1842 break;
1843 case 2:
1844 tcg_gen_shli_i32(tmp2, tmp2, 16);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1846 break;
1848 gen_set_nzcv(tmp);
1849 tcg_temp_free_i32(tmp2);
1850 tcg_temp_free_i32(tmp);
1851 break;
1852 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1853 rd = (insn >> 12) & 0xf;
1854 rd0 = (insn >> 16) & 0xf;
1855 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1856 return 1;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 tmp = tcg_temp_new_i32();
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1862 break;
1863 case 1:
1864 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1865 break;
1866 case 2:
1867 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1868 break;
1870 store_reg(s, rd, tmp);
1871 break;
1872 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1873 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1874 wrd = (insn >> 12) & 0xf;
1875 rd0 = (insn >> 16) & 0xf;
1876 rd1 = (insn >> 0) & 0xf;
1877 gen_op_iwmmxt_movq_M0_wRn(rd0);
1878 switch ((insn >> 22) & 3) {
1879 case 0:
1880 if (insn & (1 << 21))
1881 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1884 break;
1885 case 1:
1886 if (insn & (1 << 21))
1887 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1890 break;
1891 case 2:
1892 if (insn & (1 << 21))
1893 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1894 else
1895 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1905 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 if (insn & (1 << 21))
1912 gen_op_iwmmxt_unpacklsb_M0();
1913 else
1914 gen_op_iwmmxt_unpacklub_M0();
1915 break;
1916 case 1:
1917 if (insn & (1 << 21))
1918 gen_op_iwmmxt_unpacklsw_M0();
1919 else
1920 gen_op_iwmmxt_unpackluw_M0();
1921 break;
1922 case 2:
1923 if (insn & (1 << 21))
1924 gen_op_iwmmxt_unpacklsl_M0();
1925 else
1926 gen_op_iwmmxt_unpacklul_M0();
1927 break;
1928 case 3:
1929 return 1;
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 gen_op_iwmmxt_set_cup();
1934 break;
1935 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1936 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1937 wrd = (insn >> 12) & 0xf;
1938 rd0 = (insn >> 16) & 0xf;
1939 gen_op_iwmmxt_movq_M0_wRn(rd0);
1940 switch ((insn >> 22) & 3) {
1941 case 0:
1942 if (insn & (1 << 21))
1943 gen_op_iwmmxt_unpackhsb_M0();
1944 else
1945 gen_op_iwmmxt_unpackhub_M0();
1946 break;
1947 case 1:
1948 if (insn & (1 << 21))
1949 gen_op_iwmmxt_unpackhsw_M0();
1950 else
1951 gen_op_iwmmxt_unpackhuw_M0();
1952 break;
1953 case 2:
1954 if (insn & (1 << 21))
1955 gen_op_iwmmxt_unpackhsl_M0();
1956 else
1957 gen_op_iwmmxt_unpackhul_M0();
1958 break;
1959 case 3:
1960 return 1;
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 gen_op_iwmmxt_set_cup();
1965 break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tmp = tcg_temp_new_i32();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 tcg_temp_free_i32(tmp);
1976 return 1;
1978 switch ((insn >> 22) & 3) {
1979 case 1:
1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
1984 break;
1985 case 3:
1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
1987 break;
1989 tcg_temp_free_i32(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 tmp = tcg_temp_new_i32();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 tcg_temp_free_i32(tmp);
2004 return 1;
2006 switch ((insn >> 22) & 3) {
2007 case 1:
2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
2009 break;
2010 case 2:
2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
2012 break;
2013 case 3:
2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
2015 break;
2017 tcg_temp_free_i32(tmp);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 tmp = tcg_temp_new_i32();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 tcg_temp_free_i32(tmp);
2032 return 1;
2034 switch ((insn >> 22) & 3) {
2035 case 1:
2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
2037 break;
2038 case 2:
2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
2040 break;
2041 case 3:
2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
2043 break;
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 tmp = tcg_temp_new_i32();
2058 switch ((insn >> 22) & 3) {
2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 tcg_temp_free_i32(tmp);
2062 return 1;
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 tcg_temp_free_i32(tmp);
2069 return 1;
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 tcg_temp_free_i32(tmp);
2076 return 1;
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
2079 break;
2081 tcg_temp_free_i32(tmp);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 tcg_temp_free_i32(tmp2);
2323 tcg_temp_free_i32(tmp);
2324 return 1;
2326 tcg_temp_free_i32(tmp2);
2327 tcg_temp_free_i32(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2335 return 0;
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2351 if (acc != 0)
2352 return 1;
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2389 if (acc != 0)
2390 return 1;
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2402 return 0;
2405 return 1;
2408 /* Disassemble a system coprocessor instruction. Return nonzero if the
2409 instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = tcg_temp_new_i32();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 tcg_temp_free_i32(tmp);
2438 return 0;
2441 static int cp15_user_ok(uint32_t insn)
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2458 return 0;
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2488 store_reg(s, rd, tmp);
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 tcg_temp_free_i32(tmp);
2504 return 0;
2507 return 1;
2510 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2511 instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2537 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2538 * instructions rather than a separate instruction.
2540 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2541 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2542 * In v7, this must be a NOP.
2544 if (!arm_feature(env, ARM_FEATURE_V7)) {
2545 /* Wait for interrupt. */
2546 gen_set_pc_im(s->pc);
2547 s->is_jmp = DISAS_WFI;
2549 return 0;
2552 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2553 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2554 * so this is slightly over-broad.
2556 if (!arm_feature(env, ARM_FEATURE_V6)) {
2557 /* Wait for interrupt. */
2558 gen_set_pc_im(s->pc);
2559 s->is_jmp = DISAS_WFI;
2560 return 0;
2562 /* Otherwise fall through to handle via helper function.
2563 * In particular, on v7 and some v6 cores this is one of
2564 * the VA-PA registers.
2568 rd = (insn >> 12) & 0xf;
2570 if (cp15_tls_load_store(env, s, insn, rd))
2571 return 0;
2573 tmp2 = tcg_const_i32(insn);
2574 if (insn & ARM_CP_RW_BIT) {
2575 tmp = tcg_temp_new_i32();
2576 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2577 /* If the destination register is r15 then set the condition codes. */
2578 if (rd != 15)
2579 store_reg(s, rd, tmp);
2580 else
2581 tcg_temp_free_i32(tmp);
2582 } else {
2583 tmp = load_reg(s, rd);
2584 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2585 tcg_temp_free_i32(tmp);
2586 /* Normally we would always end the TB here, but Linux
2587 * arch/arm/mach-pxa/sleep.S expects two instructions following
2588 * an MMU enable to execute from cache. Imitate this behaviour. */
2589 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2590 (insn & 0x0fff0fff) != 0x0e010f10)
2591 gen_lookup_tb(s);
2593 tcg_temp_free_i32(tmp2);
2594 return 0;
2597 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2598 #define VFP_SREG(insn, bigbit, smallbit) \
2599 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2600 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2601 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2602 reg = (((insn) >> (bigbit)) & 0x0f) \
2603 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2604 } else { \
2605 if (insn & (1 << (smallbit))) \
2606 return 1; \
2607 reg = ((insn) >> (bigbit)) & 0x0f; \
2608 }} while (0)
2610 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2611 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2612 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2613 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2614 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2615 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
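/* Editorial worked example (not part of the original source): with
 * bigbit == 12 and smallbit == 22, an insn whose bits [15:12] are 0x5
 * with bit 22 set names S register (0x5 << 1) | 1 == s11, while the same
 * fields decoded as a VFP3 double name D register 0x5 | 0x10 == d21.
 * The "small" bit is the low bit of an S register number but the high
 * bit (selecting D16-D31) of a VFP3 D register number. */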
2617 /* Move between integer and VFP cores. */
2618 static TCGv gen_vfp_mrs(void)
2620 TCGv tmp = tcg_temp_new_i32();
2621 tcg_gen_mov_i32(tmp, cpu_F0s);
2622 return tmp;
2625 static void gen_vfp_msr(TCGv tmp)
2627 tcg_gen_mov_i32(cpu_F0s, tmp);
2628 tcg_temp_free_i32(tmp);
2631 static void gen_neon_dup_u8(TCGv var, int shift)
2633 TCGv tmp = tcg_temp_new_i32();
2634 if (shift)
2635 tcg_gen_shri_i32(var, var, shift);
2636 tcg_gen_ext8u_i32(var, var);
2637 tcg_gen_shli_i32(tmp, var, 8);
2638 tcg_gen_or_i32(var, var, tmp);
2639 tcg_gen_shli_i32(tmp, var, 16);
2640 tcg_gen_or_i32(var, var, tmp);
2641 tcg_temp_free_i32(tmp);
2644 static void gen_neon_dup_low16(TCGv var)
2646 TCGv tmp = tcg_temp_new_i32();
2647 tcg_gen_ext16u_i32(var, var);
2648 tcg_gen_shli_i32(tmp, var, 16);
2649 tcg_gen_or_i32(var, var, tmp);
2650 tcg_temp_free_i32(tmp);
2653 static void gen_neon_dup_high16(TCGv var)
2655 TCGv tmp = tcg_temp_new_i32();
2656 tcg_gen_andi_i32(var, var, 0xffff0000);
2657 tcg_gen_shri_i32(tmp, var, 16);
2658 tcg_gen_or_i32(var, var, tmp);
2659 tcg_temp_free_i32(tmp);
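/* Editorial note: the dup helpers above broadcast one element across a
 * 32-bit value; e.g. gen_neon_dup_u8 with shift == 8 turns 0x0000cd00
 * into 0xcdcdcdcd, and the 16-bit variants replicate the low or high
 * halfword. gen_load_and_replicate below relies on them for the
 * VLD "to all lanes" forms. */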
2662 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2664 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2665 TCGv tmp;
2666 switch (size) {
2667 case 0:
2668 tmp = gen_ld8u(addr, IS_USER(s));
2669 gen_neon_dup_u8(tmp, 0);
2670 break;
2671 case 1:
2672 tmp = gen_ld16u(addr, IS_USER(s));
2673 gen_neon_dup_low16(tmp);
2674 break;
2675 case 2:
2676 tmp = gen_ld32(addr, IS_USER(s));
2677 break;
2678 default: /* Avoid compiler warnings. */
2679 abort();
2681 return tmp;
2684 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2685 (i.e. an undefined instruction). */
2686 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2688 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2689 int dp, veclen;
2690 TCGv addr;
2691 TCGv tmp;
2692 TCGv tmp2;
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2697 if (!s->vfp_enabled) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2760 break;
2761 case 2:
2762 break;
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = tcg_temp_new_i32();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 tcg_temp_free_i32(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 tcg_temp_free_i32(tmp2);
2793 break;
2794 case 2:
2795 break;
2797 neon_store_reg(rn, pass, tmp);
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all ID registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = tcg_temp_new_i32();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 tcg_temp_free_i32(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 tcg_temp_free_i32(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 /* TODO: VFP subarchitecture support.
2884 * For now, keep the EN bit only */
2885 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2915 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2921 if (op == 15 &&
2922 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2923 /* VCVT from int is always from S reg regardless of dp bit.
2924 * VCVT with immediate frac_bits has same format as SREG_M
2926 rm = VFP_SREG_M(insn);
2927 } else {
2928 VFP_DREG_M(rm, insn);
2930 } else {
2931 rn = VFP_SREG_N(insn);
2932 if (op == 15 && rn == 15) {
2933 /* Double precision destination. */
2934 VFP_DREG_D(rd, insn);
2935 } else {
2936 rd = VFP_SREG_D(insn);
2938 /* NB that we implicitly rely on the encoding for the frac_bits
2939 * in VCVT of fixed to float being the same as that of an SREG_M
2941 rm = VFP_SREG_M(insn);
2944 veclen = s->vec_len;
2945 if (op == 15 && rn > 3)
2946 veclen = 0;
2948 /* Shut up compiler warnings. */
2949 delta_m = 0;
2950 delta_d = 0;
2951 bank_mask = 0;
2953 if (veclen > 0) {
2954 if (dp)
2955 bank_mask = 0xc;
2956 else
2957 bank_mask = 0x18;
2959 /* Figure out what type of vector operation this is. */
2960 if ((rd & bank_mask) == 0) {
2961 /* scalar */
2962 veclen = 0;
2963 } else {
2964 if (dp)
2965 delta_d = (s->vec_stride >> 1) + 1;
2966 else
2967 delta_d = s->vec_stride + 1;
2969 if ((rm & bank_mask) == 0) {
2970 /* mixed scalar/vector */
2971 delta_m = 0;
2972 } else {
2973 /* vector */
2974 delta_m = delta_d;
2979 /* Load the initial operands. */
2980 if (op == 15) {
2981 switch (rn) {
2982 case 16:
2983 case 17:
2984 /* Integer source */
2985 gen_mov_F0_vreg(0, rm);
2986 break;
2987 case 8:
2988 case 9:
2989 /* Compare */
2990 gen_mov_F0_vreg(dp, rd);
2991 gen_mov_F1_vreg(dp, rm);
2992 break;
2993 case 10:
2994 case 11:
2995 /* Compare with zero */
2996 gen_mov_F0_vreg(dp, rd);
2997 gen_vfp_F1_ld0(dp);
2998 break;
2999 case 20:
3000 case 21:
3001 case 22:
3002 case 23:
3003 case 28:
3004 case 29:
3005 case 30:
3006 case 31:
3007 /* Source and destination the same. */
3008 gen_mov_F0_vreg(dp, rd);
3009 break;
3010 default:
3011 /* One source operand. */
3012 gen_mov_F0_vreg(dp, rm);
3013 break;
3015 } else {
3016 /* Two source operands. */
3017 gen_mov_F0_vreg(dp, rn);
3018 gen_mov_F1_vreg(dp, rm);
3021 for (;;) {
3022 /* Perform the calculation. */
3023 switch (op) {
3024 case 0: /* mac: fd + (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_add(dp);
3028 break;
3029 case 1: /* nmac: fd - (fn * fm) */
3030 gen_vfp_mul(dp);
3031 gen_vfp_neg(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_add(dp);
3034 break;
3035 case 2: /* msc: -fd + (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_mov_F1_vreg(dp, rd);
3038 gen_vfp_sub(dp);
3039 break;
3040 case 3: /* nmsc: -fd - (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_vfp_neg(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_sub(dp);
3045 break;
3046 case 4: /* mul: fn * fm */
3047 gen_vfp_mul(dp);
3048 break;
3049 case 5: /* nmul: -(fn * fm) */
3050 gen_vfp_mul(dp);
3051 gen_vfp_neg(dp);
3052 break;
3053 case 6: /* add: fn + fm */
3054 gen_vfp_add(dp);
3055 break;
3056 case 7: /* sub: fn - fm */
3057 gen_vfp_sub(dp);
3058 break;
3059 case 8: /* div: fn / fm */
3060 gen_vfp_div(dp);
3061 break;
3062 case 14: /* fconst */
3063 if (!arm_feature(env, ARM_FEATURE_VFP3))
3064 return 1;
3066 n = (insn << 12) & 0x80000000;
3067 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3068 if (dp) {
3069 if (i & 0x40)
3070 i |= 0x3f80;
3071 else
3072 i |= 0x4000;
3073 n |= i << 16;
3074 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3075 } else {
3076 if (i & 0x40)
3077 i |= 0x780;
3078 else
3079 i |= 0x800;
3080 n |= i << 19;
3081 tcg_gen_movi_i32(cpu_F0s, n);
3083 break;
3084 case 15: /* extension space */
3085 switch (rn) {
3086 case 0: /* cpy */
3087 /* no-op */
3088 break;
3089 case 1: /* abs */
3090 gen_vfp_abs(dp);
3091 break;
3092 case 2: /* neg */
3093 gen_vfp_neg(dp);
3094 break;
3095 case 3: /* sqrt */
3096 gen_vfp_sqrt(dp);
3097 break;
3098 case 4: /* vcvtb.f32.f16 */
3099 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3100 return 1;
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3108 return 1;
3109 tmp = gen_vfp_mrs();
3110 tcg_gen_shri_i32(tmp, tmp, 16);
3111 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3112 tcg_temp_free_i32(tmp);
3113 break;
3114 case 6: /* vcvtb.f16.f32 */
3115 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3116 return 1;
3117 tmp = tcg_temp_new_i32();
3118 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3119 gen_mov_F0_vreg(0, rd);
3120 tmp2 = gen_vfp_mrs();
3121 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3122 tcg_gen_or_i32(tmp, tmp, tmp2);
3123 tcg_temp_free_i32(tmp2);
3124 gen_vfp_msr(tmp);
3125 break;
3126 case 7: /* vcvtt.f16.f32 */
3127 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3128 return 1;
3129 tmp = tcg_temp_new_i32();
3130 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3131 tcg_gen_shli_i32(tmp, tmp, 16);
3132 gen_mov_F0_vreg(0, rd);
3133 tmp2 = gen_vfp_mrs();
3134 tcg_gen_ext16u_i32(tmp2, tmp2);
3135 tcg_gen_or_i32(tmp, tmp, tmp2);
3136 tcg_temp_free_i32(tmp2);
3137 gen_vfp_msr(tmp);
3138 break;
3139 case 8: /* cmp */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 9: /* cmpe */
3143 gen_vfp_cmpe(dp);
3144 break;
3145 case 10: /* cmpz */
3146 gen_vfp_cmp(dp);
3147 break;
3148 case 11: /* cmpez */
3149 gen_vfp_F1_ld0(dp);
3150 gen_vfp_cmpe(dp);
3151 break;
3152 case 15: /* single<->double conversion */
3153 if (dp)
3154 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3155 else
3156 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3157 break;
3158 case 16: /* fuito */
3159 gen_vfp_uito(dp);
3160 break;
3161 case 17: /* fsito */
3162 gen_vfp_sito(dp);
3163 break;
3164 case 20: /* fshto */
3165 if (!arm_feature(env, ARM_FEATURE_VFP3))
3166 return 1;
3167 gen_vfp_shto(dp, 16 - rm);
3168 break;
3169 case 21: /* fslto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
3172 gen_vfp_slto(dp, 32 - rm);
3173 break;
3174 case 22: /* fuhto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
3177 gen_vfp_uhto(dp, 16 - rm);
3178 break;
3179 case 23: /* fulto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_ulto(dp, 32 - rm);
3183 break;
3184 case 24: /* ftoui */
3185 gen_vfp_toui(dp);
3186 break;
3187 case 25: /* ftouiz */
3188 gen_vfp_touiz(dp);
3189 break;
3190 case 26: /* ftosi */
3191 gen_vfp_tosi(dp);
3192 break;
3193 case 27: /* ftosiz */
3194 gen_vfp_tosiz(dp);
3195 break;
3196 case 28: /* ftosh */
3197 if (!arm_feature(env, ARM_FEATURE_VFP3))
3198 return 1;
3199 gen_vfp_tosh(dp, 16 - rm);
3200 break;
3201 case 29: /* ftosl */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
3204 gen_vfp_tosl(dp, 32 - rm);
3205 break;
3206 case 30: /* ftouh */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
3209 gen_vfp_touh(dp, 16 - rm);
3210 break;
3211 case 31: /* ftoul */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
3214 gen_vfp_toul(dp, 32 - rm);
3215 break;
3216 default: /* undefined */
3217 printf ("rn:%d\n", rn);
3218 return 1;
3220 break;
3221 default: /* undefined */
3222 printf ("op:%d\n", op);
3223 return 1;
3226 /* Write back the result. */
3227 if (op == 15 && (rn >= 8 && rn <= 11))
3228 ; /* Comparison, do nothing. */
3229 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3230 /* VCVT double to int: always integer result. */
3231 gen_mov_vreg_F0(0, rd);
3232 else if (op == 15 && rn == 15)
3233 /* conversion */
3234 gen_mov_vreg_F0(!dp, rd);
3235 else
3236 gen_mov_vreg_F0(dp, rd);
3238 /* break out of the loop if we have finished */
3239 if (veclen == 0)
3240 break;
3242 if (op == 15 && delta_m == 0) {
3243 /* single source one-many */
3244 while (veclen--) {
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247 gen_mov_vreg_F0(dp, rd);
3249 break;
3251 /* Set up the next operands. */
3252 veclen--;
3253 rd = ((rd + delta_d) & (bank_mask - 1))
3254 | (rd & bank_mask);
3256 if (op == 15) {
3257 /* One source operand. */
3258 rm = ((rm + delta_m) & (bank_mask - 1))
3259 | (rm & bank_mask);
3260 gen_mov_F0_vreg(dp, rm);
3261 } else {
3262 /* Two source operands. */
3263 rn = ((rn + delta_d) & (bank_mask - 1))
3264 | (rn & bank_mask);
3265 gen_mov_F0_vreg(dp, rn);
3266 if (delta_m) {
3267 rm = ((rm + delta_m) & (bank_mask - 1))
3268 | (rm & bank_mask);
3269 gen_mov_F1_vreg(dp, rm);
3274 break;
3275 case 0xc:
3276 case 0xd:
3277 if ((insn & 0x03e00000) == 0x00400000) {
3278 /* two-register transfer */
3279 rn = (insn >> 16) & 0xf;
3280 rd = (insn >> 12) & 0xf;
3281 if (dp) {
3282 VFP_DREG_M(rm, insn);
3283 } else {
3284 rm = VFP_SREG_M(insn);
3287 if (insn & ARM_CP_RW_BIT) {
3288 /* vfp->arm */
3289 if (dp) {
3290 gen_mov_F0_vreg(0, rm * 2);
3291 tmp = gen_vfp_mrs();
3292 store_reg(s, rd, tmp);
3293 gen_mov_F0_vreg(0, rm * 2 + 1);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rn, tmp);
3296 } else {
3297 gen_mov_F0_vreg(0, rm);
3298 tmp = gen_vfp_mrs();
3299 store_reg(s, rd, tmp);
3300 gen_mov_F0_vreg(0, rm + 1);
3301 tmp = gen_vfp_mrs();
3302 store_reg(s, rn, tmp);
3304 } else {
3305 /* arm->vfp */
3306 if (dp) {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rm * 2);
3310 tmp = load_reg(s, rn);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2 + 1);
3313 } else {
3314 tmp = load_reg(s, rd);
3315 gen_vfp_msr(tmp);
3316 gen_mov_vreg_F0(0, rm);
3317 tmp = load_reg(s, rn);
3318 gen_vfp_msr(tmp);
3319 gen_mov_vreg_F0(0, rm + 1);
3322 } else {
3323 /* Load/store */
3324 rn = (insn >> 16) & 0xf;
3325 if (dp)
3326 VFP_DREG_D(rd, insn);
3327 else
3328 rd = VFP_SREG_D(insn);
3329 if (s->thumb && rn == 15) {
3330 addr = tcg_temp_new_i32();
3331 tcg_gen_movi_i32(addr, s->pc & ~2);
3332 } else {
3333 addr = load_reg(s, rn);
3335 if ((insn & 0x01200000) == 0x01000000) {
3336 /* Single load/store */
3337 offset = (insn & 0xff) << 2;
3338 if ((insn & (1 << 23)) == 0)
3339 offset = -offset;
3340 tcg_gen_addi_i32(addr, addr, offset);
3341 if (insn & (1 << 20)) {
3342 gen_vfp_ld(s, dp, addr);
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_st(s, dp, addr);
3348 tcg_temp_free_i32(addr);
3349 } else {
3350 /* load/store multiple */
3351 if (dp)
3352 n = (insn >> 1) & 0x7f;
3353 else
3354 n = insn & 0xff;
3356 if (insn & (1 << 24)) /* pre-decrement */
3357 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3359 if (dp)
3360 offset = 8;
3361 else
3362 offset = 4;
3363 for (i = 0; i < n; i++) {
3364 if (insn & ARM_CP_RW_BIT) {
3365 /* load */
3366 gen_vfp_ld(s, dp, addr);
3367 gen_mov_vreg_F0(dp, rd + i);
3368 } else {
3369 /* store */
3370 gen_mov_F0_vreg(dp, rd + i);
3371 gen_vfp_st(s, dp, addr);
3373 tcg_gen_addi_i32(addr, addr, offset);
3375 if (insn & (1 << 21)) {
3376 /* writeback */
3377 if (insn & (1 << 24))
3378 offset = -offset * n;
3379 else if (dp && (insn & 1))
3380 offset = 4;
3381 else
3382 offset = 0;
3384 if (offset != 0)
3385 tcg_gen_addi_i32(addr, addr, offset);
3386 store_reg(s, rn, addr);
3387 } else {
3388 tcg_temp_free_i32(addr);
3392 break;
3393 default:
3394 /* Should never happen. */
3395 return 1;
3397 return 0;
3400 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3402 TranslationBlock *tb;
3404 tb = s->tb;
3405 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3406 tcg_gen_goto_tb(n);
3407 gen_set_pc_im(dest);
3408 tcg_gen_exit_tb((tcg_target_long)tb + n);
3409 } else {
3410 gen_set_pc_im(dest);
3411 tcg_gen_exit_tb(0);
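/* Editorial note: gen_goto_tb above only uses direct tcg_gen_goto_tb
 * chaining when the destination stays on the same guest page as the
 * current TB; otherwise it just updates the PC and exits with 0 so the
 * main loop looks the target up again. */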
3415 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3417 if (unlikely(s->singlestep_enabled)) {
3418 /* An indirect jump so that we still trigger the debug exception. */
3419 if (s->thumb)
3420 dest |= 1;
3421 gen_bx_im(s, dest);
3422 } else {
3423 gen_goto_tb(s, 0, dest);
3424 s->is_jmp = DISAS_TB_JUMP;
3428 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3430 if (x)
3431 tcg_gen_sari_i32(t0, t0, 16);
3432 else
3433 gen_sxth(t0);
3434 if (y)
3435 tcg_gen_sari_i32(t1, t1, 16);
3436 else
3437 gen_sxth(t1);
3438 tcg_gen_mul_i32(t0, t0, t1);
3441 /* Return the mask of PSR bits set by an MSR instruction. */
3442 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3443 uint32_t mask;
3445 mask = 0;
3446 if (flags & (1 << 0))
3447 mask |= 0xff;
3448 if (flags & (1 << 1))
3449 mask |= 0xff00;
3450 if (flags & (1 << 2))
3451 mask |= 0xff0000;
3452 if (flags & (1 << 3))
3453 mask |= 0xff000000;
3455 /* Mask out undefined bits. */
3456 mask &= ~CPSR_RESERVED;
3457 if (!arm_feature(env, ARM_FEATURE_V4T))
3458 mask &= ~CPSR_T;
3459 if (!arm_feature(env, ARM_FEATURE_V5))
3460 mask &= ~CPSR_Q; /* V5TE in reality */
3461 if (!arm_feature(env, ARM_FEATURE_V6))
3462 mask &= ~(CPSR_E | CPSR_GE);
3463 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3464 mask &= ~CPSR_IT;
3465 /* Mask out execution state bits. */
3466 if (!spsr)
3467 mask &= ~CPSR_EXEC;
3468 /* Mask out privileged bits. */
3469 if (IS_USER(s))
3470 mask &= CPSR_USER;
3471 return mask;
3474 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3475 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3477 TCGv tmp;
3478 if (spsr) {
3479 /* ??? This is also undefined in system mode. */
3480 if (IS_USER(s))
3481 return 1;
3483 tmp = load_cpu_field(spsr);
3484 tcg_gen_andi_i32(tmp, tmp, ~mask);
3485 tcg_gen_andi_i32(t0, t0, mask);
3486 tcg_gen_or_i32(tmp, tmp, t0);
3487 store_cpu_field(tmp, spsr);
3488 } else {
3489 gen_set_cpsr(t0, mask);
3491 tcg_temp_free_i32(t0);
3492 gen_lookup_tb(s);
3493 return 0;
3496 /* Returns nonzero if access to the PSR is not permitted. */
3497 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3499 TCGv tmp;
3500 tmp = tcg_temp_new_i32();
3501 tcg_gen_movi_i32(tmp, val);
3502 return gen_set_psr(s, mask, spsr, tmp);
3505 /* Generate an old-style exception return. Marks pc as dead. */
3506 static void gen_exception_return(DisasContext *s, TCGv pc)
3508 TCGv tmp;
3509 store_reg(s, 15, pc);
3510 tmp = load_cpu_field(spsr);
3511 gen_set_cpsr(tmp, 0xffffffff);
3512 tcg_temp_free_i32(tmp);
3513 s->is_jmp = DISAS_UPDATE;
3516 /* Generate a v6 exception return. Marks both values as dead. */
3517 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3519 gen_set_cpsr(cpsr, 0xffffffff);
3520 tcg_temp_free_i32(cpsr);
3521 store_reg(s, 15, pc);
3522 s->is_jmp = DISAS_UPDATE;
3525 static inline void
3526 gen_set_condexec (DisasContext *s)
3528 if (s->condexec_mask) {
3529 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3530 TCGv tmp = tcg_temp_new_i32();
3531 tcg_gen_movi_i32(tmp, val);
3532 store_cpu_field(tmp, condexec_bits);
3536 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3538 gen_set_condexec(s);
3539 gen_set_pc_im(s->pc - offset);
3540 gen_exception(excp);
3541 s->is_jmp = DISAS_JUMP;
3544 static void gen_nop_hint(DisasContext *s, int val)
3546 switch (val) {
3547 case 3: /* wfi */
3548 gen_set_pc_im(s->pc);
3549 s->is_jmp = DISAS_WFI;
3550 break;
3551 case 2: /* wfe */
3552 case 4: /* sev */
3553 /* TODO: Implement SEV and WFE. May help SMP performance. */
3554 default: /* nop */
3555 break;
3559 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
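/* Editorial note: CPU_V001 expands to the argument triple
 * "cpu_V0, cpu_V0, cpu_V1", i.e. a destructive dest/src1/src2 form used
 * by the widening add/sub helpers further down. */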
3561 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3563 switch (size) {
3564 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3565 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3566 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3567 default: abort();
3571 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3573 switch (size) {
3574 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3575 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3576 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3577 default: return;
3581 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3582 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3583 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3584 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3585 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3587 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3588 switch ((size << 1) | u) { \
3589 case 0: \
3590 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3591 break; \
3592 case 1: \
3593 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3594 break; \
3595 case 2: \
3596 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3597 break; \
3598 case 3: \
3599 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3600 break; \
3601 case 4: \
3602 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3603 break; \
3604 case 5: \
3605 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3606 break; \
3607 default: return 1; \
3608 }} while (0)
3610 #define GEN_NEON_INTEGER_OP(name) do { \
3611 switch ((size << 1) | u) { \
3612 case 0: \
3613 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3614 break; \
3615 case 1: \
3616 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3617 break; \
3618 case 2: \
3619 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3620 break; \
3621 case 3: \
3622 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3623 break; \
3624 case 4: \
3625 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3626 break; \
3627 case 5: \
3628 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3629 break; \
3630 default: return 1; \
3631 }} while (0)
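/* Editorial note: in both macros above the switch index is
 * (size << 1) | u, so size == 1 (16-bit elements) with u == 0 selects
 * case 2, the _s16 helper, and u == 1 selects case 3, the _u16 helper;
 * size == 3 matches no case and falls through to "return 1" (UNDEF). */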
3633 static TCGv neon_load_scratch(int scratch)
3635 TCGv tmp = tcg_temp_new_i32();
3636 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3637 return tmp;
3640 static void neon_store_scratch(int scratch, TCGv var)
3642 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3643 tcg_temp_free_i32(var);
3646 static inline TCGv neon_get_scalar(int size, int reg)
3648 TCGv tmp;
3649 if (size == 1) {
3650 tmp = neon_load_reg(reg & 7, reg >> 4);
3651 if (reg & 8) {
3652 gen_neon_dup_high16(tmp);
3653 } else {
3654 gen_neon_dup_low16(tmp);
3656 } else {
3657 tmp = neon_load_reg(reg & 15, reg >> 4);
3659 return tmp;
3662 static int gen_neon_unzip(int rd, int rm, int size, int q)
3664 TCGv tmp, tmp2;
3665 if (!q && size == 2) {
3666 return 1;
3668 tmp = tcg_const_i32(rd);
3669 tmp2 = tcg_const_i32(rm);
3670 if (q) {
3671 switch (size) {
3672 case 0:
3673 gen_helper_neon_qunzip8(tmp, tmp2);
3674 break;
3675 case 1:
3676 gen_helper_neon_qunzip16(tmp, tmp2);
3677 break;
3678 case 2:
3679 gen_helper_neon_qunzip32(tmp, tmp2);
3680 break;
3681 default:
3682 abort();
3684 } else {
3685 switch (size) {
3686 case 0:
3687 gen_helper_neon_unzip8(tmp, tmp2);
3688 break;
3689 case 1:
3690 gen_helper_neon_unzip16(tmp, tmp2);
3691 break;
3692 default:
3693 abort();
3696 tcg_temp_free_i32(tmp);
3697 tcg_temp_free_i32(tmp2);
3698 return 0;
3701 static int gen_neon_zip(int rd, int rm, int size, int q)
3703 TCGv tmp, tmp2;
3704 if (!q && size == 2) {
3705 return 1;
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
3712 gen_helper_neon_qzip8(tmp, tmp2);
3713 break;
3714 case 1:
3715 gen_helper_neon_qzip16(tmp, tmp2);
3716 break;
3717 case 2:
3718 gen_helper_neon_qzip32(tmp, tmp2);
3719 break;
3720 default:
3721 abort();
3723 } else {
3724 switch (size) {
3725 case 0:
3726 gen_helper_neon_zip8(tmp, tmp2);
3727 break;
3728 case 1:
3729 gen_helper_neon_zip16(tmp, tmp2);
3730 break;
3731 default:
3732 abort();
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
3740 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3742 TCGv rd, tmp;
3744 rd = tcg_temp_new_i32();
3745 tmp = tcg_temp_new_i32();
3747 tcg_gen_shli_i32(rd, t0, 8);
3748 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3749 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3750 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 8);
3753 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3754 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3755 tcg_gen_or_i32(t1, t1, tmp);
3756 tcg_gen_mov_i32(t0, rd);
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(rd);
3762 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3764 TCGv rd, tmp;
3766 rd = tcg_temp_new_i32();
3767 tmp = tcg_temp_new_i32();
3769 tcg_gen_shli_i32(rd, t0, 16);
3770 tcg_gen_andi_i32(tmp, t1, 0xffff);
3771 tcg_gen_or_i32(rd, rd, tmp);
3772 tcg_gen_shri_i32(t1, t1, 16);
3773 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3774 tcg_gen_or_i32(t1, t1, tmp);
3775 tcg_gen_mov_i32(t0, rd);
3777 tcg_temp_free_i32(tmp);
3778 tcg_temp_free_i32(rd);
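/* Editorial note (inferred from the decode in disas_neon_ls_insn below):
 * the following table is indexed by the "load/store all elements" op
 * field, insn bits [11:8]; nregs is the number of D registers accessed,
 * interleave is the element interleave factor, and spacing is the
 * register spacing, from which the address stride is computed. */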
3782 static struct {
3783 int nregs;
3784 int interleave;
3785 int spacing;
3786 } neon_ls_element_type[11] = {
3787 {4, 4, 1},
3788 {4, 4, 2},
3789 {4, 1, 1},
3790 {4, 2, 1},
3791 {3, 3, 1},
3792 {3, 3, 2},
3793 {3, 1, 1},
3794 {1, 1, 1},
3795 {2, 2, 1},
3796 {2, 2, 2},
3797 {2, 1, 1}
3800 /* Translate a NEON load/store element instruction. Return nonzero if the
3801 instruction is invalid. */
3802 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3804 int rd, rn, rm;
3805 int op;
3806 int nregs;
3807 int interleave;
3808 int spacing;
3809 int stride;
3810 int size;
3811 int reg;
3812 int pass;
3813 int load;
3814 int shift;
3815 int n;
3816 TCGv addr;
3817 TCGv tmp;
3818 TCGv tmp2;
3819 TCGv_i64 tmp64;
3821 if (!s->vfp_enabled)
3822 return 1;
3823 VFP_DREG_D(rd, insn);
3824 rn = (insn >> 16) & 0xf;
3825 rm = insn & 0xf;
3826 load = (insn & (1 << 21)) != 0;
3827 if ((insn & (1 << 23)) == 0) {
3828 /* Load store all elements. */
3829 op = (insn >> 8) & 0xf;
3830 size = (insn >> 6) & 3;
3831 if (op > 10)
3832 return 1;
3833 nregs = neon_ls_element_type[op].nregs;
3834 interleave = neon_ls_element_type[op].interleave;
3835 spacing = neon_ls_element_type[op].spacing;
3836 if (size == 3 && (interleave | spacing) != 1)
3837 return 1;
3838 addr = tcg_temp_new_i32();
3839 load_reg_var(s, addr, rn);
3840 stride = (1 << size) * interleave;
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3843 load_reg_var(s, addr, rn);
3844 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3845 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3846 load_reg_var(s, addr, rn);
3847 tcg_gen_addi_i32(addr, addr, 1 << size);
3849 if (size == 3) {
3850 if (load) {
3851 tmp64 = gen_ld64(addr, IS_USER(s));
3852 neon_store_reg64(tmp64, rd);
3853 tcg_temp_free_i64(tmp64);
3854 } else {
3855 tmp64 = tcg_temp_new_i64();
3856 neon_load_reg64(tmp64, rd);
3857 gen_st64(tmp64, addr, IS_USER(s));
3859 tcg_gen_addi_i32(addr, addr, stride);
3860 } else {
3861 for (pass = 0; pass < 2; pass++) {
3862 if (size == 2) {
3863 if (load) {
3864 tmp = gen_ld32(addr, IS_USER(s));
3865 neon_store_reg(rd, pass, tmp);
3866 } else {
3867 tmp = neon_load_reg(rd, pass);
3868 gen_st32(tmp, addr, IS_USER(s));
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 } else if (size == 1) {
3872 if (load) {
3873 tmp = gen_ld16u(addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 tmp2 = gen_ld16u(addr, IS_USER(s));
3876 tcg_gen_addi_i32(addr, addr, stride);
3877 tcg_gen_shli_i32(tmp2, tmp2, 16);
3878 tcg_gen_or_i32(tmp, tmp, tmp2);
3879 tcg_temp_free_i32(tmp2);
3880 neon_store_reg(rd, pass, tmp);
3881 } else {
3882 tmp = neon_load_reg(rd, pass);
3883 tmp2 = tcg_temp_new_i32();
3884 tcg_gen_shri_i32(tmp2, tmp, 16);
3885 gen_st16(tmp, addr, IS_USER(s));
3886 tcg_gen_addi_i32(addr, addr, stride);
3887 gen_st16(tmp2, addr, IS_USER(s));
3888 tcg_gen_addi_i32(addr, addr, stride);
3890 } else /* size == 0 */ {
3891 if (load) {
3892 TCGV_UNUSED(tmp2);
3893 for (n = 0; n < 4; n++) {
3894 tmp = gen_ld8u(addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3896 if (n == 0) {
3897 tmp2 = tmp;
3898 } else {
3899 tcg_gen_shli_i32(tmp, tmp, n * 8);
3900 tcg_gen_or_i32(tmp2, tmp2, tmp);
3901 tcg_temp_free_i32(tmp);
3904 neon_store_reg(rd, pass, tmp2);
3905 } else {
3906 tmp2 = neon_load_reg(rd, pass);
3907 for (n = 0; n < 4; n++) {
3908 tmp = tcg_temp_new_i32();
3909 if (n == 0) {
3910 tcg_gen_mov_i32(tmp, tmp2);
3911 } else {
3912 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3914 gen_st8(tmp, addr, IS_USER(s));
3915 tcg_gen_addi_i32(addr, addr, stride);
3917 tcg_temp_free_i32(tmp2);
3922 rd += spacing;
3924 tcg_temp_free_i32(addr);
3925 stride = nregs * 8;
3926 } else {
3927 size = (insn >> 10) & 3;
3928 if (size == 3) {
3929 /* Load single element to all lanes. */
3930 int a = (insn >> 4) & 1;
3931 if (!load) {
3932 return 1;
3934 size = (insn >> 6) & 3;
3935 nregs = ((insn >> 8) & 3) + 1;
3937 if (size == 3) {
3938 if (nregs != 4 || a == 0) {
3939 return 1;
3941 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3942 size = 2;
3944 if (nregs == 1 && a == 1 && size == 0) {
3945 return 1;
3947 if (nregs == 3 && a == 1) {
3948 return 1;
3950 addr = tcg_temp_new_i32();
3951 load_reg_var(s, addr, rn);
3952 if (nregs == 1) {
3953 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3954 tmp = gen_load_and_replicate(s, addr, size);
3955 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3956 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3957 if (insn & (1 << 5)) {
3958 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3959 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3961 tcg_temp_free_i32(tmp);
3962 } else {
3963 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3964 stride = (insn & (1 << 5)) ? 2 : 1;
3965 for (reg = 0; reg < nregs; reg++) {
3966 tmp = gen_load_and_replicate(s, addr, size);
3967 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3968 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3969 tcg_temp_free_i32(tmp);
3970 tcg_gen_addi_i32(addr, addr, 1 << size);
3971 rd += stride;
3974 tcg_temp_free_i32(addr);
3975 stride = (1 << size) * nregs;
3976 } else {
3977 /* Single element. */
3978 int idx = (insn >> 4) & 0xf;
3979 pass = (insn >> 7) & 1;
3980 switch (size) {
3981 case 0:
3982 shift = ((insn >> 5) & 3) * 8;
3983 stride = 1;
3984 break;
3985 case 1:
3986 shift = ((insn >> 6) & 1) * 16;
3987 stride = (insn & (1 << 5)) ? 2 : 1;
3988 break;
3989 case 2:
3990 shift = 0;
3991 stride = (insn & (1 << 6)) ? 2 : 1;
3992 break;
3993 default:
3994 abort();
3996 nregs = ((insn >> 8) & 3) + 1;
3997 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3998 switch (nregs) {
3999 case 1:
4000 if (((idx & (1 << size)) != 0) ||
4001 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4002 return 1;
4004 break;
4005 case 3:
4006 if ((idx & 1) != 0) {
4007 return 1;
4009 /* fall through */
4010 case 2:
4011 if (size == 2 && (idx & 2) != 0) {
4012 return 1;
4014 break;
4015 case 4:
4016 if ((size == 2) && ((idx & 3) == 3)) {
4017 return 1;
4019 break;
4020 default:
4021 abort();
4023 if ((rd + stride * (nregs - 1)) > 31) {
4024 /* Attempts to write off the end of the register file
4025 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4026 * the neon_load_reg() would write off the end of the array.
4028 return 1;
4030 addr = tcg_temp_new_i32();
4031 load_reg_var(s, addr, rn);
4032 for (reg = 0; reg < nregs; reg++) {
4033 if (load) {
4034 switch (size) {
4035 case 0:
4036 tmp = gen_ld8u(addr, IS_USER(s));
4037 break;
4038 case 1:
4039 tmp = gen_ld16u(addr, IS_USER(s));
4040 break;
4041 case 2:
4042 tmp = gen_ld32(addr, IS_USER(s));
4043 break;
4044 default: /* Avoid compiler warnings. */
4045 abort();
4047 if (size != 2) {
4048 tmp2 = neon_load_reg(rd, pass);
4049 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4050 tcg_temp_free_i32(tmp2);
4052 neon_store_reg(rd, pass, tmp);
4053 } else { /* Store */
4054 tmp = neon_load_reg(rd, pass);
4055 if (shift)
4056 tcg_gen_shri_i32(tmp, tmp, shift);
4057 switch (size) {
4058 case 0:
4059 gen_st8(tmp, addr, IS_USER(s));
4060 break;
4061 case 1:
4062 gen_st16(tmp, addr, IS_USER(s));
4063 break;
4064 case 2:
4065 gen_st32(tmp, addr, IS_USER(s));
4066 break;
4069 rd += stride;
4070 tcg_gen_addi_i32(addr, addr, 1 << size);
4072 tcg_temp_free_i32(addr);
4073 stride = nregs * (1 << size);
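/* Editorial note: writeback below follows the Neon post-indexed
 * addressing forms: rm == 15 means no writeback, rm == 13 ([rn]!)
 * post-increments the base by the transfer size in "stride", and any
 * other rm post-increments the base by that register. */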
4076 if (rm != 15) {
4077 TCGv base;
4079 base = load_reg(s, rn);
4080 if (rm == 13) {
4081 tcg_gen_addi_i32(base, base, stride);
4082 } else {
4083 TCGv index;
4084 index = load_reg(s, rm);
4085 tcg_gen_add_i32(base, base, index);
4086 tcg_temp_free_i32(index);
4088 store_reg(s, rn, base);
4090 return 0;
4093 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4094 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4096 tcg_gen_and_i32(t, t, c);
4097 tcg_gen_andc_i32(f, f, c);
4098 tcg_gen_or_i32(dest, t, f);
4101 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4103 switch (size) {
4104 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4105 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4106 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4107 default: abort();
4111 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4113 switch (size) {
4114 case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
4115 case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
4116 case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
4117 default: abort();
4121 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4123 switch (size) {
4124 case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
4125 case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
4126 case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
4127 default: abort();
4131 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4133 switch (size) {
4134 case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
4135 case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
4136 case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
4137 default: abort();
4141 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4142 int q, int u)
4144 if (q) {
4145 if (u) {
4146 switch (size) {
4147 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4148 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4149 default: abort();
4151 } else {
4152 switch (size) {
4153 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4154 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4155 default: abort();
4158 } else {
4159 if (u) {
4160 switch (size) {
4161 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4162 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4163 default: abort();
4165 } else {
4166 switch (size) {
4167 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4168 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4169 default: abort();
4175 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4177 if (u) {
4178 switch (size) {
4179 case 0: gen_helper_neon_widen_u8(dest, src); break;
4180 case 1: gen_helper_neon_widen_u16(dest, src); break;
4181 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4182 default: abort();
4184 } else {
4185 switch (size) {
4186 case 0: gen_helper_neon_widen_s8(dest, src); break;
4187 case 1: gen_helper_neon_widen_s16(dest, src); break;
4188 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4189 default: abort();
4192 tcg_temp_free_i32(src);
4195 static inline void gen_neon_addl(int size)
4197 switch (size) {
4198 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4199 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4200 case 2: tcg_gen_add_i64(CPU_V001); break;
4201 default: abort();
4205 static inline void gen_neon_subl(int size)
4207 switch (size) {
4208 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4209 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4210 case 2: tcg_gen_sub_i64(CPU_V001); break;
4211 default: abort();
4215 static inline void gen_neon_negl(TCGv_i64 var, int size)
4217 switch (size) {
4218 case 0: gen_helper_neon_negl_u16(var, var); break;
4219 case 1: gen_helper_neon_negl_u32(var, var); break;
4220 case 2: gen_helper_neon_negl_u64(var, var); break;
4221 default: abort();
4225 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4227 switch (size) {
4228 case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
4229 case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
4230 default: abort();
4234 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4236 TCGv_i64 tmp;
4238 switch ((size << 1) | u) {
4239 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4240 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4241 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4242 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4243 case 4:
4244 tmp = gen_muls_i64_i32(a, b);
4245 tcg_gen_mov_i64(dest, tmp);
4246 tcg_temp_free_i64(tmp);
4247 break;
4248 case 5:
4249 tmp = gen_mulu_i64_i32(a, b);
4250 tcg_gen_mov_i64(dest, tmp);
4251 tcg_temp_free_i64(tmp);
4252 break;
4253 default: abort();
4256 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4257 Don't forget to clean them now. */
4258 if (size < 2) {
4259 tcg_temp_free_i32(a);
4260 tcg_temp_free_i32(b);
4264 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4266 if (op) {
4267 if (u) {
4268 gen_neon_unarrow_sats(size, dest, src);
4269 } else {
4270 gen_neon_narrow(size, dest, src);
4272 } else {
4273 if (u) {
4274 gen_neon_narrow_satu(size, dest, src);
4275 } else {
4276 gen_neon_narrow_sats(size, dest, src);
4281 /* Symbolic constants for op fields for Neon 3-register same-length.
4282 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4283 * table A7-9.
4285 #define NEON_3R_VHADD 0
4286 #define NEON_3R_VQADD 1
4287 #define NEON_3R_VRHADD 2
4288 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4289 #define NEON_3R_VHSUB 4
4290 #define NEON_3R_VQSUB 5
4291 #define NEON_3R_VCGT 6
4292 #define NEON_3R_VCGE 7
4293 #define NEON_3R_VSHL 8
4294 #define NEON_3R_VQSHL 9
4295 #define NEON_3R_VRSHL 10
4296 #define NEON_3R_VQRSHL 11
4297 #define NEON_3R_VMAX 12
4298 #define NEON_3R_VMIN 13
4299 #define NEON_3R_VABD 14
4300 #define NEON_3R_VABA 15
4301 #define NEON_3R_VADD_VSUB 16
4302 #define NEON_3R_VTST_VCEQ 17
4303 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4304 #define NEON_3R_VMUL 19
4305 #define NEON_3R_VPMAX 20
4306 #define NEON_3R_VPMIN 21
4307 #define NEON_3R_VQDMULH_VQRDMULH 22
4308 #define NEON_3R_VPADD 23
4309 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4310 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4311 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4312 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4313 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4314 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
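/* Editorial note: each neon_3r_sizes[] entry below is a bitmask of the
 * element size values the op accepts (bit n set means size value n is
 * valid), so 0x7 allows sizes 0-2 and 0xf also allows the 64-bit size 3;
 * the decoder UNDEFs when (neon_3r_sizes[op] & (1 << size)) is clear. */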
4316 static const uint8_t neon_3r_sizes[] = {
4317 [NEON_3R_VHADD] = 0x7,
4318 [NEON_3R_VQADD] = 0xf,
4319 [NEON_3R_VRHADD] = 0x7,
4320 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4321 [NEON_3R_VHSUB] = 0x7,
4322 [NEON_3R_VQSUB] = 0xf,
4323 [NEON_3R_VCGT] = 0x7,
4324 [NEON_3R_VCGE] = 0x7,
4325 [NEON_3R_VSHL] = 0xf,
4326 [NEON_3R_VQSHL] = 0xf,
4327 [NEON_3R_VRSHL] = 0xf,
4328 [NEON_3R_VQRSHL] = 0xf,
4329 [NEON_3R_VMAX] = 0x7,
4330 [NEON_3R_VMIN] = 0x7,
4331 [NEON_3R_VABD] = 0x7,
4332 [NEON_3R_VABA] = 0x7,
4333 [NEON_3R_VADD_VSUB] = 0xf,
4334 [NEON_3R_VTST_VCEQ] = 0x7,
4335 [NEON_3R_VML] = 0x7,
4336 [NEON_3R_VMUL] = 0x7,
4337 [NEON_3R_VPMAX] = 0x7,
4338 [NEON_3R_VPMIN] = 0x7,
4339 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4340 [NEON_3R_VPADD] = 0x7,
4341 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4342 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4343 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4344 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4345 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4346 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4349 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4350 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4351 * table A7-13.
4353 #define NEON_2RM_VREV64 0
4354 #define NEON_2RM_VREV32 1
4355 #define NEON_2RM_VREV16 2
4356 #define NEON_2RM_VPADDL 4
4357 #define NEON_2RM_VPADDL_U 5
4358 #define NEON_2RM_VCLS 8
4359 #define NEON_2RM_VCLZ 9
4360 #define NEON_2RM_VCNT 10
4361 #define NEON_2RM_VMVN 11
4362 #define NEON_2RM_VPADAL 12
4363 #define NEON_2RM_VPADAL_U 13
4364 #define NEON_2RM_VQABS 14
4365 #define NEON_2RM_VQNEG 15
4366 #define NEON_2RM_VCGT0 16
4367 #define NEON_2RM_VCGE0 17
4368 #define NEON_2RM_VCEQ0 18
4369 #define NEON_2RM_VCLE0 19
4370 #define NEON_2RM_VCLT0 20
4371 #define NEON_2RM_VABS 22
4372 #define NEON_2RM_VNEG 23
4373 #define NEON_2RM_VCGT0_F 24
4374 #define NEON_2RM_VCGE0_F 25
4375 #define NEON_2RM_VCEQ0_F 26
4376 #define NEON_2RM_VCLE0_F 27
4377 #define NEON_2RM_VCLT0_F 28
4378 #define NEON_2RM_VABS_F 30
4379 #define NEON_2RM_VNEG_F 31
4380 #define NEON_2RM_VSWP 32
4381 #define NEON_2RM_VTRN 33
4382 #define NEON_2RM_VUZP 34
4383 #define NEON_2RM_VZIP 35
4384 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4385 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4386 #define NEON_2RM_VSHLL 38
4387 #define NEON_2RM_VCVT_F16_F32 44
4388 #define NEON_2RM_VCVT_F32_F16 46
4389 #define NEON_2RM_VRECPE 56
4390 #define NEON_2RM_VRSQRTE 57
4391 #define NEON_2RM_VRECPE_F 58
4392 #define NEON_2RM_VRSQRTE_F 59
4393 #define NEON_2RM_VCVT_FS 60
4394 #define NEON_2RM_VCVT_FU 61
4395 #define NEON_2RM_VCVT_SF 62
4396 #define NEON_2RM_VCVT_UF 63
4398 static int neon_2rm_is_float_op(int op)
4400 /* Return true if this neon 2reg-misc op is float-to-float */
4401 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4402 op >= NEON_2RM_VRECPE_F);
4405 /* Each entry in this array has bit n set if the insn allows
4406 * size value n (otherwise it will UNDEF). Since unallocated
4407 * op values will have no bits set they always UNDEF.
4409 static const uint8_t neon_2rm_sizes[] = {
4410 [NEON_2RM_VREV64] = 0x7,
4411 [NEON_2RM_VREV32] = 0x3,
4412 [NEON_2RM_VREV16] = 0x1,
4413 [NEON_2RM_VPADDL] = 0x7,
4414 [NEON_2RM_VPADDL_U] = 0x7,
4415 [NEON_2RM_VCLS] = 0x7,
4416 [NEON_2RM_VCLZ] = 0x7,
4417 [NEON_2RM_VCNT] = 0x1,
4418 [NEON_2RM_VMVN] = 0x1,
4419 [NEON_2RM_VPADAL] = 0x7,
4420 [NEON_2RM_VPADAL_U] = 0x7,
4421 [NEON_2RM_VQABS] = 0x7,
4422 [NEON_2RM_VQNEG] = 0x7,
4423 [NEON_2RM_VCGT0] = 0x7,
4424 [NEON_2RM_VCGE0] = 0x7,
4425 [NEON_2RM_VCEQ0] = 0x7,
4426 [NEON_2RM_VCLE0] = 0x7,
4427 [NEON_2RM_VCLT0] = 0x7,
4428 [NEON_2RM_VABS] = 0x7,
4429 [NEON_2RM_VNEG] = 0x7,
4430 [NEON_2RM_VCGT0_F] = 0x4,
4431 [NEON_2RM_VCGE0_F] = 0x4,
4432 [NEON_2RM_VCEQ0_F] = 0x4,
4433 [NEON_2RM_VCLE0_F] = 0x4,
4434 [NEON_2RM_VCLT0_F] = 0x4,
4435 [NEON_2RM_VABS_F] = 0x4,
4436 [NEON_2RM_VNEG_F] = 0x4,
4437 [NEON_2RM_VSWP] = 0x1,
4438 [NEON_2RM_VTRN] = 0x7,
4439 [NEON_2RM_VUZP] = 0x7,
4440 [NEON_2RM_VZIP] = 0x7,
4441 [NEON_2RM_VMOVN] = 0x7,
4442 [NEON_2RM_VQMOVN] = 0x7,
4443 [NEON_2RM_VSHLL] = 0x7,
4444 [NEON_2RM_VCVT_F16_F32] = 0x2,
4445 [NEON_2RM_VCVT_F32_F16] = 0x2,
4446 [NEON_2RM_VRECPE] = 0x4,
4447 [NEON_2RM_VRSQRTE] = 0x4,
4448 [NEON_2RM_VRECPE_F] = 0x4,
4449 [NEON_2RM_VRSQRTE_F] = 0x4,
4450 [NEON_2RM_VCVT_FS] = 0x4,
4451 [NEON_2RM_VCVT_FU] = 0x4,
4452 [NEON_2RM_VCVT_SF] = 0x4,
4453 [NEON_2RM_VCVT_UF] = 0x4,
4456 /* Translate a NEON data processing instruction. Return nonzero if the
4457 instruction is invalid.
4458 We process data in a mixture of 32-bit and 64-bit chunks.
4459 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4461 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4463 int op;
4464 int q;
4465 int rd, rn, rm;
4466 int size;
4467 int shift;
4468 int pass;
4469 int count;
4470 int pairwise;
4471 int u;
4472 uint32_t imm, mask;
4473 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4474 TCGv_i64 tmp64;
4476 if (!s->vfp_enabled)
4477 return 1;
4478 q = (insn & (1 << 6)) != 0;
4479 u = (insn >> 24) & 1;
4480 VFP_DREG_D(rd, insn);
4481 VFP_DREG_N(rn, insn);
4482 VFP_DREG_M(rm, insn);
4483 size = (insn >> 20) & 3;
4484 if ((insn & (1 << 23)) == 0) {
4485 /* Three register same length. */
4486 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4487 /* Catch invalid op and bad size combinations: UNDEF */
4488 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4489 return 1;
4491 /* All insns of this form UNDEF for either this condition or the
4492 * superset of cases "Q==1"; we catch the latter later.
4494 if (q && ((rd | rn | rm) & 1)) {
4495 return 1;
4497 if (size == 3 && op != NEON_3R_LOGIC) {
4498 /* 64-bit element instructions. */
4499 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4500 neon_load_reg64(cpu_V0, rn + pass);
4501 neon_load_reg64(cpu_V1, rm + pass);
4502 switch (op) {
4503 case NEON_3R_VQADD:
4504 if (u) {
4505 gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
4506 } else {
4507 gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
4509 break;
4510 case NEON_3R_VQSUB:
4511 if (u) {
4512 gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
4513 } else {
4514 gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
4516 break;
4517 case NEON_3R_VSHL:
4518 if (u) {
4519 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4520 } else {
4521 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4523 break;
4524 case NEON_3R_VQSHL:
4525 if (u) {
4526 gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
4527 } else {
4528 gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
4530 break;
4531 case NEON_3R_VRSHL:
4532 if (u) {
4533 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4534 } else {
4535 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4537 break;
4538 case NEON_3R_VQRSHL:
4539 if (u) {
4540 gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
4541 } else {
4542 gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
4544 break;
4545 case NEON_3R_VADD_VSUB:
4546 if (u) {
4547 tcg_gen_sub_i64(CPU_V001);
4548 } else {
4549 tcg_gen_add_i64(CPU_V001);
4551 break;
4552 default:
4553 abort();
4555 neon_store_reg64(cpu_V0, rd + pass);
4557 return 0;
4559 pairwise = 0;
4560 switch (op) {
4561 case NEON_3R_VSHL:
4562 case NEON_3R_VQSHL:
4563 case NEON_3R_VRSHL:
4564 case NEON_3R_VQRSHL:
4566 int rtmp;
4567 /* Shift instruction operands are reversed. */
4568 rtmp = rn;
4569 rn = rm;
4570 rm = rtmp;
4572 break;
4573 case NEON_3R_VPADD:
4574 if (u) {
4575 return 1;
4577 /* Fall through */
4578 case NEON_3R_VPMAX:
4579 case NEON_3R_VPMIN:
4580 pairwise = 1;
4581 break;
4582 case NEON_3R_FLOAT_ARITH:
4583 pairwise = (u && size < 2); /* if VPADD (float) */
4584 break;
4585 case NEON_3R_FLOAT_MINMAX:
4586 pairwise = u; /* if VPMIN/VPMAX (float) */
4587 break;
4588 case NEON_3R_FLOAT_CMP:
4589 if (!u && size) {
4590 /* no encoding for U=0 C=1x */
4591 return 1;
4593 break;
4594 case NEON_3R_FLOAT_ACMP:
4595 if (!u) {
4596 return 1;
4598 break;
4599 case NEON_3R_VRECPS_VRSQRTS:
4600 if (u) {
4601 return 1;
4603 break;
4604 case NEON_3R_VMUL:
4605 if (u && (size != 0)) {
4606 /* UNDEF on invalid size for polynomial subcase */
4607 return 1;
4609 break;
4610 default:
4611 break;
4614 if (pairwise && q) {
4615 /* All the pairwise insns UNDEF if Q is set */
4616 return 1;
4619 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4621 if (pairwise) {
4622 /* Pairwise. */
4623 if (pass < 1) {
4624 tmp = neon_load_reg(rn, 0);
4625 tmp2 = neon_load_reg(rn, 1);
4626 } else {
4627 tmp = neon_load_reg(rm, 0);
4628 tmp2 = neon_load_reg(rm, 1);
4630 } else {
4631 /* Elementwise. */
4632 tmp = neon_load_reg(rn, pass);
4633 tmp2 = neon_load_reg(rm, pass);
4635 switch (op) {
4636 case NEON_3R_VHADD:
4637 GEN_NEON_INTEGER_OP(hadd);
4638 break;
4639 case NEON_3R_VQADD:
4640 GEN_NEON_INTEGER_OP(qadd);
4641 break;
4642 case NEON_3R_VRHADD:
4643 GEN_NEON_INTEGER_OP(rhadd);
4644 break;
4645 case NEON_3R_LOGIC: /* Logic ops. */
4646 switch ((u << 2) | size) {
4647 case 0: /* VAND */
4648 tcg_gen_and_i32(tmp, tmp, tmp2);
4649 break;
4650 case 1: /* BIC */
4651 tcg_gen_andc_i32(tmp, tmp, tmp2);
4652 break;
4653 case 2: /* VORR */
4654 tcg_gen_or_i32(tmp, tmp, tmp2);
4655 break;
4656 case 3: /* VORN */
4657 tcg_gen_orc_i32(tmp, tmp, tmp2);
4658 break;
4659 case 4: /* VEOR */
4660 tcg_gen_xor_i32(tmp, tmp, tmp2);
4661 break;
4662 case 5: /* VBSL */
4663 tmp3 = neon_load_reg(rd, pass);
4664 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4665 tcg_temp_free_i32(tmp3);
4666 break;
4667 case 6: /* VBIT */
4668 tmp3 = neon_load_reg(rd, pass);
4669 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4670 tcg_temp_free_i32(tmp3);
4671 break;
4672 case 7: /* VBIF */
4673 tmp3 = neon_load_reg(rd, pass);
4674 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4675 tcg_temp_free_i32(tmp3);
4676 break;
4678 break;
4679 case NEON_3R_VHSUB:
4680 GEN_NEON_INTEGER_OP(hsub);
4681 break;
4682 case NEON_3R_VQSUB:
4683 GEN_NEON_INTEGER_OP(qsub);
4684 break;
4685 case NEON_3R_VCGT:
4686 GEN_NEON_INTEGER_OP(cgt);
4687 break;
4688 case NEON_3R_VCGE:
4689 GEN_NEON_INTEGER_OP(cge);
4690 break;
4691 case NEON_3R_VSHL:
4692 GEN_NEON_INTEGER_OP(shl);
4693 break;
4694 case NEON_3R_VQSHL:
4695 GEN_NEON_INTEGER_OP(qshl);
4696 break;
4697 case NEON_3R_VRSHL:
4698 GEN_NEON_INTEGER_OP(rshl);
4699 break;
4700 case NEON_3R_VQRSHL:
4701 GEN_NEON_INTEGER_OP(qrshl);
4702 break;
4703 case NEON_3R_VMAX:
4704 GEN_NEON_INTEGER_OP(max);
4705 break;
4706 case NEON_3R_VMIN:
4707 GEN_NEON_INTEGER_OP(min);
4708 break;
4709 case NEON_3R_VABD:
4710 GEN_NEON_INTEGER_OP(abd);
4711 break;
4712 case NEON_3R_VABA:
4713 GEN_NEON_INTEGER_OP(abd);
4714 tcg_temp_free_i32(tmp2);
4715 tmp2 = neon_load_reg(rd, pass);
4716 gen_neon_add(size, tmp, tmp2);
4717 break;
4718 case NEON_3R_VADD_VSUB:
4719 if (!u) { /* VADD */
4720 gen_neon_add(size, tmp, tmp2);
4721 } else { /* VSUB */
4722 switch (size) {
4723 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4724 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4725 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4726 default: abort();
4729 break;
4730 case NEON_3R_VTST_VCEQ:
4731 if (!u) { /* VTST */
4732 switch (size) {
4733 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4734 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4735 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4736 default: abort();
4738 } else { /* VCEQ */
4739 switch (size) {
4740 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4741 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4742 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4743 default: abort();
4746 break;
4747 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4748 switch (size) {
4749 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4750 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4751 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4752 default: abort();
4754 tcg_temp_free_i32(tmp2);
4755 tmp2 = neon_load_reg(rd, pass);
4756 if (u) { /* VMLS */
4757 gen_neon_rsb(size, tmp, tmp2);
4758 } else { /* VMLA */
4759 gen_neon_add(size, tmp, tmp2);
4761 break;
4762 case NEON_3R_VMUL:
4763 if (u) { /* polynomial */
4764 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4765 } else { /* Integer */
4766 switch (size) {
4767 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4768 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4769 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4770 default: abort();
4773 break;
4774 case NEON_3R_VPMAX:
4775 GEN_NEON_INTEGER_OP(pmax);
4776 break;
4777 case NEON_3R_VPMIN:
4778 GEN_NEON_INTEGER_OP(pmin);
4779 break;
4780 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4781 if (!u) { /* VQDMULH */
4782 switch (size) {
4783 case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
4784 case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
4785 default: abort();
4787 } else { /* VQRDMULH */
4788 switch (size) {
4789 case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
4790 case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
4791 default: abort();
4794 break;
4795 case NEON_3R_VPADD:
4796 switch (size) {
4797 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4798 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4799 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4800 default: abort();
4802 break;
4803 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4804 switch ((u << 2) | size) {
4805 case 0: /* VADD */
4806 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4807 break;
4808 case 2: /* VSUB */
4809 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4810 break;
4811 case 4: /* VPADD */
4812 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4813 break;
4814 case 6: /* VABD */
4815 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4816 break;
4817 default:
4818 abort();
4820 break;
4821 case NEON_3R_FLOAT_MULTIPLY:
4822 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4823 if (!u) {
4824 tcg_temp_free_i32(tmp2);
4825 tmp2 = neon_load_reg(rd, pass);
4826 if (size == 0) {
4827 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4828 } else {
4829 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4832 break;
4833 case NEON_3R_FLOAT_CMP:
4834 if (!u) {
4835 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4836 } else {
4837 if (size == 0)
4838 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4839 else
4840 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4842 break;
4843 case NEON_3R_FLOAT_ACMP:
4844 if (size == 0)
4845 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4846 else
4847 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4848 break;
4849 case NEON_3R_FLOAT_MINMAX:
4850 if (size == 0)
4851 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4852 else
4853 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4854 break;
4855 case NEON_3R_VRECPS_VRSQRTS:
4856 if (size == 0)
4857 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4858 else
4859 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4860 break;
4861 default:
4862 abort();
4864 tcg_temp_free_i32(tmp2);
4866 /* Save the result. For elementwise operations we can put it
4867 straight into the destination register. For pairwise operations
4868 we have to be careful to avoid clobbering the source operands. */
4869 if (pairwise && rd == rm) {
4870 neon_store_scratch(pass, tmp);
4871 } else {
4872 neon_store_reg(rd, pass, tmp);
4875 } /* for pass */
4876 if (pairwise && rd == rm) {
4877 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4878 tmp = neon_load_scratch(pass);
4879 neon_store_reg(rd, pass, tmp);
4882 /* End of 3 register same size operations. */
4883 } else if (insn & (1 << 4)) {
4884 if ((insn & 0x00380080) != 0) {
4885 /* Two registers and shift. */
4886 op = (insn >> 8) & 0xf;
4887 if (insn & (1 << 7)) {
4888 /* 64-bit shift. */
4889 if (op > 7) {
4890 return 1;
4892 size = 3;
4893 } else {
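/* The element size is given by the position of the most significant
 * set bit of imm6 within bits [21:19].
 */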
4894 size = 2;
4895 while ((insn & (1 << (size + 19))) == 0)
4896 size--;
4898 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4899 /* To avoid excessive duplication of ops we implement shift
4900 by immediate using the variable shift operations. */
4901 if (op < 8) {
4902 /* Shift by immediate:
4903 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4904 if (q && ((rd | rm) & 1)) {
4905 return 1;
4907 if (!u && (op == 4 || op == 6)) {
4908 return 1;
4910 /* Right shifts are encoded as N - shift, where N is the
4911 element size in bits. */
4912 if (op <= 4)
4913 shift = shift - (1 << (size + 3));
4914 if (size == 3) {
4915 count = q + 1;
4916 } else {
4917 count = q ? 4: 2;
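/* Replicate the shift amount into every element of a 32-bit
 * immediate so the variable-shift helpers shift each lane alike.
 */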
4919 switch (size) {
4920 case 0:
4921 imm = (uint8_t) shift;
4922 imm |= imm << 8;
4923 imm |= imm << 16;
4924 break;
4925 case 1:
4926 imm = (uint16_t) shift;
4927 imm |= imm << 16;
4928 break;
4929 case 2:
4930 case 3:
4931 imm = shift;
4932 break;
4933 default:
4934 abort();
4937 for (pass = 0; pass < count; pass++) {
4938 if (size == 3) {
4939 neon_load_reg64(cpu_V0, rm + pass);
4940 tcg_gen_movi_i64(cpu_V1, imm);
4941 switch (op) {
4942 case 0: /* VSHR */
4943 case 1: /* VSRA */
4944 if (u)
4945 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4946 else
4947 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4948 break;
4949 case 2: /* VRSHR */
4950 case 3: /* VRSRA */
4951 if (u)
4952 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4953 else
4954 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4955 break;
4956 case 4: /* VSRI */
4957 case 5: /* VSHL, VSLI */
4958 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4959 break;
4960 case 6: /* VQSHLU */
4961 gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
4962 break;
4963 case 7: /* VQSHL */
4964 if (u) {
4965 gen_helper_neon_qshl_u64(cpu_V0,
4966 cpu_V0, cpu_V1);
4967 } else {
4968 gen_helper_neon_qshl_s64(cpu_V0,
4969 cpu_V0, cpu_V1);
4971 break;
4973 if (op == 1 || op == 3) {
4974 /* Accumulate. */
4975 neon_load_reg64(cpu_V1, rd + pass);
4976 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4977 } else if (op == 4 || (op == 5 && u)) {
4978 /* Insert */
4979 neon_load_reg64(cpu_V1, rd + pass);
4980 uint64_t mask;
4981 if (shift < -63 || shift > 63) {
4982 mask = 0;
4983 } else {
4984 if (op == 4) {
4985 mask = 0xffffffffffffffffull >> -shift;
4986 } else {
4987 mask = 0xffffffffffffffffull << shift;
4990 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4991 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4993 neon_store_reg64(cpu_V0, rd + pass);
4994 } else { /* size < 3 */
4995 /* Operands in T0 and T1. */
4996 tmp = neon_load_reg(rm, pass);
4997 tmp2 = tcg_temp_new_i32();
4998 tcg_gen_movi_i32(tmp2, imm);
4999 switch (op) {
5000 case 0: /* VSHR */
5001 case 1: /* VSRA */
5002 GEN_NEON_INTEGER_OP(shl);
5003 break;
5004 case 2: /* VRSHR */
5005 case 3: /* VRSRA */
5006 GEN_NEON_INTEGER_OP(rshl);
5007 break;
5008 case 4: /* VSRI */
5009 case 5: /* VSHL, VSLI */
5010 switch (size) {
5011 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5012 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5013 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5014 default: abort();
5016 break;
5017 case 6: /* VQSHLU */
5018 switch (size) {
5019 case 0:
5020 gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
5021 break;
5022 case 1:
5023 gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
5024 break;
5025 case 2:
5026 gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
5027 break;
5028 default:
5029 abort();
5031 break;
5032 case 7: /* VQSHL */
5033 GEN_NEON_INTEGER_OP(qshl);
5034 break;
5036 tcg_temp_free_i32(tmp2);
5038 if (op == 1 || op == 3) {
5039 /* Accumulate. */
5040 tmp2 = neon_load_reg(rd, pass);
5041 gen_neon_add(size, tmp, tmp2);
5042 tcg_temp_free_i32(tmp2);
5043 } else if (op == 4 || (op == 5 && u)) {
5044 /* Insert */
5045 switch (size) {
5046 case 0:
5047 if (op == 4)
5048 mask = 0xff >> -shift;
5049 else
5050 mask = (uint8_t)(0xff << shift);
5051 mask |= mask << 8;
5052 mask |= mask << 16;
5053 break;
5054 case 1:
5055 if (op == 4)
5056 mask = 0xffff >> -shift;
5057 else
5058 mask = (uint16_t)(0xffff << shift);
5059 mask |= mask << 16;
5060 break;
5061 case 2:
5062 if (shift < -31 || shift > 31) {
5063 mask = 0;
5064 } else {
5065 if (op == 4)
5066 mask = 0xffffffffu >> -shift;
5067 else
5068 mask = 0xffffffffu << shift;
5070 break;
5071 default:
5072 abort();
5074 tmp2 = neon_load_reg(rd, pass);
5075 tcg_gen_andi_i32(tmp, tmp, mask);
5076 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5077 tcg_gen_or_i32(tmp, tmp, tmp2);
5078 tcg_temp_free_i32(tmp2);
5080 neon_store_reg(rd, pass, tmp);
5082 } /* for pass */
5083 } else if (op < 10) {
5084 /* Shift by immediate and narrow:
5085 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
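/* Decide whether the wide source is shifted as signed or unsigned:
 * for op 8 the U bit selects VQSHRUN/VQRSHRUN, which take a signed
 * input even though they produce an unsigned result.
 */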
5086 int input_unsigned = (op == 8) ? !u : u;
5087 if (rm & 1) {
5088 return 1;
5090 shift = shift - (1 << (size + 3));
5091 size++;
5092 if (size == 3) {
5093 tmp64 = tcg_const_i64(shift);
5094 neon_load_reg64(cpu_V0, rm);
5095 neon_load_reg64(cpu_V1, rm + 1);
5096 for (pass = 0; pass < 2; pass++) {
5097 TCGv_i64 in;
5098 if (pass == 0) {
5099 in = cpu_V0;
5100 } else {
5101 in = cpu_V1;
5103 if (q) {
5104 if (input_unsigned) {
5105 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5106 } else {
5107 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5109 } else {
5110 if (input_unsigned) {
5111 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5112 } else {
5113 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5116 tmp = tcg_temp_new_i32();
5117 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5118 neon_store_reg(rd, pass, tmp);
5119 } /* for pass */
5120 tcg_temp_free_i64(tmp64);
5121 } else {
5122 if (size == 1) {
5123 imm = (uint16_t)shift;
5124 imm |= imm << 16;
5125 } else {
5126 /* size == 2 */
5127 imm = (uint32_t)shift;
5129 tmp2 = tcg_const_i32(imm);
5130 tmp4 = neon_load_reg(rm + 1, 0);
5131 tmp5 = neon_load_reg(rm + 1, 1);
5132 for (pass = 0; pass < 2; pass++) {
5133 if (pass == 0) {
5134 tmp = neon_load_reg(rm, 0);
5135 } else {
5136 tmp = tmp4;
5138 gen_neon_shift_narrow(size, tmp, tmp2, q,
5139 input_unsigned);
5140 if (pass == 0) {
5141 tmp3 = neon_load_reg(rm, 1);
5142 } else {
5143 tmp3 = tmp5;
5145 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5146 input_unsigned);
5147 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5148 tcg_temp_free_i32(tmp);
5149 tcg_temp_free_i32(tmp3);
5150 tmp = tcg_temp_new_i32();
5151 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5152 neon_store_reg(rd, pass, tmp);
5153 } /* for pass */
5154 tcg_temp_free_i32(tmp2);
5156 } else if (op == 10) {
5157 /* VSHLL, VMOVL */
5158 if (q || (rd & 1)) {
5159 return 1;
5161 tmp = neon_load_reg(rm, 0);
5162 tmp2 = neon_load_reg(rm, 1);
5163 for (pass = 0; pass < 2; pass++) {
5164 if (pass == 1)
5165 tmp = tmp2;
5167 gen_neon_widen(cpu_V0, tmp, size, u);
5169 if (shift != 0) {
5170 /* The shift is less than the width of the source
5171 type, so we can just shift the whole register. */
5172 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5173 /* Widen the result of shift: we need to clear
5174 * the potential overflow bits resulting from
5175 * left bits of the narrow input appearing as
5176 * right bits of the left neighbouring narrow
5177 * input. */
5178 if (size < 2 || !u) {
5179 uint64_t imm64;
5180 if (size == 0) {
5181 imm = (0xffu >> (8 - shift));
5182 imm |= imm << 16;
5183 } else if (size == 1) {
5184 imm = 0xffff >> (16 - shift);
5185 } else {
5186 /* size == 2 */
5187 imm = 0xffffffff >> (32 - shift);
5189 if (size < 2) {
5190 imm64 = imm | (((uint64_t)imm) << 32);
5191 } else {
5192 imm64 = imm;
5194 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5197 neon_store_reg64(cpu_V0, rd + pass);
5199 } else if (op >= 14) {
5200 /* VCVT fixed-point. */
5201 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5202 return 1;
5204 /* We have already masked out the must-be-1 top bit of imm6,
5205 * hence this 32-shift where the ARM ARM has 64-imm6.
5207 shift = 32 - shift;
5208 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5209 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5210 if (!(op & 1)) {
5211 if (u)
5212 gen_vfp_ulto(0, shift);
5213 else
5214 gen_vfp_slto(0, shift);
5215 } else {
5216 if (u)
5217 gen_vfp_toul(0, shift);
5218 else
5219 gen_vfp_tosl(0, shift);
5221 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5223 } else {
5224 return 1;
5226 } else { /* (insn & 0x00380080) == 0 */
5227 int invert;
5228 if (q && (rd & 1)) {
5229 return 1;
5232 op = (insn >> 8) & 0xf;
5233 /* One register and immediate. */
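/* Reassemble the 8-bit immediate abcdefgh from its scattered
 * encoding: a = bit 24 (held in 'u'), bcd = bits [18:16],
 * efgh = bits [3:0].
 */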
5234 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5235 invert = (insn & (1 << 5)) != 0;
5236 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5237 * We choose to not special-case this and will behave as if a
5238 * valid constant encoding of 0 had been given.
5240 switch (op) {
5241 case 0: case 1:
5242 /* no-op */
5243 break;
5244 case 2: case 3:
5245 imm <<= 8;
5246 break;
5247 case 4: case 5:
5248 imm <<= 16;
5249 break;
5250 case 6: case 7:
5251 imm <<= 24;
5252 break;
5253 case 8: case 9:
5254 imm |= imm << 16;
5255 break;
5256 case 10: case 11:
5257 imm = (imm << 8) | (imm << 24);
5258 break;
5259 case 12:
5260 imm = (imm << 8) | 0xff;
5261 break;
5262 case 13:
5263 imm = (imm << 16) | 0xffff;
5264 break;
5265 case 14:
5266 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5267 if (invert)
5268 imm = ~imm;
5269 break;
5270 case 15:
5271 if (invert) {
5272 return 1;
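/* Expand the 8 encoded bits to a single-precision constant: bit 7 is
 * the sign, bit 6 (inverted, then replicated) forms the top of the
 * exponent, bits [5:4] complete it, and bits [3:0] become the top of
 * the fraction.
 */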
5274 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5275 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5276 break;
5278 if (invert)
5279 imm = ~imm;
5281 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5282 if (op & 1 && op < 12) {
5283 tmp = neon_load_reg(rd, pass);
5284 if (invert) {
5285 /* The immediate value has already been inverted, so
5286 BIC becomes AND. */
5287 tcg_gen_andi_i32(tmp, tmp, imm);
5288 } else {
5289 tcg_gen_ori_i32(tmp, tmp, imm);
5291 } else {
5292 /* VMOV, VMVN. */
5293 tmp = tcg_temp_new_i32();
5294 if (op == 14 && invert) {
5295 int n;
5296 uint32_t val;
5297 val = 0;
5298 for (n = 0; n < 4; n++) {
5299 if (imm & (1 << (n + (pass & 1) * 4)))
5300 val |= 0xff << (n * 8);
5302 tcg_gen_movi_i32(tmp, val);
5303 } else {
5304 tcg_gen_movi_i32(tmp, imm);
5307 neon_store_reg(rd, pass, tmp);
5310 } else { /* (insn & 0x00800010 == 0x00800000) */
5311 if (size != 3) {
5312 op = (insn >> 8) & 0xf;
5313 if ((insn & (1 << 6)) == 0) {
5314 /* Three registers of different lengths. */
5315 int src1_wide;
5316 int src2_wide;
5317 int prewiden;
5318 /* undefreq: bit 0 : UNDEF if size != 0
5319 * bit 1 : UNDEF if size == 0
5320 * bit 2 : UNDEF if U == 1
5321 * Note that [1:0] set implies 'always UNDEF'
5323 int undefreq;
5324 /* prewiden, src1_wide, src2_wide, undefreq */
5325 static const int neon_3reg_wide[16][4] = {
5326 {1, 0, 0, 0}, /* VADDL */
5327 {1, 1, 0, 0}, /* VADDW */
5328 {1, 0, 0, 0}, /* VSUBL */
5329 {1, 1, 0, 0}, /* VSUBW */
5330 {0, 1, 1, 0}, /* VADDHN */
5331 {0, 0, 0, 0}, /* VABAL */
5332 {0, 1, 1, 0}, /* VSUBHN */
5333 {0, 0, 0, 0}, /* VABDL */
5334 {0, 0, 0, 0}, /* VMLAL */
5335 {0, 0, 0, 6}, /* VQDMLAL */
5336 {0, 0, 0, 0}, /* VMLSL */
5337 {0, 0, 0, 6}, /* VQDMLSL */
5338 {0, 0, 0, 0}, /* Integer VMULL */
5339 {0, 0, 0, 2}, /* VQDMULL */
5340 {0, 0, 0, 5}, /* Polynomial VMULL */
5341 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5344 prewiden = neon_3reg_wide[op][0];
5345 src1_wide = neon_3reg_wide[op][1];
5346 src2_wide = neon_3reg_wide[op][2];
5347 undefreq = neon_3reg_wide[op][3];
5349 if (((undefreq & 1) && (size != 0)) ||
5350 ((undefreq & 2) && (size == 0)) ||
5351 ((undefreq & 4) && u)) {
5352 return 1;
5354 if ((src1_wide && (rn & 1)) ||
5355 (src2_wide && (rm & 1)) ||
5356 (!src2_wide && (rd & 1))) {
5357 return 1;
5360 /* Avoid overlapping operands. Wide source operands are
5361 always aligned so will never overlap with wide
5362 destinations in problematic ways. */
5363 if (rd == rm && !src2_wide) {
5364 tmp = neon_load_reg(rm, 1);
5365 neon_store_scratch(2, tmp);
5366 } else if (rd == rn && !src1_wide) {
5367 tmp = neon_load_reg(rn, 1);
5368 neon_store_scratch(2, tmp);
5370 TCGV_UNUSED(tmp3);
5371 for (pass = 0; pass < 2; pass++) {
5372 if (src1_wide) {
5373 neon_load_reg64(cpu_V0, rn + pass);
5374 TCGV_UNUSED(tmp);
5375 } else {
5376 if (pass == 1 && rd == rn) {
5377 tmp = neon_load_scratch(2);
5378 } else {
5379 tmp = neon_load_reg(rn, pass);
5381 if (prewiden) {
5382 gen_neon_widen(cpu_V0, tmp, size, u);
5385 if (src2_wide) {
5386 neon_load_reg64(cpu_V1, rm + pass);
5387 TCGV_UNUSED(tmp2);
5388 } else {
5389 if (pass == 1 && rd == rm) {
5390 tmp2 = neon_load_scratch(2);
5391 } else {
5392 tmp2 = neon_load_reg(rm, pass);
5394 if (prewiden) {
5395 gen_neon_widen(cpu_V1, tmp2, size, u);
5398 switch (op) {
5399 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5400 gen_neon_addl(size);
5401 break;
5402 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5403 gen_neon_subl(size);
5404 break;
5405 case 5: case 7: /* VABAL, VABDL */
5406 switch ((size << 1) | u) {
5407 case 0:
5408 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5409 break;
5410 case 1:
5411 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5412 break;
5413 case 2:
5414 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5415 break;
5416 case 3:
5417 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5418 break;
5419 case 4:
5420 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5421 break;
5422 case 5:
5423 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5424 break;
5425 default: abort();
5427 tcg_temp_free_i32(tmp2);
5428 tcg_temp_free_i32(tmp);
5429 break;
5430 case 8: case 9: case 10: case 11: case 12: case 13:
5431 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5432 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5433 break;
5434 case 14: /* Polynomial VMULL */
5435 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5436 tcg_temp_free_i32(tmp2);
5437 tcg_temp_free_i32(tmp);
5438 break;
5439 default: /* 15 is RESERVED: caught earlier */
5440 abort();
5442 if (op == 13) {
5443 /* VQDMULL */
5444 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5445 neon_store_reg64(cpu_V0, rd + pass);
5446 } else if (op == 5 || (op >= 8 && op <= 11)) {
5447 /* Accumulate. */
5448 neon_load_reg64(cpu_V1, rd + pass);
5449 switch (op) {
5450 case 10: /* VMLSL */
5451 gen_neon_negl(cpu_V0, size);
5452 /* Fall through */
5453 case 5: case 8: /* VABAL, VMLAL */
5454 gen_neon_addl(size);
5455 break;
5456 case 9: case 11: /* VQDMLAL, VQDMLSL */
5457 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5458 if (op == 11) {
5459 gen_neon_negl(cpu_V0, size);
5461 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5462 break;
5463 default:
5464 abort();
5466 neon_store_reg64(cpu_V0, rd + pass);
5467 } else if (op == 4 || op == 6) {
5468 /* Narrowing operation. */
5469 tmp = tcg_temp_new_i32();
5470 if (!u) {
5471 switch (size) {
5472 case 0:
5473 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5474 break;
5475 case 1:
5476 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5477 break;
5478 case 2:
5479 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5480 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5481 break;
5482 default: abort();
5484 } else {
5485 switch (size) {
5486 case 0:
5487 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5488 break;
5489 case 1:
5490 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5491 break;
5492 case 2:
5493 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5494 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5495 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5496 break;
5497 default: abort();
5500 if (pass == 0) {
5501 tmp3 = tmp;
5502 } else {
5503 neon_store_reg(rd, 0, tmp3);
5504 neon_store_reg(rd, 1, tmp);
5506 } else {
5507 /* Write back the result. */
5508 neon_store_reg64(cpu_V0, rd + pass);
5511 } else {
5512 /* Two registers and a scalar. NB that for ops of this form
5513 * the ARM ARM labels bit 24 as Q, but it is in our variable
5514 * 'u', not 'q'.
5516 if (size == 0) {
5517 return 1;
5519 switch (op) {
5520 case 1: /* Floating point VMLA scalar */
5521 case 5: /* Floating point VMLS scalar */
5522 case 9: /* Floating point VMUL scalar */
5523 if (size == 1) {
5524 return 1;
5526 /* fall through */
5527 case 0: /* Integer VMLA scalar */
5528 case 4: /* Integer VMLS scalar */
5529 case 8: /* Integer VMUL scalar */
5530 case 12: /* VQDMULH scalar */
5531 case 13: /* VQRDMULH scalar */
5532 if (u && ((rd | rn) & 1)) {
5533 return 1;
5535 tmp = neon_get_scalar(size, rm);
5536 neon_store_scratch(0, tmp);
5537 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5538 tmp = neon_load_scratch(0);
5539 tmp2 = neon_load_reg(rn, pass);
5540 if (op == 12) {
5541 if (size == 1) {
5542 gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
5543 } else {
5544 gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
5546 } else if (op == 13) {
5547 if (size == 1) {
5548 gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
5549 } else {
5550 gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
5552 } else if (op & 1) {
5553 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5554 } else {
5555 switch (size) {
5556 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5557 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5558 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5559 default: abort();
5562 tcg_temp_free_i32(tmp2);
5563 if (op < 8) {
5564 /* Accumulate. */
5565 tmp2 = neon_load_reg(rd, pass);
5566 switch (op) {
5567 case 0:
5568 gen_neon_add(size, tmp, tmp2);
5569 break;
5570 case 1:
5571 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5572 break;
5573 case 4:
5574 gen_neon_rsb(size, tmp, tmp2);
5575 break;
5576 case 5:
5577 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5578 break;
5579 default:
5580 abort();
5582 tcg_temp_free_i32(tmp2);
5584 neon_store_reg(rd, pass, tmp);
5586 break;
5587 case 3: /* VQDMLAL scalar */
5588 case 7: /* VQDMLSL scalar */
5589 case 11: /* VQDMULL scalar */
5590 if (u == 1) {
5591 return 1;
5593 /* fall through */
5594 case 2: /* VMLAL scalar */
5595 case 6: /* VMLSL scalar */
5596 case 10: /* VMULL scalar */
5597 if (rd & 1) {
5598 return 1;
5600 tmp2 = neon_get_scalar(size, rm);
5601 /* We need a copy of tmp2 because gen_neon_mull
5602 * deletes it during pass 0. */
5603 tmp4 = tcg_temp_new_i32();
5604 tcg_gen_mov_i32(tmp4, tmp2);
5605 tmp3 = neon_load_reg(rn, 1);
5607 for (pass = 0; pass < 2; pass++) {
5608 if (pass == 0) {
5609 tmp = neon_load_reg(rn, 0);
5610 } else {
5611 tmp = tmp3;
5612 tmp2 = tmp4;
5614 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5615 if (op != 11) {
5616 neon_load_reg64(cpu_V1, rd + pass);
5618 switch (op) {
5619 case 6:
5620 gen_neon_negl(cpu_V0, size);
5621 /* Fall through */
5622 case 2:
5623 gen_neon_addl(size);
5624 break;
5625 case 3: case 7:
5626 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5627 if (op == 7) {
5628 gen_neon_negl(cpu_V0, size);
5630 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5631 break;
5632 case 10:
5633 /* no-op */
5634 break;
5635 case 11:
5636 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5637 break;
5638 default:
5639 abort();
5641 neon_store_reg64(cpu_V0, rd + pass);
5645 break;
5646 default: /* 14 and 15 are RESERVED */
5647 return 1;
5650 } else { /* size == 3 */
5651 if (!u) {
5652 /* Extract. */
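/* VEXT: form the byte-wise concatenation <Vm:Vn> (Vn in the low
 * bytes) and extract a doubleword (quadword if Q) starting at byte
 * index imm.
 */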
5653 imm = (insn >> 8) & 0xf;
5655 if (imm > 7 && !q)
5656 return 1;
5658 if (q && ((rd | rn | rm) & 1)) {
5659 return 1;
5662 if (imm == 0) {
5663 neon_load_reg64(cpu_V0, rn);
5664 if (q) {
5665 neon_load_reg64(cpu_V1, rn + 1);
5667 } else if (imm == 8) {
5668 neon_load_reg64(cpu_V0, rn + 1);
5669 if (q) {
5670 neon_load_reg64(cpu_V1, rm);
5672 } else if (q) {
5673 tmp64 = tcg_temp_new_i64();
5674 if (imm < 8) {
5675 neon_load_reg64(cpu_V0, rn);
5676 neon_load_reg64(tmp64, rn + 1);
5677 } else {
5678 neon_load_reg64(cpu_V0, rn + 1);
5679 neon_load_reg64(tmp64, rm);
5681 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5682 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5683 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5684 if (imm < 8) {
5685 neon_load_reg64(cpu_V1, rm);
5686 } else {
5687 neon_load_reg64(cpu_V1, rm + 1);
5688 imm -= 8;
5690 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5691 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5692 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5693 tcg_temp_free_i64(tmp64);
5694 } else {
5695 /* BUGFIX */
5696 neon_load_reg64(cpu_V0, rn);
5697 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5698 neon_load_reg64(cpu_V1, rm);
5699 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5700 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5702 neon_store_reg64(cpu_V0, rd);
5703 if (q) {
5704 neon_store_reg64(cpu_V1, rd + 1);
5706 } else if ((insn & (1 << 11)) == 0) {
5707 /* Two register misc. */
5708 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5709 size = (insn >> 18) & 3;
5710 /* UNDEF for unknown op values and bad op-size combinations */
5711 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5712 return 1;
5714 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5715 q && ((rm | rd) & 1)) {
5716 return 1;
5718 switch (op) {
5719 case NEON_2RM_VREV64:
5720 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5721 tmp = neon_load_reg(rm, pass * 2);
5722 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5723 switch (size) {
5724 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5725 case 1: gen_swap_half(tmp); break;
5726 case 2: /* no-op */ break;
5727 default: abort();
5729 neon_store_reg(rd, pass * 2 + 1, tmp);
5730 if (size == 2) {
5731 neon_store_reg(rd, pass * 2, tmp2);
5732 } else {
5733 switch (size) {
5734 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5735 case 1: gen_swap_half(tmp2); break;
5736 default: abort();
5738 neon_store_reg(rd, pass * 2, tmp2);
5741 break;
5742 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5743 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5744 for (pass = 0; pass < q + 1; pass++) {
5745 tmp = neon_load_reg(rm, pass * 2);
5746 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5747 tmp = neon_load_reg(rm, pass * 2 + 1);
5748 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5749 switch (size) {
5750 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5751 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5752 case 2: tcg_gen_add_i64(CPU_V001); break;
5753 default: abort();
5755 if (op >= NEON_2RM_VPADAL) {
5756 /* Accumulate. */
5757 neon_load_reg64(cpu_V1, rd + pass);
5758 gen_neon_addl(size);
5760 neon_store_reg64(cpu_V0, rd + pass);
5762 break;
5763 case NEON_2RM_VTRN:
5764 if (size == 2) {
5765 int n;
5766 for (n = 0; n < (q ? 4 : 2); n += 2) {
5767 tmp = neon_load_reg(rm, n);
5768 tmp2 = neon_load_reg(rd, n + 1);
5769 neon_store_reg(rm, n, tmp2);
5770 neon_store_reg(rd, n + 1, tmp);
5772 } else {
5773 goto elementwise;
5775 break;
5776 case NEON_2RM_VUZP:
5777 if (gen_neon_unzip(rd, rm, size, q)) {
5778 return 1;
5780 break;
5781 case NEON_2RM_VZIP:
5782 if (gen_neon_zip(rd, rm, size, q)) {
5783 return 1;
5785 break;
5786 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5787 /* also VQMOVUN; op field and mnemonics don't line up */
5788 if (rm & 1) {
5789 return 1;
5791 TCGV_UNUSED(tmp2);
5792 for (pass = 0; pass < 2; pass++) {
5793 neon_load_reg64(cpu_V0, rm + pass);
5794 tmp = tcg_temp_new_i32();
5795 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5796 tmp, cpu_V0);
5797 if (pass == 0) {
5798 tmp2 = tmp;
5799 } else {
5800 neon_store_reg(rd, 0, tmp2);
5801 neon_store_reg(rd, 1, tmp);
5804 break;
5805 case NEON_2RM_VSHLL:
5806 if (q || (rd & 1)) {
5807 return 1;
5809 tmp = neon_load_reg(rm, 0);
5810 tmp2 = neon_load_reg(rm, 1);
5811 for (pass = 0; pass < 2; pass++) {
5812 if (pass == 1)
5813 tmp = tmp2;
5814 gen_neon_widen(cpu_V0, tmp, size, 1);
5815 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5816 neon_store_reg64(cpu_V0, rd + pass);
5818 break;
5819 case NEON_2RM_VCVT_F16_F32:
5820 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5821 q || (rm & 1)) {
5822 return 1;
5824 tmp = tcg_temp_new_i32();
5825 tmp2 = tcg_temp_new_i32();
5826 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5827 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5828 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5829 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5830 tcg_gen_shli_i32(tmp2, tmp2, 16);
5831 tcg_gen_or_i32(tmp2, tmp2, tmp);
5832 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5833 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5834 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5835 neon_store_reg(rd, 0, tmp2);
5836 tmp2 = tcg_temp_new_i32();
5837 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5838 tcg_gen_shli_i32(tmp2, tmp2, 16);
5839 tcg_gen_or_i32(tmp2, tmp2, tmp);
5840 neon_store_reg(rd, 1, tmp2);
5841 tcg_temp_free_i32(tmp);
5842 break;
5843 case NEON_2RM_VCVT_F32_F16:
5844 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5845 q || (rd & 1)) {
5846 return 1;
5848 tmp3 = tcg_temp_new_i32();
5849 tmp = neon_load_reg(rm, 0);
5850 tmp2 = neon_load_reg(rm, 1);
5851 tcg_gen_ext16u_i32(tmp3, tmp);
5852 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5853 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5854 tcg_gen_shri_i32(tmp3, tmp, 16);
5855 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5856 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5857 tcg_temp_free_i32(tmp);
5858 tcg_gen_ext16u_i32(tmp3, tmp2);
5859 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5860 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5861 tcg_gen_shri_i32(tmp3, tmp2, 16);
5862 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5863 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5864 tcg_temp_free_i32(tmp2);
5865 tcg_temp_free_i32(tmp3);
5866 break;
5867 default:
5868 elementwise:
5869 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5870 if (neon_2rm_is_float_op(op)) {
5871 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5872 neon_reg_offset(rm, pass));
5873 TCGV_UNUSED(tmp);
5874 } else {
5875 tmp = neon_load_reg(rm, pass);
5877 switch (op) {
5878 case NEON_2RM_VREV32:
5879 switch (size) {
5880 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5881 case 1: gen_swap_half(tmp); break;
5882 default: abort();
5884 break;
5885 case NEON_2RM_VREV16:
5886 gen_rev16(tmp);
5887 break;
5888 case NEON_2RM_VCLS:
5889 switch (size) {
5890 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5891 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5892 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5893 default: abort();
5895 break;
5896 case NEON_2RM_VCLZ:
5897 switch (size) {
5898 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5899 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5900 case 2: gen_helper_clz(tmp, tmp); break;
5901 default: abort();
5903 break;
5904 case NEON_2RM_VCNT:
5905 gen_helper_neon_cnt_u8(tmp, tmp);
5906 break;
5907 case NEON_2RM_VMVN:
5908 tcg_gen_not_i32(tmp, tmp);
5909 break;
5910 case NEON_2RM_VQABS:
5911 switch (size) {
5912 case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
5913 case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
5914 case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
5915 default: abort();
5917 break;
5918 case NEON_2RM_VQNEG:
5919 switch (size) {
5920 case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
5921 case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
5922 case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
5923 default: abort();
5925 break;
5926 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
5927 tmp2 = tcg_const_i32(0);
5928 switch(size) {
5929 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5930 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5931 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5932 default: abort();
5934 tcg_temp_free(tmp2);
5935 if (op == NEON_2RM_VCLE0) {
5936 tcg_gen_not_i32(tmp, tmp);
5938 break;
5939 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
5940 tmp2 = tcg_const_i32(0);
5941 switch(size) {
5942 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5943 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5944 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5945 default: abort();
5947 tcg_temp_free(tmp2);
5948 if (op == NEON_2RM_VCLT0) {
5949 tcg_gen_not_i32(tmp, tmp);
5951 break;
5952 case NEON_2RM_VCEQ0:
5953 tmp2 = tcg_const_i32(0);
5954 switch(size) {
5955 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5956 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5957 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5958 default: abort();
5960 tcg_temp_free(tmp2);
5961 break;
5962 case NEON_2RM_VABS:
5963 switch(size) {
5964 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5965 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5966 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5967 default: abort();
5969 break;
5970 case NEON_2RM_VNEG:
5971 tmp2 = tcg_const_i32(0);
5972 gen_neon_rsb(size, tmp, tmp2);
5973 tcg_temp_free(tmp2);
5974 break;
5975 case NEON_2RM_VCGT0_F:
5976 tmp2 = tcg_const_i32(0);
5977 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5978 tcg_temp_free(tmp2);
5979 break;
5980 case NEON_2RM_VCGE0_F:
5981 tmp2 = tcg_const_i32(0);
5982 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5983 tcg_temp_free(tmp2);
5984 break;
5985 case NEON_2RM_VCEQ0_F:
5986 tmp2 = tcg_const_i32(0);
5987 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5988 tcg_temp_free(tmp2);
5989 break;
5990 case NEON_2RM_VCLE0_F:
5991 tmp2 = tcg_const_i32(0);
5992 gen_helper_neon_cge_f32(tmp, tmp2, tmp);
5993 tcg_temp_free(tmp2);
5994 break;
5995 case NEON_2RM_VCLT0_F:
5996 tmp2 = tcg_const_i32(0);
5997 gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
5998 tcg_temp_free(tmp2);
5999 break;
6000 case NEON_2RM_VABS_F:
6001 gen_vfp_abs(0);
6002 break;
6003 case NEON_2RM_VNEG_F:
6004 gen_vfp_neg(0);
6005 break;
6006 case NEON_2RM_VSWP:
6007 tmp2 = neon_load_reg(rd, pass);
6008 neon_store_reg(rm, pass, tmp2);
6009 break;
6010 case NEON_2RM_VTRN:
6011 tmp2 = neon_load_reg(rd, pass);
6012 switch (size) {
6013 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6014 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6015 default: abort();
6017 neon_store_reg(rm, pass, tmp2);
6018 break;
6019 case NEON_2RM_VRECPE:
6020 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6021 break;
6022 case NEON_2RM_VRSQRTE:
6023 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6024 break;
6025 case NEON_2RM_VRECPE_F:
6026 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6027 break;
6028 case NEON_2RM_VRSQRTE_F:
6029 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6030 break;
6031 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6032 gen_vfp_sito(0);
6033 break;
6034 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6035 gen_vfp_uito(0);
6036 break;
6037 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6038 gen_vfp_tosiz(0);
6039 break;
6040 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6041 gen_vfp_touiz(0);
6042 break;
6043 default:
6044 /* Reserved op values were caught by the
6045 * neon_2rm_sizes[] check earlier.
6047 abort();
6049 if (neon_2rm_is_float_op(op)) {
6050 tcg_gen_st_f32(cpu_F0s, cpu_env,
6051 neon_reg_offset(rd, pass));
6052 } else {
6053 neon_store_reg(rd, pass, tmp);
6056 break;
6058 } else if ((insn & (1 << 10)) == 0) {
6059 /* VTBL, VTBX. */
6060 int n = ((insn >> 8) & 3) + 1;
6061 if ((rn + n) > 32) {
6062 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6063 * helper function running off the end of the register file.
6065 return 1;
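/* Convert the register-list count into the table length in bytes
 * (8 bytes per D register) expected by the tbl helper.
 */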
6067 n <<= 3;
6068 if (insn & (1 << 6)) {
6069 tmp = neon_load_reg(rd, 0);
6070 } else {
6071 tmp = tcg_temp_new_i32();
6072 tcg_gen_movi_i32(tmp, 0);
6074 tmp2 = neon_load_reg(rm, 0);
6075 tmp4 = tcg_const_i32(rn);
6076 tmp5 = tcg_const_i32(n);
6077 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6078 tcg_temp_free_i32(tmp);
6079 if (insn & (1 << 6)) {
6080 tmp = neon_load_reg(rd, 1);
6081 } else {
6082 tmp = tcg_temp_new_i32();
6083 tcg_gen_movi_i32(tmp, 0);
6085 tmp3 = neon_load_reg(rm, 1);
6086 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6087 tcg_temp_free_i32(tmp5);
6088 tcg_temp_free_i32(tmp4);
6089 neon_store_reg(rd, 0, tmp2);
6090 neon_store_reg(rd, 1, tmp3);
6091 tcg_temp_free_i32(tmp);
6092 } else if ((insn & 0x380) == 0) {
6093 /* VDUP */
6094 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6095 return 1;
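/* The imm4 field (bits [19:16]) encodes both the element size and
 * its index: the lowest set bit gives the size, the bits above it
 * the index.
 */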
6097 if (insn & (1 << 19)) {
6098 tmp = neon_load_reg(rm, 1);
6099 } else {
6100 tmp = neon_load_reg(rm, 0);
6102 if (insn & (1 << 16)) {
6103 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6104 } else if (insn & (1 << 17)) {
6105 if ((insn >> 18) & 1)
6106 gen_neon_dup_high16(tmp);
6107 else
6108 gen_neon_dup_low16(tmp);
6110 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6111 tmp2 = tcg_temp_new_i32();
6112 tcg_gen_mov_i32(tmp2, tmp);
6113 neon_store_reg(rd, pass, tmp2);
6115 tcg_temp_free_i32(tmp);
6116 } else {
6117 return 1;
6121 return 0;
6124 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6126 int crn = (insn >> 16) & 0xf;
6127 int crm = insn & 0xf;
6128 int op1 = (insn >> 21) & 7;
6129 int op2 = (insn >> 5) & 7;
6130 int rt = (insn >> 12) & 0xf;
6131 TCGv tmp;
6133 /* Minimal set of debug registers, since we don't support debug */
6134 if (op1 == 0 && crn == 0 && op2 == 0) {
6135 switch (crm) {
6136 case 0:
6137 /* DBGDIDR: just RAZ. In particular this means the
6138 * "debug architecture version" bits will read as
6139 * a reserved value, which should cause Linux to
6140 * not try to use the debug hardware.
6142 tmp = tcg_const_i32(0);
6143 store_reg(s, rt, tmp);
6144 return 0;
6145 case 1:
6146 case 2:
6147 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6148 * don't implement memory mapped debug components
6150 if (ENABLE_ARCH_7) {
6151 tmp = tcg_const_i32(0);
6152 store_reg(s, rt, tmp);
6153 return 0;
6155 break;
6156 default:
6157 break;
6161 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6162 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6163 /* TEECR */
6164 if (IS_USER(s))
6165 return 1;
6166 tmp = load_cpu_field(teecr);
6167 store_reg(s, rt, tmp);
6168 return 0;
6170 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6171 /* TEEHBR */
6172 if (IS_USER(s) && (env->teecr & 1))
6173 return 1;
6174 tmp = load_cpu_field(teehbr);
6175 store_reg(s, rt, tmp);
6176 return 0;
6179 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
6180 op1, crn, crm, op2);
6181 return 1;
6184 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6186 int crn = (insn >> 16) & 0xf;
6187 int crm = insn & 0xf;
6188 int op1 = (insn >> 21) & 7;
6189 int op2 = (insn >> 5) & 7;
6190 int rt = (insn >> 12) & 0xf;
6191 TCGv tmp;
6193 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6194 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6195 /* TEECR */
6196 if (IS_USER(s))
6197 return 1;
6198 tmp = load_reg(s, rt);
6199 gen_helper_set_teecr(cpu_env, tmp);
6200 tcg_temp_free_i32(tmp);
6201 return 0;
6203 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6204 /* TEEHBR */
6205 if (IS_USER(s) && (env->teecr & 1))
6206 return 1;
6207 tmp = load_reg(s, rt);
6208 store_cpu_field(tmp, teehbr);
6209 return 0;
6212 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
6213 op1, crn, crm, op2);
6214 return 1;
6217 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6219 int cpnum;
6221 cpnum = (insn >> 8) & 0xf;
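/* XScale gates coprocessor accesses through the CP Access Register
 * (c15_cpar); a clear bit for this coprocessor makes the access UNDEF.
 */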
6222 if (arm_feature(env, ARM_FEATURE_XSCALE)
6223 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6224 return 1;
6226 switch (cpnum) {
6227 case 0:
6228 case 1:
6229 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6230 return disas_iwmmxt_insn(env, s, insn);
6231 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6232 return disas_dsp_insn(env, s, insn);
6234 return 1;
6235 case 10:
6236 case 11:
6237 return disas_vfp_insn (env, s, insn);
6238 case 14:
6239 /* Coprocessors 7-15 are architecturally reserved by ARM.
6240 Unfortunately Intel decided to ignore this. */
6241 if (arm_feature(env, ARM_FEATURE_XSCALE))
6242 goto board;
6243 if (insn & (1 << 20))
6244 return disas_cp14_read(env, s, insn);
6245 else
6246 return disas_cp14_write(env, s, insn);
6247 case 15:
6248 return disas_cp15_insn (env, s, insn);
6249 default:
6250 board:
6251 /* Unknown coprocessor. See if the board has hooked it. */
6252 return disas_cp_insn (env, s, insn);
6257 /* Store a 64-bit value to a register pair. Clobbers val. */
6258 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6260 TCGv tmp;
6261 tmp = tcg_temp_new_i32();
6262 tcg_gen_trunc_i64_i32(tmp, val);
6263 store_reg(s, rlow, tmp);
6264 tmp = tcg_temp_new_i32();
6265 tcg_gen_shri_i64(val, val, 32);
6266 tcg_gen_trunc_i64_i32(tmp, val);
6267 store_reg(s, rhigh, tmp);
6270 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6271 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6273 TCGv_i64 tmp;
6274 TCGv tmp2;
6276 /* Load value and extend to 64 bits. */
6277 tmp = tcg_temp_new_i64();
6278 tmp2 = load_reg(s, rlow);
6279 tcg_gen_extu_i32_i64(tmp, tmp2);
6280 tcg_temp_free_i32(tmp2);
6281 tcg_gen_add_i64(val, val, tmp);
6282 tcg_temp_free_i64(tmp);
6285 /* load and add a 64-bit value from a register pair. */
6286 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6288 TCGv_i64 tmp;
6289 TCGv tmpl;
6290 TCGv tmph;
6292 /* Load 64-bit value rd:rn. */
6293 tmpl = load_reg(s, rlow);
6294 tmph = load_reg(s, rhigh);
6295 tmp = tcg_temp_new_i64();
6296 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6297 tcg_temp_free_i32(tmpl);
6298 tcg_temp_free_i32(tmph);
6299 tcg_gen_add_i64(val, val, tmp);
6300 tcg_temp_free_i64(tmp);
6303 /* Set N and Z flags from a 64-bit value. */
6304 static void gen_logicq_cc(TCGv_i64 val)
6306 TCGv tmp = tcg_temp_new_i32();
6307 gen_helper_logicq_cc(tmp, val);
6308 gen_logic_CC(tmp);
6309 tcg_temp_free_i32(tmp);
6312 /* Load/Store exclusive instructions are implemented by remembering
6313 the value/address loaded, and seeing if these are the same
6314 when the store is performed. This should be sufficient to implement
6315 the architecturally mandated semantics, and avoids having to monitor
6316 regular stores.
6318 In system emulation mode only one CPU will be running at once, so
6319 this sequence is effectively atomic. In user emulation mode we
6320 throw an exception and handle the atomic operation elsewhere. */
6321 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6322 TCGv addr, int size)
6324 TCGv tmp;
6326 switch (size) {
6327 case 0:
6328 tmp = gen_ld8u(addr, IS_USER(s));
6329 break;
6330 case 1:
6331 tmp = gen_ld16u(addr, IS_USER(s));
6332 break;
6333 case 2:
6334 case 3:
6335 tmp = gen_ld32(addr, IS_USER(s));
6336 break;
6337 default:
6338 abort();
6340 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6341 store_reg(s, rt, tmp);
6342 if (size == 3) {
6343 TCGv tmp2 = tcg_temp_new_i32();
6344 tcg_gen_addi_i32(tmp2, addr, 4);
6345 tmp = gen_ld32(tmp2, IS_USER(s));
6346 tcg_temp_free_i32(tmp2);
6347 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6348 store_reg(s, rt2, tmp);
6350 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6353 static void gen_clrex(DisasContext *s)
6355 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6358 #ifdef CONFIG_USER_ONLY
6359 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6360 TCGv addr, int size)
6362 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6363 tcg_gen_movi_i32(cpu_exclusive_info,
6364 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6365 gen_exception_insn(s, 4, EXCP_STREX);
6367 #else
6368 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6369 TCGv addr, int size)
6371 TCGv tmp;
6372 int done_label;
6373 int fail_label;
6375 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6376 [addr] = {Rt};
6377 {Rd} = 0;
6378 } else {
6379 {Rd} = 1;
6380 } */
6381 fail_label = gen_new_label();
6382 done_label = gen_new_label();
6383 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6384 switch (size) {
6385 case 0:
6386 tmp = gen_ld8u(addr, IS_USER(s));
6387 break;
6388 case 1:
6389 tmp = gen_ld16u(addr, IS_USER(s));
6390 break;
6391 case 2:
6392 case 3:
6393 tmp = gen_ld32(addr, IS_USER(s));
6394 break;
6395 default:
6396 abort();
6398 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6399 tcg_temp_free_i32(tmp);
6400 if (size == 3) {
6401 TCGv tmp2 = tcg_temp_new_i32();
6402 tcg_gen_addi_i32(tmp2, addr, 4);
6403 tmp = gen_ld32(tmp2, IS_USER(s));
6404 tcg_temp_free_i32(tmp2);
6405 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6406 tcg_temp_free_i32(tmp);
6408 tmp = load_reg(s, rt);
6409 switch (size) {
6410 case 0:
6411 gen_st8(tmp, addr, IS_USER(s));
6412 break;
6413 case 1:
6414 gen_st16(tmp, addr, IS_USER(s));
6415 break;
6416 case 2:
6417 case 3:
6418 gen_st32(tmp, addr, IS_USER(s));
6419 break;
6420 default:
6421 abort();
6423 if (size == 3) {
6424 tcg_gen_addi_i32(addr, addr, 4);
6425 tmp = load_reg(s, rt2);
6426 gen_st32(tmp, addr, IS_USER(s));
6428 tcg_gen_movi_i32(cpu_R[rd], 0);
6429 tcg_gen_br(done_label);
6430 gen_set_label(fail_label);
6431 tcg_gen_movi_i32(cpu_R[rd], 1);
6432 gen_set_label(done_label);
6433 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6435 #endif
6437 static void disas_arm_insn(CPUState * env, DisasContext *s)
6439 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6440 TCGv tmp;
6441 TCGv tmp2;
6442 TCGv tmp3;
6443 TCGv addr;
6444 TCGv_i64 tmp64;
6446 insn = ldl_code(s->pc);
6447 s->pc += 4;
6449 /* M variants do not implement ARM mode. */
6450 if (IS_M(env))
6451 goto illegal_op;
6452 cond = insn >> 28;
6453 if (cond == 0xf){
6454 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6455 * choose to UNDEF. In ARMv5 and above the space is used
6456 * for miscellaneous unconditional instructions.
6458 ARCH(5);
6460 /* Unconditional instructions. */
6461 if (((insn >> 25) & 7) == 1) {
6462 /* NEON Data processing. */
6463 if (!arm_feature(env, ARM_FEATURE_NEON))
6464 goto illegal_op;
6466 if (disas_neon_data_insn(env, s, insn))
6467 goto illegal_op;
6468 return;
6470 if ((insn & 0x0f100000) == 0x04000000) {
6471 /* NEON load/store. */
6472 if (!arm_feature(env, ARM_FEATURE_NEON))
6473 goto illegal_op;
6475 if (disas_neon_ls_insn(env, s, insn))
6476 goto illegal_op;
6477 return;
6479 if (((insn & 0x0f30f000) == 0x0510f000) ||
6480 ((insn & 0x0f30f010) == 0x0710f000)) {
6481 if ((insn & (1 << 22)) == 0) {
6482 /* PLDW; v7MP */
6483 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6484 goto illegal_op;
6487 /* Otherwise PLD; v5TE+ */
6488 ARCH(5TE);
6489 return;
6491 if (((insn & 0x0f70f000) == 0x0450f000) ||
6492 ((insn & 0x0f70f010) == 0x0650f000)) {
6493 ARCH(7);
6494 return; /* PLI; V7 */
6496 if (((insn & 0x0f700000) == 0x04100000) ||
6497 ((insn & 0x0f700010) == 0x06100000)) {
6498 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6499 goto illegal_op;
6501 return; /* v7MP: Unallocated memory hint: must NOP */
6504 if ((insn & 0x0ffffdff) == 0x01010000) {
6505 ARCH(6);
6506 /* setend */
6507 if (insn & (1 << 9)) {
6508 /* BE8 mode not implemented. */
6509 goto illegal_op;
6511 return;
6512 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6513 switch ((insn >> 4) & 0xf) {
6514 case 1: /* clrex */
6515 ARCH(6K);
6516 gen_clrex(s);
6517 return;
6518 case 4: /* dsb */
6519 case 5: /* dmb */
6520 case 6: /* isb */
6521 ARCH(7);
6522 /* We don't emulate caches so these are a no-op. */
6523 return;
6524 default:
6525 goto illegal_op;
6527 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6528 /* srs */
6529 int32_t offset;
6530 if (IS_USER(s))
6531 goto illegal_op;
6532 ARCH(6);
6533 op1 = (insn & 0x1f);
6534 addr = tcg_temp_new_i32();
6535 tmp = tcg_const_i32(op1);
6536 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6537 tcg_temp_free_i32(tmp);
6538 i = (insn >> 23) & 3;
6539 switch (i) {
6540 case 0: offset = -4; break; /* DA */
6541 case 1: offset = 0; break; /* IA */
6542 case 2: offset = -8; break; /* DB */
6543 case 3: offset = 4; break; /* IB */
6544 default: abort();
6546 if (offset)
6547 tcg_gen_addi_i32(addr, addr, offset);
6548 tmp = load_reg(s, 14);
6549 gen_st32(tmp, addr, 0);
6550 tmp = load_cpu_field(spsr);
6551 tcg_gen_addi_i32(addr, addr, 4);
6552 gen_st32(tmp, addr, 0);
6553 if (insn & (1 << 21)) {
6554 /* Base writeback. */
6555 switch (i) {
6556 case 0: offset = -8; break;
6557 case 1: offset = 4; break;
6558 case 2: offset = -4; break;
6559 case 3: offset = 0; break;
6560 default: abort();
6562 if (offset)
6563 tcg_gen_addi_i32(addr, addr, offset);
6564 tmp = tcg_const_i32(op1);
6565 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6566 tcg_temp_free_i32(tmp);
6567 tcg_temp_free_i32(addr);
6568 } else {
6569 tcg_temp_free_i32(addr);
6571 return;
6572 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6573 /* rfe */
6574 int32_t offset;
6575 if (IS_USER(s))
6576 goto illegal_op;
6577 ARCH(6);
6578 rn = (insn >> 16) & 0xf;
6579 addr = load_reg(s, rn);
6580 i = (insn >> 23) & 3;
6581 switch (i) {
6582 case 0: offset = -4; break; /* DA */
6583 case 1: offset = 0; break; /* IA */
6584 case 2: offset = -8; break; /* DB */
6585 case 3: offset = 4; break; /* IB */
6586 default: abort();
6588 if (offset)
6589 tcg_gen_addi_i32(addr, addr, offset);
6590 /* Load PC into tmp and CPSR into tmp2. */
6591 tmp = gen_ld32(addr, 0);
6592 tcg_gen_addi_i32(addr, addr, 4);
6593 tmp2 = gen_ld32(addr, 0);
6594 if (insn & (1 << 21)) {
6595 /* Base writeback. */
6596 switch (i) {
6597 case 0: offset = -8; break;
6598 case 1: offset = 4; break;
6599 case 2: offset = -4; break;
6600 case 3: offset = 0; break;
6601 default: abort();
6603 if (offset)
6604 tcg_gen_addi_i32(addr, addr, offset);
6605 store_reg(s, rn, addr);
6606 } else {
6607 tcg_temp_free_i32(addr);
6609 gen_rfe(s, tmp, tmp2);
6610 return;
6611 } else if ((insn & 0x0e000000) == 0x0a000000) {
6612 /* branch link and change to thumb (blx <offset>) */
6613 int32_t offset;
6615 val = (uint32_t)s->pc;
6616 tmp = tcg_temp_new_i32();
6617 tcg_gen_movi_i32(tmp, val);
6618 store_reg(s, 14, tmp);
6619 /* Sign-extend the 24-bit offset */
6620 offset = (((int32_t)insn) << 8) >> 8;
6621 /* offset * 4 + bit24 * 2 + (thumb bit) */
6622 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6623 /* pipeline offset */
6624 val += 4;
6625 /* protected by ARCH(5); above, near the start of uncond block */
6626 gen_bx_im(s, val);
6627 return;
6628 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6629 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6630 /* iWMMXt register transfer. */
6631 if (env->cp15.c15_cpar & (1 << 1))
6632 if (!disas_iwmmxt_insn(env, s, insn))
6633 return;
6635 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6636 /* Coprocessor double register transfer. */
6637 ARCH(5TE);
6638 } else if ((insn & 0x0f000010) == 0x0e000010) {
6639 /* Additional coprocessor register transfer. */
6640 } else if ((insn & 0x0ff10020) == 0x01000000) {
6641 uint32_t mask;
6642 uint32_t val;
6643 /* cps (privileged) */
6644 if (IS_USER(s))
6645 return;
6646 mask = val = 0;
6647 if (insn & (1 << 19)) {
6648 if (insn & (1 << 8))
6649 mask |= CPSR_A;
6650 if (insn & (1 << 7))
6651 mask |= CPSR_I;
6652 if (insn & (1 << 6))
6653 mask |= CPSR_F;
6654 if (insn & (1 << 18))
6655 val |= mask;
6657 if (insn & (1 << 17)) {
6658 mask |= CPSR_M;
6659 val |= (insn & 0x1f);
6661 if (mask) {
6662 gen_set_psr_im(s, mask, 0, val);
6664 return;
6666 goto illegal_op;
6668 if (cond != 0xe) {
6669 /* If the condition is not 'always', generate a conditional jump to
6670 the next instruction. */
6671 s->condlabel = gen_new_label();
6672 gen_test_cc(cond ^ 1, s->condlabel);
6673 s->condjmp = 1;
6675 if ((insn & 0x0f900000) == 0x03000000) {
6676 if ((insn & (1 << 21)) == 0) {
6677 ARCH(6T2);
6678 rd = (insn >> 12) & 0xf;
6679 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6680 if ((insn & (1 << 22)) == 0) {
6681 /* MOVW */
6682 tmp = tcg_temp_new_i32();
6683 tcg_gen_movi_i32(tmp, val);
6684 } else {
6685 /* MOVT */
6686 tmp = load_reg(s, rd);
6687 tcg_gen_ext16u_i32(tmp, tmp);
6688 tcg_gen_ori_i32(tmp, tmp, val << 16);
6690 store_reg(s, rd, tmp);
6691 } else {
6692 if (((insn >> 12) & 0xf) != 0xf)
6693 goto illegal_op;
6694 if (((insn >> 16) & 0xf) == 0) {
6695 gen_nop_hint(s, insn & 0xff);
6696 } else {
6697 /* CPSR = immediate */
6698 val = insn & 0xff;
6699 shift = ((insn >> 8) & 0xf) * 2;
6700 if (shift)
6701 val = (val >> shift) | (val << (32 - shift));
6702 i = ((insn & (1 << 22)) != 0);
6703 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6704 goto illegal_op;
6707 } else if ((insn & 0x0f900000) == 0x01000000
6708 && (insn & 0x00000090) != 0x00000090) {
6709 /* miscellaneous instructions */
6710 op1 = (insn >> 21) & 3;
6711 sh = (insn >> 4) & 0xf;
6712 rm = insn & 0xf;
6713 switch (sh) {
6714 case 0x0: /* move program status register */
6715 if (op1 & 1) {
6716 /* PSR = reg */
6717 tmp = load_reg(s, rm);
6718 i = ((op1 & 2) != 0);
6719 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6720 goto illegal_op;
6721 } else {
6722 /* reg = PSR */
6723 rd = (insn >> 12) & 0xf;
6724 if (op1 & 2) {
6725 if (IS_USER(s))
6726 goto illegal_op;
6727 tmp = load_cpu_field(spsr);
6728 } else {
6729 tmp = tcg_temp_new_i32();
6730 gen_helper_cpsr_read(tmp);
6732 store_reg(s, rd, tmp);
6734 break;
6735 case 0x1:
6736 if (op1 == 1) {
6737 /* branch/exchange thumb (bx). */
6738 ARCH(4T);
6739 tmp = load_reg(s, rm);
6740 gen_bx(s, tmp);
6741 } else if (op1 == 3) {
6742 /* clz */
6743 ARCH(5);
6744 rd = (insn >> 12) & 0xf;
6745 tmp = load_reg(s, rm);
6746 gen_helper_clz(tmp, tmp);
6747 store_reg(s, rd, tmp);
6748 } else {
6749 goto illegal_op;
6751 break;
6752 case 0x2:
6753 if (op1 == 1) {
6754 ARCH(5J); /* bxj */
6755 /* Trivial implementation equivalent to bx. */
6756 tmp = load_reg(s, rm);
6757 gen_bx(s, tmp);
6758 } else {
6759 goto illegal_op;
6761 break;
6762 case 0x3:
6763 if (op1 != 1)
6764 goto illegal_op;
6766 ARCH(5);
6767 /* branch link/exchange thumb (blx) */
6768 tmp = load_reg(s, rm);
6769 tmp2 = tcg_temp_new_i32();
6770 tcg_gen_movi_i32(tmp2, s->pc);
6771 store_reg(s, 14, tmp2);
6772 gen_bx(s, tmp);
6773 break;
6774 case 0x5: /* saturating add/subtract */
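/* QADD/QSUB/QDADD/QDSUB: bit 1 of op1 doubles (with saturation) the
   Rn operand, bit 0 selects subtraction. */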
6775 ARCH(5TE);
6776 rd = (insn >> 12) & 0xf;
6777 rn = (insn >> 16) & 0xf;
6778 tmp = load_reg(s, rm);
6779 tmp2 = load_reg(s, rn);
6780 if (op1 & 2)
6781 gen_helper_double_saturate(tmp2, tmp2);
6782 if (op1 & 1)
6783 gen_helper_sub_saturate(tmp, tmp, tmp2);
6784 else
6785 gen_helper_add_saturate(tmp, tmp, tmp2);
6786 tcg_temp_free_i32(tmp2);
6787 store_reg(s, rd, tmp);
6788 break;
6789 case 7:
6790 /* SMC instruction (op1 == 3)
6791 and undefined instructions (op1 == 0 || op1 == 2)
6792 will trap */
6793 if (op1 != 1) {
6794 goto illegal_op;
6796 /* bkpt */
6797 ARCH(5);
6798 gen_exception_insn(s, 4, EXCP_BKPT);
6799 break;
6800 case 0x8: /* signed multiply */
6801 case 0xa:
6802 case 0xc:
6803 case 0xe:
6804 ARCH(5TE);
6805 rs = (insn >> 8) & 0xf;
6806 rn = (insn >> 12) & 0xf;
6807 rd = (insn >> 16) & 0xf;
6808 if (op1 == 1) {
6809 /* (32 * 16) >> 16 */
6810 tmp = load_reg(s, rm);
6811 tmp2 = load_reg(s, rs);
6812 if (sh & 4)
6813 tcg_gen_sari_i32(tmp2, tmp2, 16);
6814 else
6815 gen_sxth(tmp2);
6816 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6817 tcg_gen_shri_i64(tmp64, tmp64, 16);
6818 tmp = tcg_temp_new_i32();
6819 tcg_gen_trunc_i64_i32(tmp, tmp64);
6820 tcg_temp_free_i64(tmp64);
6821 if ((sh & 2) == 0) {
6822 tmp2 = load_reg(s, rn);
6823 gen_helper_add_setq(tmp, tmp, tmp2);
6824 tcg_temp_free_i32(tmp2);
6826 store_reg(s, rd, tmp);
6827 } else {
6828 /* 16 * 16 */
6829 tmp = load_reg(s, rm);
6830 tmp2 = load_reg(s, rs);
6831 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6832 tcg_temp_free_i32(tmp2);
6833 if (op1 == 2) {
6834 tmp64 = tcg_temp_new_i64();
6835 tcg_gen_ext_i32_i64(tmp64, tmp);
6836 tcg_temp_free_i32(tmp);
6837 gen_addq(s, tmp64, rn, rd);
6838 gen_storeq_reg(s, rn, rd, tmp64);
6839 tcg_temp_free_i64(tmp64);
6840 } else {
6841 if (op1 == 0) {
6842 tmp2 = load_reg(s, rn);
6843 gen_helper_add_setq(tmp, tmp, tmp2);
6844 tcg_temp_free_i32(tmp2);
6846 store_reg(s, rd, tmp);
6849 break;
6850 default:
6851 goto illegal_op;
6853 } else if (((insn & 0x0e000000) == 0 &&
6854 (insn & 0x00000090) != 0x90) ||
6855 ((insn & 0x0e000000) == (1 << 25))) {
6856 int set_cc, logic_cc, shiftop;
6858 op1 = (insn >> 21) & 0xf;
6859 set_cc = (insn >> 20) & 1;
6860 logic_cc = table_logic_cc[op1] & set_cc;
6862 /* data processing instruction */
6863 if (insn & (1 << 25)) {
6864 /* immediate operand */
6865 val = insn & 0xff;
6866 shift = ((insn >> 8) & 0xf) * 2;
6867 if (shift) {
6868 val = (val >> shift) | (val << (32 - shift));
6870 tmp2 = tcg_temp_new_i32();
6871 tcg_gen_movi_i32(tmp2, val);
6872 if (logic_cc && shift) {
6873 gen_set_CF_bit31(tmp2);
6875 } else {
6876 /* register */
6877 rm = (insn) & 0xf;
6878 tmp2 = load_reg(s, rm);
6879 shiftop = (insn >> 5) & 3;
6880 if (!(insn & (1 << 4))) {
6881 shift = (insn >> 7) & 0x1f;
6882 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6883 } else {
6884 rs = (insn >> 8) & 0xf;
6885 tmp = load_reg(s, rs);
6886 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6889 if (op1 != 0x0f && op1 != 0x0d) {
6890 rn = (insn >> 16) & 0xf;
6891 tmp = load_reg(s, rn);
6892 } else {
6893 TCGV_UNUSED(tmp);
6895 rd = (insn >> 12) & 0xf;
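/* op1 is the standard ARM data-processing opcode: 0=AND 1=EOR 2=SUB 3=RSB
   4=ADD 5=ADC 6=SBC 7=RSC 8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN. */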
6896 switch(op1) {
6897 case 0x00:
6898 tcg_gen_and_i32(tmp, tmp, tmp2);
6899 if (logic_cc) {
6900 gen_logic_CC(tmp);
6902 store_reg_bx(env, s, rd, tmp);
6903 break;
6904 case 0x01:
6905 tcg_gen_xor_i32(tmp, tmp, tmp2);
6906 if (logic_cc) {
6907 gen_logic_CC(tmp);
6909 store_reg_bx(env, s, rd, tmp);
6910 break;
6911 case 0x02:
6912 if (set_cc && rd == 15) {
6913 /* SUBS r15, ... is used for exception return. */
6914 if (IS_USER(s)) {
6915 goto illegal_op;
6917 gen_helper_sub_cc(tmp, tmp, tmp2);
6918 gen_exception_return(s, tmp);
6919 } else {
6920 if (set_cc) {
6921 gen_helper_sub_cc(tmp, tmp, tmp2);
6922 } else {
6923 tcg_gen_sub_i32(tmp, tmp, tmp2);
6925 store_reg_bx(env, s, rd, tmp);
6927 break;
6928 case 0x03:
6929 if (set_cc) {
6930 gen_helper_sub_cc(tmp, tmp2, tmp);
6931 } else {
6932 tcg_gen_sub_i32(tmp, tmp2, tmp);
6934 store_reg_bx(env, s, rd, tmp);
6935 break;
6936 case 0x04:
6937 if (set_cc) {
6938 gen_helper_add_cc(tmp, tmp, tmp2);
6939 } else {
6940 tcg_gen_add_i32(tmp, tmp, tmp2);
6942 store_reg_bx(env, s, rd, tmp);
6943 break;
6944 case 0x05:
6945 if (set_cc) {
6946 gen_helper_adc_cc(tmp, tmp, tmp2);
6947 } else {
6948 gen_add_carry(tmp, tmp, tmp2);
6950 store_reg_bx(env, s, rd, tmp);
6951 break;
6952 case 0x06:
6953 if (set_cc) {
6954 gen_helper_sbc_cc(tmp, tmp, tmp2);
6955 } else {
6956 gen_sub_carry(tmp, tmp, tmp2);
6958 store_reg_bx(env, s, rd, tmp);
6959 break;
6960 case 0x07:
6961 if (set_cc) {
6962 gen_helper_sbc_cc(tmp, tmp2, tmp);
6963 } else {
6964 gen_sub_carry(tmp, tmp2, tmp);
6966 store_reg_bx(env, s, rd, tmp);
6967 break;
6968 case 0x08:
6969 if (set_cc) {
6970 tcg_gen_and_i32(tmp, tmp, tmp2);
6971 gen_logic_CC(tmp);
6973 tcg_temp_free_i32(tmp);
6974 break;
6975 case 0x09:
6976 if (set_cc) {
6977 tcg_gen_xor_i32(tmp, tmp, tmp2);
6978 gen_logic_CC(tmp);
6980 tcg_temp_free_i32(tmp);
6981 break;
6982 case 0x0a:
6983 if (set_cc) {
6984 gen_helper_sub_cc(tmp, tmp, tmp2);
6986 tcg_temp_free_i32(tmp);
6987 break;
6988 case 0x0b:
6989 if (set_cc) {
6990 gen_helper_add_cc(tmp, tmp, tmp2);
6992 tcg_temp_free_i32(tmp);
6993 break;
6994 case 0x0c:
6995 tcg_gen_or_i32(tmp, tmp, tmp2);
6996 if (logic_cc) {
6997 gen_logic_CC(tmp);
6999 store_reg_bx(env, s, rd, tmp);
7000 break;
7001 case 0x0d:
7002 if (logic_cc && rd == 15) {
7003 /* MOVS r15, ... is used for exception return. */
7004 if (IS_USER(s)) {
7005 goto illegal_op;
7007 gen_exception_return(s, tmp2);
7008 } else {
7009 if (logic_cc) {
7010 gen_logic_CC(tmp2);
7012 store_reg_bx(env, s, rd, tmp2);
7014 break;
7015 case 0x0e:
7016 tcg_gen_andc_i32(tmp, tmp, tmp2);
7017 if (logic_cc) {
7018 gen_logic_CC(tmp);
7020 store_reg_bx(env, s, rd, tmp);
7021 break;
7022 default:
7023 case 0x0f:
7024 tcg_gen_not_i32(tmp2, tmp2);
7025 if (logic_cc) {
7026 gen_logic_CC(tmp2);
7028 store_reg_bx(env, s, rd, tmp2);
7029 break;
7031 if (op1 != 0x0f && op1 != 0x0d) {
7032 tcg_temp_free_i32(tmp2);
7034 } else {
7035 /* other instructions */
7036 op1 = (insn >> 24) & 0xf;
7037 switch(op1) {
7038 case 0x0:
7039 case 0x1:
7040 /* multiplies, extra load/stores */
7041 sh = (insn >> 5) & 3;
7042 if (sh == 0) {
7043 if (op1 == 0x0) {
7044 rd = (insn >> 16) & 0xf;
7045 rn = (insn >> 12) & 0xf;
7046 rs = (insn >> 8) & 0xf;
7047 rm = (insn) & 0xf;
7048 op1 = (insn >> 20) & 0xf;
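/* op1 selects between the 32-bit MUL/MLA/MLS forms, UMAAL, and the
   64-bit UMULL/UMLAL/SMULL/SMLAL forms handled below. */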
7049 switch (op1) {
7050 case 0: case 1: case 2: case 3: case 6:
7051 /* 32 bit mul */
7052 tmp = load_reg(s, rs);
7053 tmp2 = load_reg(s, rm);
7054 tcg_gen_mul_i32(tmp, tmp, tmp2);
7055 tcg_temp_free_i32(tmp2);
7056 if (insn & (1 << 22)) {
7057 /* Subtract (mls) */
7058 ARCH(6T2);
7059 tmp2 = load_reg(s, rn);
7060 tcg_gen_sub_i32(tmp, tmp2, tmp);
7061 tcg_temp_free_i32(tmp2);
7062 } else if (insn & (1 << 21)) {
7063 /* Add */
7064 tmp2 = load_reg(s, rn);
7065 tcg_gen_add_i32(tmp, tmp, tmp2);
7066 tcg_temp_free_i32(tmp2);
7068 if (insn & (1 << 20))
7069 gen_logic_CC(tmp);
7070 store_reg(s, rd, tmp);
7071 break;
7072 case 4:
7073 /* 64 bit mul double accumulate (UMAAL) */
7074 ARCH(6);
7075 tmp = load_reg(s, rs);
7076 tmp2 = load_reg(s, rm);
7077 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7078 gen_addq_lo(s, tmp64, rn);
7079 gen_addq_lo(s, tmp64, rd);
7080 gen_storeq_reg(s, rn, rd, tmp64);
7081 tcg_temp_free_i64(tmp64);
7082 break;
7083 case 8: case 9: case 10: case 11:
7084 case 12: case 13: case 14: case 15:
7085 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7086 tmp = load_reg(s, rs);
7087 tmp2 = load_reg(s, rm);
7088 if (insn & (1 << 22)) {
7089 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7090 } else {
7091 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7093 if (insn & (1 << 21)) { /* mult accumulate */
7094 gen_addq(s, tmp64, rn, rd);
7096 if (insn & (1 << 20)) {
7097 gen_logicq_cc(tmp64);
7099 gen_storeq_reg(s, rn, rd, tmp64);
7100 tcg_temp_free_i64(tmp64);
7101 break;
7102 default:
7103 goto illegal_op;
7105 } else {
7106 rn = (insn >> 16) & 0xf;
7107 rd = (insn >> 12) & 0xf;
7108 if (insn & (1 << 23)) {
7109 /* load/store exclusive */
7110 op1 = (insn >> 21) & 0x3;
7111 if (op1)
7112 ARCH(6K);
7113 else
7114 ARCH(6);
7115 addr = tcg_temp_local_new_i32();
7116 load_reg_var(s, addr, rn);
7117 if (insn & (1 << 20)) {
7118 switch (op1) {
7119 case 0: /* ldrex */
7120 gen_load_exclusive(s, rd, 15, addr, 2);
7121 break;
7122 case 1: /* ldrexd */
7123 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7124 break;
7125 case 2: /* ldrexb */
7126 gen_load_exclusive(s, rd, 15, addr, 0);
7127 break;
7128 case 3: /* ldrexh */
7129 gen_load_exclusive(s, rd, 15, addr, 1);
7130 break;
7131 default:
7132 abort();
7134 } else {
7135 rm = insn & 0xf;
7136 switch (op1) {
7137 case 0: /* strex */
7138 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7139 break;
7140 case 1: /* strexd */
7141 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7142 break;
7143 case 2: /* strexb */
7144 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7145 break;
7146 case 3: /* strexh */
7147 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7148 break;
7149 default:
7150 abort();
7153 tcg_temp_free(addr);
7154 } else {
7155 /* SWP instruction */
7156 rm = (insn) & 0xf;
7158 /* ??? This is not really atomic. However we know
7159 we never have multiple CPUs running in parallel,
7160 so it is good enough. */
7161 addr = load_reg(s, rn);
7162 tmp = load_reg(s, rm);
7163 if (insn & (1 << 22)) {
7164 tmp2 = gen_ld8u(addr, IS_USER(s));
7165 gen_st8(tmp, addr, IS_USER(s));
7166 } else {
7167 tmp2 = gen_ld32(addr, IS_USER(s));
7168 gen_st32(tmp, addr, IS_USER(s));
7170 tcg_temp_free_i32(addr);
7171 store_reg(s, rd, tmp2);
7174 } else {
7175 int address_offset;
7176 int load;
7177 /* Misc load/store */
7178 rn = (insn >> 16) & 0xf;
7179 rd = (insn >> 12) & 0xf;
7180 addr = load_reg(s, rn);
7181 if (insn & (1 << 24))
7182 gen_add_datah_offset(s, insn, 0, addr);
7183 address_offset = 0;
7184 if (insn & (1 << 20)) {
7185 /* load */
7186 switch(sh) {
7187 case 1:
7188 tmp = gen_ld16u(addr, IS_USER(s));
7189 break;
7190 case 2:
7191 tmp = gen_ld8s(addr, IS_USER(s));
7192 break;
7193 default:
7194 case 3:
7195 tmp = gen_ld16s(addr, IS_USER(s));
7196 break;
7198 load = 1;
7199 } else if (sh & 2) {
7200 ARCH(5TE);
7201 /* doubleword */
7202 if (sh & 1) {
7203 /* store */
7204 tmp = load_reg(s, rd);
7205 gen_st32(tmp, addr, IS_USER(s));
7206 tcg_gen_addi_i32(addr, addr, 4);
7207 tmp = load_reg(s, rd + 1);
7208 gen_st32(tmp, addr, IS_USER(s));
7209 load = 0;
7210 } else {
7211 /* load */
7212 tmp = gen_ld32(addr, IS_USER(s));
7213 store_reg(s, rd, tmp);
7214 tcg_gen_addi_i32(addr, addr, 4);
7215 tmp = gen_ld32(addr, IS_USER(s));
7216 rd++;
7217 load = 1;
7219 address_offset = -4;
7220 } else {
7221 /* store */
7222 tmp = load_reg(s, rd);
7223 gen_st16(tmp, addr, IS_USER(s));
7224 load = 0;
7226 /* Perform base writeback before storing the loaded value to
7227 ensure correct behavior with overlapping index registers.
7228 ldrd with base writeback is undefined if the
7229 destination and index registers overlap. */
7230 if (!(insn & (1 << 24))) {
7231 gen_add_datah_offset(s, insn, address_offset, addr);
7232 store_reg(s, rn, addr);
7233 } else if (insn & (1 << 21)) {
7234 if (address_offset)
7235 tcg_gen_addi_i32(addr, addr, address_offset);
7236 store_reg(s, rn, addr);
7237 } else {
7238 tcg_temp_free_i32(addr);
7240 if (load) {
7241 /* Complete the load. */
7242 store_reg(s, rd, tmp);
7245 break;
7246 case 0x4:
7247 case 0x5:
7248 goto do_ldst;
7249 case 0x6:
7250 case 0x7:
7251 if (insn & (1 << 4)) {
7252 ARCH(6);
7253 /* Armv6 Media instructions. */
7254 rm = insn & 0xf;
7255 rn = (insn >> 16) & 0xf;
7256 rd = (insn >> 12) & 0xf;
7257 rs = (insn >> 8) & 0xf;
7258 switch ((insn >> 23) & 3) {
7259 case 0: /* Parallel add/subtract. */
7260 op1 = (insn >> 20) & 7;
7261 tmp = load_reg(s, rn);
7262 tmp2 = load_reg(s, rm);
7263 sh = (insn >> 5) & 7;
7264 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7265 goto illegal_op;
7266 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7267 tcg_temp_free_i32(tmp2);
7268 store_reg(s, rd, tmp);
7269 break;
7270 case 1:
7271 if ((insn & 0x00700020) == 0) {
7272 /* Halfword pack. */
7273 tmp = load_reg(s, rn);
7274 tmp2 = load_reg(s, rm);
7275 shift = (insn >> 7) & 0x1f;
7276 if (insn & (1 << 6)) {
7277 /* pkhtb */
7278 if (shift == 0)
7279 shift = 31;
7280 tcg_gen_sari_i32(tmp2, tmp2, shift);
7281 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7282 tcg_gen_ext16u_i32(tmp2, tmp2);
7283 } else {
7284 /* pkhbt */
7285 if (shift)
7286 tcg_gen_shli_i32(tmp2, tmp2, shift);
7287 tcg_gen_ext16u_i32(tmp, tmp);
7288 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7290 tcg_gen_or_i32(tmp, tmp, tmp2);
7291 tcg_temp_free_i32(tmp2);
7292 store_reg(s, rd, tmp);
7293 } else if ((insn & 0x00200020) == 0x00200000) {
7294 /* [us]sat */
7295 tmp = load_reg(s, rm);
7296 shift = (insn >> 7) & 0x1f;
7297 if (insn & (1 << 6)) {
7298 if (shift == 0)
7299 shift = 31;
7300 tcg_gen_sari_i32(tmp, tmp, shift);
7301 } else {
7302 tcg_gen_shli_i32(tmp, tmp, shift);
7304 sh = (insn >> 16) & 0x1f;
7305 tmp2 = tcg_const_i32(sh);
7306 if (insn & (1 << 22))
7307 gen_helper_usat(tmp, tmp, tmp2);
7308 else
7309 gen_helper_ssat(tmp, tmp, tmp2);
7310 tcg_temp_free_i32(tmp2);
7311 store_reg(s, rd, tmp);
7312 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7313 /* [us]sat16 */
7314 tmp = load_reg(s, rm);
7315 sh = (insn >> 16) & 0x1f;
7316 tmp2 = tcg_const_i32(sh);
7317 if (insn & (1 << 22))
7318 gen_helper_usat16(tmp, tmp, tmp2);
7319 else
7320 gen_helper_ssat16(tmp, tmp, tmp2);
7321 tcg_temp_free_i32(tmp2);
7322 store_reg(s, rd, tmp);
7323 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7324 /* Select bytes. */
7325 tmp = load_reg(s, rn);
7326 tmp2 = load_reg(s, rm);
7327 tmp3 = tcg_temp_new_i32();
7328 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7329 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7330 tcg_temp_free_i32(tmp3);
7331 tcg_temp_free_i32(tmp2);
7332 store_reg(s, rd, tmp);
7333 } else if ((insn & 0x000003e0) == 0x00000060) {
7334 tmp = load_reg(s, rm);
7335 shift = (insn >> 10) & 3;
7336 /* ??? In many cases it's not necessary to do a
7337 rotate; a shift is sufficient. */
7338 if (shift != 0)
7339 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7340 op1 = (insn >> 20) & 7;
7341 switch (op1) {
7342 case 0: gen_sxtb16(tmp); break;
7343 case 2: gen_sxtb(tmp); break;
7344 case 3: gen_sxth(tmp); break;
7345 case 4: gen_uxtb16(tmp); break;
7346 case 6: gen_uxtb(tmp); break;
7347 case 7: gen_uxth(tmp); break;
7348 default: goto illegal_op;
7350 if (rn != 15) {
7351 tmp2 = load_reg(s, rn);
7352 if ((op1 & 3) == 0) {
7353 gen_add16(tmp, tmp2);
7354 } else {
7355 tcg_gen_add_i32(tmp, tmp, tmp2);
7356 tcg_temp_free_i32(tmp2);
7359 store_reg(s, rd, tmp);
7360 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7361 /* rev */
7362 tmp = load_reg(s, rm);
7363 if (insn & (1 << 22)) {
7364 if (insn & (1 << 7)) {
7365 gen_revsh(tmp);
7366 } else {
7367 ARCH(6T2);
7368 gen_helper_rbit(tmp, tmp);
7370 } else {
7371 if (insn & (1 << 7))
7372 gen_rev16(tmp);
7373 else
7374 tcg_gen_bswap32_i32(tmp, tmp);
7376 store_reg(s, rd, tmp);
7377 } else {
7378 goto illegal_op;
7380 break;
7381 case 2: /* Multiplies (Type 3). */
7382 tmp = load_reg(s, rm);
7383 tmp2 = load_reg(s, rs);
7384 if (insn & (1 << 20)) {
7385 /* Signed multiply most significant [accumulate].
7386 (SMMUL, SMMLA, SMMLS) */
7387 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7389 if (rd != 15) {
7390 tmp = load_reg(s, rd);
7391 if (insn & (1 << 6)) {
7392 tmp64 = gen_subq_msw(tmp64, tmp);
7393 } else {
7394 tmp64 = gen_addq_msw(tmp64, tmp);
7397 if (insn & (1 << 5)) {
7398 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7400 tcg_gen_shri_i64(tmp64, tmp64, 32);
7401 tmp = tcg_temp_new_i32();
7402 tcg_gen_trunc_i64_i32(tmp, tmp64);
7403 tcg_temp_free_i64(tmp64);
7404 store_reg(s, rn, tmp);
7405 } else {
7406 if (insn & (1 << 5))
7407 gen_swap_half(tmp2);
7408 gen_smul_dual(tmp, tmp2);
7409 if (insn & (1 << 6)) {
7410 /* This subtraction cannot overflow. */
7411 tcg_gen_sub_i32(tmp, tmp, tmp2);
7412 } else {
7413 /* This addition cannot overflow 32 bits;
7414 * however it may overflow when considered as a signed
7415 * operation, in which case we must set the Q flag.
7417 gen_helper_add_setq(tmp, tmp, tmp2);
7419 tcg_temp_free_i32(tmp2);
7420 if (insn & (1 << 22)) {
7421 /* smlald, smlsld */
7422 tmp64 = tcg_temp_new_i64();
7423 tcg_gen_ext_i32_i64(tmp64, tmp);
7424 tcg_temp_free_i32(tmp);
7425 gen_addq(s, tmp64, rd, rn);
7426 gen_storeq_reg(s, rd, rn, tmp64);
7427 tcg_temp_free_i64(tmp64);
7428 } else {
7429 /* smuad, smusd, smlad, smlsd */
7430 if (rd != 15)
7432 tmp2 = load_reg(s, rd);
7433 gen_helper_add_setq(tmp, tmp, tmp2);
7434 tcg_temp_free_i32(tmp2);
7436 store_reg(s, rn, tmp);
7439 break;
7440 case 3:
7441 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
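/* op1 packs insn[22:20] and insn[7:5]; it selects between USAD8/USADA8,
   BFI/BFC, and the SBFX/UBFX bitfield extracts decoded below. */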
7442 switch (op1) {
7443 case 0: /* Unsigned sum of absolute differences. */
7444 ARCH(6);
7445 tmp = load_reg(s, rm);
7446 tmp2 = load_reg(s, rs);
7447 gen_helper_usad8(tmp, tmp, tmp2);
7448 tcg_temp_free_i32(tmp2);
7449 if (rd != 15) {
7450 tmp2 = load_reg(s, rd);
7451 tcg_gen_add_i32(tmp, tmp, tmp2);
7452 tcg_temp_free_i32(tmp2);
7454 store_reg(s, rn, tmp);
7455 break;
7456 case 0x20: case 0x24: case 0x28: case 0x2c:
7457 /* Bitfield insert/clear. */
7458 ARCH(6T2);
7459 shift = (insn >> 7) & 0x1f;
7460 i = (insn >> 16) & 0x1f;
7461 i = i + 1 - shift;
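/* i is now the field width in bits: msb - lsb + 1. */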
7462 if (rm == 15) {
7463 tmp = tcg_temp_new_i32();
7464 tcg_gen_movi_i32(tmp, 0);
7465 } else {
7466 tmp = load_reg(s, rm);
7468 if (i != 32) {
7469 tmp2 = load_reg(s, rd);
7470 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7471 tcg_temp_free_i32(tmp2);
7473 store_reg(s, rd, tmp);
7474 break;
7475 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7476 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7477 ARCH(6T2);
7478 tmp = load_reg(s, rm);
7479 shift = (insn >> 7) & 0x1f;
7480 i = ((insn >> 16) & 0x1f) + 1;
7481 if (shift + i > 32)
7482 goto illegal_op;
7483 if (i < 32) {
7484 if (op1 & 0x20) {
7485 gen_ubfx(tmp, shift, (1u << i) - 1);
7486 } else {
7487 gen_sbfx(tmp, shift, i);
7490 store_reg(s, rd, tmp);
7491 break;
7492 default:
7493 goto illegal_op;
7495 break;
7497 break;
7499 do_ldst:
7500 /* Check for undefined extension instructions
7501 * per the ARM Bible, i.e.:
7502 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7504 sh = (0xf << 20) | (0xf << 4);
7505 if (op1 == 0x7 && ((insn & sh) == sh))
7507 goto illegal_op;
7509 /* load/store byte/word */
7510 rn = (insn >> 16) & 0xf;
7511 rd = (insn >> 12) & 0xf;
7512 tmp2 = load_reg(s, rn);
7513 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7514 if (insn & (1 << 24))
7515 gen_add_data_offset(s, insn, tmp2);
7516 if (insn & (1 << 20)) {
7517 /* load */
7518 if (insn & (1 << 22)) {
7519 tmp = gen_ld8u(tmp2, i);
7520 } else {
7521 tmp = gen_ld32(tmp2, i);
7523 } else {
7524 /* store */
7525 tmp = load_reg(s, rd);
7526 if (insn & (1 << 22))
7527 gen_st8(tmp, tmp2, i);
7528 else
7529 gen_st32(tmp, tmp2, i);
7531 if (!(insn & (1 << 24))) {
7532 gen_add_data_offset(s, insn, tmp2);
7533 store_reg(s, rn, tmp2);
7534 } else if (insn & (1 << 21)) {
7535 store_reg(s, rn, tmp2);
7536 } else {
7537 tcg_temp_free_i32(tmp2);
7539 if (insn & (1 << 20)) {
7540 /* Complete the load. */
7541 store_reg_from_load(env, s, rd, tmp);
7543 break;
7544 case 0x08:
7545 case 0x09:
7547 int j, n, user, loaded_base;
7548 TCGv loaded_var;
7549 /* load/store multiple words */
7550 /* XXX: store correct base if write back */
7551 user = 0;
7552 if (insn & (1 << 22)) {
7553 if (IS_USER(s))
7554 goto illegal_op; /* only usable in supervisor mode */
7556 if ((insn & (1 << 15)) == 0)
7557 user = 1;
7559 rn = (insn >> 16) & 0xf;
7560 addr = load_reg(s, rn);
7562 /* compute total size */
7563 loaded_base = 0;
7564 TCGV_UNUSED(loaded_var);
7565 n = 0;
7566 for(i=0;i<16;i++) {
7567 if (insn & (1 << i))
7568 n++;
7570 /* XXX: test invalid n == 0 case ? */
7571 if (insn & (1 << 23)) {
7572 if (insn & (1 << 24)) {
7573 /* pre increment */
7574 tcg_gen_addi_i32(addr, addr, 4);
7575 } else {
7576 /* post increment */
7578 } else {
7579 if (insn & (1 << 24)) {
7580 /* pre decrement */
7581 tcg_gen_addi_i32(addr, addr, -(n * 4));
7582 } else {
7583 /* post decrement */
7584 if (n != 1)
7585 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
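/* After the adjustment above, the transfers below always proceed with
   ascending addresses, lowest-numbered register at the lowest address. */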
7588 j = 0;
7589 for(i=0;i<16;i++) {
7590 if (insn & (1 << i)) {
7591 if (insn & (1 << 20)) {
7592 /* load */
7593 tmp = gen_ld32(addr, IS_USER(s));
7594 if (user) {
7595 tmp2 = tcg_const_i32(i);
7596 gen_helper_set_user_reg(tmp2, tmp);
7597 tcg_temp_free_i32(tmp2);
7598 tcg_temp_free_i32(tmp);
7599 } else if (i == rn) {
7600 loaded_var = tmp;
7601 loaded_base = 1;
7602 } else {
7603 store_reg_from_load(env, s, i, tmp);
7605 } else {
7606 /* store */
7607 if (i == 15) {
7608 /* special case: r15 = PC + 8 */
7609 val = (long)s->pc + 4;
7610 tmp = tcg_temp_new_i32();
7611 tcg_gen_movi_i32(tmp, val);
7612 } else if (user) {
7613 tmp = tcg_temp_new_i32();
7614 tmp2 = tcg_const_i32(i);
7615 gen_helper_get_user_reg(tmp, tmp2);
7616 tcg_temp_free_i32(tmp2);
7617 } else {
7618 tmp = load_reg(s, i);
7620 gen_st32(tmp, addr, IS_USER(s));
7622 j++;
7623 /* no need to add after the last transfer */
7624 if (j != n)
7625 tcg_gen_addi_i32(addr, addr, 4);
7628 if (insn & (1 << 21)) {
7629 /* write back */
7630 if (insn & (1 << 23)) {
7631 if (insn & (1 << 24)) {
7632 /* pre increment */
7633 } else {
7634 /* post increment */
7635 tcg_gen_addi_i32(addr, addr, 4);
7637 } else {
7638 if (insn & (1 << 24)) {
7639 /* pre decrement */
7640 if (n != 1)
7641 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7642 } else {
7643 /* post decrement */
7644 tcg_gen_addi_i32(addr, addr, -(n * 4));
7647 store_reg(s, rn, addr);
7648 } else {
7649 tcg_temp_free_i32(addr);
7651 if (loaded_base) {
7652 store_reg(s, rn, loaded_var);
7654 if ((insn & (1 << 22)) && !user) {
7655 /* Restore CPSR from SPSR. */
7656 tmp = load_cpu_field(spsr);
7657 gen_set_cpsr(tmp, 0xffffffff);
7658 tcg_temp_free_i32(tmp);
7659 s->is_jmp = DISAS_UPDATE;
7662 break;
7663 case 0xa:
7664 case 0xb:
7666 int32_t offset;
7668 /* branch (and link) */
7669 val = (int32_t)s->pc;
7670 if (insn & (1 << 24)) {
7671 tmp = tcg_temp_new_i32();
7672 tcg_gen_movi_i32(tmp, val);
7673 store_reg(s, 14, tmp);
7675 offset = (((int32_t)insn << 8) >> 8);
7676 val += (offset << 2) + 4;
7677 gen_jmp(s, val);
7679 break;
7680 case 0xc:
7681 case 0xd:
7682 case 0xe:
7683 /* Coprocessor. */
7684 if (disas_coproc_insn(env, s, insn))
7685 goto illegal_op;
7686 break;
7687 case 0xf:
7688 /* swi */
7689 gen_set_pc_im(s->pc);
7690 s->is_jmp = DISAS_SWI;
7691 break;
7692 default:
7693 illegal_op:
7694 gen_exception_insn(s, 4, EXCP_UDEF);
7695 break;
7700 /* Return true if this is a Thumb-2 logical op. */
7701 static int
7702 thumb2_logic_op(int op)
7704 return (op < 8);
7707 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7708 then set condition code flags based on the result of the operation.
7709 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7710 to the high bit of T1.
7711 Returns zero if the opcode is valid. */
7713 static int
7714 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7716 int logic_cc;
7718 logic_cc = 0;
7719 switch (op) {
7720 case 0: /* and */
7721 tcg_gen_and_i32(t0, t0, t1);
7722 logic_cc = conds;
7723 break;
7724 case 1: /* bic */
7725 tcg_gen_andc_i32(t0, t0, t1);
7726 logic_cc = conds;
7727 break;
7728 case 2: /* orr */
7729 tcg_gen_or_i32(t0, t0, t1);
7730 logic_cc = conds;
7731 break;
7732 case 3: /* orn */
7733 tcg_gen_orc_i32(t0, t0, t1);
7734 logic_cc = conds;
7735 break;
7736 case 4: /* eor */
7737 tcg_gen_xor_i32(t0, t0, t1);
7738 logic_cc = conds;
7739 break;
7740 case 8: /* add */
7741 if (conds)
7742 gen_helper_add_cc(t0, t0, t1);
7743 else
7744 tcg_gen_add_i32(t0, t0, t1);
7745 break;
7746 case 10: /* adc */
7747 if (conds)
7748 gen_helper_adc_cc(t0, t0, t1);
7749 else
7750 gen_adc(t0, t1);
7751 break;
7752 case 11: /* sbc */
7753 if (conds)
7754 gen_helper_sbc_cc(t0, t0, t1);
7755 else
7756 gen_sub_carry(t0, t0, t1);
7757 break;
7758 case 13: /* sub */
7759 if (conds)
7760 gen_helper_sub_cc(t0, t0, t1);
7761 else
7762 tcg_gen_sub_i32(t0, t0, t1);
7763 break;
7764 case 14: /* rsb */
7765 if (conds)
7766 gen_helper_sub_cc(t0, t1, t0);
7767 else
7768 tcg_gen_sub_i32(t0, t1, t0);
7769 break;
7770 default: /* 5, 6, 7, 9, 12, 15. */
7771 return 1;
7773 if (logic_cc) {
7774 gen_logic_CC(t0);
7775 if (shifter_out)
7776 gen_set_CF_bit31(t1);
7778 return 0;
7781 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7782 is not legal. */
7783 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7785 uint32_t insn, imm, shift, offset;
7786 uint32_t rd, rn, rm, rs;
7787 TCGv tmp;
7788 TCGv tmp2;
7789 TCGv tmp3;
7790 TCGv addr;
7791 TCGv_i64 tmp64;
7792 int op;
7793 int shiftop;
7794 int conds;
7795 int logic_cc;
7797 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7798 || arm_feature (env, ARM_FEATURE_M))) {
7799 /* Thumb-1 cores may need to treat bl and blx as a pair of
7800 16-bit instructions to get correct prefetch abort behavior. */
7801 insn = insn_hw1;
7802 if ((insn & (1 << 12)) == 0) {
7803 ARCH(5);
7804 /* Second half of blx. */
7805 offset = ((insn & 0x7ff) << 1);
7806 tmp = load_reg(s, 14);
7807 tcg_gen_addi_i32(tmp, tmp, offset);
7808 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7810 tmp2 = tcg_temp_new_i32();
7811 tcg_gen_movi_i32(tmp2, s->pc | 1);
7812 store_reg(s, 14, tmp2);
7813 gen_bx(s, tmp);
7814 return 0;
7816 if (insn & (1 << 11)) {
7817 /* Second half of bl. */
7818 offset = ((insn & 0x7ff) << 1) | 1;
7819 tmp = load_reg(s, 14);
7820 tcg_gen_addi_i32(tmp, tmp, offset);
7822 tmp2 = tcg_temp_new_i32();
7823 tcg_gen_movi_i32(tmp2, s->pc | 1);
7824 store_reg(s, 14, tmp2);
7825 gen_bx(s, tmp);
7826 return 0;
7828 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7829 /* Instruction spans a page boundary. Implement it as two
7830 16-bit instructions in case the second half causes a
7831 prefetch abort. */
7832 offset = ((int32_t)insn << 21) >> 9;
7833 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7834 return 0;
7836 /* Fall through to 32-bit decode. */
7839 insn = lduw_code(s->pc);
7840 s->pc += 2;
7841 insn |= (uint32_t)insn_hw1 << 16;
7843 if ((insn & 0xf800e800) != 0xf000e800) {
7844 ARCH(6T2);
7847 rn = (insn >> 16) & 0xf;
7848 rs = (insn >> 12) & 0xf;
7849 rd = (insn >> 8) & 0xf;
7850 rm = insn & 0xf;
7851 switch ((insn >> 25) & 0xf) {
7852 case 0: case 1: case 2: case 3:
7853 /* 16-bit instructions. Should never happen. */
7854 abort();
7855 case 4:
7856 if (insn & (1 << 22)) {
7857 /* Other load/store, table branch. */
7858 if (insn & 0x01200000) {
7859 /* Load/store doubleword. */
7860 if (rn == 15) {
7861 addr = tcg_temp_new_i32();
7862 tcg_gen_movi_i32(addr, s->pc & ~3);
7863 } else {
7864 addr = load_reg(s, rn);
7866 offset = (insn & 0xff) * 4;
7867 if ((insn & (1 << 23)) == 0)
7868 offset = -offset;
7869 if (insn & (1 << 24)) {
7870 tcg_gen_addi_i32(addr, addr, offset);
7871 offset = 0;
7873 if (insn & (1 << 20)) {
7874 /* ldrd */
7875 tmp = gen_ld32(addr, IS_USER(s));
7876 store_reg(s, rs, tmp);
7877 tcg_gen_addi_i32(addr, addr, 4);
7878 tmp = gen_ld32(addr, IS_USER(s));
7879 store_reg(s, rd, tmp);
7880 } else {
7881 /* strd */
7882 tmp = load_reg(s, rs);
7883 gen_st32(tmp, addr, IS_USER(s));
7884 tcg_gen_addi_i32(addr, addr, 4);
7885 tmp = load_reg(s, rd);
7886 gen_st32(tmp, addr, IS_USER(s));
7888 if (insn & (1 << 21)) {
7889 /* Base writeback. */
7890 if (rn == 15)
7891 goto illegal_op;
7892 tcg_gen_addi_i32(addr, addr, offset - 4);
7893 store_reg(s, rn, addr);
7894 } else {
7895 tcg_temp_free_i32(addr);
7897 } else if ((insn & (1 << 23)) == 0) {
7898 /* Load/store exclusive word. */
7899 addr = tcg_temp_local_new();
7900 load_reg_var(s, addr, rn);
7901 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7902 if (insn & (1 << 20)) {
7903 gen_load_exclusive(s, rs, 15, addr, 2);
7904 } else {
7905 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7907 tcg_temp_free(addr);
7908 } else if ((insn & (1 << 6)) == 0) {
7909 /* Table Branch. */
7910 if (rn == 15) {
7911 addr = tcg_temp_new_i32();
7912 tcg_gen_movi_i32(addr, s->pc);
7913 } else {
7914 addr = load_reg(s, rn);
7916 tmp = load_reg(s, rm);
7917 tcg_gen_add_i32(addr, addr, tmp);
7918 if (insn & (1 << 4)) {
7919 /* tbh */
7920 tcg_gen_add_i32(addr, addr, tmp);
7921 tcg_temp_free_i32(tmp);
7922 tmp = gen_ld16u(addr, IS_USER(s));
7923 } else { /* tbb */
7924 tcg_temp_free_i32(tmp);
7925 tmp = gen_ld8u(addr, IS_USER(s));
7927 tcg_temp_free_i32(addr);
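/* The loaded table entry is a halfword count; double it and add the PC
   to form the branch target. */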
7928 tcg_gen_shli_i32(tmp, tmp, 1);
7929 tcg_gen_addi_i32(tmp, tmp, s->pc);
7930 store_reg(s, 15, tmp);
7931 } else {
7932 /* Load/store exclusive byte/halfword/doubleword. */
7933 ARCH(7);
7934 op = (insn >> 4) & 0x3;
7935 if (op == 2) {
7936 goto illegal_op;
7938 addr = tcg_temp_local_new();
7939 load_reg_var(s, addr, rn);
7940 if (insn & (1 << 20)) {
7941 gen_load_exclusive(s, rs, rd, addr, op);
7942 } else {
7943 gen_store_exclusive(s, rm, rs, rd, addr, op);
7945 tcg_temp_free(addr);
7947 } else {
7948 /* Load/store multiple, RFE, SRS. */
7949 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7950 /* Not available in user mode. */
7951 if (IS_USER(s))
7952 goto illegal_op;
7953 if (insn & (1 << 20)) {
7954 /* rfe */
7955 addr = load_reg(s, rn);
7956 if ((insn & (1 << 24)) == 0)
7957 tcg_gen_addi_i32(addr, addr, -8);
7958 /* Load PC into tmp and CPSR into tmp2. */
7959 tmp = gen_ld32(addr, 0);
7960 tcg_gen_addi_i32(addr, addr, 4);
7961 tmp2 = gen_ld32(addr, 0);
7962 if (insn & (1 << 21)) {
7963 /* Base writeback. */
7964 if (insn & (1 << 24)) {
7965 tcg_gen_addi_i32(addr, addr, 4);
7966 } else {
7967 tcg_gen_addi_i32(addr, addr, -4);
7969 store_reg(s, rn, addr);
7970 } else {
7971 tcg_temp_free_i32(addr);
7973 gen_rfe(s, tmp, tmp2);
7974 } else {
7975 /* srs */
7976 op = (insn & 0x1f);
7977 addr = tcg_temp_new_i32();
7978 tmp = tcg_const_i32(op);
7979 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7980 tcg_temp_free_i32(tmp);
7981 if ((insn & (1 << 24)) == 0) {
7982 tcg_gen_addi_i32(addr, addr, -8);
7984 tmp = load_reg(s, 14);
7985 gen_st32(tmp, addr, 0);
7986 tcg_gen_addi_i32(addr, addr, 4);
7987 tmp = tcg_temp_new_i32();
7988 gen_helper_cpsr_read(tmp);
7989 gen_st32(tmp, addr, 0);
7990 if (insn & (1 << 21)) {
7991 if ((insn & (1 << 24)) == 0) {
7992 tcg_gen_addi_i32(addr, addr, -4);
7993 } else {
7994 tcg_gen_addi_i32(addr, addr, 4);
7996 tmp = tcg_const_i32(op);
7997 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7998 tcg_temp_free_i32(tmp);
7999 } else {
8000 tcg_temp_free_i32(addr);
8003 } else {
8004 int i;
8005 /* Load/store multiple. */
8006 addr = load_reg(s, rn);
8007 offset = 0;
8008 for (i = 0; i < 16; i++) {
8009 if (insn & (1 << i))
8010 offset += 4;
8012 if (insn & (1 << 24)) {
8013 tcg_gen_addi_i32(addr, addr, -offset);
8016 for (i = 0; i < 16; i++) {
8017 if ((insn & (1 << i)) == 0)
8018 continue;
8019 if (insn & (1 << 20)) {
8020 /* Load. */
8021 tmp = gen_ld32(addr, IS_USER(s));
8022 if (i == 15) {
8023 gen_bx(s, tmp);
8024 } else {
8025 store_reg(s, i, tmp);
8027 } else {
8028 /* Store. */
8029 tmp = load_reg(s, i);
8030 gen_st32(tmp, addr, IS_USER(s));
8032 tcg_gen_addi_i32(addr, addr, 4);
8034 if (insn & (1 << 21)) {
8035 /* Base register writeback. */
8036 if (insn & (1 << 24)) {
8037 tcg_gen_addi_i32(addr, addr, -offset);
8039 /* Fault if writeback register is in register list. */
8040 if (insn & (1 << rn))
8041 goto illegal_op;
8042 store_reg(s, rn, addr);
8043 } else {
8044 tcg_temp_free_i32(addr);
8048 break;
8049 case 5:
8051 op = (insn >> 21) & 0xf;
8052 if (op == 6) {
8053 /* Halfword pack. */
8054 tmp = load_reg(s, rn);
8055 tmp2 = load_reg(s, rm);
8056 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8057 if (insn & (1 << 5)) {
8058 /* pkhtb */
8059 if (shift == 0)
8060 shift = 31;
8061 tcg_gen_sari_i32(tmp2, tmp2, shift);
8062 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8063 tcg_gen_ext16u_i32(tmp2, tmp2);
8064 } else {
8065 /* pkhbt */
8066 if (shift)
8067 tcg_gen_shli_i32(tmp2, tmp2, shift);
8068 tcg_gen_ext16u_i32(tmp, tmp);
8069 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8071 tcg_gen_or_i32(tmp, tmp, tmp2);
8072 tcg_temp_free_i32(tmp2);
8073 store_reg(s, rd, tmp);
8074 } else {
8075 /* Data processing register constant shift. */
8076 if (rn == 15) {
8077 tmp = tcg_temp_new_i32();
8078 tcg_gen_movi_i32(tmp, 0);
8079 } else {
8080 tmp = load_reg(s, rn);
8082 tmp2 = load_reg(s, rm);
8084 shiftop = (insn >> 4) & 3;
8085 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8086 conds = (insn & (1 << 20)) != 0;
8087 logic_cc = (conds && thumb2_logic_op(op));
8088 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8089 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8090 goto illegal_op;
8091 tcg_temp_free_i32(tmp2);
8092 if (rd != 15) {
8093 store_reg(s, rd, tmp);
8094 } else {
8095 tcg_temp_free_i32(tmp);
8098 break;
8099 case 13: /* Misc data processing. */
8100 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8101 if (op < 4 && (insn & 0xf000) != 0xf000)
8102 goto illegal_op;
8103 switch (op) {
8104 case 0: /* Register controlled shift. */
8105 tmp = load_reg(s, rn);
8106 tmp2 = load_reg(s, rm);
8107 if ((insn & 0x70) != 0)
8108 goto illegal_op;
8109 op = (insn >> 21) & 3;
8110 logic_cc = (insn & (1 << 20)) != 0;
8111 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8112 if (logic_cc)
8113 gen_logic_CC(tmp);
8114 store_reg_bx(env, s, rd, tmp);
8115 break;
8116 case 1: /* Sign/zero extend. */
8117 tmp = load_reg(s, rm);
8118 shift = (insn >> 4) & 3;
8119 /* ??? In many cases it's not necessary to do a
8120 rotate; a shift is sufficient. */
8121 if (shift != 0)
8122 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8123 op = (insn >> 20) & 7;
8124 switch (op) {
8125 case 0: gen_sxth(tmp); break;
8126 case 1: gen_uxth(tmp); break;
8127 case 2: gen_sxtb16(tmp); break;
8128 case 3: gen_uxtb16(tmp); break;
8129 case 4: gen_sxtb(tmp); break;
8130 case 5: gen_uxtb(tmp); break;
8131 default: goto illegal_op;
8133 if (rn != 15) {
8134 tmp2 = load_reg(s, rn);
8135 if ((op >> 1) == 1) {
8136 gen_add16(tmp, tmp2);
8137 } else {
8138 tcg_gen_add_i32(tmp, tmp, tmp2);
8139 tcg_temp_free_i32(tmp2);
8142 store_reg(s, rd, tmp);
8143 break;
8144 case 2: /* SIMD add/subtract. */
8145 op = (insn >> 20) & 7;
8146 shift = (insn >> 4) & 7;
8147 if ((op & 3) == 3 || (shift & 3) == 3)
8148 goto illegal_op;
8149 tmp = load_reg(s, rn);
8150 tmp2 = load_reg(s, rm);
8151 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8152 tcg_temp_free_i32(tmp2);
8153 store_reg(s, rd, tmp);
8154 break;
8155 case 3: /* Other data processing. */
8156 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8157 if (op < 4) {
8158 /* Saturating add/subtract. */
8159 tmp = load_reg(s, rn);
8160 tmp2 = load_reg(s, rm);
8161 if (op & 1)
8162 gen_helper_double_saturate(tmp, tmp);
8163 if (op & 2)
8164 gen_helper_sub_saturate(tmp, tmp2, tmp);
8165 else
8166 gen_helper_add_saturate(tmp, tmp, tmp2);
8167 tcg_temp_free_i32(tmp2);
8168 } else {
8169 tmp = load_reg(s, rn);
8170 switch (op) {
8171 case 0x0a: /* rbit */
8172 gen_helper_rbit(tmp, tmp);
8173 break;
8174 case 0x08: /* rev */
8175 tcg_gen_bswap32_i32(tmp, tmp);
8176 break;
8177 case 0x09: /* rev16 */
8178 gen_rev16(tmp);
8179 break;
8180 case 0x0b: /* revsh */
8181 gen_revsh(tmp);
8182 break;
8183 case 0x10: /* sel */
8184 tmp2 = load_reg(s, rm);
8185 tmp3 = tcg_temp_new_i32();
8186 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8187 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8188 tcg_temp_free_i32(tmp3);
8189 tcg_temp_free_i32(tmp2);
8190 break;
8191 case 0x18: /* clz */
8192 gen_helper_clz(tmp, tmp);
8193 break;
8194 default:
8195 goto illegal_op;
8198 store_reg(s, rd, tmp);
8199 break;
8200 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8201 op = (insn >> 4) & 0xf;
8202 tmp = load_reg(s, rn);
8203 tmp2 = load_reg(s, rm);
8204 switch ((insn >> 20) & 7) {
8205 case 0: /* 32 x 32 -> 32 */
8206 tcg_gen_mul_i32(tmp, tmp, tmp2);
8207 tcg_temp_free_i32(tmp2);
8208 if (rs != 15) {
8209 tmp2 = load_reg(s, rs);
8210 if (op)
8211 tcg_gen_sub_i32(tmp, tmp2, tmp);
8212 else
8213 tcg_gen_add_i32(tmp, tmp, tmp2);
8214 tcg_temp_free_i32(tmp2);
8216 break;
8217 case 1: /* 16 x 16 -> 32 */
8218 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8219 tcg_temp_free_i32(tmp2);
8220 if (rs != 15) {
8221 tmp2 = load_reg(s, rs);
8222 gen_helper_add_setq(tmp, tmp, tmp2);
8223 tcg_temp_free_i32(tmp2);
8225 break;
8226 case 2: /* Dual multiply add. */
8227 case 4: /* Dual multiply subtract. */
8228 if (op)
8229 gen_swap_half(tmp2);
8230 gen_smul_dual(tmp, tmp2);
8231 if (insn & (1 << 22)) {
8232 /* This subtraction cannot overflow. */
8233 tcg_gen_sub_i32(tmp, tmp, tmp2);
8234 } else {
8235 /* This addition cannot overflow 32 bits;
8236 * however it may overflow when considered as a signed
8237 * operation, in which case we must set the Q flag.
8239 gen_helper_add_setq(tmp, tmp, tmp2);
8241 tcg_temp_free_i32(tmp2);
8242 if (rs != 15)
8244 tmp2 = load_reg(s, rs);
8245 gen_helper_add_setq(tmp, tmp, tmp2);
8246 tcg_temp_free_i32(tmp2);
8248 break;
8249 case 3: /* 32 * 16 -> 32msb */
8250 if (op)
8251 tcg_gen_sari_i32(tmp2, tmp2, 16);
8252 else
8253 gen_sxth(tmp2);
8254 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8255 tcg_gen_shri_i64(tmp64, tmp64, 16);
8256 tmp = tcg_temp_new_i32();
8257 tcg_gen_trunc_i64_i32(tmp, tmp64);
8258 tcg_temp_free_i64(tmp64);
8259 if (rs != 15)
8261 tmp2 = load_reg(s, rs);
8262 gen_helper_add_setq(tmp, tmp, tmp2);
8263 tcg_temp_free_i32(tmp2);
8265 break;
8266 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8267 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8268 if (rs != 15) {
8269 tmp = load_reg(s, rs);
8270 if (insn & (1 << 20)) {
8271 tmp64 = gen_addq_msw(tmp64, tmp);
8272 } else {
8273 tmp64 = gen_subq_msw(tmp64, tmp);
8276 if (insn & (1 << 4)) {
8277 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8279 tcg_gen_shri_i64(tmp64, tmp64, 32);
8280 tmp = tcg_temp_new_i32();
8281 tcg_gen_trunc_i64_i32(tmp, tmp64);
8282 tcg_temp_free_i64(tmp64);
8283 break;
8284 case 7: /* Unsigned sum of absolute differences. */
8285 gen_helper_usad8(tmp, tmp, tmp2);
8286 tcg_temp_free_i32(tmp2);
8287 if (rs != 15) {
8288 tmp2 = load_reg(s, rs);
8289 tcg_gen_add_i32(tmp, tmp, tmp2);
8290 tcg_temp_free_i32(tmp2);
8292 break;
8294 store_reg(s, rd, tmp);
8295 break;
8296 case 6: case 7: /* 64-bit multiply, Divide. */
8297 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
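/* op packs insn[22:20] into bits [6:4] and insn[7:4] into bits [3:0]; it
   distinguishes SDIV/UDIV, the dual multiply accumulate long forms, and the
   64-bit multiply(-accumulate) forms handled below. */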
8298 tmp = load_reg(s, rn);
8299 tmp2 = load_reg(s, rm);
8300 if ((op & 0x50) == 0x10) {
8301 /* sdiv, udiv */
8302 if (!arm_feature(env, ARM_FEATURE_DIV))
8303 goto illegal_op;
8304 if (op & 0x20)
8305 gen_helper_udiv(tmp, tmp, tmp2);
8306 else
8307 gen_helper_sdiv(tmp, tmp, tmp2);
8308 tcg_temp_free_i32(tmp2);
8309 store_reg(s, rd, tmp);
8310 } else if ((op & 0xe) == 0xc) {
8311 /* Dual multiply accumulate long. */
8312 if (op & 1)
8313 gen_swap_half(tmp2);
8314 gen_smul_dual(tmp, tmp2);
8315 if (op & 0x10) {
8316 tcg_gen_sub_i32(tmp, tmp, tmp2);
8317 } else {
8318 tcg_gen_add_i32(tmp, tmp, tmp2);
8320 tcg_temp_free_i32(tmp2);
8321 /* BUGFIX */
8322 tmp64 = tcg_temp_new_i64();
8323 tcg_gen_ext_i32_i64(tmp64, tmp);
8324 tcg_temp_free_i32(tmp);
8325 gen_addq(s, tmp64, rs, rd);
8326 gen_storeq_reg(s, rs, rd, tmp64);
8327 tcg_temp_free_i64(tmp64);
8328 } else {
8329 if (op & 0x20) {
8330 /* Unsigned 64-bit multiply */
8331 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8332 } else {
8333 if (op & 8) {
8334 /* smlalxy */
8335 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8336 tcg_temp_free_i32(tmp2);
8337 tmp64 = tcg_temp_new_i64();
8338 tcg_gen_ext_i32_i64(tmp64, tmp);
8339 tcg_temp_free_i32(tmp);
8340 } else {
8341 /* Signed 64-bit multiply */
8342 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8345 if (op & 4) {
8346 /* umaal */
8347 gen_addq_lo(s, tmp64, rs);
8348 gen_addq_lo(s, tmp64, rd);
8349 } else if (op & 0x40) {
8350 /* 64-bit accumulate. */
8351 gen_addq(s, tmp64, rs, rd);
8353 gen_storeq_reg(s, rs, rd, tmp64);
8354 tcg_temp_free_i64(tmp64);
8356 break;
8358 break;
8359 case 6: case 7: case 14: case 15:
8360 /* Coprocessor. */
8361 if (((insn >> 24) & 3) == 3) {
8362 /* Translate into the equivalent ARM encoding. */
8363 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8364 if (disas_neon_data_insn(env, s, insn))
8365 goto illegal_op;
8366 } else {
8367 if (insn & (1 << 28))
8368 goto illegal_op;
8369 if (disas_coproc_insn (env, s, insn))
8370 goto illegal_op;
8372 break;
8373 case 8: case 9: case 10: case 11:
8374 if (insn & (1 << 15)) {
8375 /* Branches, misc control. */
8376 if (insn & 0x5000) {
8377 /* Unconditional branch. */
8378 /* signextend(hw1[10:0]) -> offset[:12]. */
8379 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8380 /* hw1[10:0] -> offset[11:1]. */
8381 offset |= (insn & 0x7ff) << 1;
8382 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8383 offset[24:22] already have the same value because of the
8384 sign extension above. */
8385 offset ^= ((~insn) & (1 << 13)) << 10;
8386 offset ^= ((~insn) & (1 << 11)) << 11;
8388 if (insn & (1 << 14)) {
8389 /* Branch and link. */
8390 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8393 offset += s->pc;
8394 if (insn & (1 << 12)) {
8395 /* b/bl */
8396 gen_jmp(s, offset);
8397 } else {
8398 /* blx */
8399 offset &= ~(uint32_t)2;
8400 /* thumb2 bx, no need to check */
8401 gen_bx_im(s, offset);
8403 } else if (((insn >> 23) & 7) == 7) {
8404 /* Misc control */
8405 if (insn & (1 << 13))
8406 goto illegal_op;
8408 if (insn & (1 << 26)) {
8409 /* Secure monitor call (v6Z) */
8410 goto illegal_op; /* not implemented. */
8411 } else {
8412 op = (insn >> 20) & 7;
8413 switch (op) {
8414 case 0: /* msr cpsr. */
8415 if (IS_M(env)) {
8416 tmp = load_reg(s, rn);
8417 addr = tcg_const_i32(insn & 0xff);
8418 gen_helper_v7m_msr(cpu_env, addr, tmp);
8419 tcg_temp_free_i32(addr);
8420 tcg_temp_free_i32(tmp);
8421 gen_lookup_tb(s);
8422 break;
8424 /* fall through */
8425 case 1: /* msr spsr. */
8426 if (IS_M(env))
8427 goto illegal_op;
8428 tmp = load_reg(s, rn);
8429 if (gen_set_psr(s,
8430 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8431 op == 1, tmp))
8432 goto illegal_op;
8433 break;
8434 case 2: /* cps, nop-hint. */
8435 if (((insn >> 8) & 7) == 0) {
8436 gen_nop_hint(s, insn & 0xff);
8438 /* Implemented as NOP in user mode. */
8439 if (IS_USER(s))
8440 break;
8441 offset = 0;
8442 imm = 0;
8443 if (insn & (1 << 10)) {
8444 if (insn & (1 << 7))
8445 offset |= CPSR_A;
8446 if (insn & (1 << 6))
8447 offset |= CPSR_I;
8448 if (insn & (1 << 5))
8449 offset |= CPSR_F;
8450 if (insn & (1 << 9))
8451 imm = CPSR_A | CPSR_I | CPSR_F;
8453 if (insn & (1 << 8)) {
8454 offset |= 0x1f;
8455 imm |= (insn & 0x1f);
8457 if (offset) {
8458 gen_set_psr_im(s, offset, 0, imm);
8460 break;
8461 case 3: /* Special control operations. */
8462 ARCH(7);
8463 op = (insn >> 4) & 0xf;
8464 switch (op) {
8465 case 2: /* clrex */
8466 gen_clrex(s);
8467 break;
8468 case 4: /* dsb */
8469 case 5: /* dmb */
8470 case 6: /* isb */
8471 /* These execute as NOPs. */
8472 break;
8473 default:
8474 goto illegal_op;
8476 break;
8477 case 4: /* bxj */
8478 /* Trivial implementation equivalent to bx. */
8479 tmp = load_reg(s, rn);
8480 gen_bx(s, tmp);
8481 break;
8482 case 5: /* Exception return. */
8483 if (IS_USER(s)) {
8484 goto illegal_op;
8486 if (rn != 14 || rd != 15) {
8487 goto illegal_op;
8489 tmp = load_reg(s, rn);
8490 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8491 gen_exception_return(s, tmp);
8492 break;
8493 case 6: /* mrs cpsr. */
8494 tmp = tcg_temp_new_i32();
8495 if (IS_M(env)) {
8496 addr = tcg_const_i32(insn & 0xff);
8497 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8498 tcg_temp_free_i32(addr);
8499 } else {
8500 gen_helper_cpsr_read(tmp);
8502 store_reg(s, rd, tmp);
8503 break;
8504 case 7: /* mrs spsr. */
8505 /* Not accessible in user mode. */
8506 if (IS_USER(s) || IS_M(env))
8507 goto illegal_op;
8508 tmp = load_cpu_field(spsr);
8509 store_reg(s, rd, tmp);
8510 break;
8513 } else {
8514 /* Conditional branch. */
8515 op = (insn >> 22) & 0xf;
8516 /* Generate a conditional jump to the next instruction. */
8517 s->condlabel = gen_new_label();
8518 gen_test_cc(op ^ 1, s->condlabel);
8519 s->condjmp = 1;
8521 /* offset[11:1] = insn[10:0] */
8522 offset = (insn & 0x7ff) << 1;
8523 /* offset[17:12] = insn[21:16]. */
8524 offset |= (insn & 0x003f0000) >> 4;
8525 /* offset[31:20] = insn[26]. */
8526 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8527 /* offset[18] = insn[13]. */
8528 offset |= (insn & (1 << 13)) << 5;
8529 /* offset[19] = insn[11]. */
8530 offset |= (insn & (1 << 11)) << 8;
8532 /* jump to the offset */
8533 gen_jmp(s, s->pc + offset);
8535 } else {
8536 /* Data processing immediate. */
8537 if (insn & (1 << 25)) {
8538 if (insn & (1 << 24)) {
8539 if (insn & (1 << 20))
8540 goto illegal_op;
8541 /* Bitfield/Saturate. */
8542 op = (insn >> 21) & 7;
8543 imm = insn & 0x1f;
8544 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8545 if (rn == 15) {
8546 tmp = tcg_temp_new_i32();
8547 tcg_gen_movi_i32(tmp, 0);
8548 } else {
8549 tmp = load_reg(s, rn);
8551 switch (op) {
8552 case 2: /* Signed bitfield extract. */
8553 imm++;
8554 if (shift + imm > 32)
8555 goto illegal_op;
8556 if (imm < 32)
8557 gen_sbfx(tmp, shift, imm);
8558 break;
8559 case 6: /* Unsigned bitfield extract. */
8560 imm++;
8561 if (shift + imm > 32)
8562 goto illegal_op;
8563 if (imm < 32)
8564 gen_ubfx(tmp, shift, (1u << imm) - 1);
8565 break;
8566 case 3: /* Bitfield insert/clear. */
8567 if (imm < shift)
8568 goto illegal_op;
8569 imm = imm + 1 - shift;
8570 if (imm != 32) {
8571 tmp2 = load_reg(s, rd);
8572 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8573 tcg_temp_free_i32(tmp2);
8575 break;
8576 case 7:
8577 goto illegal_op;
8578 default: /* Saturate. */
8579 if (shift) {
8580 if (op & 1)
8581 tcg_gen_sari_i32(tmp, tmp, shift);
8582 else
8583 tcg_gen_shli_i32(tmp, tmp, shift);
8585 tmp2 = tcg_const_i32(imm);
8586 if (op & 4) {
8587 /* Unsigned. */
8588 if ((op & 1) && shift == 0)
8589 gen_helper_usat16(tmp, tmp, tmp2);
8590 else
8591 gen_helper_usat(tmp, tmp, tmp2);
8592 } else {
8593 /* Signed. */
8594 if ((op & 1) && shift == 0)
8595 gen_helper_ssat16(tmp, tmp, tmp2);
8596 else
8597 gen_helper_ssat(tmp, tmp, tmp2);
8599 tcg_temp_free_i32(tmp2);
8600 break;
8602 store_reg(s, rd, tmp);
8603 } else {
8604 imm = ((insn & 0x04000000) >> 15)
8605 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8606 if (insn & (1 << 22)) {
8607 /* 16-bit immediate. */
8608 imm |= (insn >> 4) & 0xf000;
8609 if (insn & (1 << 23)) {
8610 /* movt */
8611 tmp = load_reg(s, rd);
8612 tcg_gen_ext16u_i32(tmp, tmp);
8613 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8614 } else {
8615 /* movw */
8616 tmp = tcg_temp_new_i32();
8617 tcg_gen_movi_i32(tmp, imm);
8619 } else {
8620 /* Add/sub 12-bit immediate. */
8621 if (rn == 15) {
8622 offset = s->pc & ~(uint32_t)3;
8623 if (insn & (1 << 23))
8624 offset -= imm;
8625 else
8626 offset += imm;
8627 tmp = tcg_temp_new_i32();
8628 tcg_gen_movi_i32(tmp, offset);
8629 } else {
8630 tmp = load_reg(s, rn);
8631 if (insn & (1 << 23))
8632 tcg_gen_subi_i32(tmp, tmp, imm);
8633 else
8634 tcg_gen_addi_i32(tmp, tmp, imm);
8637 store_reg(s, rd, tmp);
8639 } else {
8640 int shifter_out = 0;
8641 /* modified 12-bit immediate. */
8642 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8643 imm = (insn & 0xff);
8644 switch (shift) {
8645 case 0: /* XY */
8646 /* Nothing to do. */
8647 break;
8648 case 1: /* 00XY00XY */
8649 imm |= imm << 16;
8650 break;
8651 case 2: /* XY00XY00 */
8652 imm |= imm << 16;
8653 imm <<= 8;
8654 break;
8655 case 3: /* XYXYXYXY */
8656 imm |= imm << 16;
8657 imm |= imm << 8;
8658 break;
8659 default: /* Rotated constant. */
8660 shift = (shift << 1) | (imm >> 7);
8661 imm |= 0x80;
8662 imm = imm << (32 - shift);
8663 shifter_out = 1;
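/* Rotated-constant form: an 8-bit value with an implied leading one,
   rotated right by the 5-bit count assembled above. */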
8664 break;
8666 tmp2 = tcg_temp_new_i32();
8667 tcg_gen_movi_i32(tmp2, imm);
8668 rn = (insn >> 16) & 0xf;
8669 if (rn == 15) {
8670 tmp = tcg_temp_new_i32();
8671 tcg_gen_movi_i32(tmp, 0);
8672 } else {
8673 tmp = load_reg(s, rn);
8675 op = (insn >> 21) & 0xf;
8676 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8677 shifter_out, tmp, tmp2))
8678 goto illegal_op;
8679 tcg_temp_free_i32(tmp2);
8680 rd = (insn >> 8) & 0xf;
8681 if (rd != 15) {
8682 store_reg(s, rd, tmp);
8683 } else {
8684 tcg_temp_free_i32(tmp);
8688 break;
8689 case 12: /* Load/store single data item. */
8691 int postinc = 0;
8692 int writeback = 0;
8693 int user;
8694 if ((insn & 0x01100000) == 0x01000000) {
8695 if (disas_neon_ls_insn(env, s, insn))
8696 goto illegal_op;
8697 break;
8699 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
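/* op[1:0] is the access size (0=byte, 1=halfword, 2=word); bit 2 marks
   a sign-extending load. */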
8700 if (rs == 15) {
8701 if (!(insn & (1 << 20))) {
8702 goto illegal_op;
8704 if (op != 2) {
8705 /* Byte or halfword load space with dest == r15: memory hints.
8706 * Catch them early so we don't emit pointless addressing code.
8707 * This space is a mix of:
8708 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8709 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8710 * cores)
8711 * unallocated hints, which must be treated as NOPs
8712 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8713 * which is easiest for the decoding logic
8714 * Some space which must UNDEF
8716 int op1 = (insn >> 23) & 3;
8717 int op2 = (insn >> 6) & 0x3f;
8718 if (op & 2) {
8719 goto illegal_op;
8721 if (rn == 15) {
8722 /* UNPREDICTABLE or unallocated hint */
8723 return 0;
8725 if (op1 & 1) {
8726 return 0; /* PLD* or unallocated hint */
8728 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8729 return 0; /* PLD* or unallocated hint */
8731 /* UNDEF space, or an UNPREDICTABLE */
8732 return 1;
8735 user = IS_USER(s);
8736 if (rn == 15) {
8737 addr = tcg_temp_new_i32();
8738 /* PC relative. */
8739 /* s->pc has already been incremented by 4. */
8740 imm = s->pc & 0xfffffffc;
8741 if (insn & (1 << 23))
8742 imm += insn & 0xfff;
8743 else
8744 imm -= insn & 0xfff;
8745 tcg_gen_movi_i32(addr, imm);
8746 } else {
8747 addr = load_reg(s, rn);
8748 if (insn & (1 << 23)) {
8749 /* Positive offset. */
8750 imm = insn & 0xfff;
8751 tcg_gen_addi_i32(addr, addr, imm);
8752 } else {
8753 imm = insn & 0xff;
8754 switch ((insn >> 8) & 0xf) {
8755 case 0x0: /* Shifted Register. */
8756 shift = (insn >> 4) & 0xf;
8757 if (shift > 3) {
8758 tcg_temp_free_i32(addr);
8759 goto illegal_op;
8761 tmp = load_reg(s, rm);
8762 if (shift)
8763 tcg_gen_shli_i32(tmp, tmp, shift);
8764 tcg_gen_add_i32(addr, addr, tmp);
8765 tcg_temp_free_i32(tmp);
8766 break;
8767 case 0xc: /* Negative offset. */
8768 tcg_gen_addi_i32(addr, addr, -imm);
8769 break;
8770 case 0xe: /* User privilege. */
8771 tcg_gen_addi_i32(addr, addr, imm);
8772 user = 1;
8773 break;
8774 case 0x9: /* Post-decrement. */
8775 imm = -imm;
8776 /* Fall through. */
8777 case 0xb: /* Post-increment. */
8778 postinc = 1;
8779 writeback = 1;
8780 break;
8781 case 0xd: /* Pre-decrement. */
8782 imm = -imm;
8783 /* Fall through. */
8784 case 0xf: /* Pre-increment. */
8785 tcg_gen_addi_i32(addr, addr, imm);
8786 writeback = 1;
8787 break;
8788 default:
8789 tcg_temp_free_i32(addr);
8790 goto illegal_op;
8794 if (insn & (1 << 20)) {
8795 /* Load. */
8796 switch (op) {
8797 case 0: tmp = gen_ld8u(addr, user); break;
8798 case 4: tmp = gen_ld8s(addr, user); break;
8799 case 1: tmp = gen_ld16u(addr, user); break;
8800 case 5: tmp = gen_ld16s(addr, user); break;
8801 case 2: tmp = gen_ld32(addr, user); break;
8802 default:
8803 tcg_temp_free_i32(addr);
8804 goto illegal_op;
8806 if (rs == 15) {
8807 gen_bx(s, tmp);
8808 } else {
8809 store_reg(s, rs, tmp);
8811 } else {
8812 /* Store. */
8813 tmp = load_reg(s, rs);
8814 switch (op) {
8815 case 0: gen_st8(tmp, addr, user); break;
8816 case 1: gen_st16(tmp, addr, user); break;
8817 case 2: gen_st32(tmp, addr, user); break;
8818 default:
8819 tcg_temp_free_i32(addr);
8820 goto illegal_op;
8823 if (postinc)
8824 tcg_gen_addi_i32(addr, addr, imm);
8825 if (writeback) {
8826 store_reg(s, rn, addr);
8827 } else {
8828 tcg_temp_free_i32(addr);
8831 break;
8832 default:
8833 goto illegal_op;
8835 return 0;
8836 illegal_op:
8837 return 1;
8840 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8842 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8843 int32_t offset;
8844 int i;
8845 TCGv tmp;
8846 TCGv tmp2;
8847 TCGv addr;
8849 if (s->condexec_mask) {
8850 cond = s->condexec_cond;
8851 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8852 s->condlabel = gen_new_label();
8853 gen_test_cc(cond ^ 1, s->condlabel);
8854 s->condjmp = 1;
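/* Inside an IT block each guarded instruction is translated under a
 * conditional branch: gen_test_cc() with the inverted condition jumps to
 * condlabel when the guard fails, and the label is bound in the main
 * translation loop once the instruction's code has been emitted.
 */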
8858 insn = lduw_code(s->pc);
8859 s->pc += 2;
8861 switch (insn >> 12) {
8862 case 0: case 1:
8864 rd = insn & 7;
8865 op = (insn >> 11) & 3;
8866 if (op == 3) {
8867 /* add/subtract */
8868 rn = (insn >> 3) & 7;
8869 tmp = load_reg(s, rn);
8870 if (insn & (1 << 10)) {
8871 /* immediate */
8872 tmp2 = tcg_temp_new_i32();
8873 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8874 } else {
8875 /* reg */
8876 rm = (insn >> 6) & 7;
8877 tmp2 = load_reg(s, rm);
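/* A nonzero condexec_mask means we are inside an IT block, where these
 * Thumb add/subtract forms do not set the flags; outside an IT block they
 * behave as ADDS/SUBS, hence the plain TCG op versus the _cc helper below.
 */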
8879 if (insn & (1 << 9)) {
8880 if (s->condexec_mask)
8881 tcg_gen_sub_i32(tmp, tmp, tmp2);
8882 else
8883 gen_helper_sub_cc(tmp, tmp, tmp2);
8884 } else {
8885 if (s->condexec_mask)
8886 tcg_gen_add_i32(tmp, tmp, tmp2);
8887 else
8888 gen_helper_add_cc(tmp, tmp, tmp2);
8890 tcg_temp_free_i32(tmp2);
8891 store_reg(s, rd, tmp);
8892 } else {
8893 /* shift immediate */
8894 rm = (insn >> 3) & 7;
8895 shift = (insn >> 6) & 0x1f;
8896 tmp = load_reg(s, rm);
8897 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8898 if (!s->condexec_mask)
8899 gen_logic_CC(tmp);
8900 store_reg(s, rd, tmp);
8902 break;
8903 case 2: case 3:
8904 /* arithmetic large immediate */
8905 op = (insn >> 11) & 3;
8906 rd = (insn >> 8) & 0x7;
8907 if (op == 0) { /* mov */
8908 tmp = tcg_temp_new_i32();
8909 tcg_gen_movi_i32(tmp, insn & 0xff);
8910 if (!s->condexec_mask)
8911 gen_logic_CC(tmp);
8912 store_reg(s, rd, tmp);
8913 } else {
8914 tmp = load_reg(s, rd);
8915 tmp2 = tcg_temp_new_i32();
8916 tcg_gen_movi_i32(tmp2, insn & 0xff);
8917 switch (op) {
8918 case 1: /* cmp */
8919 gen_helper_sub_cc(tmp, tmp, tmp2);
8920 tcg_temp_free_i32(tmp);
8921 tcg_temp_free_i32(tmp2);
8922 break;
8923 case 2: /* add */
8924 if (s->condexec_mask)
8925 tcg_gen_add_i32(tmp, tmp, tmp2);
8926 else
8927 gen_helper_add_cc(tmp, tmp, tmp2);
8928 tcg_temp_free_i32(tmp2);
8929 store_reg(s, rd, tmp);
8930 break;
8931 case 3: /* sub */
8932 if (s->condexec_mask)
8933 tcg_gen_sub_i32(tmp, tmp, tmp2);
8934 else
8935 gen_helper_sub_cc(tmp, tmp, tmp2);
8936 tcg_temp_free_i32(tmp2);
8937 store_reg(s, rd, tmp);
8938 break;
8941 break;
8942 case 4:
8943 if (insn & (1 << 11)) {
8944 rd = (insn >> 8) & 7;
8945 /* load pc-relative. Bit 1 of PC is ignored. */
8946 val = s->pc + 2 + ((insn & 0xff) * 4);
8947 val &= ~(uint32_t)2;
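/* s->pc was advanced by 2 when this halfword was fetched, so s->pc + 2 is
 * the architectural PC (instruction address + 4); clearing bit 1 then
 * yields Align(PC, 4), the base address for Thumb literal loads.
 */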
8948 addr = tcg_temp_new_i32();
8949 tcg_gen_movi_i32(addr, val);
8950 tmp = gen_ld32(addr, IS_USER(s));
8951 tcg_temp_free_i32(addr);
8952 store_reg(s, rd, tmp);
8953 break;
8955 if (insn & (1 << 10)) {
8956 /* data processing extended or blx */
8957 rd = (insn & 7) | ((insn >> 4) & 8);
8958 rm = (insn >> 3) & 0xf;
8959 op = (insn >> 8) & 3;
8960 switch (op) {
8961 case 0: /* add */
8962 tmp = load_reg(s, rd);
8963 tmp2 = load_reg(s, rm);
8964 tcg_gen_add_i32(tmp, tmp, tmp2);
8965 tcg_temp_free_i32(tmp2);
8966 store_reg(s, rd, tmp);
8967 break;
8968 case 1: /* cmp */
8969 tmp = load_reg(s, rd);
8970 tmp2 = load_reg(s, rm);
8971 gen_helper_sub_cc(tmp, tmp, tmp2);
8972 tcg_temp_free_i32(tmp2);
8973 tcg_temp_free_i32(tmp);
8974 break;
8975 case 2: /* mov/cpy */
8976 tmp = load_reg(s, rm);
8977 store_reg(s, rd, tmp);
8978 break;
8979 case 3:/* branch [and link] exchange thumb register */
8980 tmp = load_reg(s, rm);
8981 if (insn & (1 << 7)) {
8982 ARCH(5);
8983 val = (uint32_t)s->pc | 1;
8984 tmp2 = tcg_temp_new_i32();
8985 tcg_gen_movi_i32(tmp2, val);
8986 store_reg(s, 14, tmp2);
8988 /* already thumb, no need to check */
8989 gen_bx(s, tmp);
8990 break;
8992 break;
8995 /* data processing register */
8996 rd = insn & 7;
8997 rm = (insn >> 3) & 7;
8998 op = (insn >> 6) & 0xf;
8999 if (op == 2 || op == 3 || op == 4 || op == 7) {
9000 /* the shift/rotate ops want the operands backwards */
9001 val = rm;
9002 rm = rd;
9003 rd = val;
9004 val = 1;
9005 } else {
9006 val = 0;
9009 if (op == 9) { /* neg */
9010 tmp = tcg_temp_new_i32();
9011 tcg_gen_movi_i32(tmp, 0);
9012 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9013 tmp = load_reg(s, rd);
9014 } else {
9015 TCGV_UNUSED(tmp);
9018 tmp2 = load_reg(s, rm);
9019 switch (op) {
9020 case 0x0: /* and */
9021 tcg_gen_and_i32(tmp, tmp, tmp2);
9022 if (!s->condexec_mask)
9023 gen_logic_CC(tmp);
9024 break;
9025 case 0x1: /* eor */
9026 tcg_gen_xor_i32(tmp, tmp, tmp2);
9027 if (!s->condexec_mask)
9028 gen_logic_CC(tmp);
9029 break;
9030 case 0x2: /* lsl */
9031 if (s->condexec_mask) {
9032 gen_helper_shl(tmp2, tmp2, tmp);
9033 } else {
9034 gen_helper_shl_cc(tmp2, tmp2, tmp);
9035 gen_logic_CC(tmp2);
9037 break;
9038 case 0x3: /* lsr */
9039 if (s->condexec_mask) {
9040 gen_helper_shr(tmp2, tmp2, tmp);
9041 } else {
9042 gen_helper_shr_cc(tmp2, tmp2, tmp);
9043 gen_logic_CC(tmp2);
9045 break;
9046 case 0x4: /* asr */
9047 if (s->condexec_mask) {
9048 gen_helper_sar(tmp2, tmp2, tmp);
9049 } else {
9050 gen_helper_sar_cc(tmp2, tmp2, tmp);
9051 gen_logic_CC(tmp2);
9053 break;
9054 case 0x5: /* adc */
9055 if (s->condexec_mask)
9056 gen_adc(tmp, tmp2);
9057 else
9058 gen_helper_adc_cc(tmp, tmp, tmp2);
9059 break;
9060 case 0x6: /* sbc */
9061 if (s->condexec_mask)
9062 gen_sub_carry(tmp, tmp, tmp2);
9063 else
9064 gen_helper_sbc_cc(tmp, tmp, tmp2);
9065 break;
9066 case 0x7: /* ror */
9067 if (s->condexec_mask) {
9068 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9069 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9070 } else {
9071 gen_helper_ror_cc(tmp2, tmp2, tmp);
9072 gen_logic_CC(tmp2);
9074 break;
9075 case 0x8: /* tst */
9076 tcg_gen_and_i32(tmp, tmp, tmp2);
9077 gen_logic_CC(tmp);
9078 rd = 16;
9079 break;
9080 case 0x9: /* neg */
9081 if (s->condexec_mask)
9082 tcg_gen_neg_i32(tmp, tmp2);
9083 else
9084 gen_helper_sub_cc(tmp, tmp, tmp2);
9085 break;
9086 case 0xa: /* cmp */
9087 gen_helper_sub_cc(tmp, tmp, tmp2);
9088 rd = 16;
9089 break;
9090 case 0xb: /* cmn */
9091 gen_helper_add_cc(tmp, tmp, tmp2);
9092 rd = 16;
9093 break;
9094 case 0xc: /* orr */
9095 tcg_gen_or_i32(tmp, tmp, tmp2);
9096 if (!s->condexec_mask)
9097 gen_logic_CC(tmp);
9098 break;
9099 case 0xd: /* mul */
9100 tcg_gen_mul_i32(tmp, tmp, tmp2);
9101 if (!s->condexec_mask)
9102 gen_logic_CC(tmp);
9103 break;
9104 case 0xe: /* bic */
9105 tcg_gen_andc_i32(tmp, tmp, tmp2);
9106 if (!s->condexec_mask)
9107 gen_logic_CC(tmp);
9108 break;
9109 case 0xf: /* mvn */
9110 tcg_gen_not_i32(tmp2, tmp2);
9111 if (!s->condexec_mask)
9112 gen_logic_CC(tmp2);
9113 val = 1;
9114 rm = rd;
9115 break;
9117 if (rd != 16) {
9118 if (val) {
9119 store_reg(s, rm, tmp2);
9120 if (op != 0xf)
9121 tcg_temp_free_i32(tmp);
9122 } else {
9123 store_reg(s, rd, tmp);
9124 tcg_temp_free_i32(tmp2);
9126 } else {
9127 tcg_temp_free_i32(tmp);
9128 tcg_temp_free_i32(tmp2);
9130 break;
9132 case 5:
9133 /* load/store register offset. */
9134 rd = insn & 7;
9135 rn = (insn >> 3) & 7;
9136 rm = (insn >> 6) & 7;
9137 op = (insn >> 9) & 7;
9138 addr = load_reg(s, rn);
9139 tmp = load_reg(s, rm);
9140 tcg_gen_add_i32(addr, addr, tmp);
9141 tcg_temp_free_i32(tmp);
9143 if (op < 3) /* store */
9144 tmp = load_reg(s, rd);
9146 switch (op) {
9147 case 0: /* str */
9148 gen_st32(tmp, addr, IS_USER(s));
9149 break;
9150 case 1: /* strh */
9151 gen_st16(tmp, addr, IS_USER(s));
9152 break;
9153 case 2: /* strb */
9154 gen_st8(tmp, addr, IS_USER(s));
9155 break;
9156 case 3: /* ldrsb */
9157 tmp = gen_ld8s(addr, IS_USER(s));
9158 break;
9159 case 4: /* ldr */
9160 tmp = gen_ld32(addr, IS_USER(s));
9161 break;
9162 case 5: /* ldrh */
9163 tmp = gen_ld16u(addr, IS_USER(s));
9164 break;
9165 case 6: /* ldrb */
9166 tmp = gen_ld8u(addr, IS_USER(s));
9167 break;
9168 case 7: /* ldrsh */
9169 tmp = gen_ld16s(addr, IS_USER(s));
9170 break;
9172 if (op >= 3) /* load */
9173 store_reg(s, rd, tmp);
9174 tcg_temp_free_i32(addr);
9175 break;
9177 case 6:
9178 /* load/store word immediate offset */
9179 rd = insn & 7;
9180 rn = (insn >> 3) & 7;
9181 addr = load_reg(s, rn);
9182 val = (insn >> 4) & 0x7c;
9183 tcg_gen_addi_i32(addr, addr, val);
9185 if (insn & (1 << 11)) {
9186 /* load */
9187 tmp = gen_ld32(addr, IS_USER(s));
9188 store_reg(s, rd, tmp);
9189 } else {
9190 /* store */
9191 tmp = load_reg(s, rd);
9192 gen_st32(tmp, addr, IS_USER(s));
9194 tcg_temp_free_i32(addr);
9195 break;
9197 case 7:
9198 /* load/store byte immediate offset */
9199 rd = insn & 7;
9200 rn = (insn >> 3) & 7;
9201 addr = load_reg(s, rn);
9202 val = (insn >> 6) & 0x1f;
9203 tcg_gen_addi_i32(addr, addr, val);
9205 if (insn & (1 << 11)) {
9206 /* load */
9207 tmp = gen_ld8u(addr, IS_USER(s));
9208 store_reg(s, rd, tmp);
9209 } else {
9210 /* store */
9211 tmp = load_reg(s, rd);
9212 gen_st8(tmp, addr, IS_USER(s));
9214 tcg_temp_free_i32(addr);
9215 break;
9217 case 8:
9218 /* load/store halfword immediate offset */
9219 rd = insn & 7;
9220 rn = (insn >> 3) & 7;
9221 addr = load_reg(s, rn);
9222 val = (insn >> 5) & 0x3e;
9223 tcg_gen_addi_i32(addr, addr, val);
9225 if (insn & (1 << 11)) {
9226 /* load */
9227 tmp = gen_ld16u(addr, IS_USER(s));
9228 store_reg(s, rd, tmp);
9229 } else {
9230 /* store */
9231 tmp = load_reg(s, rd);
9232 gen_st16(tmp, addr, IS_USER(s));
9234 tcg_temp_free_i32(addr);
9235 break;
9237 case 9:
9238 /* load/store from stack */
9239 rd = (insn >> 8) & 7;
9240 addr = load_reg(s, 13);
9241 val = (insn & 0xff) * 4;
9242 tcg_gen_addi_i32(addr, addr, val);
9244 if (insn & (1 << 11)) {
9245 /* load */
9246 tmp = gen_ld32(addr, IS_USER(s));
9247 store_reg(s, rd, tmp);
9248 } else {
9249 /* store */
9250 tmp = load_reg(s, rd);
9251 gen_st32(tmp, addr, IS_USER(s));
9253 tcg_temp_free_i32(addr);
9254 break;
9256 case 10:
9257 /* add to high reg */
9258 rd = (insn >> 8) & 7;
9259 if (insn & (1 << 11)) {
9260 /* SP */
9261 tmp = load_reg(s, 13);
9262 } else {
9263 /* PC. bit 1 is ignored. */
9264 tmp = tcg_temp_new_i32();
9265 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9267 val = (insn & 0xff) * 4;
9268 tcg_gen_addi_i32(tmp, tmp, val);
9269 store_reg(s, rd, tmp);
9270 break;
9272 case 11:
9273 /* misc */
9274 op = (insn >> 8) & 0xf;
9275 switch (op) {
9276 case 0:
9277 /* adjust stack pointer */
9278 tmp = load_reg(s, 13);
9279 val = (insn & 0x7f) * 4;
9280 if (insn & (1 << 7))
9281 val = -(int32_t)val;
9282 tcg_gen_addi_i32(tmp, tmp, val);
9283 store_reg(s, 13, tmp);
9284 break;
9286 case 2: /* sign/zero extend. */
9287 ARCH(6);
9288 rd = insn & 7;
9289 rm = (insn >> 3) & 7;
9290 tmp = load_reg(s, rm);
9291 switch ((insn >> 6) & 3) {
9292 case 0: gen_sxth(tmp); break;
9293 case 1: gen_sxtb(tmp); break;
9294 case 2: gen_uxth(tmp); break;
9295 case 3: gen_uxtb(tmp); break;
9297 store_reg(s, rd, tmp);
9298 break;
9299 case 4: case 5: case 0xc: case 0xd:
9300 /* push/pop */
9301 addr = load_reg(s, 13);
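/* First pass: compute the total transfer size (4 bytes per low register in
 * the list, plus 4 more for LR/PC when bit 8 is set) so that a push can
 * pre-decrement SP before the second loop walks the addresses upwards.
 */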
9302 if (insn & (1 << 8))
9303 offset = 4;
9304 else
9305 offset = 0;
9306 for (i = 0; i < 8; i++) {
9307 if (insn & (1 << i))
9308 offset += 4;
9310 if ((insn & (1 << 11)) == 0) {
9311 tcg_gen_addi_i32(addr, addr, -offset);
9313 for (i = 0; i < 8; i++) {
9314 if (insn & (1 << i)) {
9315 if (insn & (1 << 11)) {
9316 /* pop */
9317 tmp = gen_ld32(addr, IS_USER(s));
9318 store_reg(s, i, tmp);
9319 } else {
9320 /* push */
9321 tmp = load_reg(s, i);
9322 gen_st32(tmp, addr, IS_USER(s));
9324 /* advance to the next address. */
9325 tcg_gen_addi_i32(addr, addr, 4);
9328 TCGV_UNUSED(tmp);
9329 if (insn & (1 << 8)) {
9330 if (insn & (1 << 11)) {
9331 /* pop pc */
9332 tmp = gen_ld32(addr, IS_USER(s));
9333 /* don't set the pc until the rest of the instruction
9334 has completed */
9335 } else {
9336 /* push lr */
9337 tmp = load_reg(s, 14);
9338 gen_st32(tmp, addr, IS_USER(s));
9340 tcg_gen_addi_i32(addr, addr, 4);
9342 if ((insn & (1 << 11)) == 0) {
9343 tcg_gen_addi_i32(addr, addr, -offset);
9345 /* write back the new stack pointer */
9346 store_reg(s, 13, addr);
9347 /* set the new PC value */
9348 if ((insn & 0x0900) == 0x0900) {
9349 store_reg_from_load(env, s, 15, tmp);
9351 break;
9353 case 1: case 3: case 9: case 11: /* cbz/cbnz: compare and branch on (non)zero */
9354 rm = insn & 7;
9355 tmp = load_reg(s, rm);
9356 s->condlabel = gen_new_label();
9357 s->condjmp = 1;
9358 if (insn & (1 << 11))
9359 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9360 else
9361 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9362 tcg_temp_free_i32(tmp);
9363 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
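/* The offset is i:imm5:'0' (insn bit 9 and bits 7:3), zero-extended; e.g.
 * a CBNZ at 0x3000 with i = 0 and imm5 = 3 gives offset 6 and branches to
 * (0x3000 + 4) + 6 = 0x300a, where s->pc + 2 below is that PC value.
 */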
9364 val = (uint32_t)s->pc + 2;
9365 val += offset;
9366 gen_jmp(s, val);
9367 break;
9369 case 15: /* IT, nop-hint. */
9370 if ((insn & 0xf) == 0) {
9371 gen_nop_hint(s, (insn >> 4) & 0xf);
9372 break;
9374 /* If Then. */
9375 s->condexec_cond = (insn >> 4) & 0xe;
9376 s->condexec_mask = insn & 0x1f;
9377 /* No actual code generated for this insn, just set up state. */
9378 break;
9380 case 0xe: /* bkpt */
9381 ARCH(5);
9382 gen_exception_insn(s, 2, EXCP_BKPT);
9383 break;
9385 case 0xa: /* rev */
9386 ARCH(6);
9387 rn = (insn >> 3) & 0x7;
9388 rd = insn & 0x7;
9389 tmp = load_reg(s, rn);
9390 switch ((insn >> 6) & 3) {
9391 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9392 case 1: gen_rev16(tmp); break;
9393 case 3: gen_revsh(tmp); break;
9394 default: goto illegal_op;
9396 store_reg(s, rd, tmp);
9397 break;
9399 case 6: /* cps */
9400 ARCH(6);
9401 if (IS_USER(s))
9402 break;
9403 if (IS_M(env)) {
9404 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9405 /* PRIMASK */
9406 if (insn & 1) {
9407 addr = tcg_const_i32(16);
9408 gen_helper_v7m_msr(cpu_env, addr, tmp);
9409 tcg_temp_free_i32(addr);
9411 /* FAULTMASK */
9412 if (insn & 2) {
9413 addr = tcg_const_i32(17);
9414 gen_helper_v7m_msr(cpu_env, addr, tmp);
9415 tcg_temp_free_i32(addr);
9417 tcg_temp_free_i32(tmp);
9418 gen_lookup_tb(s);
9419 } else {
9420 if (insn & (1 << 4))
9421 shift = CPSR_A | CPSR_I | CPSR_F;
9422 else
9423 shift = 0;
9424 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9426 break;
9428 default:
9429 goto undef;
9431 break;
9433 case 12:
9434 /* load/store multiple */
9435 rn = (insn >> 8) & 0x7;
9436 addr = load_reg(s, rn);
9437 for (i = 0; i < 8; i++) {
9438 if (insn & (1 << i)) {
9439 if (insn & (1 << 11)) {
9440 /* load */
9441 tmp = gen_ld32(addr, IS_USER(s));
9442 store_reg(s, i, tmp);
9443 } else {
9444 /* store */
9445 tmp = load_reg(s, i);
9446 gen_st32(tmp, addr, IS_USER(s));
9448 /* advance to the next address */
9449 tcg_gen_addi_i32(addr, addr, 4);
9452 /* Base register writeback. */
9453 if ((insn & (1 << rn)) == 0) {
9454 store_reg(s, rn, addr);
9455 } else {
9456 tcg_temp_free_i32(addr);
9458 break;
9460 case 13:
9461 /* conditional branch or swi */
9462 cond = (insn >> 8) & 0xf;
9463 if (cond == 0xe)
9464 goto undef;
9466 if (cond == 0xf) {
9467 /* swi */
9468 gen_set_pc_im(s->pc);
9469 s->is_jmp = DISAS_SWI;
9470 break;
9472 /* generate a conditional jump to next instruction */
9473 s->condlabel = gen_new_label();
9474 gen_test_cc(cond ^ 1, s->condlabel);
9475 s->condjmp = 1;
9477 /* jump to the offset */
9478 val = (uint32_t)s->pc + 2;
9479 offset = ((int32_t)insn << 24) >> 24;
9480 val += offset << 1;
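/* offset is the sign-extended imm8, counted in halfwords; e.g. a
 * conditional branch at 0x4000 with imm8 = 0xfe (-2) targets
 * 0x4004 + (-2 << 1) = 0x4000, a branch to itself, since the Thumb PC
 * reads as the instruction address plus 4.
 */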
9481 gen_jmp(s, val);
9482 break;
9484 case 14:
9485 if (insn & (1 << 11)) {
9486 if (disas_thumb2_insn(env, s, insn))
9487 goto undef32;
9488 break;
9490 /* unconditional branch */
9491 val = (uint32_t)s->pc;
9492 offset = ((int32_t)insn << 21) >> 21;
9493 val += (offset << 1) + 2;
9494 gen_jmp(s, val);
9495 break;
9497 case 15:
9498 if (disas_thumb2_insn(env, s, insn))
9499 goto undef32;
9500 break;
9502 return;
9503 undef32:
9504 gen_exception_insn(s, 4, EXCP_UDEF);
9505 return;
9506 illegal_op:
9507 undef:
9508 gen_exception_insn(s, 2, EXCP_UDEF);
9511 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9512 basic block 'tb'. If search_pc is TRUE, also generate PC
9513 information for each intermediate instruction. */
9514 static inline void gen_intermediate_code_internal(CPUState *env,
9515 TranslationBlock *tb,
9516 int search_pc)
9518 DisasContext dc1, *dc = &dc1;
9519 CPUBreakpoint *bp;
9520 uint16_t *gen_opc_end;
9521 int j, lj;
9522 target_ulong pc_start;
9523 uint32_t next_page_start;
9524 int num_insns;
9525 int max_insns;
9527 /* generate intermediate code */
9528 pc_start = tb->pc;
9530 dc->tb = tb;
9532 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9534 dc->is_jmp = DISAS_NEXT;
9535 dc->pc = pc_start;
9536 dc->singlestep_enabled = env->singlestep_enabled;
9537 dc->condjmp = 0;
9538 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9539 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9540 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9541 #if !defined(CONFIG_USER_ONLY)
9542 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9543 #endif
9544 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9545 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9546 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9547 cpu_F0s = tcg_temp_new_i32();
9548 cpu_F1s = tcg_temp_new_i32();
9549 cpu_F0d = tcg_temp_new_i64();
9550 cpu_F1d = tcg_temp_new_i64();
9551 cpu_V0 = cpu_F0d;
9552 cpu_V1 = cpu_F1d;
9553 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9554 cpu_M0 = tcg_temp_new_i64();
9555 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9556 lj = -1;
9557 num_insns = 0;
9558 max_insns = tb->cflags & CF_COUNT_MASK;
9559 if (max_insns == 0)
9560 max_insns = CF_COUNT_MASK;
9562 gen_icount_start();
9564 tcg_clear_temp_count();
9566 /* A note on handling of the condexec (IT) bits:
9568 * We want to avoid the overhead of having to write the updated condexec
9569 * bits back to the CPUState for every instruction in an IT block. So:
9570 * (1) if the condexec bits are not already zero then we write
9571 * zero back into the CPUState now. This avoids complications trying
9572 * to do it at the end of the block. (For example if we don't do this
9573 * it's hard to identify whether we can safely skip writing condexec
9574 * at the end of the TB, which we definitely want to do for the case
9575 * where a TB doesn't do anything with the IT state at all.)
9576 * (2) if we are going to leave the TB then we call gen_set_condexec()
9577 * which will write the correct value into CPUState if zero is wrong.
9578 * This is done both for leaving the TB at the end, and for leaving
9579 * it because of an exception we know will happen, which is done in
9580 * gen_exception_insn(). The latter is necessary because we need to
9581 * leave the TB with the PC/IT state just prior to execution of the
9582 * instruction which caused the exception.
9583 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9584 * then the CPUState will be wrong and we need to reset it.
9585 * This is handled in the same way as restoration of the
9586 * PC in these situations: we will be called again with search_pc=1
9587 * and generate a mapping of the condexec bits for each PC in
9588 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9589 * this to restore the condexec bits.
9591 * Note that there are no instructions which can read the condexec
9592 * bits, and none which can write non-static values to them, so
9593 * we don't need to care about whether CPUState is correct in the
9594 * middle of a TB.
9595 */
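/* The snapshot taken for case (3) packs the state for each PC as
 * (condexec_cond << 4) | (condexec_mask >> 1), matching the CONDEXEC TB
 * flag layout decoded above, so restore_state_to_opc() can copy it
 * straight into env->condexec_bits.
 */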
9597 /* Reset the conditional execution bits immediately. This avoids
9598 complications trying to do it at the end of the block. */
9599 if (dc->condexec_mask || dc->condexec_cond)
9601 TCGv tmp = tcg_temp_new_i32();
9602 tcg_gen_movi_i32(tmp, 0);
9603 store_cpu_field(tmp, condexec_bits);
9605 do {
9606 #ifdef CONFIG_USER_ONLY
9607 /* Intercept jump to the magic kernel page. */
9608 if (dc->pc >= 0xffff0000) {
9609 /* We always get here via a jump, so we know we are not in a
9610 conditional execution block. */
9611 gen_exception(EXCP_KERNEL_TRAP);
9612 dc->is_jmp = DISAS_UPDATE;
9613 break;
9615 #else
9616 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9617 /* We always get here via a jump, so we know we are not in a
9618 conditional execution block. */
9619 gen_exception(EXCP_EXCEPTION_EXIT);
9620 dc->is_jmp = DISAS_UPDATE;
9621 break;
9623 #endif
9625 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9626 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9627 if (bp->pc == dc->pc) {
9628 gen_exception_insn(dc, 0, EXCP_DEBUG);
9629 /* Advance PC so that clearing the breakpoint will
9630 invalidate this TB. */
9631 dc->pc += 2;
9632 goto done_generating;
9633 break;
9637 if (search_pc) {
9638 j = gen_opc_ptr - gen_opc_buf;
9639 if (lj < j) {
9640 lj++;
9641 while (lj < j)
9642 gen_opc_instr_start[lj++] = 0;
9644 gen_opc_pc[lj] = dc->pc;
9645 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9646 gen_opc_instr_start[lj] = 1;
9647 gen_opc_icount[lj] = num_insns;
9650 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9651 gen_io_start();
9653 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9654 tcg_gen_debug_insn_start(dc->pc);
9657 if (dc->thumb) {
9658 disas_thumb_insn(env, dc);
9659 if (dc->condexec_mask) {
9660 dc->condexec_cond = (dc->condexec_cond & 0xe)
9661 | ((dc->condexec_mask >> 4) & 1);
9662 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9663 if (dc->condexec_mask == 0) {
9664 dc->condexec_cond = 0;
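/* This corresponds to the architecture's ITAdvance(): condexec_mask holds
 * firstcond<0>:mask from the IT instruction, the top bit shifted out each
 * step becomes the low bit of the condition, and a mask of zero ends the
 * block. E.g. "ITE EQ" starts with cond = 0, mask = 0b01100; the first
 * guarded instruction runs as EQ (mask 0b11000), the second as NE (mask
 * 0b10000), after which the mask clears and the block is finished.
 */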
9667 } else {
9668 disas_arm_insn(env, dc);
9671 if (dc->condjmp && !dc->is_jmp) {
9672 gen_set_label(dc->condlabel);
9673 dc->condjmp = 0;
9676 if (tcg_check_temp_count()) {
9677 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9680 /* Translation stops when a conditional branch is encountered.
9681 * Otherwise the subsequent code could get translated several times.
9682 * Also stop translation when a page boundary is reached. This
9683 * ensures prefetch aborts occur at the right place. */
9684 num_insns ++;
9685 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9686 !env->singlestep_enabled &&
9687 !singlestep &&
9688 dc->pc < next_page_start &&
9689 num_insns < max_insns);
9691 if (tb->cflags & CF_LAST_IO) {
9692 if (dc->condjmp) {
9693 /* FIXME: This can theoretically happen with self-modifying
9694 code. */
9695 cpu_abort(env, "IO on conditional branch instruction");
9697 gen_io_end();
9700 /* At this stage dc->condjmp will only be set when the skipped
9701 instruction was a conditional branch or trap, and the PC has
9702 already been written. */
9703 if (unlikely(env->singlestep_enabled)) {
9704 /* Make sure the pc is updated, and raise a debug exception. */
9705 if (dc->condjmp) {
9706 gen_set_condexec(dc);
9707 if (dc->is_jmp == DISAS_SWI) {
9708 gen_exception(EXCP_SWI);
9709 } else {
9710 gen_exception(EXCP_DEBUG);
9712 gen_set_label(dc->condlabel);
9714 if (dc->condjmp || !dc->is_jmp) {
9715 gen_set_pc_im(dc->pc);
9716 dc->condjmp = 0;
9718 gen_set_condexec(dc);
9719 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9720 gen_exception(EXCP_SWI);
9721 } else {
9722 /* FIXME: Single stepping a WFI insn will not halt
9723 the CPU. */
9724 gen_exception(EXCP_DEBUG);
9726 } else {
9727 /* While branches must always occur at the end of an IT block,
9728 there are a few other things that can cause us to terminate
9729 the TB in the middle of an IT block:
9730 - Exception generating instructions (bkpt, swi, undefined).
9731 - Page boundaries.
9732 - Hardware watchpoints.
9733 Hardware breakpoints have already been handled and skip this code.
9734 */
9735 gen_set_condexec(dc);
9736 switch(dc->is_jmp) {
9737 case DISAS_NEXT:
9738 gen_goto_tb(dc, 1, dc->pc);
9739 break;
9740 default:
9741 case DISAS_JUMP:
9742 case DISAS_UPDATE:
9743 /* indicate that the hash table must be used to find the next TB */
9744 tcg_gen_exit_tb(0);
9745 break;
9746 case DISAS_TB_JUMP:
9747 /* nothing more to generate */
9748 break;
9749 case DISAS_WFI:
9750 gen_helper_wfi();
9751 break;
9752 case DISAS_SWI:
9753 gen_exception(EXCP_SWI);
9754 break;
9756 if (dc->condjmp) {
9757 gen_set_label(dc->condlabel);
9758 gen_set_condexec(dc);
9759 gen_goto_tb(dc, 1, dc->pc);
9760 dc->condjmp = 0;
9764 done_generating:
9765 gen_icount_end(tb, num_insns);
9766 *gen_opc_ptr = INDEX_op_end;
9768 #ifdef DEBUG_DISAS
9769 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9770 qemu_log("----------------\n");
9771 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9772 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9773 qemu_log("\n");
9775 #endif
9776 if (search_pc) {
9777 j = gen_opc_ptr - gen_opc_buf;
9778 lj++;
9779 while (lj <= j)
9780 gen_opc_instr_start[lj++] = 0;
9781 } else {
9782 tb->size = dc->pc - pc_start;
9783 tb->icount = num_insns;
9787 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9789 gen_intermediate_code_internal(env, tb, 0);
9792 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9794 gen_intermediate_code_internal(env, tb, 1);
9797 static const char *cpu_mode_names[16] = {
9798 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9799 "???", "???", "???", "und", "???", "???", "???", "sys"
9802 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9803 int flags)
9805 int i;
9806 #if 0
9807 union {
9808 uint32_t i;
9809 float s;
9810 } s0, s1;
9811 CPU_DoubleU d;
9812 /* ??? This assumes float64 and double have the same layout.
9813 Oh well, it's only debug dumps. */
9814 union {
9815 float64 f64;
9816 double d;
9817 } d0;
9818 #endif
9819 uint32_t psr;
9821 for(i=0;i<16;i++) {
9822 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9823 if ((i % 4) == 3)
9824 cpu_fprintf(f, "\n");
9825 else
9826 cpu_fprintf(f, " ");
9828 psr = cpsr_read(env);
9829 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9830 psr,
9831 psr & (1 << 31) ? 'N' : '-',
9832 psr & (1 << 30) ? 'Z' : '-',
9833 psr & (1 << 29) ? 'C' : '-',
9834 psr & (1 << 28) ? 'V' : '-',
9835 psr & CPSR_T ? 'T' : 'A',
9836 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
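/* The trailing 32/26 reports the mode width: bit 4 of the mode field is set
 * for the 32-bit modes and clear for the legacy 26-bit modes of early ARM
 * processors.
 */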
9838 #if 0
9839 for (i = 0; i < 16; i++) {
9840 d.d = env->vfp.regs[i];
9841 s0.i = d.l.lower;
9842 s1.i = d.l.upper;
9843 d0.f64 = d.d;
9844 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9845 i * 2, (int)s0.i, s0.s,
9846 i * 2 + 1, (int)s1.i, s1.s,
9847 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9848 d0.d);
9850 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9851 #endif
9854 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
9856 env->regs[15] = gen_opc_pc[pc_pos];
9857 env->condexec_bits = gen_opc_condexec_bits[pc_pos];