/*
 * target-arm/translate.c (from qemu.git, blob 0f35b60946907bf20fba5e094589e8e7efd697b1)
 * NOTE(review): the original page header also carried an unrelated commit
 * title ("hw/9pfs: Reset server state during TVERSION") from the scrape.
 */
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "disas.h"
29 #include "tcg-op.h"
30 #include "qemu-log.h"
32 #include "helper.h"
33 #define GEN_HELPER 1
34 #include "helper.h"
36 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 /* currently all emulated v5 cores are also v5TE, so don't bother */
39 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
40 #define ENABLE_ARCH_5J 0
41 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
48 /* internal defines */
49 typedef struct DisasContext {
50 target_ulong pc;
51 int is_jmp;
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
56 /* Thumb-2 condtional execution bits. */
57 int condexec_mask;
58 int condexec_cond;
59 struct TranslationBlock *tb;
60 int singlestep_enabled;
61 int thumb;
62 #if !defined(CONFIG_USER_ONLY)
63 int user;
64 #endif
65 int vfp_enabled;
66 int vec_len;
67 int vec_stride;
68 } DisasContext;
70 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72 #if defined(CONFIG_USER_ONLY)
73 #define IS_USER(s) 1
74 #else
75 #define IS_USER(s) (s->user)
76 #endif
78 /* These instructions trap after executing, so defer them until after the
79 conditional executions state has been updated. */
80 #define DISAS_WFI 4
81 #define DISAS_SWI 5
83 static TCGv_ptr cpu_env;
84 /* We reuse the same 64-bit temporaries for efficiency. */
85 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
86 static TCGv_i32 cpu_R[16];
87 static TCGv_i32 cpu_exclusive_addr;
88 static TCGv_i32 cpu_exclusive_val;
89 static TCGv_i32 cpu_exclusive_high;
90 #ifdef CONFIG_USER_ONLY
91 static TCGv_i32 cpu_exclusive_test;
92 static TCGv_i32 cpu_exclusive_info;
93 #endif
95 /* FIXME: These should be removed. */
96 static TCGv cpu_F0s, cpu_F1s;
97 static TCGv_i64 cpu_F0d, cpu_F1d;
99 #include "gen-icount.h"
101 static const char *regnames[] =
102 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
103 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
105 /* initialize TCG globals. */
106 void arm_translate_init(void)
108 int i;
110 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
112 for (i = 0; i < 16; i++) {
113 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUState, regs[i]),
115 regnames[i]);
117 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, exclusive_addr), "exclusive_addr");
119 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, exclusive_val), "exclusive_val");
121 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_high), "exclusive_high");
123 #ifdef CONFIG_USER_ONLY
124 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, exclusive_test), "exclusive_test");
126 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, exclusive_info), "exclusive_info");
128 #endif
130 #define GEN_HELPER 2
131 #include "helper.h"
134 static inline TCGv load_cpu_offset(int offset)
136 TCGv tmp = tcg_temp_new_i32();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
141 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
143 static inline void store_cpu_offset(TCGv var, int offset)
145 tcg_gen_st_i32(var, cpu_env, offset);
146 tcg_temp_free_i32(var);
149 #define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
152 /* Set a variable to the value of a CPU register. */
153 static void load_reg_var(DisasContext *s, TCGv var, int reg)
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
164 tcg_gen_mov_i32(var, cpu_R[reg]);
168 /* Create a new temporary and set it to the value of a CPU register. */
169 static inline TCGv load_reg(DisasContext *s, int reg)
171 TCGv tmp = tcg_temp_new_i32();
172 load_reg_var(s, tmp, reg);
173 return tmp;
176 /* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178 static void store_reg(DisasContext *s, int reg, TCGv var)
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
184 tcg_gen_mov_i32(cpu_R[reg], var);
185 tcg_temp_free_i32(var);
188 /* Value extensions. */
189 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
191 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
198 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
200 TCGv tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
204 /* Set NZCV flags from the high 4 bits of var. */
205 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207 static void gen_exception(int excp)
209 TCGv tmp = tcg_temp_new_i32();
210 tcg_gen_movi_i32(tmp, excp);
211 gen_helper_exception(tmp);
212 tcg_temp_free_i32(tmp);
215 static void gen_smul_dual(TCGv a, TCGv b)
217 TCGv tmp1 = tcg_temp_new_i32();
218 TCGv tmp2 = tcg_temp_new_i32();
219 tcg_gen_ext16s_i32(tmp1, a);
220 tcg_gen_ext16s_i32(tmp2, b);
221 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
222 tcg_temp_free_i32(tmp2);
223 tcg_gen_sari_i32(a, a, 16);
224 tcg_gen_sari_i32(b, b, 16);
225 tcg_gen_mul_i32(b, b, a);
226 tcg_gen_mov_i32(a, tmp1);
227 tcg_temp_free_i32(tmp1);
230 /* Byteswap each halfword. */
231 static void gen_rev16(TCGv var)
233 TCGv tmp = tcg_temp_new_i32();
234 tcg_gen_shri_i32(tmp, var, 8);
235 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
236 tcg_gen_shli_i32(var, var, 8);
237 tcg_gen_andi_i32(var, var, 0xff00ff00);
238 tcg_gen_or_i32(var, var, tmp);
239 tcg_temp_free_i32(tmp);
242 /* Byteswap low halfword and sign extend. */
243 static void gen_revsh(TCGv var)
245 tcg_gen_ext16u_i32(var, var);
246 tcg_gen_bswap16_i32(var, var);
247 tcg_gen_ext16s_i32(var, var);
250 /* Unsigned bitfield extract. */
251 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
253 if (shift)
254 tcg_gen_shri_i32(var, var, shift);
255 tcg_gen_andi_i32(var, var, mask);
258 /* Signed bitfield extract. */
259 static void gen_sbfx(TCGv var, int shift, int width)
261 uint32_t signbit;
263 if (shift)
264 tcg_gen_sari_i32(var, var, shift);
265 if (shift + width < 32) {
266 signbit = 1u << (width - 1);
267 tcg_gen_andi_i32(var, var, (1u << width) - 1);
268 tcg_gen_xori_i32(var, var, signbit);
269 tcg_gen_subi_i32(var, var, signbit);
273 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
274 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276 tcg_gen_andi_i32(val, val, mask);
277 tcg_gen_shli_i32(val, val, shift);
278 tcg_gen_andi_i32(base, base, ~(mask << shift));
279 tcg_gen_or_i32(dest, base, val);
282 /* Return (b << 32) + a. Mark inputs as dead */
283 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
287 tcg_gen_extu_i32_i64(tmp64, b);
288 tcg_temp_free_i32(b);
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
292 tcg_temp_free_i64(tmp64);
293 return a;
296 /* Return (b << 32) - a. Mark inputs as dead. */
297 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
301 tcg_gen_extu_i32_i64(tmp64, b);
302 tcg_temp_free_i32(b);
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
306 tcg_temp_free_i64(tmp64);
307 return a;
310 /* FIXME: Most targets have native widening multiplication.
311 It would be good to use that instead of a full wide multiply. */
312 /* 32x32->64 multiply. Marks inputs as dead. */
313 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
315 TCGv_i64 tmp1 = tcg_temp_new_i64();
316 TCGv_i64 tmp2 = tcg_temp_new_i64();
318 tcg_gen_extu_i32_i64(tmp1, a);
319 tcg_temp_free_i32(a);
320 tcg_gen_extu_i32_i64(tmp2, b);
321 tcg_temp_free_i32(b);
322 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
323 tcg_temp_free_i64(tmp2);
324 return tmp1;
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
332 tcg_gen_ext_i32_i64(tmp1, a);
333 tcg_temp_free_i32(a);
334 tcg_gen_ext_i32_i64(tmp2, b);
335 tcg_temp_free_i32(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 tcg_temp_free_i64(tmp2);
338 return tmp1;
341 /* Swap low and high halfwords. */
342 static void gen_swap_half(TCGv var)
344 TCGv tmp = tcg_temp_new_i32();
345 tcg_gen_shri_i32(tmp, var, 16);
346 tcg_gen_shli_i32(var, var, 16);
347 tcg_gen_or_i32(var, var, tmp);
348 tcg_temp_free_i32(tmp);
351 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
352 tmp = (t0 ^ t1) & 0x8000;
353 t0 &= ~0x8000;
354 t1 &= ~0x8000;
355 t0 = (t0 + t1) ^ tmp;
358 static void gen_add16(TCGv t0, TCGv t1)
360 TCGv tmp = tcg_temp_new_i32();
361 tcg_gen_xor_i32(tmp, t0, t1);
362 tcg_gen_andi_i32(tmp, tmp, 0x8000);
363 tcg_gen_andi_i32(t0, t0, ~0x8000);
364 tcg_gen_andi_i32(t1, t1, ~0x8000);
365 tcg_gen_add_i32(t0, t0, t1);
366 tcg_gen_xor_i32(t0, t0, tmp);
367 tcg_temp_free_i32(tmp);
368 tcg_temp_free_i32(t1);
371 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
373 /* Set CF to the top bit of var. */
374 static void gen_set_CF_bit31(TCGv var)
376 TCGv tmp = tcg_temp_new_i32();
377 tcg_gen_shri_i32(tmp, var, 31);
378 gen_set_CF(tmp);
379 tcg_temp_free_i32(tmp);
382 /* Set N and Z flags from var. */
383 static inline void gen_logic_CC(TCGv var)
385 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
386 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
389 /* T0 += T1 + CF. */
390 static void gen_adc(TCGv t0, TCGv t1)
392 TCGv tmp;
393 tcg_gen_add_i32(t0, t0, t1);
394 tmp = load_cpu_field(CF);
395 tcg_gen_add_i32(t0, t0, tmp);
396 tcg_temp_free_i32(tmp);
399 /* dest = T0 + T1 + CF. */
400 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
402 TCGv tmp;
403 tcg_gen_add_i32(dest, t0, t1);
404 tmp = load_cpu_field(CF);
405 tcg_gen_add_i32(dest, dest, tmp);
406 tcg_temp_free_i32(tmp);
409 /* dest = T0 - T1 + CF - 1. */
410 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
412 TCGv tmp;
413 tcg_gen_sub_i32(dest, t0, t1);
414 tmp = load_cpu_field(CF);
415 tcg_gen_add_i32(dest, dest, tmp);
416 tcg_gen_subi_i32(dest, dest, 1);
417 tcg_temp_free_i32(tmp);
420 /* FIXME: Implement this natively. */
421 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
423 static void shifter_out_im(TCGv var, int shift)
425 TCGv tmp = tcg_temp_new_i32();
426 if (shift == 0) {
427 tcg_gen_andi_i32(tmp, var, 1);
428 } else {
429 tcg_gen_shri_i32(tmp, var, shift);
430 if (shift != 31)
431 tcg_gen_andi_i32(tmp, tmp, 1);
433 gen_set_CF(tmp);
434 tcg_temp_free_i32(tmp);
437 /* Shift by immediate. Includes special handling for shift == 0. */
438 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
440 switch (shiftop) {
441 case 0: /* LSL */
442 if (shift != 0) {
443 if (flags)
444 shifter_out_im(var, 32 - shift);
445 tcg_gen_shli_i32(var, var, shift);
447 break;
448 case 1: /* LSR */
449 if (shift == 0) {
450 if (flags) {
451 tcg_gen_shri_i32(var, var, 31);
452 gen_set_CF(var);
454 tcg_gen_movi_i32(var, 0);
455 } else {
456 if (flags)
457 shifter_out_im(var, shift - 1);
458 tcg_gen_shri_i32(var, var, shift);
460 break;
461 case 2: /* ASR */
462 if (shift == 0)
463 shift = 32;
464 if (flags)
465 shifter_out_im(var, shift - 1);
466 if (shift == 32)
467 shift = 31;
468 tcg_gen_sari_i32(var, var, shift);
469 break;
470 case 3: /* ROR/RRX */
471 if (shift != 0) {
472 if (flags)
473 shifter_out_im(var, shift - 1);
474 tcg_gen_rotri_i32(var, var, shift); break;
475 } else {
476 TCGv tmp = load_cpu_field(CF);
477 if (flags)
478 shifter_out_im(var, 0);
479 tcg_gen_shri_i32(var, var, 1);
480 tcg_gen_shli_i32(tmp, tmp, 31);
481 tcg_gen_or_i32(var, var, tmp);
482 tcg_temp_free_i32(tmp);
487 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
488 TCGv shift, int flags)
490 if (flags) {
491 switch (shiftop) {
492 case 0: gen_helper_shl_cc(var, var, shift); break;
493 case 1: gen_helper_shr_cc(var, var, shift); break;
494 case 2: gen_helper_sar_cc(var, var, shift); break;
495 case 3: gen_helper_ror_cc(var, var, shift); break;
497 } else {
498 switch (shiftop) {
499 case 0: gen_helper_shl(var, var, shift); break;
500 case 1: gen_helper_shr(var, var, shift); break;
501 case 2: gen_helper_sar(var, var, shift); break;
502 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
503 tcg_gen_rotr_i32(var, var, shift); break;
506 tcg_temp_free_i32(shift);
509 #define PAS_OP(pfx) \
510 switch (op2) { \
511 case 0: gen_pas_helper(glue(pfx,add16)); break; \
512 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
513 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
514 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
515 case 4: gen_pas_helper(glue(pfx,add8)); break; \
516 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
518 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
520 TCGv_ptr tmp;
522 switch (op1) {
523 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
524 case 1:
525 tmp = tcg_temp_new_ptr();
526 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
527 PAS_OP(s)
528 tcg_temp_free_ptr(tmp);
529 break;
530 case 5:
531 tmp = tcg_temp_new_ptr();
532 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
533 PAS_OP(u)
534 tcg_temp_free_ptr(tmp);
535 break;
536 #undef gen_pas_helper
537 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
538 case 2:
539 PAS_OP(q);
540 break;
541 case 3:
542 PAS_OP(sh);
543 break;
544 case 6:
545 PAS_OP(uq);
546 break;
547 case 7:
548 PAS_OP(uh);
549 break;
550 #undef gen_pas_helper
553 #undef PAS_OP
555 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
556 #define PAS_OP(pfx) \
557 switch (op1) { \
558 case 0: gen_pas_helper(glue(pfx,add8)); break; \
559 case 1: gen_pas_helper(glue(pfx,add16)); break; \
560 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
561 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
562 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
563 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
565 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
567 TCGv_ptr tmp;
569 switch (op2) {
570 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
571 case 0:
572 tmp = tcg_temp_new_ptr();
573 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
574 PAS_OP(s)
575 tcg_temp_free_ptr(tmp);
576 break;
577 case 4:
578 tmp = tcg_temp_new_ptr();
579 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
580 PAS_OP(u)
581 tcg_temp_free_ptr(tmp);
582 break;
583 #undef gen_pas_helper
584 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
585 case 1:
586 PAS_OP(q);
587 break;
588 case 2:
589 PAS_OP(sh);
590 break;
591 case 5:
592 PAS_OP(uq);
593 break;
594 case 6:
595 PAS_OP(uh);
596 break;
597 #undef gen_pas_helper
600 #undef PAS_OP
602 static void gen_test_cc(int cc, int label)
604 TCGv tmp;
605 TCGv tmp2;
606 int inv;
608 switch (cc) {
609 case 0: /* eq: Z */
610 tmp = load_cpu_field(ZF);
611 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
612 break;
613 case 1: /* ne: !Z */
614 tmp = load_cpu_field(ZF);
615 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
616 break;
617 case 2: /* cs: C */
618 tmp = load_cpu_field(CF);
619 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
620 break;
621 case 3: /* cc: !C */
622 tmp = load_cpu_field(CF);
623 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
624 break;
625 case 4: /* mi: N */
626 tmp = load_cpu_field(NF);
627 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
628 break;
629 case 5: /* pl: !N */
630 tmp = load_cpu_field(NF);
631 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
632 break;
633 case 6: /* vs: V */
634 tmp = load_cpu_field(VF);
635 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
636 break;
637 case 7: /* vc: !V */
638 tmp = load_cpu_field(VF);
639 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
640 break;
641 case 8: /* hi: C && !Z */
642 inv = gen_new_label();
643 tmp = load_cpu_field(CF);
644 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
645 tcg_temp_free_i32(tmp);
646 tmp = load_cpu_field(ZF);
647 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
648 gen_set_label(inv);
649 break;
650 case 9: /* ls: !C || Z */
651 tmp = load_cpu_field(CF);
652 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
653 tcg_temp_free_i32(tmp);
654 tmp = load_cpu_field(ZF);
655 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
656 break;
657 case 10: /* ge: N == V -> N ^ V == 0 */
658 tmp = load_cpu_field(VF);
659 tmp2 = load_cpu_field(NF);
660 tcg_gen_xor_i32(tmp, tmp, tmp2);
661 tcg_temp_free_i32(tmp2);
662 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
663 break;
664 case 11: /* lt: N != V -> N ^ V != 0 */
665 tmp = load_cpu_field(VF);
666 tmp2 = load_cpu_field(NF);
667 tcg_gen_xor_i32(tmp, tmp, tmp2);
668 tcg_temp_free_i32(tmp2);
669 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
670 break;
671 case 12: /* gt: !Z && N == V */
672 inv = gen_new_label();
673 tmp = load_cpu_field(ZF);
674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
675 tcg_temp_free_i32(tmp);
676 tmp = load_cpu_field(VF);
677 tmp2 = load_cpu_field(NF);
678 tcg_gen_xor_i32(tmp, tmp, tmp2);
679 tcg_temp_free_i32(tmp2);
680 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
681 gen_set_label(inv);
682 break;
683 case 13: /* le: Z || N != V */
684 tmp = load_cpu_field(ZF);
685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
686 tcg_temp_free_i32(tmp);
687 tmp = load_cpu_field(VF);
688 tmp2 = load_cpu_field(NF);
689 tcg_gen_xor_i32(tmp, tmp, tmp2);
690 tcg_temp_free_i32(tmp2);
691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
692 break;
693 default:
694 fprintf(stderr, "Bad condition code 0x%x\n", cc);
695 abort();
697 tcg_temp_free_i32(tmp);
700 static const uint8_t table_logic_cc[16] = {
701 1, /* and */
702 1, /* xor */
703 0, /* sub */
704 0, /* rsb */
705 0, /* add */
706 0, /* adc */
707 0, /* sbc */
708 0, /* rsc */
709 1, /* andl */
710 1, /* xorl */
711 0, /* cmp */
712 0, /* cmn */
713 1, /* orr */
714 1, /* mov */
715 1, /* bic */
716 1, /* mvn */
719 /* Set PC and Thumb state from an immediate address. */
720 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
722 TCGv tmp;
724 s->is_jmp = DISAS_UPDATE;
725 if (s->thumb != (addr & 1)) {
726 tmp = tcg_temp_new_i32();
727 tcg_gen_movi_i32(tmp, addr & 1);
728 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
729 tcg_temp_free_i32(tmp);
731 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
734 /* Set PC and Thumb state from var. var is marked as dead. */
735 static inline void gen_bx(DisasContext *s, TCGv var)
737 s->is_jmp = DISAS_UPDATE;
738 tcg_gen_andi_i32(cpu_R[15], var, ~1);
739 tcg_gen_andi_i32(var, var, 1);
740 store_cpu_field(var, thumb);
743 /* Variant of store_reg which uses branch&exchange logic when storing
744 to r15 in ARM architecture v7 and above. The source must be a temporary
745 and will be marked as dead. */
746 static inline void store_reg_bx(CPUState *env, DisasContext *s,
747 int reg, TCGv var)
749 if (reg == 15 && ENABLE_ARCH_7) {
750 gen_bx(s, var);
751 } else {
752 store_reg(s, reg, var);
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 * to r15 in ARM architecture v5T and above. This is used for storing
758 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
759 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
760 static inline void store_reg_from_load(CPUState *env, DisasContext *s,
761 int reg, TCGv var)
763 if (reg == 15 && ENABLE_ARCH_5) {
764 gen_bx(s, var);
765 } else {
766 store_reg(s, reg, var);
770 static inline TCGv gen_ld8s(TCGv addr, int index)
772 TCGv tmp = tcg_temp_new_i32();
773 tcg_gen_qemu_ld8s(tmp, addr, index);
774 return tmp;
776 static inline TCGv gen_ld8u(TCGv addr, int index)
778 TCGv tmp = tcg_temp_new_i32();
779 tcg_gen_qemu_ld8u(tmp, addr, index);
780 return tmp;
782 static inline TCGv gen_ld16s(TCGv addr, int index)
784 TCGv tmp = tcg_temp_new_i32();
785 tcg_gen_qemu_ld16s(tmp, addr, index);
786 return tmp;
788 static inline TCGv gen_ld16u(TCGv addr, int index)
790 TCGv tmp = tcg_temp_new_i32();
791 tcg_gen_qemu_ld16u(tmp, addr, index);
792 return tmp;
794 static inline TCGv gen_ld32(TCGv addr, int index)
796 TCGv tmp = tcg_temp_new_i32();
797 tcg_gen_qemu_ld32u(tmp, addr, index);
798 return tmp;
800 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
802 TCGv_i64 tmp = tcg_temp_new_i64();
803 tcg_gen_qemu_ld64(tmp, addr, index);
804 return tmp;
806 static inline void gen_st8(TCGv val, TCGv addr, int index)
808 tcg_gen_qemu_st8(val, addr, index);
809 tcg_temp_free_i32(val);
811 static inline void gen_st16(TCGv val, TCGv addr, int index)
813 tcg_gen_qemu_st16(val, addr, index);
814 tcg_temp_free_i32(val);
816 static inline void gen_st32(TCGv val, TCGv addr, int index)
818 tcg_gen_qemu_st32(val, addr, index);
819 tcg_temp_free_i32(val);
821 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
823 tcg_gen_qemu_st64(val, addr, index);
824 tcg_temp_free_i64(val);
827 static inline void gen_set_pc_im(uint32_t val)
829 tcg_gen_movi_i32(cpu_R[15], val);
832 /* Force a TB lookup after an instruction that changes the CPU state. */
833 static inline void gen_lookup_tb(DisasContext *s)
835 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
836 s->is_jmp = DISAS_UPDATE;
839 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
840 TCGv var)
842 int val, rm, shift, shiftop;
843 TCGv offset;
845 if (!(insn & (1 << 25))) {
846 /* immediate */
847 val = insn & 0xfff;
848 if (!(insn & (1 << 23)))
849 val = -val;
850 if (val != 0)
851 tcg_gen_addi_i32(var, var, val);
852 } else {
853 /* shift/register */
854 rm = (insn) & 0xf;
855 shift = (insn >> 7) & 0x1f;
856 shiftop = (insn >> 5) & 3;
857 offset = load_reg(s, rm);
858 gen_arm_shift_im(offset, shiftop, shift, 0);
859 if (!(insn & (1 << 23)))
860 tcg_gen_sub_i32(var, var, offset);
861 else
862 tcg_gen_add_i32(var, var, offset);
863 tcg_temp_free_i32(offset);
867 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
868 int extra, TCGv var)
870 int val, rm;
871 TCGv offset;
873 if (insn & (1 << 22)) {
874 /* immediate */
875 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
876 if (!(insn & (1 << 23)))
877 val = -val;
878 val += extra;
879 if (val != 0)
880 tcg_gen_addi_i32(var, var, val);
881 } else {
882 /* register */
883 if (extra)
884 tcg_gen_addi_i32(var, var, extra);
885 rm = (insn) & 0xf;
886 offset = load_reg(s, rm);
887 if (!(insn & (1 << 23)))
888 tcg_gen_sub_i32(var, var, offset);
889 else
890 tcg_gen_add_i32(var, var, offset);
891 tcg_temp_free_i32(offset);
895 static TCGv_ptr get_fpstatus_ptr(int neon)
897 TCGv_ptr statusptr = tcg_temp_new_ptr();
898 int offset;
899 if (neon) {
900 offset = offsetof(CPUState, vfp.standard_fp_status);
901 } else {
902 offset = offsetof(CPUState, vfp.fp_status);
904 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
905 return statusptr;
908 #define VFP_OP2(name) \
909 static inline void gen_vfp_##name(int dp) \
911 TCGv_ptr fpst = get_fpstatus_ptr(0); \
912 if (dp) { \
913 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
914 } else { \
915 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
917 tcg_temp_free_ptr(fpst); \
920 VFP_OP2(add)
921 VFP_OP2(sub)
922 VFP_OP2(mul)
923 VFP_OP2(div)
925 #undef VFP_OP2
927 static inline void gen_vfp_F1_mul(int dp)
929 /* Like gen_vfp_mul() but put result in F1 */
930 TCGv_ptr fpst = get_fpstatus_ptr(0);
931 if (dp) {
932 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
933 } else {
934 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
936 tcg_temp_free_ptr(fpst);
939 static inline void gen_vfp_F1_neg(int dp)
941 /* Like gen_vfp_neg() but put result in F1 */
942 if (dp) {
943 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
944 } else {
945 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
949 static inline void gen_vfp_abs(int dp)
951 if (dp)
952 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
953 else
954 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
957 static inline void gen_vfp_neg(int dp)
959 if (dp)
960 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
961 else
962 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
965 static inline void gen_vfp_sqrt(int dp)
967 if (dp)
968 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
969 else
970 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
973 static inline void gen_vfp_cmp(int dp)
975 if (dp)
976 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
977 else
978 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
981 static inline void gen_vfp_cmpe(int dp)
983 if (dp)
984 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
985 else
986 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
989 static inline void gen_vfp_F1_ld0(int dp)
991 if (dp)
992 tcg_gen_movi_i64(cpu_F1d, 0);
993 else
994 tcg_gen_movi_i32(cpu_F1s, 0);
997 #define VFP_GEN_ITOF(name) \
998 static inline void gen_vfp_##name(int dp, int neon) \
1000 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1001 if (dp) { \
1002 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1003 } else { \
1004 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1006 tcg_temp_free_ptr(statusptr); \
1009 VFP_GEN_ITOF(uito)
1010 VFP_GEN_ITOF(sito)
1011 #undef VFP_GEN_ITOF
1013 #define VFP_GEN_FTOI(name) \
1014 static inline void gen_vfp_##name(int dp, int neon) \
1016 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1017 if (dp) { \
1018 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1019 } else { \
1020 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1022 tcg_temp_free_ptr(statusptr); \
1025 VFP_GEN_FTOI(toui)
1026 VFP_GEN_FTOI(touiz)
1027 VFP_GEN_FTOI(tosi)
1028 VFP_GEN_FTOI(tosiz)
1029 #undef VFP_GEN_FTOI
1031 #define VFP_GEN_FIX(name) \
1032 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1034 TCGv tmp_shift = tcg_const_i32(shift); \
1035 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1036 if (dp) { \
1037 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1038 } else { \
1039 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1041 tcg_temp_free_i32(tmp_shift); \
1042 tcg_temp_free_ptr(statusptr); \
1044 VFP_GEN_FIX(tosh)
1045 VFP_GEN_FIX(tosl)
1046 VFP_GEN_FIX(touh)
1047 VFP_GEN_FIX(toul)
1048 VFP_GEN_FIX(shto)
1049 VFP_GEN_FIX(slto)
1050 VFP_GEN_FIX(uhto)
1051 VFP_GEN_FIX(ulto)
1052 #undef VFP_GEN_FIX
1054 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1056 if (dp)
1057 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1058 else
1059 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1062 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1064 if (dp)
1065 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1066 else
1067 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
1070 static inline long
1071 vfp_reg_offset (int dp, int reg)
1073 if (dp)
1074 return offsetof(CPUARMState, vfp.regs[reg]);
1075 else if (reg & 1) {
1076 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1077 + offsetof(CPU_DoubleU, l.upper);
1078 } else {
1079 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1080 + offsetof(CPU_DoubleU, l.lower);
1084 /* Return the offset of a 32-bit piece of a NEON register.
1085 zero is the least significant end of the register. */
1086 static inline long
1087 neon_reg_offset (int reg, int n)
1089 int sreg;
1090 sreg = reg * 2 + n;
1091 return vfp_reg_offset(0, sreg);
1094 static TCGv neon_load_reg(int reg, int pass)
1096 TCGv tmp = tcg_temp_new_i32();
1097 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1098 return tmp;
1101 static void neon_store_reg(int reg, int pass, TCGv var)
1103 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1104 tcg_temp_free_i32(var);
1107 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1109 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1112 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1114 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1117 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1118 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1119 #define tcg_gen_st_f32 tcg_gen_st_i32
1120 #define tcg_gen_st_f64 tcg_gen_st_i64
1122 static inline void gen_mov_F0_vreg(int dp, int reg)
1124 if (dp)
1125 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1126 else
1127 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1130 static inline void gen_mov_F1_vreg(int dp, int reg)
1132 if (dp)
1133 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1134 else
1135 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1138 static inline void gen_mov_vreg_F0(int dp, int reg)
1140 if (dp)
1141 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1142 else
1143 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1146 #define ARM_CP_RW_BIT (1 << 20)
1148 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1150 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1153 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1155 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1158 static inline TCGv iwmmxt_load_creg(int reg)
1160 TCGv var = tcg_temp_new_i32();
1161 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1162 return var;
1165 static inline void iwmmxt_store_creg(int reg, TCGv var)
1167 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1168 tcg_temp_free_i32(var);
1171 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1173 iwmmxt_store_reg(cpu_M0, rn);
1176 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1178 iwmmxt_load_reg(cpu_M0, rn);
1181 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1183 iwmmxt_load_reg(cpu_V1, rn);
1184 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1187 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1189 iwmmxt_load_reg(cpu_V1, rn);
1190 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1193 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1195 iwmmxt_load_reg(cpu_V1, rn);
1196 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
/* Generator macros for the bulk of the iwMMXt ALU ops.  Each expands to
   an inline function gen_op_iwmmxt_<name>_M0_wRn(rn) computing
   M0 = helper(M0, wRn):
     IWMMXT_OP       - helper takes no env pointer
     IWMMXT_OP_ENV   - helper needs cpu_env (may update saturation flags)
     IWMMXT_OP_ENV_SIZE - byte/word/long variants of an ENV op
     IWMMXT_OP_ENV1  - unary op on M0 only (no wRn operand)           */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1224 IWMMXT_OP(maddsq)
1225 IWMMXT_OP(madduq)
1226 IWMMXT_OP(sadb)
1227 IWMMXT_OP(sadw)
1228 IWMMXT_OP(mulslw)
1229 IWMMXT_OP(mulshw)
1230 IWMMXT_OP(mululw)
1231 IWMMXT_OP(muluhw)
1232 IWMMXT_OP(macsw)
1233 IWMMXT_OP(macuw)
1235 IWMMXT_OP_ENV_SIZE(unpackl)
1236 IWMMXT_OP_ENV_SIZE(unpackh)
1238 IWMMXT_OP_ENV1(unpacklub)
1239 IWMMXT_OP_ENV1(unpackluw)
1240 IWMMXT_OP_ENV1(unpacklul)
1241 IWMMXT_OP_ENV1(unpackhub)
1242 IWMMXT_OP_ENV1(unpackhuw)
1243 IWMMXT_OP_ENV1(unpackhul)
1244 IWMMXT_OP_ENV1(unpacklsb)
1245 IWMMXT_OP_ENV1(unpacklsw)
1246 IWMMXT_OP_ENV1(unpacklsl)
1247 IWMMXT_OP_ENV1(unpackhsb)
1248 IWMMXT_OP_ENV1(unpackhsw)
1249 IWMMXT_OP_ENV1(unpackhsl)
1251 IWMMXT_OP_ENV_SIZE(cmpeq)
1252 IWMMXT_OP_ENV_SIZE(cmpgtu)
1253 IWMMXT_OP_ENV_SIZE(cmpgts)
1255 IWMMXT_OP_ENV_SIZE(mins)
1256 IWMMXT_OP_ENV_SIZE(minu)
1257 IWMMXT_OP_ENV_SIZE(maxs)
1258 IWMMXT_OP_ENV_SIZE(maxu)
1260 IWMMXT_OP_ENV_SIZE(subn)
1261 IWMMXT_OP_ENV_SIZE(addn)
1262 IWMMXT_OP_ENV_SIZE(subu)
1263 IWMMXT_OP_ENV_SIZE(addu)
1264 IWMMXT_OP_ENV_SIZE(subs)
1265 IWMMXT_OP_ENV_SIZE(adds)
1267 IWMMXT_OP_ENV(avgb0)
1268 IWMMXT_OP_ENV(avgb1)
1269 IWMMXT_OP_ENV(avgw0)
1270 IWMMXT_OP_ENV(avgw1)
1272 IWMMXT_OP(msadb)
1274 IWMMXT_OP_ENV(packuw)
1275 IWMMXT_OP_ENV(packul)
1276 IWMMXT_OP_ENV(packuq)
1277 IWMMXT_OP_ENV(packsw)
1278 IWMMXT_OP_ENV(packsl)
1279 IWMMXT_OP_ENV(packsq)
1281 static void gen_op_iwmmxt_set_mup(void)
1283 TCGv tmp;
1284 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1285 tcg_gen_ori_i32(tmp, tmp, 2);
1286 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1289 static void gen_op_iwmmxt_set_cup(void)
1291 TCGv tmp;
1292 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1293 tcg_gen_ori_i32(tmp, tmp, 1);
1294 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1297 static void gen_op_iwmmxt_setpsr_nz(void)
1299 TCGv tmp = tcg_temp_new_i32();
1300 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1301 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1304 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1306 iwmmxt_load_reg(cpu_V1, rn);
1307 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1308 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1311 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1313 int rd;
1314 uint32_t offset;
1315 TCGv tmp;
1317 rd = (insn >> 16) & 0xf;
1318 tmp = load_reg(s, rd);
1320 offset = (insn & 0xff) << ((insn >> 7) & 2);
1321 if (insn & (1 << 24)) {
1322 /* Pre indexed */
1323 if (insn & (1 << 23))
1324 tcg_gen_addi_i32(tmp, tmp, offset);
1325 else
1326 tcg_gen_addi_i32(tmp, tmp, -offset);
1327 tcg_gen_mov_i32(dest, tmp);
1328 if (insn & (1 << 21))
1329 store_reg(s, rd, tmp);
1330 else
1331 tcg_temp_free_i32(tmp);
1332 } else if (insn & (1 << 21)) {
1333 /* Post indexed */
1334 tcg_gen_mov_i32(dest, tmp);
1335 if (insn & (1 << 23))
1336 tcg_gen_addi_i32(tmp, tmp, offset);
1337 else
1338 tcg_gen_addi_i32(tmp, tmp, -offset);
1339 store_reg(s, rd, tmp);
1340 } else if (!(insn & (1 << 23)))
1341 return 1;
1342 return 0;
1345 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1347 int rd = (insn >> 0) & 0xf;
1348 TCGv tmp;
1350 if (insn & (1 << 8)) {
1351 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1352 return 1;
1353 } else {
1354 tmp = iwmmxt_load_creg(rd);
1356 } else {
1357 tmp = tcg_temp_new_i32();
1358 iwmmxt_load_reg(cpu_V0, rd);
1359 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1361 tcg_gen_andi_i32(tmp, tmp, mask);
1362 tcg_gen_mov_i32(dest, tmp);
1363 tcg_temp_free_i32(tmp);
1364 return 0;
1367 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1368 (ie. an undefined instruction). */
1369 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1371 int rd, wrd;
1372 int rdhi, rdlo, rd0, rd1, i;
1373 TCGv addr;
1374 TCGv tmp, tmp2, tmp3;
1376 if ((insn & 0x0e000e00) == 0x0c000000) {
1377 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1378 wrd = insn & 0xf;
1379 rdlo = (insn >> 12) & 0xf;
1380 rdhi = (insn >> 16) & 0xf;
1381 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1382 iwmmxt_load_reg(cpu_V0, wrd);
1383 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1384 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1385 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1386 } else { /* TMCRR */
1387 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1388 iwmmxt_store_reg(cpu_V0, wrd);
1389 gen_op_iwmmxt_set_mup();
1391 return 0;
1394 wrd = (insn >> 12) & 0xf;
1395 addr = tcg_temp_new_i32();
1396 if (gen_iwmmxt_address(s, insn, addr)) {
1397 tcg_temp_free_i32(addr);
1398 return 1;
1400 if (insn & ARM_CP_RW_BIT) {
1401 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1402 tmp = tcg_temp_new_i32();
1403 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1404 iwmmxt_store_creg(wrd, tmp);
1405 } else {
1406 i = 1;
1407 if (insn & (1 << 8)) {
1408 if (insn & (1 << 22)) { /* WLDRD */
1409 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1410 i = 0;
1411 } else { /* WLDRW wRd */
1412 tmp = gen_ld32(addr, IS_USER(s));
1414 } else {
1415 if (insn & (1 << 22)) { /* WLDRH */
1416 tmp = gen_ld16u(addr, IS_USER(s));
1417 } else { /* WLDRB */
1418 tmp = gen_ld8u(addr, IS_USER(s));
1421 if (i) {
1422 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1423 tcg_temp_free_i32(tmp);
1425 gen_op_iwmmxt_movq_wRn_M0(wrd);
1427 } else {
1428 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1429 tmp = iwmmxt_load_creg(wrd);
1430 gen_st32(tmp, addr, IS_USER(s));
1431 } else {
1432 gen_op_iwmmxt_movq_M0_wRn(wrd);
1433 tmp = tcg_temp_new_i32();
1434 if (insn & (1 << 8)) {
1435 if (insn & (1 << 22)) { /* WSTRD */
1436 tcg_temp_free_i32(tmp);
1437 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1438 } else { /* WSTRW wRd */
1439 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1440 gen_st32(tmp, addr, IS_USER(s));
1442 } else {
1443 if (insn & (1 << 22)) { /* WSTRH */
1444 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1445 gen_st16(tmp, addr, IS_USER(s));
1446 } else { /* WSTRB */
1447 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1448 gen_st8(tmp, addr, IS_USER(s));
1453 tcg_temp_free_i32(addr);
1454 return 0;
1457 if ((insn & 0x0f000000) != 0x0e000000)
1458 return 1;
1460 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1461 case 0x000: /* WOR */
1462 wrd = (insn >> 12) & 0xf;
1463 rd0 = (insn >> 0) & 0xf;
1464 rd1 = (insn >> 16) & 0xf;
1465 gen_op_iwmmxt_movq_M0_wRn(rd0);
1466 gen_op_iwmmxt_orq_M0_wRn(rd1);
1467 gen_op_iwmmxt_setpsr_nz();
1468 gen_op_iwmmxt_movq_wRn_M0(wrd);
1469 gen_op_iwmmxt_set_mup();
1470 gen_op_iwmmxt_set_cup();
1471 break;
1472 case 0x011: /* TMCR */
1473 if (insn & 0xf)
1474 return 1;
1475 rd = (insn >> 12) & 0xf;
1476 wrd = (insn >> 16) & 0xf;
1477 switch (wrd) {
1478 case ARM_IWMMXT_wCID:
1479 case ARM_IWMMXT_wCASF:
1480 break;
1481 case ARM_IWMMXT_wCon:
1482 gen_op_iwmmxt_set_cup();
1483 /* Fall through. */
1484 case ARM_IWMMXT_wCSSF:
1485 tmp = iwmmxt_load_creg(wrd);
1486 tmp2 = load_reg(s, rd);
1487 tcg_gen_andc_i32(tmp, tmp, tmp2);
1488 tcg_temp_free_i32(tmp2);
1489 iwmmxt_store_creg(wrd, tmp);
1490 break;
1491 case ARM_IWMMXT_wCGR0:
1492 case ARM_IWMMXT_wCGR1:
1493 case ARM_IWMMXT_wCGR2:
1494 case ARM_IWMMXT_wCGR3:
1495 gen_op_iwmmxt_set_cup();
1496 tmp = load_reg(s, rd);
1497 iwmmxt_store_creg(wrd, tmp);
1498 break;
1499 default:
1500 return 1;
1502 break;
1503 case 0x100: /* WXOR */
1504 wrd = (insn >> 12) & 0xf;
1505 rd0 = (insn >> 0) & 0xf;
1506 rd1 = (insn >> 16) & 0xf;
1507 gen_op_iwmmxt_movq_M0_wRn(rd0);
1508 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1509 gen_op_iwmmxt_setpsr_nz();
1510 gen_op_iwmmxt_movq_wRn_M0(wrd);
1511 gen_op_iwmmxt_set_mup();
1512 gen_op_iwmmxt_set_cup();
1513 break;
1514 case 0x111: /* TMRC */
1515 if (insn & 0xf)
1516 return 1;
1517 rd = (insn >> 12) & 0xf;
1518 wrd = (insn >> 16) & 0xf;
1519 tmp = iwmmxt_load_creg(wrd);
1520 store_reg(s, rd, tmp);
1521 break;
1522 case 0x300: /* WANDN */
1523 wrd = (insn >> 12) & 0xf;
1524 rd0 = (insn >> 0) & 0xf;
1525 rd1 = (insn >> 16) & 0xf;
1526 gen_op_iwmmxt_movq_M0_wRn(rd0);
1527 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1528 gen_op_iwmmxt_andq_M0_wRn(rd1);
1529 gen_op_iwmmxt_setpsr_nz();
1530 gen_op_iwmmxt_movq_wRn_M0(wrd);
1531 gen_op_iwmmxt_set_mup();
1532 gen_op_iwmmxt_set_cup();
1533 break;
1534 case 0x200: /* WAND */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_andq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x810: case 0xa10: /* WMADD */
1546 wrd = (insn >> 12) & 0xf;
1547 rd0 = (insn >> 0) & 0xf;
1548 rd1 = (insn >> 16) & 0xf;
1549 gen_op_iwmmxt_movq_M0_wRn(rd0);
1550 if (insn & (1 << 21))
1551 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1552 else
1553 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 break;
1557 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1558 wrd = (insn >> 12) & 0xf;
1559 rd0 = (insn >> 16) & 0xf;
1560 rd1 = (insn >> 0) & 0xf;
1561 gen_op_iwmmxt_movq_M0_wRn(rd0);
1562 switch ((insn >> 22) & 3) {
1563 case 0:
1564 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1565 break;
1566 case 1:
1567 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1568 break;
1569 case 2:
1570 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1571 break;
1572 case 3:
1573 return 1;
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 gen_op_iwmmxt_set_cup();
1578 break;
1579 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1580 wrd = (insn >> 12) & 0xf;
1581 rd0 = (insn >> 16) & 0xf;
1582 rd1 = (insn >> 0) & 0xf;
1583 gen_op_iwmmxt_movq_M0_wRn(rd0);
1584 switch ((insn >> 22) & 3) {
1585 case 0:
1586 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1587 break;
1588 case 1:
1589 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1590 break;
1591 case 2:
1592 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1593 break;
1594 case 3:
1595 return 1;
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1598 gen_op_iwmmxt_set_mup();
1599 gen_op_iwmmxt_set_cup();
1600 break;
1601 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1602 wrd = (insn >> 12) & 0xf;
1603 rd0 = (insn >> 16) & 0xf;
1604 rd1 = (insn >> 0) & 0xf;
1605 gen_op_iwmmxt_movq_M0_wRn(rd0);
1606 if (insn & (1 << 22))
1607 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1608 else
1609 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1610 if (!(insn & (1 << 20)))
1611 gen_op_iwmmxt_addl_M0_wRn(wrd);
1612 gen_op_iwmmxt_movq_wRn_M0(wrd);
1613 gen_op_iwmmxt_set_mup();
1614 break;
1615 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 16) & 0xf;
1618 rd1 = (insn >> 0) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
1620 if (insn & (1 << 21)) {
1621 if (insn & (1 << 20))
1622 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1623 else
1624 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1625 } else {
1626 if (insn & (1 << 20))
1627 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1628 else
1629 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 break;
1634 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 16) & 0xf;
1637 rd1 = (insn >> 0) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1643 if (!(insn & (1 << 20))) {
1644 iwmmxt_load_reg(cpu_V1, wrd);
1645 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
1677 if (insn & (1 << 22)) {
1678 if (insn & (1 << 20))
1679 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1680 else
1681 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1682 } else {
1683 if (insn & (1 << 20))
1684 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1685 else
1686 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1688 gen_op_iwmmxt_movq_wRn_M0(wrd);
1689 gen_op_iwmmxt_set_mup();
1690 gen_op_iwmmxt_set_cup();
1691 break;
1692 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
1697 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1698 tcg_gen_andi_i32(tmp, tmp, 7);
1699 iwmmxt_load_reg(cpu_V1, rd1);
1700 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1701 tcg_temp_free_i32(tmp);
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 break;
1705 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1706 if (((insn >> 6) & 3) == 3)
1707 return 1;
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
1710 tmp = load_reg(s, rd);
1711 gen_op_iwmmxt_movq_M0_wRn(wrd);
1712 switch ((insn >> 6) & 3) {
1713 case 0:
1714 tmp2 = tcg_const_i32(0xff);
1715 tmp3 = tcg_const_i32((insn & 7) << 3);
1716 break;
1717 case 1:
1718 tmp2 = tcg_const_i32(0xffff);
1719 tmp3 = tcg_const_i32((insn & 3) << 4);
1720 break;
1721 case 2:
1722 tmp2 = tcg_const_i32(0xffffffff);
1723 tmp3 = tcg_const_i32((insn & 1) << 5);
1724 break;
1725 default:
1726 TCGV_UNUSED(tmp2);
1727 TCGV_UNUSED(tmp3);
1729 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1730 tcg_temp_free(tmp3);
1731 tcg_temp_free(tmp2);
1732 tcg_temp_free_i32(tmp);
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1737 rd = (insn >> 12) & 0xf;
1738 wrd = (insn >> 16) & 0xf;
1739 if (rd == 15 || ((insn >> 22) & 3) == 3)
1740 return 1;
1741 gen_op_iwmmxt_movq_M0_wRn(wrd);
1742 tmp = tcg_temp_new_i32();
1743 switch ((insn >> 22) & 3) {
1744 case 0:
1745 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1746 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1747 if (insn & 8) {
1748 tcg_gen_ext8s_i32(tmp, tmp);
1749 } else {
1750 tcg_gen_andi_i32(tmp, tmp, 0xff);
1752 break;
1753 case 1:
1754 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1755 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1756 if (insn & 8) {
1757 tcg_gen_ext16s_i32(tmp, tmp);
1758 } else {
1759 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1761 break;
1762 case 2:
1763 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1764 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1765 break;
1767 store_reg(s, rd, tmp);
1768 break;
1769 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1770 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1771 return 1;
1772 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1773 switch ((insn >> 22) & 3) {
1774 case 0:
1775 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1776 break;
1777 case 1:
1778 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1779 break;
1780 case 2:
1781 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1782 break;
1784 tcg_gen_shli_i32(tmp, tmp, 28);
1785 gen_set_nzcv(tmp);
1786 tcg_temp_free_i32(tmp);
1787 break;
1788 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1789 if (((insn >> 6) & 3) == 3)
1790 return 1;
1791 rd = (insn >> 12) & 0xf;
1792 wrd = (insn >> 16) & 0xf;
1793 tmp = load_reg(s, rd);
1794 switch ((insn >> 6) & 3) {
1795 case 0:
1796 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1797 break;
1798 case 1:
1799 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1800 break;
1801 case 2:
1802 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1803 break;
1805 tcg_temp_free_i32(tmp);
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1810 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1811 return 1;
1812 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1813 tmp2 = tcg_temp_new_i32();
1814 tcg_gen_mov_i32(tmp2, tmp);
1815 switch ((insn >> 22) & 3) {
1816 case 0:
1817 for (i = 0; i < 7; i ++) {
1818 tcg_gen_shli_i32(tmp2, tmp2, 4);
1819 tcg_gen_and_i32(tmp, tmp, tmp2);
1821 break;
1822 case 1:
1823 for (i = 0; i < 3; i ++) {
1824 tcg_gen_shli_i32(tmp2, tmp2, 8);
1825 tcg_gen_and_i32(tmp, tmp, tmp2);
1827 break;
1828 case 2:
1829 tcg_gen_shli_i32(tmp2, tmp2, 16);
1830 tcg_gen_and_i32(tmp, tmp, tmp2);
1831 break;
1833 gen_set_nzcv(tmp);
1834 tcg_temp_free_i32(tmp2);
1835 tcg_temp_free_i32(tmp);
1836 break;
1837 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
1843 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1844 break;
1845 case 1:
1846 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1847 break;
1848 case 2:
1849 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1850 break;
1851 case 3:
1852 return 1;
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 break;
1857 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1858 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1859 return 1;
1860 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1861 tmp2 = tcg_temp_new_i32();
1862 tcg_gen_mov_i32(tmp2, tmp);
1863 switch ((insn >> 22) & 3) {
1864 case 0:
1865 for (i = 0; i < 7; i ++) {
1866 tcg_gen_shli_i32(tmp2, tmp2, 4);
1867 tcg_gen_or_i32(tmp, tmp, tmp2);
1869 break;
1870 case 1:
1871 for (i = 0; i < 3; i ++) {
1872 tcg_gen_shli_i32(tmp2, tmp2, 8);
1873 tcg_gen_or_i32(tmp, tmp, tmp2);
1875 break;
1876 case 2:
1877 tcg_gen_shli_i32(tmp2, tmp2, 16);
1878 tcg_gen_or_i32(tmp, tmp, tmp2);
1879 break;
1881 gen_set_nzcv(tmp);
1882 tcg_temp_free_i32(tmp2);
1883 tcg_temp_free_i32(tmp);
1884 break;
1885 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1886 rd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1889 return 1;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
1891 tmp = tcg_temp_new_i32();
1892 switch ((insn >> 22) & 3) {
1893 case 0:
1894 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1895 break;
1896 case 1:
1897 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1898 break;
1899 case 2:
1900 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1901 break;
1903 store_reg(s, rd, tmp);
1904 break;
1905 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1906 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 switch ((insn >> 22) & 3) {
1912 case 0:
1913 if (insn & (1 << 21))
1914 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1915 else
1916 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1917 break;
1918 case 1:
1919 if (insn & (1 << 21))
1920 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1923 break;
1924 case 2:
1925 if (insn & (1 << 21))
1926 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1927 else
1928 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1929 break;
1930 case 3:
1931 return 1;
1933 gen_op_iwmmxt_movq_wRn_M0(wrd);
1934 gen_op_iwmmxt_set_mup();
1935 gen_op_iwmmxt_set_cup();
1936 break;
1937 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1938 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1939 wrd = (insn >> 12) & 0xf;
1940 rd0 = (insn >> 16) & 0xf;
1941 gen_op_iwmmxt_movq_M0_wRn(rd0);
1942 switch ((insn >> 22) & 3) {
1943 case 0:
1944 if (insn & (1 << 21))
1945 gen_op_iwmmxt_unpacklsb_M0();
1946 else
1947 gen_op_iwmmxt_unpacklub_M0();
1948 break;
1949 case 1:
1950 if (insn & (1 << 21))
1951 gen_op_iwmmxt_unpacklsw_M0();
1952 else
1953 gen_op_iwmmxt_unpackluw_M0();
1954 break;
1955 case 2:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_unpacklsl_M0();
1958 else
1959 gen_op_iwmmxt_unpacklul_M0();
1960 break;
1961 case 3:
1962 return 1;
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1969 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 switch ((insn >> 22) & 3) {
1974 case 0:
1975 if (insn & (1 << 21))
1976 gen_op_iwmmxt_unpackhsb_M0();
1977 else
1978 gen_op_iwmmxt_unpackhub_M0();
1979 break;
1980 case 1:
1981 if (insn & (1 << 21))
1982 gen_op_iwmmxt_unpackhsw_M0();
1983 else
1984 gen_op_iwmmxt_unpackhuw_M0();
1985 break;
1986 case 2:
1987 if (insn & (1 << 21))
1988 gen_op_iwmmxt_unpackhsl_M0();
1989 else
1990 gen_op_iwmmxt_unpackhul_M0();
1991 break;
1992 case 3:
1993 return 1;
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2000 case 0x214: case 0x614: case 0xa14: case 0xe14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = tcg_temp_new_i32();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 tcg_temp_free_i32(tmp);
2009 return 1;
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2022 tcg_temp_free_i32(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2028 case 0x014: case 0x414: case 0x814: case 0xc14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = tcg_temp_new_i32();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 tcg_temp_free_i32(tmp);
2037 return 1;
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2050 tcg_temp_free_i32(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2056 case 0x114: case 0x514: case 0x914: case 0xd14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = tcg_temp_new_i32();
2063 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2064 tcg_temp_free_i32(tmp);
2065 return 1;
2067 switch ((insn >> 22) & 3) {
2068 case 1:
2069 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2073 break;
2074 case 3:
2075 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2076 break;
2078 tcg_temp_free_i32(tmp);
2079 gen_op_iwmmxt_movq_wRn_M0(wrd);
2080 gen_op_iwmmxt_set_mup();
2081 gen_op_iwmmxt_set_cup();
2082 break;
2083 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2084 case 0x314: case 0x714: case 0xb14: case 0xf14:
2085 if (((insn >> 22) & 3) == 0)
2086 return 1;
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 tmp = tcg_temp_new_i32();
2091 switch ((insn >> 22) & 3) {
2092 case 1:
2093 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2094 tcg_temp_free_i32(tmp);
2095 return 1;
2097 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2098 break;
2099 case 2:
2100 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2101 tcg_temp_free_i32(tmp);
2102 return 1;
2104 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2105 break;
2106 case 3:
2107 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2108 tcg_temp_free_i32(tmp);
2109 return 1;
2111 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2112 break;
2114 tcg_temp_free_i32(tmp);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2120 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 rd1 = (insn >> 0) & 0xf;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 switch ((insn >> 22) & 3) {
2126 case 0:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2129 else
2130 gen_op_iwmmxt_minub_M0_wRn(rd1);
2131 break;
2132 case 1:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2135 else
2136 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2137 break;
2138 case 2:
2139 if (insn & (1 << 21))
2140 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2141 else
2142 gen_op_iwmmxt_minul_M0_wRn(rd1);
2143 break;
2144 case 3:
2145 return 1;
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2151 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 rd1 = (insn >> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2162 break;
2163 case 1:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2166 else
2167 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2168 break;
2169 case 2:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 break;
2181 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2182 case 0x402: case 0x502: case 0x602: case 0x702:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 tmp = tcg_const_i32((insn >> 20) & 3);
2188 iwmmxt_load_reg(cpu_V1, rd1);
2189 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2190 tcg_temp_free(tmp);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 break;
2194 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2195 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2196 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2197 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2198 wrd = (insn >> 12) & 0xf;
2199 rd0 = (insn >> 16) & 0xf;
2200 rd1 = (insn >> 0) & 0xf;
2201 gen_op_iwmmxt_movq_M0_wRn(rd0);
2202 switch ((insn >> 20) & 0xf) {
2203 case 0x0:
2204 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2205 break;
2206 case 0x1:
2207 gen_op_iwmmxt_subub_M0_wRn(rd1);
2208 break;
2209 case 0x3:
2210 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2211 break;
2212 case 0x4:
2213 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2214 break;
2215 case 0x5:
2216 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2217 break;
2218 case 0x7:
2219 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2220 break;
2221 case 0x8:
2222 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2223 break;
2224 case 0x9:
2225 gen_op_iwmmxt_subul_M0_wRn(rd1);
2226 break;
2227 case 0xb:
2228 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2229 break;
2230 default:
2231 return 1;
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 gen_op_iwmmxt_set_cup();
2236 break;
2237 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2238 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2239 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2240 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
2244 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2245 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2246 tcg_temp_free(tmp);
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 gen_op_iwmmxt_set_cup();
2250 break;
2251 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2252 case 0x418: case 0x518: case 0x618: case 0x718:
2253 case 0x818: case 0x918: case 0xa18: case 0xb18:
2254 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 20) & 0xf) {
2260 case 0x0:
2261 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2262 break;
2263 case 0x1:
2264 gen_op_iwmmxt_addub_M0_wRn(rd1);
2265 break;
2266 case 0x3:
2267 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2268 break;
2269 case 0x4:
2270 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2271 break;
2272 case 0x5:
2273 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2274 break;
2275 case 0x7:
2276 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2277 break;
2278 case 0x8:
2279 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2280 break;
2281 case 0x9:
2282 gen_op_iwmmxt_addul_M0_wRn(rd1);
2283 break;
2284 case 0xb:
2285 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2286 break;
2287 default:
2288 return 1;
2290 gen_op_iwmmxt_movq_wRn_M0(wrd);
2291 gen_op_iwmmxt_set_mup();
2292 gen_op_iwmmxt_set_cup();
2293 break;
2294 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2295 case 0x408: case 0x508: case 0x608: case 0x708:
2296 case 0x808: case 0x908: case 0xa08: case 0xb08:
2297 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2298 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2299 return 1;
2300 wrd = (insn >> 12) & 0xf;
2301 rd0 = (insn >> 16) & 0xf;
2302 rd1 = (insn >> 0) & 0xf;
2303 gen_op_iwmmxt_movq_M0_wRn(rd0);
2304 switch ((insn >> 22) & 3) {
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2308 else
2309 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_packul_M0_wRn(rd1);
2316 break;
2317 case 3:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2322 break;
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 gen_op_iwmmxt_set_cup();
2327 break;
2328 case 0x201: case 0x203: case 0x205: case 0x207:
2329 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2330 case 0x211: case 0x213: case 0x215: case 0x217:
2331 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2332 wrd = (insn >> 5) & 0xf;
2333 rd0 = (insn >> 12) & 0xf;
2334 rd1 = (insn >> 0) & 0xf;
2335 if (rd0 == 0xf || rd1 == 0xf)
2336 return 1;
2337 gen_op_iwmmxt_movq_M0_wRn(wrd);
2338 tmp = load_reg(s, rd0);
2339 tmp2 = load_reg(s, rd1);
2340 switch ((insn >> 16) & 0xf) {
2341 case 0x0: /* TMIA */
2342 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2343 break;
2344 case 0x8: /* TMIAPH */
2345 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2346 break;
2347 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2348 if (insn & (1 << 16))
2349 tcg_gen_shri_i32(tmp, tmp, 16);
2350 if (insn & (1 << 17))
2351 tcg_gen_shri_i32(tmp2, tmp2, 16);
2352 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2353 break;
2354 default:
2355 tcg_temp_free_i32(tmp2);
2356 tcg_temp_free_i32(tmp);
2357 return 1;
2359 tcg_temp_free_i32(tmp2);
2360 tcg_temp_free_i32(tmp);
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 break;
2364 default:
2365 return 1;
2368 return 0;
2371 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2372 (ie. an undefined instruction). */
2373 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
/* Decode an XScale DSP instruction (MIA*, MAR, MRA on the internal
 * accumulator). Returns nonzero for an undefined encoding, 0 on success.
 * NOTE(review): only accumulator acc0 is implemented; any other acc UNDEFs.
 * The scrape this chunk came from dropped brace-only/blank lines, so the
 * brace structure is implicit; code tokens below are untouched. */
2375 int acc, rd0, rd1, rdhi, rdlo;
2376 TCGv tmp, tmp2;
2378 if ((insn & 0x0ff00f10) == 0x0e200010) {
2379 /* Multiply with Internal Accumulate Format */
2380 rd0 = (insn >> 12) & 0xf;
2381 rd1 = insn & 0xf;
2382 acc = (insn >> 5) & 7;
2384 if (acc != 0)
2385 return 1;
2387 tmp = load_reg(s, rd0);
2388 tmp2 = load_reg(s, rd1);
2389 switch ((insn >> 16) & 0xf) {
2390 case 0x0: /* MIA */
2391 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2392 break;
2393 case 0x8: /* MIAPH */
2394 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2395 break;
2396 case 0xc: /* MIABB */
2397 case 0xd: /* MIABT */
2398 case 0xe: /* MIATB */
2399 case 0xf: /* MIATT */
/* x/y halfword selectors live in insn bits 16 and 17. */
2400 if (insn & (1 << 16))
2401 tcg_gen_shri_i32(tmp, tmp, 16);
2402 if (insn & (1 << 17))
2403 tcg_gen_shri_i32(tmp2, tmp2, 16);
2404 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2405 break;
2406 default:
/* NOTE(review): tmp/tmp2 are not freed on this path — the TCG temp
 * pool is reset per-TB, but this differs from the cleanup below. */
2407 return 1;
2409 tcg_temp_free_i32(tmp2);
2410 tcg_temp_free_i32(tmp);
2412 gen_op_iwmmxt_movq_wRn_M0(acc);
2413 return 0;
2416 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2417 /* Internal Accumulator Access Format */
2418 rdhi = (insn >> 16) & 0xf;
2419 rdlo = (insn >> 12) & 0xf;
2420 acc = insn & 7;
2422 if (acc != 0)
2423 return 1;
2425 if (insn & ARM_CP_RW_BIT) { /* MRA */
2426 iwmmxt_load_reg(cpu_V0, acc);
2427 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2428 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2429 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
/* The accumulator is 40 bits wide: keep only bits [39:32] in rdhi. */
2430 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2431 } else { /* MAR */
2432 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2433 iwmmxt_store_reg(cpu_V0, acc);
2435 return 0;
2438 return 1;
2441 /* Disassemble system coprocessor instruction. Return nonzero if
2442 instruction is not defined. */
2443 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
/* Decode a generic system-coprocessor MRC/MCR via the per-coprocessor
 * cp_read/cp_write hooks registered in env->cp[]. Returns nonzero for
 * UNDEF (user mode, or no hook installed for this coprocessor). */
2445 TCGv tmp, tmp2;
2446 uint32_t rd = (insn >> 12) & 0xf;
2447 uint32_t cp = (insn >> 8) & 0xf;
2448 if (IS_USER(s)) {
2449 return 1;
2452 if (insn & ARM_CP_RW_BIT) {
2453 if (!env->cp[cp].cp_read)
2454 return 1;
/* Sync the PC so the helper can raise a precise exception. */
2455 gen_set_pc_im(s->pc);
2456 tmp = tcg_temp_new_i32();
2457 tmp2 = tcg_const_i32(insn);
2458 gen_helper_get_cp(tmp, cpu_env, tmp2);
2459 tcg_temp_free(tmp2);
2460 store_reg(s, rd, tmp);
2461 } else {
2462 if (!env->cp[cp].cp_write)
2463 return 1;
2464 gen_set_pc_im(s->pc);
2465 tmp = load_reg(s, rd);
2466 tmp2 = tcg_const_i32(insn);
2467 gen_helper_set_cp(cpu_env, tmp2, tmp);
2468 tcg_temp_free(tmp2);
2469 tcg_temp_free_i32(tmp);
2471 return 0;
2474 static int cp15_user_ok(CPUState *env, uint32_t insn)
/* Return nonzero if this cp15 access is permitted from user mode.
 * Covers the v7 performance-monitor registers (gated by PMUSERENR.EN,
 * with PMUSERENR itself read-only from user mode) and the user-readable
 * TLS registers in c13. Everything else is denied. */
2476 int cpn = (insn >> 16) & 0xf;
2477 int cpm = insn & 0xf;
/* opc1/opc2 folded into a single 6-bit value: opc2 | (opc1 << 3). */
2478 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2480 if (arm_feature(env, ARM_FEATURE_V7) && cpn == 9) {
2481 /* Performance monitor registers fall into three categories:
2482 * (a) always UNDEF in usermode
2483 * (b) UNDEF only if PMUSERENR.EN is 0
2484 * (c) always read OK and UNDEF on write (PMUSERENR only)
2486 if ((cpm == 12 && (op < 6)) ||
2487 (cpm == 13 && (op < 3))) {
2488 return env->cp15.c9_pmuserenr;
2489 } else if (cpm == 14 && op == 0 && (insn & ARM_CP_RW_BIT)) {
2490 /* PMUSERENR, read only */
2491 return 1;
2493 return 0;
2496 if (cpn == 13 && cpm == 0) {
2497 /* TLS register. */
/* TPIDRURW (op 2) is read/write; TPIDRURO (op 3) is read-only. */
2498 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2499 return 1;
2501 return 0;
2504 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
/* Handle MRC/MCR to the cp15 TLS registers (c13, c0, opc2 2..4)
 * inline, without calling the generic cp15 helpers.
 * Returns 1 if the access was handled here, 0 to fall through. */
2506 TCGv tmp;
2507 int cpn = (insn >> 16) & 0xf;
2508 int cpm = insn & 0xf;
2509 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2511 if (!arm_feature(env, ARM_FEATURE_V6K))
2512 return 0;
2514 if (!(cpn == 13 && cpm == 0))
2515 return 0;
2517 if (insn & ARM_CP_RW_BIT) {
/* MRC: read one of the three TLS registers into rd. */
2518 switch (op) {
2519 case 2:
2520 tmp = load_cpu_field(cp15.c13_tls1);
2521 break;
2522 case 3:
2523 tmp = load_cpu_field(cp15.c13_tls2);
2524 break;
2525 case 4:
2526 tmp = load_cpu_field(cp15.c13_tls3);
2527 break;
2528 default:
2529 return 0;
2531 store_reg(s, rd, tmp);
2533 } else {
/* MCR: write rd into one of the three TLS registers. */
2534 tmp = load_reg(s, rd);
2535 switch (op) {
2536 case 2:
2537 store_cpu_field(tmp, cp15.c13_tls1);
2538 break;
2539 case 3:
2540 store_cpu_field(tmp, cp15.c13_tls2);
2541 break;
2542 case 4:
2543 store_cpu_field(tmp, cp15.c13_tls3);
2544 break;
2545 default:
2546 tcg_temp_free_i32(tmp);
2547 return 0;
2550 return 1;
2553 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2554 instruction is not defined. */
2555 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
/* Decode a cp15 system-control coprocessor instruction.
 * Returns nonzero for UNDEF. A number of encodings (WFI, barriers,
 * prefetch) are special-cased so they work in linux-user mode, which
 * has no functional get_cp15/set_cp15 helpers; everything else goes
 * through the generic cp15 helper calls at the bottom. */
2557 uint32_t rd;
2558 TCGv tmp, tmp2;
2560 /* M profile cores use memory mapped registers instead of cp15. */
2561 if (arm_feature(env, ARM_FEATURE_M))
2562 return 1;
2564 if ((insn & (1 << 25)) == 0) {
2565 if (insn & (1 << 20)) {
2566 /* mrrc */
2567 return 1;
2569 /* mcrr. Used for block cache operations, so implement as no-op. */
2570 return 0;
2572 if ((insn & (1 << 4)) == 0) {
2573 /* cdp */
2574 return 1;
2576 /* We special case a number of cp15 instructions which were used
2577 * for things which are real instructions in ARMv7. This allows
2578 * them to work in linux-user mode which doesn't provide functional
2579 * get_cp15/set_cp15 helpers, and is more efficient anyway.
2581 switch ((insn & 0x0fff0fff)) {
2582 case 0x0e070f90:
2583 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2584 * In v7, this must NOP.
2586 if (IS_USER(s)) {
2587 return 1;
2589 if (!arm_feature(env, ARM_FEATURE_V7)) {
2590 /* Wait for interrupt. */
2591 gen_set_pc_im(s->pc);
2592 s->is_jmp = DISAS_WFI;
2594 return 0;
2595 case 0x0e070f58:
2596 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2597 * so this is slightly over-broad.
2599 if (!IS_USER(s) && !arm_feature(env, ARM_FEATURE_V6)) {
2600 /* Wait for interrupt. */
2601 gen_set_pc_im(s->pc);
2602 s->is_jmp = DISAS_WFI;
2603 return 0;
2605 /* Otherwise continue to handle via helper function.
2606 * In particular, on v7 and some v6 cores this is one of
2607 * the VA-PA registers.
2609 break;
2610 case 0x0e070f3d:
2611 /* 0,c7,c13,1: prefetch-by-MVA in v6, NOP in v7 */
2612 if (arm_feature(env, ARM_FEATURE_V6)) {
2613 return IS_USER(s) ? 1 : 0;
2615 break;
2616 case 0x0e070f95: /* 0,c7,c5,4 : ISB */
2617 case 0x0e070f9a: /* 0,c7,c10,4: DSB */
2618 case 0x0e070fba: /* 0,c7,c10,5: DMB */
2619 /* Barriers in both v6 and v7 */
2620 if (arm_feature(env, ARM_FEATURE_V6)) {
/* TCG executes one TB at a time, so barriers are no-ops here. */
2621 return 0;
2623 break;
2624 default:
2625 break;
2628 if (IS_USER(s) && !cp15_user_ok(env, insn)) {
2629 return 1;
2632 rd = (insn >> 12) & 0xf;
2634 if (cp15_tls_load_store(env, s, insn, rd))
2635 return 0;
2637 tmp2 = tcg_const_i32(insn);
2638 if (insn & ARM_CP_RW_BIT) {
2639 tmp = tcg_temp_new_i32();
2640 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2641 /* If the destination register is r15 then sets condition codes. */
2642 if (rd != 15)
2643 store_reg(s, rd, tmp)
2644 else
2645 tcg_temp_free_i32(tmp);
2646 } else {
2647 tmp = load_reg(s, rd);
2648 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2649 tcg_temp_free_i32(tmp);
2650 /* Normally we would always end the TB here, but Linux
2651 * arch/arm/mach-pxa/sleep.S expects two instructions following
2652 * an MMU enable to execute from cache. Imitate this behaviour. */
2653 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2654 (insn & 0x0fff0fff) != 0x0e010f10)
2655 gen_lookup_tb(s);
2657 tcg_temp_free_i32(tmp2);
2658 return 0;
/* Helpers to extract VFP register numbers from the D/N/M fields of an
 * instruction. A "single" register number is (4-bit field << 1) | extra
 * bit; a "double" register number on VFP3 gains a 5th (top) bit, while
 * on pre-VFP3 a set top bit makes the enclosing decoder return 1
 * (UNDEF) -- note VFP_DREG expands to code containing 'return 1;' and
 * references a local 'env', so it may only be used inside the
 * disassembler functions. */
2661 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2662 #define VFP_SREG(insn, bigbit, smallbit) \
2663 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2664 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2665 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2666 reg = (((insn) >> (bigbit)) & 0x0f) \
2667 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2668 } else { \
2669 if (insn & (1 << (smallbit))) \
2670 return 1; \
2671 reg = ((insn) >> (bigbit)) & 0x0f; \
2672 }} while (0)
/* D field: insn[15:12] + insn[22]; N: insn[19:16] + insn[7];
 * M: insn[3:0] + insn[5]. */
2674 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2675 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2676 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2677 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2678 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2679 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2681 /* Move between integer and VFP cores. */
2682 static TCGv gen_vfp_mrs(void)
2684 TCGv tmp = tcg_temp_new_i32();
2685 tcg_gen_mov_i32(tmp, cpu_F0s);
2686 return tmp;
2689 static void gen_vfp_msr(TCGv tmp)
2691 tcg_gen_mov_i32(cpu_F0s, tmp);
2692 tcg_temp_free_i32(tmp);
2695 static void gen_neon_dup_u8(TCGv var, int shift)
2697 TCGv tmp = tcg_temp_new_i32();
2698 if (shift)
2699 tcg_gen_shri_i32(var, var, shift);
2700 tcg_gen_ext8u_i32(var, var);
2701 tcg_gen_shli_i32(tmp, var, 8);
2702 tcg_gen_or_i32(var, var, tmp);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
2705 tcg_temp_free_i32(tmp);
2708 static void gen_neon_dup_low16(TCGv var)
2710 TCGv tmp = tcg_temp_new_i32();
2711 tcg_gen_ext16u_i32(var, var);
2712 tcg_gen_shli_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
2714 tcg_temp_free_i32(tmp);
2717 static void gen_neon_dup_high16(TCGv var)
2719 TCGv tmp = tcg_temp_new_i32();
2720 tcg_gen_andi_i32(var, var, 0xffff0000);
2721 tcg_gen_shri_i32(tmp, var, 16);
2722 tcg_gen_or_i32(var, var, tmp);
2723 tcg_temp_free_i32(tmp);
2726 static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2728 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2729 TCGv tmp;
2730 switch (size) {
2731 case 0:
2732 tmp = gen_ld8u(addr, IS_USER(s));
2733 gen_neon_dup_u8(tmp, 0);
2734 break;
2735 case 1:
2736 tmp = gen_ld16u(addr, IS_USER(s));
2737 gen_neon_dup_low16(tmp);
2738 break;
2739 case 2:
2740 tmp = gen_ld32(addr, IS_USER(s));
2741 break;
2742 default: /* Avoid compiler warnings. */
2743 abort();
2745 return tmp;
2748 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2749 (ie. an undefined instruction). */
2750 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
/* Decode one VFP instruction. Returns nonzero for UNDEF, 0 on success.
 * Handles single register transfers (insn[27:24] == 0xe with bit 4 set),
 * data processing (0xe, bit 4 clear) including short-vector iteration,
 * and two-register transfer / load-store forms (0xc/0xd).
 * NOTE(review): the scrape this chunk came from dropped brace-only and
 * blank lines, so brace structure is implicit; code tokens untouched. */
2752 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2753 int dp, veclen;
2754 TCGv addr;
2755 TCGv tmp;
2756 TCGv tmp2;
2758 if (!arm_feature(env, ARM_FEATURE_VFP))
2759 return 1;
2761 if (!s->vfp_enabled) {
2762 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2763 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2764 return 1;
2765 rn = (insn >> 16) & 0xf;
2766 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2767 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2768 return 1;
/* dp is set for double-precision (coprocessor 11) encodings. */
2770 dp = ((insn & 0xf00) == 0xb00);
2771 switch ((insn >> 24) & 0xf) {
2772 case 0xe:
2773 if (insn & (1 << 4)) {
2774 /* single register transfer */
2775 rd = (insn >> 12) & 0xf;
2776 if (dp) {
2777 int size;
2778 int pass;
2780 VFP_DREG_N(rn, insn);
2781 if (insn & 0xf)
2782 return 1;
2783 if (insn & 0x00c00060
2784 && !arm_feature(env, ARM_FEATURE_NEON))
2785 return 1;
2787 pass = (insn >> 21) & 1;
2788 if (insn & (1 << 22)) {
2789 size = 0;
2790 offset = ((insn >> 5) & 3) * 8;
2791 } else if (insn & (1 << 5)) {
2792 size = 1;
2793 offset = (insn & (1 << 6)) ? 16 : 0;
2794 } else {
2795 size = 2;
2796 offset = 0;
2798 if (insn & ARM_CP_RW_BIT) {
2799 /* vfp->arm */
2800 tmp = neon_load_reg(rn, pass);
2801 switch (size) {
2802 case 0:
2803 if (offset)
2804 tcg_gen_shri_i32(tmp, tmp, offset);
2805 if (insn & (1 << 23))
2806 gen_uxtb(tmp);
2807 else
2808 gen_sxtb(tmp);
2809 break;
2810 case 1:
2811 if (insn & (1 << 23)) {
2812 if (offset) {
2813 tcg_gen_shri_i32(tmp, tmp, 16);
2814 } else {
2815 gen_uxth(tmp);
2817 } else {
2818 if (offset) {
2819 tcg_gen_sari_i32(tmp, tmp, 16);
2820 } else {
2821 gen_sxth(tmp);
2824 break;
2825 case 2:
2826 break;
2828 store_reg(s, rd, tmp);
2829 } else {
2830 /* arm->vfp */
2831 tmp = load_reg(s, rd);
2832 if (insn & (1 << 23)) {
2833 /* VDUP */
2834 if (size == 0) {
2835 gen_neon_dup_u8(tmp, 0);
2836 } else if (size == 1) {
2837 gen_neon_dup_low16(tmp);
2839 for (n = 0; n <= pass * 2; n++) {
2840 tmp2 = tcg_temp_new_i32();
2841 tcg_gen_mov_i32(tmp2, tmp);
2842 neon_store_reg(rn, n, tmp2);
2844 neon_store_reg(rn, n, tmp);
2845 } else {
2846 /* VMOV */
2847 switch (size) {
2848 case 0:
2849 tmp2 = neon_load_reg(rn, pass);
2850 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2851 tcg_temp_free_i32(tmp2);
2852 break;
2853 case 1:
2854 tmp2 = neon_load_reg(rn, pass);
2855 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2856 tcg_temp_free_i32(tmp2);
2857 break;
2858 case 2:
2859 break;
2861 neon_store_reg(rn, pass, tmp);
2864 } else { /* !dp */
2865 if ((insn & 0x6f) != 0x00)
2866 return 1;
2867 rn = VFP_SREG_N(insn);
2868 if (insn & ARM_CP_RW_BIT) {
2869 /* vfp->arm */
2870 if (insn & (1 << 21)) {
2871 /* system register */
2872 rn >>= 1;
2874 switch (rn) {
2875 case ARM_VFP_FPSID:
2876 /* VFP2 allows access to FSID from userspace.
2877 VFP3 restricts all id registers to privileged
2878 accesses. */
2879 if (IS_USER(s)
2880 && arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
2882 tmp = load_cpu_field(vfp.xregs[rn]);
2883 break;
2884 case ARM_VFP_FPEXC:
2885 if (IS_USER(s))
2886 return 1;
2887 tmp = load_cpu_field(vfp.xregs[rn]);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 /* Not present in VFP3. */
2892 if (IS_USER(s)
2893 || arm_feature(env, ARM_FEATURE_VFP3))
2894 return 1;
2895 tmp = load_cpu_field(vfp.xregs[rn]);
2896 break;
2897 case ARM_VFP_FPSCR:
2898 if (rd == 15) {
2899 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2900 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2901 } else {
2902 tmp = tcg_temp_new_i32();
2903 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2905 break;
2906 case ARM_VFP_MVFR0:
2907 case ARM_VFP_MVFR1:
2908 if (IS_USER(s)
2909 || !arm_feature(env, ARM_FEATURE_VFP3))
2910 return 1;
2911 tmp = load_cpu_field(vfp.xregs[rn]);
2912 break;
2913 default:
2914 return 1;
2916 } else {
2917 gen_mov_F0_vreg(0, rn);
2918 tmp = gen_vfp_mrs();
2920 if (rd == 15) {
2921 /* Set the 4 flag bits in the CPSR. */
2922 gen_set_nzcv(tmp);
2923 tcg_temp_free_i32(tmp);
2924 } else {
2925 store_reg(s, rd, tmp);
2927 } else {
2928 /* arm->vfp */
2929 tmp = load_reg(s, rd);
2930 if (insn & (1 << 21)) {
2931 rn >>= 1;
2932 /* system register */
2933 switch (rn) {
2934 case ARM_VFP_FPSID:
2935 case ARM_VFP_MVFR0:
2936 case ARM_VFP_MVFR1:
2937 /* Writes are ignored. */
2938 break;
2939 case ARM_VFP_FPSCR:
2940 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2941 tcg_temp_free_i32(tmp);
2942 gen_lookup_tb(s);
2943 break;
2944 case ARM_VFP_FPEXC:
2945 if (IS_USER(s))
2946 return 1;
2947 /* TODO: VFP subarchitecture support.
2948 * For now, keep the EN bit only */
2949 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2950 store_cpu_field(tmp, vfp.xregs[rn]);
2951 gen_lookup_tb(s);
2952 break;
2953 case ARM_VFP_FPINST:
2954 case ARM_VFP_FPINST2:
2955 store_cpu_field(tmp, vfp.xregs[rn]);
2956 break;
2957 default:
2958 return 1;
2960 } else {
2961 gen_vfp_msr(tmp);
2962 gen_mov_vreg_F0(0, rn);
2966 } else {
2967 /* data processing */
2968 /* The opcode is in bits 23, 21, 20 and 6. */
2969 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2970 if (dp) {
2971 if (op == 15) {
2972 /* rn is opcode */
2973 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2974 } else {
2975 /* rn is register number */
2976 VFP_DREG_N(rn, insn);
2979 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2980 /* Integer or single precision destination. */
2981 rd = VFP_SREG_D(insn);
2982 } else {
2983 VFP_DREG_D(rd, insn);
2985 if (op == 15 &&
2986 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2987 /* VCVT from int is always from S reg regardless of dp bit.
2988 * VCVT with immediate frac_bits has same format as SREG_M
2990 rm = VFP_SREG_M(insn);
2991 } else {
2992 VFP_DREG_M(rm, insn);
2994 } else {
2995 rn = VFP_SREG_N(insn);
2996 if (op == 15 && rn == 15) {
2997 /* Double precision destination. */
2998 VFP_DREG_D(rd, insn);
2999 } else {
3000 rd = VFP_SREG_D(insn);
3002 /* NB that we implicitly rely on the encoding for the frac_bits
3003 * in VCVT of fixed to float being the same as that of an SREG_M
3005 rm = VFP_SREG_M(insn);
3008 veclen = s->vec_len;
3009 if (op == 15 && rn > 3)
3010 veclen = 0;
3012 /* Shut up compiler warnings. */
3013 delta_m = 0;
3014 delta_d = 0;
3015 bank_mask = 0;
3017 if (veclen > 0) {
3018 if (dp)
3019 bank_mask = 0xc;
3020 else
3021 bank_mask = 0x18;
3023 /* Figure out what type of vector operation this is. */
3024 if ((rd & bank_mask) == 0) {
3025 /* scalar */
3026 veclen = 0;
3027 } else {
3028 if (dp)
3029 delta_d = (s->vec_stride >> 1) + 1;
3030 else
3031 delta_d = s->vec_stride + 1;
3033 if ((rm & bank_mask) == 0) {
3034 /* mixed scalar/vector */
3035 delta_m = 0;
3036 } else {
3037 /* vector */
3038 delta_m = delta_d;
3043 /* Load the initial operands. */
3044 if (op == 15) {
3045 switch (rn) {
3046 case 16:
3047 case 17:
3048 /* Integer source */
3049 gen_mov_F0_vreg(0, rm);
3050 break;
3051 case 8:
3052 case 9:
3053 /* Compare */
3054 gen_mov_F0_vreg(dp, rd);
3055 gen_mov_F1_vreg(dp, rm);
3056 break;
3057 case 10:
3058 case 11:
3059 /* Compare with zero */
3060 gen_mov_F0_vreg(dp, rd);
3061 gen_vfp_F1_ld0(dp);
3062 break;
3063 case 20:
3064 case 21:
3065 case 22:
3066 case 23:
3067 case 28:
3068 case 29:
3069 case 30:
3070 case 31:
3071 /* Source and destination the same. */
3072 gen_mov_F0_vreg(dp, rd);
3073 break;
3074 case 4:
3075 case 5:
3076 case 6:
3077 case 7:
3078 /* VCVTB, VCVTT: only present with the halfprec extension,
3079 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
3081 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
3082 return 1;
3084 /* Otherwise fall through */
3085 default:
3086 /* One source operand. */
3087 gen_mov_F0_vreg(dp, rm);
3088 break;
3090 } else {
3091 /* Two source operands. */
3092 gen_mov_F0_vreg(dp, rn);
3093 gen_mov_F1_vreg(dp, rm);
/* Main data-processing loop: one iteration per short-vector element. */
3096 for (;;) {
3097 /* Perform the calculation. */
3098 switch (op) {
3099 case 0: /* VMLA: fd + (fn * fm) */
3100 /* Note that order of inputs to the add matters for NaNs */
3101 gen_vfp_F1_mul(dp);
3102 gen_mov_F0_vreg(dp, rd);
3103 gen_vfp_add(dp);
3104 break;
3105 case 1: /* VMLS: fd + -(fn * fm) */
3106 gen_vfp_mul(dp);
3107 gen_vfp_F1_neg(dp);
3108 gen_mov_F0_vreg(dp, rd);
3109 gen_vfp_add(dp);
3110 break;
3111 case 2: /* VNMLS: -fd + (fn * fm) */
3112 /* Note that it isn't valid to replace (-A + B) with (B - A)
3113 * or similar plausible looking simplifications
3114 * because this will give wrong results for NaNs.
3116 gen_vfp_F1_mul(dp);
3117 gen_mov_F0_vreg(dp, rd);
3118 gen_vfp_neg(dp);
3119 gen_vfp_add(dp);
3120 break;
3121 case 3: /* VNMLA: -fd + -(fn * fm) */
3122 gen_vfp_mul(dp);
3123 gen_vfp_F1_neg(dp);
3124 gen_mov_F0_vreg(dp, rd);
3125 gen_vfp_neg(dp);
3126 gen_vfp_add(dp);
3127 break;
3128 case 4: /* mul: fn * fm */
3129 gen_vfp_mul(dp);
3130 break;
3131 case 5: /* nmul: -(fn * fm) */
3132 gen_vfp_mul(dp);
3133 gen_vfp_neg(dp);
3134 break;
3135 case 6: /* add: fn + fm */
3136 gen_vfp_add(dp);
3137 break;
3138 case 7: /* sub: fn - fm */
3139 gen_vfp_sub(dp);
3140 break;
3141 case 8: /* div: fn / fm */
3142 gen_vfp_div(dp);
3143 break;
3144 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3145 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3146 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3147 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3148 /* These are fused multiply-add, and must be done as one
3149 * floating point operation with no rounding between the
3150 * multiplication and addition steps.
3151 * NB that doing the negations here as separate steps is
3152 * correct : an input NaN should come out with its sign bit
3153 * flipped if it is a negated-input.
3155 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3156 return 1;
3158 if (dp) {
3159 TCGv_ptr fpst;
3160 TCGv_i64 frd;
3161 if (op & 1) {
3162 /* VFNMS, VFMS */
3163 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3165 frd = tcg_temp_new_i64();
3166 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3167 if (op & 2) {
3168 /* VFNMA, VFNMS */
3169 gen_helper_vfp_negd(frd, frd);
3171 fpst = get_fpstatus_ptr(0);
3172 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3173 cpu_F1d, frd, fpst);
3174 tcg_temp_free_ptr(fpst);
3175 tcg_temp_free_i64(frd);
3176 } else {
3177 TCGv_ptr fpst;
3178 TCGv_i32 frd;
3179 if (op & 1) {
3180 /* VFNMS, VFMS */
3181 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3183 frd = tcg_temp_new_i32();
3184 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3185 if (op & 2) {
3186 gen_helper_vfp_negs(frd, frd);
3188 fpst = get_fpstatus_ptr(0);
3189 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3190 cpu_F1s, frd, fpst);
3191 tcg_temp_free_ptr(fpst);
3192 tcg_temp_free_i32(frd);
3194 break;
3195 case 14: /* fconst */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
/* Expand the VFP3 8-bit immediate into a full float constant. */
3199 n = (insn << 12) & 0x80000000;
3200 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3201 if (dp) {
3202 if (i & 0x40)
3203 i |= 0x3f80;
3204 else
3205 i |= 0x4000;
3206 n |= i << 16;
3207 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3208 } else {
3209 if (i & 0x40)
3210 i |= 0x780;
3211 else
3212 i |= 0x800;
3213 n |= i << 19;
3214 tcg_gen_movi_i32(cpu_F0s, n);
3216 break;
3217 case 15: /* extension space */
3218 switch (rn) {
3219 case 0: /* cpy */
3220 /* no-op */
3221 break;
3222 case 1: /* abs */
3223 gen_vfp_abs(dp);
3224 break;
3225 case 2: /* neg */
3226 gen_vfp_neg(dp);
3227 break;
3228 case 3: /* sqrt */
3229 gen_vfp_sqrt(dp);
3230 break;
3231 case 4: /* vcvtb.f32.f16 */
3232 tmp = gen_vfp_mrs();
3233 tcg_gen_ext16u_i32(tmp, tmp);
3234 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3235 tcg_temp_free_i32(tmp);
3236 break;
3237 case 5: /* vcvtt.f32.f16 */
3238 tmp = gen_vfp_mrs();
3239 tcg_gen_shri_i32(tmp, tmp, 16);
3240 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3241 tcg_temp_free_i32(tmp);
3242 break;
3243 case 6: /* vcvtb.f16.f32 */
3244 tmp = tcg_temp_new_i32();
3245 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3246 gen_mov_F0_vreg(0, rd);
3247 tmp2 = gen_vfp_mrs();
3248 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3249 tcg_gen_or_i32(tmp, tmp, tmp2);
3250 tcg_temp_free_i32(tmp2);
3251 gen_vfp_msr(tmp);
3252 break;
3253 case 7: /* vcvtt.f16.f32 */
3254 tmp = tcg_temp_new_i32();
3255 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3256 tcg_gen_shli_i32(tmp, tmp, 16);
3257 gen_mov_F0_vreg(0, rd);
3258 tmp2 = gen_vfp_mrs();
3259 tcg_gen_ext16u_i32(tmp2, tmp2);
3260 tcg_gen_or_i32(tmp, tmp, tmp2);
3261 tcg_temp_free_i32(tmp2);
3262 gen_vfp_msr(tmp);
3263 break;
3264 case 8: /* cmp */
3265 gen_vfp_cmp(dp);
3266 break;
3267 case 9: /* cmpe */
3268 gen_vfp_cmpe(dp);
3269 break;
3270 case 10: /* cmpz */
3271 gen_vfp_cmp(dp);
3272 break;
3273 case 11: /* cmpez */
3274 gen_vfp_F1_ld0(dp);
3275 gen_vfp_cmpe(dp);
3276 break;
3277 case 15: /* single<->double conversion */
3278 if (dp)
3279 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3280 else
3281 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3282 break;
3283 case 16: /* fuito */
3284 gen_vfp_uito(dp, 0);
3285 break;
3286 case 17: /* fsito */
3287 gen_vfp_sito(dp, 0);
3288 break;
3289 case 20: /* fshto */
3290 if (!arm_feature(env, ARM_FEATURE_VFP3))
3291 return 1;
3292 gen_vfp_shto(dp, 16 - rm, 0);
3293 break;
3294 case 21: /* fslto */
3295 if (!arm_feature(env, ARM_FEATURE_VFP3))
3296 return 1;
3297 gen_vfp_slto(dp, 32 - rm, 0);
3298 break;
3299 case 22: /* fuhto */
3300 if (!arm_feature(env, ARM_FEATURE_VFP3))
3301 return 1;
3302 gen_vfp_uhto(dp, 16 - rm, 0);
3303 break;
3304 case 23: /* fulto */
3305 if (!arm_feature(env, ARM_FEATURE_VFP3))
3306 return 1;
3307 gen_vfp_ulto(dp, 32 - rm, 0);
3308 break;
3309 case 24: /* ftoui */
3310 gen_vfp_toui(dp, 0);
3311 break;
3312 case 25: /* ftouiz */
3313 gen_vfp_touiz(dp, 0);
3314 break;
3315 case 26: /* ftosi */
3316 gen_vfp_tosi(dp, 0);
3317 break;
3318 case 27: /* ftosiz */
3319 gen_vfp_tosiz(dp, 0);
3320 break;
3321 case 28: /* ftosh */
3322 if (!arm_feature(env, ARM_FEATURE_VFP3))
3323 return 1;
3324 gen_vfp_tosh(dp, 16 - rm, 0);
3325 break;
3326 case 29: /* ftosl */
3327 if (!arm_feature(env, ARM_FEATURE_VFP3))
3328 return 1;
3329 gen_vfp_tosl(dp, 32 - rm, 0);
3330 break;
3331 case 30: /* ftouh */
3332 if (!arm_feature(env, ARM_FEATURE_VFP3))
3333 return 1;
3334 gen_vfp_touh(dp, 16 - rm, 0);
3335 break;
3336 case 31: /* ftoul */
3337 if (!arm_feature(env, ARM_FEATURE_VFP3))
3338 return 1;
3339 gen_vfp_toul(dp, 32 - rm, 0);
3340 break;
3341 default: /* undefined */
3342 return 1;
3344 break;
3345 default: /* undefined */
3346 return 1;
3349 /* Write back the result. */
3350 if (op == 15 && (rn >= 8 && rn <= 11))
3351 ; /* Comparison, do nothing. */
3352 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3353 /* VCVT double to int: always integer result. */
3354 gen_mov_vreg_F0(0, rd);
3355 else if (op == 15 && rn == 15)
3356 /* conversion */
3357 gen_mov_vreg_F0(!dp, rd);
3358 else
3359 gen_mov_vreg_F0(dp, rd);
3361 /* break out of the loop if we have finished */
3362 if (veclen == 0)
3363 break;
3365 if (op == 15 && delta_m == 0) {
3366 /* single source one-many */
3367 while (veclen--) {
3368 rd = ((rd + delta_d) & (bank_mask - 1))
3369 | (rd & bank_mask);
3370 gen_mov_vreg_F0(dp, rd);
3372 break;
3374 /* Setup the next operands. */
3375 veclen--;
3376 rd = ((rd + delta_d) & (bank_mask - 1))
3377 | (rd & bank_mask);
3379 if (op == 15) {
3380 /* One source operand. */
3381 rm = ((rm + delta_m) & (bank_mask - 1))
3382 | (rm & bank_mask);
3383 gen_mov_F0_vreg(dp, rm);
3384 } else {
3385 /* Two source operands. */
3386 rn = ((rn + delta_d) & (bank_mask - 1))
3387 | (rn & bank_mask);
3388 gen_mov_F0_vreg(dp, rn);
3389 if (delta_m) {
3390 rm = ((rm + delta_m) & (bank_mask - 1))
3391 | (rm & bank_mask);
3392 gen_mov_F1_vreg(dp, rm);
3397 break;
/* Two-register transfer and load/store encodings. */
3398 case 0xc:
3399 case 0xd:
3400 if ((insn & 0x03e00000) == 0x00400000) {
3401 /* two-register transfer */
3402 rn = (insn >> 16) & 0xf;
3403 rd = (insn >> 12) & 0xf;
3404 if (dp) {
3405 VFP_DREG_M(rm, insn);
3406 } else {
3407 rm = VFP_SREG_M(insn);
3410 if (insn & ARM_CP_RW_BIT) {
3411 /* vfp->arm */
3412 if (dp) {
3413 gen_mov_F0_vreg(0, rm * 2);
3414 tmp = gen_vfp_mrs();
3415 store_reg(s, rd, tmp);
3416 gen_mov_F0_vreg(0, rm * 2 + 1);
3417 tmp = gen_vfp_mrs();
3418 store_reg(s, rn, tmp);
3419 } else {
3420 gen_mov_F0_vreg(0, rm);
3421 tmp = gen_vfp_mrs();
3422 store_reg(s, rd, tmp);
3423 gen_mov_F0_vreg(0, rm + 1);
3424 tmp = gen_vfp_mrs();
3425 store_reg(s, rn, tmp);
3427 } else {
3428 /* arm->vfp */
3429 if (dp) {
3430 tmp = load_reg(s, rd);
3431 gen_vfp_msr(tmp);
3432 gen_mov_vreg_F0(0, rm * 2);
3433 tmp = load_reg(s, rn);
3434 gen_vfp_msr(tmp);
3435 gen_mov_vreg_F0(0, rm * 2 + 1);
3436 } else {
3437 tmp = load_reg(s, rd);
3438 gen_vfp_msr(tmp);
3439 gen_mov_vreg_F0(0, rm);
3440 tmp = load_reg(s, rn);
3441 gen_vfp_msr(tmp);
3442 gen_mov_vreg_F0(0, rm + 1);
3445 } else {
3446 /* Load/store */
3447 rn = (insn >> 16) & 0xf;
3448 if (dp)
3449 VFP_DREG_D(rd, insn);
3450 else
3451 rd = VFP_SREG_D(insn);
3452 if ((insn & 0x01200000) == 0x01000000) {
3453 /* Single load/store */
3454 offset = (insn & 0xff) << 2;
3455 if ((insn & (1 << 23)) == 0)
3456 offset = -offset;
3457 if (s->thumb && rn == 15) {
3458 /* This is actually UNPREDICTABLE */
3459 addr = tcg_temp_new_i32();
3460 tcg_gen_movi_i32(addr, s->pc & ~2);
3461 } else {
3462 addr = load_reg(s, rn);
3464 tcg_gen_addi_i32(addr, addr, offset);
3465 if (insn & (1 << 20)) {
3466 gen_vfp_ld(s, dp, addr);
3467 gen_mov_vreg_F0(dp, rd);
3468 } else {
3469 gen_mov_F0_vreg(dp, rd);
3470 gen_vfp_st(s, dp, addr);
3472 tcg_temp_free_i32(addr);
3473 } else {
3474 /* load/store multiple */
3475 int w = insn & (1 << 21);
3476 if (dp)
3477 n = (insn >> 1) & 0x7f;
3478 else
3479 n = insn & 0xff;
3481 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3482 /* P == U , W == 1 => UNDEF */
3483 return 1;
3485 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3486 /* UNPREDICTABLE cases for bad immediates: we choose to
3487 * UNDEF to avoid generating huge numbers of TCG ops
3489 return 1;
3491 if (rn == 15 && w) {
3492 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3493 return 1;
3496 if (s->thumb && rn == 15) {
3497 /* This is actually UNPREDICTABLE */
3498 addr = tcg_temp_new_i32();
3499 tcg_gen_movi_i32(addr, s->pc & ~2);
3500 } else {
3501 addr = load_reg(s, rn);
3503 if (insn & (1 << 24)) /* pre-decrement */
3504 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3506 if (dp)
3507 offset = 8;
3508 else
3509 offset = 4;
3510 for (i = 0; i < n; i++) {
3511 if (insn & ARM_CP_RW_BIT) {
3512 /* load */
3513 gen_vfp_ld(s, dp, addr);
3514 gen_mov_vreg_F0(dp, rd + i);
3515 } else {
3516 /* store */
3517 gen_mov_F0_vreg(dp, rd + i);
3518 gen_vfp_st(s, dp, addr);
3520 tcg_gen_addi_i32(addr, addr, offset);
3522 if (w) {
3523 /* writeback */
3524 if (insn & (1 << 24))
3525 offset = -offset * n;
3526 else if (dp && (insn & 1))
3527 offset = 4;
3528 else
3529 offset = 0;
3531 if (offset != 0)
3532 tcg_gen_addi_i32(addr, addr, offset);
3533 store_reg(s, rn, addr);
3534 } else {
3535 tcg_temp_free_i32(addr);
3539 break;
3540 default:
3541 /* Should never happen. */
3542 return 1;
3544 return 0;
3547 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3549 TranslationBlock *tb;
3551 tb = s->tb;
3552 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3553 tcg_gen_goto_tb(n);
3554 gen_set_pc_im(dest);
3555 tcg_gen_exit_tb((tcg_target_long)tb + n);
3556 } else {
3557 gen_set_pc_im(dest);
3558 tcg_gen_exit_tb(0);
3562 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3564 if (unlikely(s->singlestep_enabled)) {
3565 /* An indirect jump so that we still trigger the debug exception. */
3566 if (s->thumb)
3567 dest |= 1;
3568 gen_bx_im(s, dest);
3569 } else {
3570 gen_goto_tb(s, 0, dest);
3571 s->is_jmp = DISAS_TB_JUMP;
3575 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3577 if (x)
3578 tcg_gen_sari_i32(t0, t0, 16);
3579 else
3580 gen_sxth(t0);
3581 if (y)
3582 tcg_gen_sari_i32(t1, t1, 16);
3583 else
3584 gen_sxth(t1);
3585 tcg_gen_mul_i32(t0, t0, t1);
3588 /* Return the mask of PSR bits set by a MSR instruction. */
3589 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3590 uint32_t mask;
3592 mask = 0;
3593 if (flags & (1 << 0))
3594 mask |= 0xff;
3595 if (flags & (1 << 1))
3596 mask |= 0xff00;
3597 if (flags & (1 << 2))
3598 mask |= 0xff0000;
3599 if (flags & (1 << 3))
3600 mask |= 0xff000000;
3602 /* Mask out undefined bits. */
3603 mask &= ~CPSR_RESERVED;
3604 if (!arm_feature(env, ARM_FEATURE_V4T))
3605 mask &= ~CPSR_T;
3606 if (!arm_feature(env, ARM_FEATURE_V5))
3607 mask &= ~CPSR_Q; /* V5TE in reality*/
3608 if (!arm_feature(env, ARM_FEATURE_V6))
3609 mask &= ~(CPSR_E | CPSR_GE);
3610 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3611 mask &= ~CPSR_IT;
3612 /* Mask out execution state bits. */
3613 if (!spsr)
3614 mask &= ~CPSR_EXEC;
3615 /* Mask out privileged bits. */
3616 if (IS_USER(s))
3617 mask &= CPSR_USER;
3618 return mask;
3621 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3622 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3624 TCGv tmp;
3625 if (spsr) {
3626 /* ??? This is also undefined in system mode. */
3627 if (IS_USER(s))
3628 return 1;
3630 tmp = load_cpu_field(spsr);
3631 tcg_gen_andi_i32(tmp, tmp, ~mask);
3632 tcg_gen_andi_i32(t0, t0, mask);
3633 tcg_gen_or_i32(tmp, tmp, t0);
3634 store_cpu_field(tmp, spsr);
3635 } else {
3636 gen_set_cpsr(t0, mask);
3638 tcg_temp_free_i32(t0);
3639 gen_lookup_tb(s);
3640 return 0;
3643 /* Returns nonzero if access to the PSR is not permitted. */
3644 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3646 TCGv tmp;
3647 tmp = tcg_temp_new_i32();
3648 tcg_gen_movi_i32(tmp, val);
3649 return gen_set_psr(s, mask, spsr, tmp);
3652 /* Generate an old-style exception return. Marks pc as dead. */
3653 static void gen_exception_return(DisasContext *s, TCGv pc)
3655 TCGv tmp;
3656 store_reg(s, 15, pc);
3657 tmp = load_cpu_field(spsr);
3658 gen_set_cpsr(tmp, 0xffffffff);
3659 tcg_temp_free_i32(tmp);
3660 s->is_jmp = DISAS_UPDATE;
/* Generate a v6 exception return.  Marks both values as dead.
 * cpsr holds the saved PSR to restore, pc the return address.
 * NOTE: the CPSR is written before the PC is stored; store_reg()
 * behavior for r15 may depend on the restored state, so keep this order.
 */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    /* CPU state (mode, flags) changed: force main loop to re-evaluate. */
    s->is_jmp = DISAS_UPDATE;
}
3672 static inline void
3673 gen_set_condexec (DisasContext *s)
3675 if (s->condexec_mask) {
3676 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3677 TCGv tmp = tcg_temp_new_i32();
3678 tcg_gen_movi_i32(tmp, val);
3679 store_cpu_field(tmp, condexec_bits);
/* Raise exception excp for the current instruction.
 * offset is subtracted from s->pc to recover the address of the faulting
 * instruction (the translator has already advanced past it).
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    /* Make the IT-block state visible before taking the exception. */
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3691 static void gen_nop_hint(DisasContext *s, int val)
3693 switch (val) {
3694 case 3: /* wfi */
3695 gen_set_pc_im(s->pc);
3696 s->is_jmp = DISAS_WFI;
3697 break;
3698 case 2: /* wfe */
3699 case 4: /* sev */
3700 /* TODO: Implement SEV and WFE. May help SMP performance. */
3701 default: /* nop */
3702 break;
3706 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3708 static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
3710 switch (size) {
3711 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3712 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3713 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3714 default: abort();
3718 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3720 switch (size) {
3721 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3722 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3723 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3724 default: return;
/* 32-bit pairwise ops end up the same as the elementwise versions, because
 * each 32-bit lane is processed individually; alias the pairwise helper
 * names onto the plain min/max helpers.
 */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
/* Dispatch to the cpu_env-taking Neon helper variant selected by the
 * in-scope `size` (element size 0/1/2) and `u` (unsigned) flags; operands
 * and result are the in-scope temporaries tmp/tmp2.  size == 3 has no
 * helper: the `return 1` makes the enclosing translate function UNDEF
 * the instruction.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.
 * Selection and the UNDEF-on-size-3 `return 1` behave identically.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3780 static TCGv neon_load_scratch(int scratch)
3782 TCGv tmp = tcg_temp_new_i32();
3783 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3784 return tmp;
/* Store var into 32-bit Neon scratch slot `scratch`; frees var
 * (takes ownership of the temporary).
 */
static void neon_store_scratch(int scratch, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3793 static inline TCGv neon_get_scalar(int size, int reg)
3795 TCGv tmp;
3796 if (size == 1) {
3797 tmp = neon_load_reg(reg & 7, reg >> 4);
3798 if (reg & 8) {
3799 gen_neon_dup_high16(tmp);
3800 } else {
3801 gen_neon_dup_low16(tmp);
3803 } else {
3804 tmp = neon_load_reg(reg & 15, reg >> 4);
3806 return tmp;
3809 static int gen_neon_unzip(int rd, int rm, int size, int q)
3811 TCGv tmp, tmp2;
3812 if (!q && size == 2) {
3813 return 1;
3815 tmp = tcg_const_i32(rd);
3816 tmp2 = tcg_const_i32(rm);
3817 if (q) {
3818 switch (size) {
3819 case 0:
3820 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3821 break;
3822 case 1:
3823 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3824 break;
3825 case 2:
3826 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3827 break;
3828 default:
3829 abort();
3831 } else {
3832 switch (size) {
3833 case 0:
3834 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3835 break;
3836 case 1:
3837 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3838 break;
3839 default:
3840 abort();
3843 tcg_temp_free_i32(tmp);
3844 tcg_temp_free_i32(tmp2);
3845 return 0;
3848 static int gen_neon_zip(int rd, int rm, int size, int q)
3850 TCGv tmp, tmp2;
3851 if (!q && size == 2) {
3852 return 1;
3854 tmp = tcg_const_i32(rd);
3855 tmp2 = tcg_const_i32(rm);
3856 if (q) {
3857 switch (size) {
3858 case 0:
3859 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3860 break;
3861 case 1:
3862 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3863 break;
3864 case 2:
3865 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3866 break;
3867 default:
3868 abort();
3870 } else {
3871 switch (size) {
3872 case 0:
3873 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3874 break;
3875 case 1:
3876 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3877 break;
3878 default:
3879 abort();
3882 tcg_temp_free_i32(tmp);
3883 tcg_temp_free_i32(tmp2);
3884 return 0;
3887 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3889 TCGv rd, tmp;
3891 rd = tcg_temp_new_i32();
3892 tmp = tcg_temp_new_i32();
3894 tcg_gen_shli_i32(rd, t0, 8);
3895 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3896 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3897 tcg_gen_or_i32(rd, rd, tmp);
3899 tcg_gen_shri_i32(t1, t1, 8);
3900 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3901 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3902 tcg_gen_or_i32(t1, t1, tmp);
3903 tcg_gen_mov_i32(t0, rd);
3905 tcg_temp_free_i32(tmp);
3906 tcg_temp_free_i32(rd);
3909 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3911 TCGv rd, tmp;
3913 rd = tcg_temp_new_i32();
3914 tmp = tcg_temp_new_i32();
3916 tcg_gen_shli_i32(rd, t0, 16);
3917 tcg_gen_andi_i32(tmp, t1, 0xffff);
3918 tcg_gen_or_i32(rd, rd, tmp);
3919 tcg_gen_shri_i32(t1, t1, 16);
3920 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3921 tcg_gen_or_i32(t1, t1, tmp);
3922 tcg_gen_mov_i32(t0, rd);
3924 tcg_temp_free_i32(tmp);
3925 tcg_temp_free_i32(rd);
/* Per-op decode table for Neon "load/store multiple structures",
 * indexed by the op field (bits [11:8]) extracted in disas_neon_ls_insn:
 *   nregs      - number of D registers accessed
 *   interleave - element interleave pattern between registers
 *   spacing    - register number spacing (1 or 2)
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},          /* op 0 */
    {4, 4, 2},          /* op 1 */
    {4, 1, 1},          /* op 2 */
    {4, 2, 1},          /* op 3 */
    {3, 3, 1},          /* op 4 */
    {3, 3, 2},          /* op 5 */
    {3, 1, 1},          /* op 6 */
    {1, 1, 1},          /* op 7 */
    {2, 2, 1},          /* op 8 */
    {2, 2, 2},          /* op 9 */
    {2, 1, 1}           /* op 10 */
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid (which UNDEFs it).
   Three major forms are handled:
     - bit 23 clear: load/store multiple structures (VLDn/VSTn, all lanes
       of whole registers), driven by neon_ls_element_type[];
     - bit 23 set, size field == 3: load single element to all lanes;
     - bit 23 set otherwise: load/store a single element to one lane.
   Post-indexed writeback to rn is applied at the end when rm != 15. */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses each register starts at a
             * different offset from the base address.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole-D-register access. */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register,
                 * assembling/splitting words as needed.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            /* Assemble four bytes into one word. */
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            /* Split one word into four byte stores. */
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the right bit position.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-indexed writeback: rm == 15 means none, rm == 13 means
     * immediate (the computed stride), otherwise add register rm.
     */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f, per bit.  Clobbers T and F.
 * (Bits of t are kept where c is 1, bits of f where c is 0.)
 */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4263 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4265 switch (size) {
4266 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4267 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4268 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4269 default: abort();
4273 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4275 switch (size) {
4276 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4277 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4278 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4279 default: abort();
4283 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4285 switch (size) {
4286 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4287 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4288 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4289 default: abort();
4293 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4295 switch (size) {
4296 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4297 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4298 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4299 default: abort();
4303 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4304 int q, int u)
4306 if (q) {
4307 if (u) {
4308 switch (size) {
4309 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4310 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4311 default: abort();
4313 } else {
4314 switch (size) {
4315 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4316 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4317 default: abort();
4320 } else {
4321 if (u) {
4322 switch (size) {
4323 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4324 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4325 default: abort();
4327 } else {
4328 switch (size) {
4329 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4330 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4331 default: abort();
4337 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4339 if (u) {
4340 switch (size) {
4341 case 0: gen_helper_neon_widen_u8(dest, src); break;
4342 case 1: gen_helper_neon_widen_u16(dest, src); break;
4343 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4344 default: abort();
4346 } else {
4347 switch (size) {
4348 case 0: gen_helper_neon_widen_s8(dest, src); break;
4349 case 1: gen_helper_neon_widen_s16(dest, src); break;
4350 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4351 default: abort();
4354 tcg_temp_free_i32(src);
4357 static inline void gen_neon_addl(int size)
4359 switch (size) {
4360 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4361 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4362 case 2: tcg_gen_add_i64(CPU_V001); break;
4363 default: abort();
4367 static inline void gen_neon_subl(int size)
4369 switch (size) {
4370 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4371 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4372 case 2: tcg_gen_sub_i64(CPU_V001); break;
4373 default: abort();
4377 static inline void gen_neon_negl(TCGv_i64 var, int size)
4379 switch (size) {
4380 case 0: gen_helper_neon_negl_u16(var, var); break;
4381 case 1: gen_helper_neon_negl_u32(var, var); break;
4382 case 2: gen_helper_neon_negl_u64(var, var); break;
4383 default: abort();
4387 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4389 switch (size) {
4390 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4391 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4392 default: abort();
4396 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4398 TCGv_i64 tmp;
4400 switch ((size << 1) | u) {
4401 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4402 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4403 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4404 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4405 case 4:
4406 tmp = gen_muls_i64_i32(a, b);
4407 tcg_gen_mov_i64(dest, tmp);
4408 tcg_temp_free_i64(tmp);
4409 break;
4410 case 5:
4411 tmp = gen_mulu_i64_i32(a, b);
4412 tcg_gen_mov_i64(dest, tmp);
4413 tcg_temp_free_i64(tmp);
4414 break;
4415 default: abort();
4418 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4419 Don't forget to clean them now. */
4420 if (size < 2) {
4421 tcg_temp_free_i32(a);
4422 tcg_temp_free_i32(b);
4426 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4428 if (op) {
4429 if (u) {
4430 gen_neon_unarrow_sats(size, dest, src);
4431 } else {
4432 gen_neon_narrow(size, dest, src);
4434 } else {
4435 if (u) {
4436 gen_neon_narrow_satu(size, dest, src);
4437 } else {
4438 gen_neon_narrow_sats(size, dest, src);
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.  Used to index neon_3r_sizes[] and in the big switch in
 * disas_neon_data_insn().
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
/* 24 is unallocated: no bits set in neon_3r_sizes[] so it UNDEFs. */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
/* Allowed element sizes per 3-reg-same op: bit n set means size value n
 * is valid; a zero entry (unallocated op) always UNDEFs.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings (their
 * neon_2rm_sizes[] entry stays 0, so they UNDEF).
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4562 static int neon_2rm_is_float_op(int op)
4564 /* Return true if this neon 2reg-misc op is float-to-float */
4565 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4566 op >= NEON_2RM_VRECPE_F);
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x4 entries are the float ops, which only exist at size 2.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4620 /* Translate a NEON data processing instruction. Return nonzero if the
4621 instruction is invalid.
4622 We process data in a mixture of 32-bit and 64-bit chunks.
4623 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4625 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4627 int op;
4628 int q;
4629 int rd, rn, rm;
4630 int size;
4631 int shift;
4632 int pass;
4633 int count;
4634 int pairwise;
4635 int u;
4636 uint32_t imm, mask;
4637 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4638 TCGv_i64 tmp64;
4640 if (!s->vfp_enabled)
4641 return 1;
4642 q = (insn & (1 << 6)) != 0;
4643 u = (insn >> 24) & 1;
4644 VFP_DREG_D(rd, insn);
4645 VFP_DREG_N(rn, insn);
4646 VFP_DREG_M(rm, insn);
4647 size = (insn >> 20) & 3;
4648 if ((insn & (1 << 23)) == 0) {
4649 /* Three register same length. */
4650 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4651 /* Catch invalid op and bad size combinations: UNDEF */
4652 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4653 return 1;
4655 /* All insns of this form UNDEF for either this condition or the
4656 * superset of cases "Q==1"; we catch the latter later.
4658 if (q && ((rd | rn | rm) & 1)) {
4659 return 1;
4661 if (size == 3 && op != NEON_3R_LOGIC) {
4662 /* 64-bit element instructions. */
4663 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4664 neon_load_reg64(cpu_V0, rn + pass);
4665 neon_load_reg64(cpu_V1, rm + pass);
4666 switch (op) {
4667 case NEON_3R_VQADD:
4668 if (u) {
4669 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4670 cpu_V0, cpu_V1);
4671 } else {
4672 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4673 cpu_V0, cpu_V1);
4675 break;
4676 case NEON_3R_VQSUB:
4677 if (u) {
4678 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4679 cpu_V0, cpu_V1);
4680 } else {
4681 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4682 cpu_V0, cpu_V1);
4684 break;
4685 case NEON_3R_VSHL:
4686 if (u) {
4687 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4688 } else {
4689 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4691 break;
4692 case NEON_3R_VQSHL:
4693 if (u) {
4694 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4695 cpu_V1, cpu_V0);
4696 } else {
4697 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4698 cpu_V1, cpu_V0);
4700 break;
4701 case NEON_3R_VRSHL:
4702 if (u) {
4703 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4704 } else {
4705 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4707 break;
4708 case NEON_3R_VQRSHL:
4709 if (u) {
4710 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4711 cpu_V1, cpu_V0);
4712 } else {
4713 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4714 cpu_V1, cpu_V0);
4716 break;
4717 case NEON_3R_VADD_VSUB:
4718 if (u) {
4719 tcg_gen_sub_i64(CPU_V001);
4720 } else {
4721 tcg_gen_add_i64(CPU_V001);
4723 break;
4724 default:
4725 abort();
4727 neon_store_reg64(cpu_V0, rd + pass);
4729 return 0;
4731 pairwise = 0;
4732 switch (op) {
4733 case NEON_3R_VSHL:
4734 case NEON_3R_VQSHL:
4735 case NEON_3R_VRSHL:
4736 case NEON_3R_VQRSHL:
4738 int rtmp;
4739 /* Shift instruction operands are reversed. */
4740 rtmp = rn;
4741 rn = rm;
4742 rm = rtmp;
4744 break;
4745 case NEON_3R_VPADD:
4746 if (u) {
4747 return 1;
4749 /* Fall through */
4750 case NEON_3R_VPMAX:
4751 case NEON_3R_VPMIN:
4752 pairwise = 1;
4753 break;
4754 case NEON_3R_FLOAT_ARITH:
4755 pairwise = (u && size < 2); /* if VPADD (float) */
4756 break;
4757 case NEON_3R_FLOAT_MINMAX:
4758 pairwise = u; /* if VPMIN/VPMAX (float) */
4759 break;
4760 case NEON_3R_FLOAT_CMP:
4761 if (!u && size) {
4762 /* no encoding for U=0 C=1x */
4763 return 1;
4765 break;
4766 case NEON_3R_FLOAT_ACMP:
4767 if (!u) {
4768 return 1;
4770 break;
4771 case NEON_3R_VRECPS_VRSQRTS:
4772 if (u) {
4773 return 1;
4775 break;
4776 case NEON_3R_VMUL:
4777 if (u && (size != 0)) {
4778 /* UNDEF on invalid size for polynomial subcase */
4779 return 1;
4781 break;
4782 case NEON_3R_VFM:
4783 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4784 return 1;
4786 break;
4787 default:
4788 break;
4791 if (pairwise && q) {
4792 /* All the pairwise insns UNDEF if Q is set */
4793 return 1;
4796 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4798 if (pairwise) {
4799 /* Pairwise. */
4800 if (pass < 1) {
4801 tmp = neon_load_reg(rn, 0);
4802 tmp2 = neon_load_reg(rn, 1);
4803 } else {
4804 tmp = neon_load_reg(rm, 0);
4805 tmp2 = neon_load_reg(rm, 1);
4807 } else {
4808 /* Elementwise. */
4809 tmp = neon_load_reg(rn, pass);
4810 tmp2 = neon_load_reg(rm, pass);
4812 switch (op) {
4813 case NEON_3R_VHADD:
4814 GEN_NEON_INTEGER_OP(hadd);
4815 break;
4816 case NEON_3R_VQADD:
4817 GEN_NEON_INTEGER_OP_ENV(qadd);
4818 break;
4819 case NEON_3R_VRHADD:
4820 GEN_NEON_INTEGER_OP(rhadd);
4821 break;
4822 case NEON_3R_LOGIC: /* Logic ops. */
4823 switch ((u << 2) | size) {
4824 case 0: /* VAND */
4825 tcg_gen_and_i32(tmp, tmp, tmp2);
4826 break;
4827 case 1: /* BIC */
4828 tcg_gen_andc_i32(tmp, tmp, tmp2);
4829 break;
4830 case 2: /* VORR */
4831 tcg_gen_or_i32(tmp, tmp, tmp2);
4832 break;
4833 case 3: /* VORN */
4834 tcg_gen_orc_i32(tmp, tmp, tmp2);
4835 break;
4836 case 4: /* VEOR */
4837 tcg_gen_xor_i32(tmp, tmp, tmp2);
4838 break;
4839 case 5: /* VBSL */
4840 tmp3 = neon_load_reg(rd, pass);
4841 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4842 tcg_temp_free_i32(tmp3);
4843 break;
4844 case 6: /* VBIT */
4845 tmp3 = neon_load_reg(rd, pass);
4846 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4847 tcg_temp_free_i32(tmp3);
4848 break;
4849 case 7: /* VBIF */
4850 tmp3 = neon_load_reg(rd, pass);
4851 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4852 tcg_temp_free_i32(tmp3);
4853 break;
4855 break;
4856 case NEON_3R_VHSUB:
4857 GEN_NEON_INTEGER_OP(hsub);
4858 break;
4859 case NEON_3R_VQSUB:
4860 GEN_NEON_INTEGER_OP_ENV(qsub);
4861 break;
4862 case NEON_3R_VCGT:
4863 GEN_NEON_INTEGER_OP(cgt);
4864 break;
4865 case NEON_3R_VCGE:
4866 GEN_NEON_INTEGER_OP(cge);
4867 break;
4868 case NEON_3R_VSHL:
4869 GEN_NEON_INTEGER_OP(shl);
4870 break;
4871 case NEON_3R_VQSHL:
4872 GEN_NEON_INTEGER_OP_ENV(qshl);
4873 break;
4874 case NEON_3R_VRSHL:
4875 GEN_NEON_INTEGER_OP(rshl);
4876 break;
4877 case NEON_3R_VQRSHL:
4878 GEN_NEON_INTEGER_OP_ENV(qrshl);
4879 break;
4880 case NEON_3R_VMAX:
4881 GEN_NEON_INTEGER_OP(max);
4882 break;
4883 case NEON_3R_VMIN:
4884 GEN_NEON_INTEGER_OP(min);
4885 break;
4886 case NEON_3R_VABD:
4887 GEN_NEON_INTEGER_OP(abd);
4888 break;
4889 case NEON_3R_VABA:
4890 GEN_NEON_INTEGER_OP(abd);
4891 tcg_temp_free_i32(tmp2);
4892 tmp2 = neon_load_reg(rd, pass);
4893 gen_neon_add(size, tmp, tmp2);
4894 break;
4895 case NEON_3R_VADD_VSUB:
4896 if (!u) { /* VADD */
4897 gen_neon_add(size, tmp, tmp2);
4898 } else { /* VSUB */
4899 switch (size) {
4900 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4901 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4902 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4903 default: abort();
4906 break;
4907 case NEON_3R_VTST_VCEQ:
4908 if (!u) { /* VTST */
4909 switch (size) {
4910 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4911 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4912 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4913 default: abort();
4915 } else { /* VCEQ */
4916 switch (size) {
4917 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4918 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4919 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4920 default: abort();
4923 break;
4924 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
4925 switch (size) {
4926 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4927 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4928 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4929 default: abort();
4931 tcg_temp_free_i32(tmp2);
4932 tmp2 = neon_load_reg(rd, pass);
4933 if (u) { /* VMLS */
4934 gen_neon_rsb(size, tmp, tmp2);
4935 } else { /* VMLA */
4936 gen_neon_add(size, tmp, tmp2);
4938 break;
4939 case NEON_3R_VMUL:
4940 if (u) { /* polynomial */
4941 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4942 } else { /* Integer */
4943 switch (size) {
4944 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4945 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4946 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4947 default: abort();
4950 break;
4951 case NEON_3R_VPMAX:
4952 GEN_NEON_INTEGER_OP(pmax);
4953 break;
4954 case NEON_3R_VPMIN:
4955 GEN_NEON_INTEGER_OP(pmin);
4956 break;
4957 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4958 if (!u) { /* VQDMULH */
4959 switch (size) {
4960 case 1:
4961 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4962 break;
4963 case 2:
4964 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4965 break;
4966 default: abort();
4968 } else { /* VQRDMULH */
4969 switch (size) {
4970 case 1:
4971 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4972 break;
4973 case 2:
4974 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4975 break;
4976 default: abort();
4979 break;
4980 case NEON_3R_VPADD:
4981 switch (size) {
4982 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4983 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4984 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4985 default: abort();
4987 break;
4988 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4990 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4991 switch ((u << 2) | size) {
4992 case 0: /* VADD */
4993 case 4: /* VPADD */
4994 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4995 break;
4996 case 2: /* VSUB */
4997 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4998 break;
4999 case 6: /* VABD */
5000 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5001 break;
5002 default:
5003 abort();
5005 tcg_temp_free_ptr(fpstatus);
5006 break;
5008 case NEON_3R_FLOAT_MULTIPLY:
5010 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5011 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5012 if (!u) {
5013 tcg_temp_free_i32(tmp2);
5014 tmp2 = neon_load_reg(rd, pass);
5015 if (size == 0) {
5016 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5017 } else {
5018 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5021 tcg_temp_free_ptr(fpstatus);
5022 break;
5024 case NEON_3R_FLOAT_CMP:
5026 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5027 if (!u) {
5028 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5029 } else {
5030 if (size == 0) {
5031 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5032 } else {
5033 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5036 tcg_temp_free_ptr(fpstatus);
5037 break;
5039 case NEON_3R_FLOAT_ACMP:
5041 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5042 if (size == 0) {
5043 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5044 } else {
5045 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5047 tcg_temp_free_ptr(fpstatus);
5048 break;
5050 case NEON_3R_FLOAT_MINMAX:
5052 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5053 if (size == 0) {
5054 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
5055 } else {
5056 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
5058 tcg_temp_free_ptr(fpstatus);
5059 break;
5061 case NEON_3R_VRECPS_VRSQRTS:
5062 if (size == 0)
5063 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5064 else
5065 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5066 break;
5067 case NEON_3R_VFM:
5069 /* VFMA, VFMS: fused multiply-add */
5070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5071 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5072 if (size) {
5073 /* VFMS */
5074 gen_helper_vfp_negs(tmp, tmp);
5076 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5077 tcg_temp_free_i32(tmp3);
5078 tcg_temp_free_ptr(fpstatus);
5079 break;
5081 default:
5082 abort();
5084 tcg_temp_free_i32(tmp2);
5086 /* Save the result. For elementwise operations we can put it
5087 straight into the destination register. For pairwise operations
5088 we have to be careful to avoid clobbering the source operands. */
5089 if (pairwise && rd == rm) {
5090 neon_store_scratch(pass, tmp);
5091 } else {
5092 neon_store_reg(rd, pass, tmp);
5095 } /* for pass */
5096 if (pairwise && rd == rm) {
5097 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5098 tmp = neon_load_scratch(pass);
5099 neon_store_reg(rd, pass, tmp);
5102 /* End of 3 register same size operations. */
5103 } else if (insn & (1 << 4)) {
5104 if ((insn & 0x00380080) != 0) {
5105 /* Two registers and shift. */
5106 op = (insn >> 8) & 0xf;
5107 if (insn & (1 << 7)) {
5108 /* 64-bit shift. */
5109 if (op > 7) {
5110 return 1;
5112 size = 3;
5113 } else {
5114 size = 2;
5115 while ((insn & (1 << (size + 19))) == 0)
5116 size--;
5118 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5119 /* To avoid excessive duplication of ops we implement shift
5120 by immediate using the variable shift operations. */
5121 if (op < 8) {
5122 /* Shift by immediate:
5123 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5124 if (q && ((rd | rm) & 1)) {
5125 return 1;
5127 if (!u && (op == 4 || op == 6)) {
5128 return 1;
5130 /* Right shifts are encoded as N - shift, where N is the
5131 element size in bits. */
5132 if (op <= 4)
5133 shift = shift - (1 << (size + 3));
5134 if (size == 3) {
5135 count = q + 1;
5136 } else {
5137 count = q ? 4: 2;
5139 switch (size) {
5140 case 0:
5141 imm = (uint8_t) shift;
5142 imm |= imm << 8;
5143 imm |= imm << 16;
5144 break;
5145 case 1:
5146 imm = (uint16_t) shift;
5147 imm |= imm << 16;
5148 break;
5149 case 2:
5150 case 3:
5151 imm = shift;
5152 break;
5153 default:
5154 abort();
5157 for (pass = 0; pass < count; pass++) {
5158 if (size == 3) {
5159 neon_load_reg64(cpu_V0, rm + pass);
5160 tcg_gen_movi_i64(cpu_V1, imm);
5161 switch (op) {
5162 case 0: /* VSHR */
5163 case 1: /* VSRA */
5164 if (u)
5165 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5166 else
5167 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5168 break;
5169 case 2: /* VRSHR */
5170 case 3: /* VRSRA */
5171 if (u)
5172 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5173 else
5174 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5175 break;
5176 case 4: /* VSRI */
5177 case 5: /* VSHL, VSLI */
5178 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5179 break;
5180 case 6: /* VQSHLU */
5181 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5182 cpu_V0, cpu_V1);
5183 break;
5184 case 7: /* VQSHL */
5185 if (u) {
5186 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5187 cpu_V0, cpu_V1);
5188 } else {
5189 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5190 cpu_V0, cpu_V1);
5192 break;
5194 if (op == 1 || op == 3) {
5195 /* Accumulate. */
5196 neon_load_reg64(cpu_V1, rd + pass);
5197 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5198 } else if (op == 4 || (op == 5 && u)) {
5199 /* Insert */
5200 neon_load_reg64(cpu_V1, rd + pass);
5201 uint64_t mask;
5202 if (shift < -63 || shift > 63) {
5203 mask = 0;
5204 } else {
5205 if (op == 4) {
5206 mask = 0xffffffffffffffffull >> -shift;
5207 } else {
5208 mask = 0xffffffffffffffffull << shift;
5211 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5212 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5214 neon_store_reg64(cpu_V0, rd + pass);
5215 } else { /* size < 3 */
5216 /* Operands in T0 and T1. */
5217 tmp = neon_load_reg(rm, pass);
5218 tmp2 = tcg_temp_new_i32();
5219 tcg_gen_movi_i32(tmp2, imm);
5220 switch (op) {
5221 case 0: /* VSHR */
5222 case 1: /* VSRA */
5223 GEN_NEON_INTEGER_OP(shl);
5224 break;
5225 case 2: /* VRSHR */
5226 case 3: /* VRSRA */
5227 GEN_NEON_INTEGER_OP(rshl);
5228 break;
5229 case 4: /* VSRI */
5230 case 5: /* VSHL, VSLI */
5231 switch (size) {
5232 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5233 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5234 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5235 default: abort();
5237 break;
5238 case 6: /* VQSHLU */
5239 switch (size) {
5240 case 0:
5241 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5242 tmp, tmp2);
5243 break;
5244 case 1:
5245 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5246 tmp, tmp2);
5247 break;
5248 case 2:
5249 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5250 tmp, tmp2);
5251 break;
5252 default:
5253 abort();
5255 break;
5256 case 7: /* VQSHL */
5257 GEN_NEON_INTEGER_OP_ENV(qshl);
5258 break;
5260 tcg_temp_free_i32(tmp2);
5262 if (op == 1 || op == 3) {
5263 /* Accumulate. */
5264 tmp2 = neon_load_reg(rd, pass);
5265 gen_neon_add(size, tmp, tmp2);
5266 tcg_temp_free_i32(tmp2);
5267 } else if (op == 4 || (op == 5 && u)) {
5268 /* Insert */
5269 switch (size) {
5270 case 0:
5271 if (op == 4)
5272 mask = 0xff >> -shift;
5273 else
5274 mask = (uint8_t)(0xff << shift);
5275 mask |= mask << 8;
5276 mask |= mask << 16;
5277 break;
5278 case 1:
5279 if (op == 4)
5280 mask = 0xffff >> -shift;
5281 else
5282 mask = (uint16_t)(0xffff << shift);
5283 mask |= mask << 16;
5284 break;
5285 case 2:
5286 if (shift < -31 || shift > 31) {
5287 mask = 0;
5288 } else {
5289 if (op == 4)
5290 mask = 0xffffffffu >> -shift;
5291 else
5292 mask = 0xffffffffu << shift;
5294 break;
5295 default:
5296 abort();
5298 tmp2 = neon_load_reg(rd, pass);
5299 tcg_gen_andi_i32(tmp, tmp, mask);
5300 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5301 tcg_gen_or_i32(tmp, tmp, tmp2);
5302 tcg_temp_free_i32(tmp2);
5304 neon_store_reg(rd, pass, tmp);
5306 } /* for pass */
5307 } else if (op < 10) {
5308 /* Shift by immediate and narrow:
5309 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5310 int input_unsigned = (op == 8) ? !u : u;
5311 if (rm & 1) {
5312 return 1;
5314 shift = shift - (1 << (size + 3));
5315 size++;
5316 if (size == 3) {
5317 tmp64 = tcg_const_i64(shift);
5318 neon_load_reg64(cpu_V0, rm);
5319 neon_load_reg64(cpu_V1, rm + 1);
5320 for (pass = 0; pass < 2; pass++) {
5321 TCGv_i64 in;
5322 if (pass == 0) {
5323 in = cpu_V0;
5324 } else {
5325 in = cpu_V1;
5327 if (q) {
5328 if (input_unsigned) {
5329 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5330 } else {
5331 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5333 } else {
5334 if (input_unsigned) {
5335 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5336 } else {
5337 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5340 tmp = tcg_temp_new_i32();
5341 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5342 neon_store_reg(rd, pass, tmp);
5343 } /* for pass */
5344 tcg_temp_free_i64(tmp64);
5345 } else {
5346 if (size == 1) {
5347 imm = (uint16_t)shift;
5348 imm |= imm << 16;
5349 } else {
5350 /* size == 2 */
5351 imm = (uint32_t)shift;
5353 tmp2 = tcg_const_i32(imm);
5354 tmp4 = neon_load_reg(rm + 1, 0);
5355 tmp5 = neon_load_reg(rm + 1, 1);
5356 for (pass = 0; pass < 2; pass++) {
5357 if (pass == 0) {
5358 tmp = neon_load_reg(rm, 0);
5359 } else {
5360 tmp = tmp4;
5362 gen_neon_shift_narrow(size, tmp, tmp2, q,
5363 input_unsigned);
5364 if (pass == 0) {
5365 tmp3 = neon_load_reg(rm, 1);
5366 } else {
5367 tmp3 = tmp5;
5369 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5370 input_unsigned);
5371 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5372 tcg_temp_free_i32(tmp);
5373 tcg_temp_free_i32(tmp3);
5374 tmp = tcg_temp_new_i32();
5375 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5376 neon_store_reg(rd, pass, tmp);
5377 } /* for pass */
5378 tcg_temp_free_i32(tmp2);
5380 } else if (op == 10) {
5381 /* VSHLL, VMOVL */
5382 if (q || (rd & 1)) {
5383 return 1;
5385 tmp = neon_load_reg(rm, 0);
5386 tmp2 = neon_load_reg(rm, 1);
5387 for (pass = 0; pass < 2; pass++) {
5388 if (pass == 1)
5389 tmp = tmp2;
5391 gen_neon_widen(cpu_V0, tmp, size, u);
5393 if (shift != 0) {
5394 /* The shift is less than the width of the source
5395 type, so we can just shift the whole register. */
5396 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5397 /* Widen the result of shift: we need to clear
5398 * the potential overflow bits resulting from
5399 * left bits of the narrow input appearing as
5400 * right bits of the left neighbour narrow
5401 * input. */
5402 if (size < 2 || !u) {
5403 uint64_t imm64;
5404 if (size == 0) {
5405 imm = (0xffu >> (8 - shift));
5406 imm |= imm << 16;
5407 } else if (size == 1) {
5408 imm = 0xffff >> (16 - shift);
5409 } else {
5410 /* size == 2 */
5411 imm = 0xffffffff >> (32 - shift);
5413 if (size < 2) {
5414 imm64 = imm | (((uint64_t)imm) << 32);
5415 } else {
5416 imm64 = imm;
5418 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5421 neon_store_reg64(cpu_V0, rd + pass);
5423 } else if (op >= 14) {
5424 /* VCVT fixed-point. */
5425 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5426 return 1;
5428 /* We have already masked out the must-be-1 top bit of imm6,
5429 * hence this 32-shift where the ARM ARM has 64-imm6.
5431 shift = 32 - shift;
5432 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5433 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5434 if (!(op & 1)) {
5435 if (u)
5436 gen_vfp_ulto(0, shift, 1);
5437 else
5438 gen_vfp_slto(0, shift, 1);
5439 } else {
5440 if (u)
5441 gen_vfp_toul(0, shift, 1);
5442 else
5443 gen_vfp_tosl(0, shift, 1);
5445 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5447 } else {
5448 return 1;
5450 } else { /* (insn & 0x00380080) == 0 */
5451 int invert;
5452 if (q && (rd & 1)) {
5453 return 1;
5456 op = (insn >> 8) & 0xf;
5457 /* One register and immediate. */
5458 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5459 invert = (insn & (1 << 5)) != 0;
5460 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5461 * We choose to not special-case this and will behave as if a
5462 * valid constant encoding of 0 had been given.
5464 switch (op) {
5465 case 0: case 1:
5466 /* no-op */
5467 break;
5468 case 2: case 3:
5469 imm <<= 8;
5470 break;
5471 case 4: case 5:
5472 imm <<= 16;
5473 break;
5474 case 6: case 7:
5475 imm <<= 24;
5476 break;
5477 case 8: case 9:
5478 imm |= imm << 16;
5479 break;
5480 case 10: case 11:
5481 imm = (imm << 8) | (imm << 24);
5482 break;
5483 case 12:
5484 imm = (imm << 8) | 0xff;
5485 break;
5486 case 13:
5487 imm = (imm << 16) | 0xffff;
5488 break;
5489 case 14:
5490 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5491 if (invert)
5492 imm = ~imm;
5493 break;
5494 case 15:
5495 if (invert) {
5496 return 1;
5498 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5499 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5500 break;
5502 if (invert)
5503 imm = ~imm;
5505 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5506 if (op & 1 && op < 12) {
5507 tmp = neon_load_reg(rd, pass);
5508 if (invert) {
5509 /* The immediate value has already been inverted, so
5510 BIC becomes AND. */
5511 tcg_gen_andi_i32(tmp, tmp, imm);
5512 } else {
5513 tcg_gen_ori_i32(tmp, tmp, imm);
5515 } else {
5516 /* VMOV, VMVN. */
5517 tmp = tcg_temp_new_i32();
5518 if (op == 14 && invert) {
5519 int n;
5520 uint32_t val;
5521 val = 0;
5522 for (n = 0; n < 4; n++) {
5523 if (imm & (1 << (n + (pass & 1) * 4)))
5524 val |= 0xff << (n * 8);
5526 tcg_gen_movi_i32(tmp, val);
5527 } else {
5528 tcg_gen_movi_i32(tmp, imm);
5531 neon_store_reg(rd, pass, tmp);
5534 } else { /* (insn & 0x00800010 == 0x00800000) */
5535 if (size != 3) {
5536 op = (insn >> 8) & 0xf;
5537 if ((insn & (1 << 6)) == 0) {
5538 /* Three registers of different lengths. */
5539 int src1_wide;
5540 int src2_wide;
5541 int prewiden;
5542 /* undefreq: bit 0 : UNDEF if size != 0
5543 * bit 1 : UNDEF if size == 0
5544 * bit 2 : UNDEF if U == 1
5545 * Note that [1:0] set implies 'always UNDEF'
5547 int undefreq;
5548 /* prewiden, src1_wide, src2_wide, undefreq */
5549 static const int neon_3reg_wide[16][4] = {
5550 {1, 0, 0, 0}, /* VADDL */
5551 {1, 1, 0, 0}, /* VADDW */
5552 {1, 0, 0, 0}, /* VSUBL */
5553 {1, 1, 0, 0}, /* VSUBW */
5554 {0, 1, 1, 0}, /* VADDHN */
5555 {0, 0, 0, 0}, /* VABAL */
5556 {0, 1, 1, 0}, /* VSUBHN */
5557 {0, 0, 0, 0}, /* VABDL */
5558 {0, 0, 0, 0}, /* VMLAL */
5559 {0, 0, 0, 6}, /* VQDMLAL */
5560 {0, 0, 0, 0}, /* VMLSL */
5561 {0, 0, 0, 6}, /* VQDMLSL */
5562 {0, 0, 0, 0}, /* Integer VMULL */
5563 {0, 0, 0, 2}, /* VQDMULL */
5564 {0, 0, 0, 5}, /* Polynomial VMULL */
5565 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5568 prewiden = neon_3reg_wide[op][0];
5569 src1_wide = neon_3reg_wide[op][1];
5570 src2_wide = neon_3reg_wide[op][2];
5571 undefreq = neon_3reg_wide[op][3];
5573 if (((undefreq & 1) && (size != 0)) ||
5574 ((undefreq & 2) && (size == 0)) ||
5575 ((undefreq & 4) && u)) {
5576 return 1;
5578 if ((src1_wide && (rn & 1)) ||
5579 (src2_wide && (rm & 1)) ||
5580 (!src2_wide && (rd & 1))) {
5581 return 1;
5584 /* Avoid overlapping operands. Wide source operands are
5585 always aligned so will never overlap with wide
5586 destinations in problematic ways. */
5587 if (rd == rm && !src2_wide) {
5588 tmp = neon_load_reg(rm, 1);
5589 neon_store_scratch(2, tmp);
5590 } else if (rd == rn && !src1_wide) {
5591 tmp = neon_load_reg(rn, 1);
5592 neon_store_scratch(2, tmp);
5594 TCGV_UNUSED(tmp3);
5595 for (pass = 0; pass < 2; pass++) {
5596 if (src1_wide) {
5597 neon_load_reg64(cpu_V0, rn + pass);
5598 TCGV_UNUSED(tmp);
5599 } else {
5600 if (pass == 1 && rd == rn) {
5601 tmp = neon_load_scratch(2);
5602 } else {
5603 tmp = neon_load_reg(rn, pass);
5605 if (prewiden) {
5606 gen_neon_widen(cpu_V0, tmp, size, u);
5609 if (src2_wide) {
5610 neon_load_reg64(cpu_V1, rm + pass);
5611 TCGV_UNUSED(tmp2);
5612 } else {
5613 if (pass == 1 && rd == rm) {
5614 tmp2 = neon_load_scratch(2);
5615 } else {
5616 tmp2 = neon_load_reg(rm, pass);
5618 if (prewiden) {
5619 gen_neon_widen(cpu_V1, tmp2, size, u);
5622 switch (op) {
5623 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5624 gen_neon_addl(size);
5625 break;
5626 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5627 gen_neon_subl(size);
5628 break;
5629 case 5: case 7: /* VABAL, VABDL */
5630 switch ((size << 1) | u) {
5631 case 0:
5632 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5633 break;
5634 case 1:
5635 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5636 break;
5637 case 2:
5638 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5639 break;
5640 case 3:
5641 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5642 break;
5643 case 4:
5644 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5645 break;
5646 case 5:
5647 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5648 break;
5649 default: abort();
5651 tcg_temp_free_i32(tmp2);
5652 tcg_temp_free_i32(tmp);
5653 break;
5654 case 8: case 9: case 10: case 11: case 12: case 13:
5655 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5656 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5657 break;
5658 case 14: /* Polynomial VMULL */
5659 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5660 tcg_temp_free_i32(tmp2);
5661 tcg_temp_free_i32(tmp);
5662 break;
5663 default: /* 15 is RESERVED: caught earlier */
5664 abort();
5666 if (op == 13) {
5667 /* VQDMULL */
5668 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5669 neon_store_reg64(cpu_V0, rd + pass);
5670 } else if (op == 5 || (op >= 8 && op <= 11)) {
5671 /* Accumulate. */
5672 neon_load_reg64(cpu_V1, rd + pass);
5673 switch (op) {
5674 case 10: /* VMLSL */
5675 gen_neon_negl(cpu_V0, size);
5676 /* Fall through */
5677 case 5: case 8: /* VABAL, VMLAL */
5678 gen_neon_addl(size);
5679 break;
5680 case 9: case 11: /* VQDMLAL, VQDMLSL */
5681 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5682 if (op == 11) {
5683 gen_neon_negl(cpu_V0, size);
5685 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5686 break;
5687 default:
5688 abort();
5690 neon_store_reg64(cpu_V0, rd + pass);
5691 } else if (op == 4 || op == 6) {
5692 /* Narrowing operation. */
5693 tmp = tcg_temp_new_i32();
5694 if (!u) {
5695 switch (size) {
5696 case 0:
5697 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5698 break;
5699 case 1:
5700 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5701 break;
5702 case 2:
5703 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5704 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5705 break;
5706 default: abort();
5708 } else {
5709 switch (size) {
5710 case 0:
5711 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5712 break;
5713 case 1:
5714 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5715 break;
5716 case 2:
5717 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5718 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5719 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5720 break;
5721 default: abort();
5724 if (pass == 0) {
5725 tmp3 = tmp;
5726 } else {
5727 neon_store_reg(rd, 0, tmp3);
5728 neon_store_reg(rd, 1, tmp);
5730 } else {
5731 /* Write back the result. */
5732 neon_store_reg64(cpu_V0, rd + pass);
5735 } else {
5736 /* Two registers and a scalar. NB that for ops of this form
5737 * the ARM ARM labels bit 24 as Q, but it is in our variable
5738 * 'u', not 'q'.
5740 if (size == 0) {
5741 return 1;
5743 switch (op) {
5744 case 1: /* Float VMLA scalar */
5745 case 5: /* Floating point VMLS scalar */
5746 case 9: /* Floating point VMUL scalar */
5747 if (size == 1) {
5748 return 1;
5750 /* fall through */
5751 case 0: /* Integer VMLA scalar */
5752 case 4: /* Integer VMLS scalar */
5753 case 8: /* Integer VMUL scalar */
5754 case 12: /* VQDMULH scalar */
5755 case 13: /* VQRDMULH scalar */
5756 if (u && ((rd | rn) & 1)) {
5757 return 1;
5759 tmp = neon_get_scalar(size, rm);
5760 neon_store_scratch(0, tmp);
5761 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5762 tmp = neon_load_scratch(0);
5763 tmp2 = neon_load_reg(rn, pass);
5764 if (op == 12) {
5765 if (size == 1) {
5766 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5767 } else {
5768 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5770 } else if (op == 13) {
5771 if (size == 1) {
5772 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5773 } else {
5774 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5776 } else if (op & 1) {
5777 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5778 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5779 tcg_temp_free_ptr(fpstatus);
5780 } else {
5781 switch (size) {
5782 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5783 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5784 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5785 default: abort();
5788 tcg_temp_free_i32(tmp2);
5789 if (op < 8) {
5790 /* Accumulate. */
5791 tmp2 = neon_load_reg(rd, pass);
5792 switch (op) {
5793 case 0:
5794 gen_neon_add(size, tmp, tmp2);
5795 break;
5796 case 1:
5798 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5799 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5800 tcg_temp_free_ptr(fpstatus);
5801 break;
5803 case 4:
5804 gen_neon_rsb(size, tmp, tmp2);
5805 break;
5806 case 5:
5808 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5809 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5810 tcg_temp_free_ptr(fpstatus);
5811 break;
5813 default:
5814 abort();
5816 tcg_temp_free_i32(tmp2);
5818 neon_store_reg(rd, pass, tmp);
5820 break;
5821 case 3: /* VQDMLAL scalar */
5822 case 7: /* VQDMLSL scalar */
5823 case 11: /* VQDMULL scalar */
5824 if (u == 1) {
5825 return 1;
5827 /* fall through */
5828 case 2: /* VMLAL scalar */
5829 case 6: /* VMLSL scalar */
5830 case 10: /* VMULL scalar */
5831 if (rd & 1) {
5832 return 1;
5834 tmp2 = neon_get_scalar(size, rm);
5835 /* We need a copy of tmp2 because gen_neon_mull
5836 * deletes it during pass 0. */
5837 tmp4 = tcg_temp_new_i32();
5838 tcg_gen_mov_i32(tmp4, tmp2);
5839 tmp3 = neon_load_reg(rn, 1);
5841 for (pass = 0; pass < 2; pass++) {
5842 if (pass == 0) {
5843 tmp = neon_load_reg(rn, 0);
5844 } else {
5845 tmp = tmp3;
5846 tmp2 = tmp4;
5848 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5849 if (op != 11) {
5850 neon_load_reg64(cpu_V1, rd + pass);
5852 switch (op) {
5853 case 6:
5854 gen_neon_negl(cpu_V0, size);
5855 /* Fall through */
5856 case 2:
5857 gen_neon_addl(size);
5858 break;
5859 case 3: case 7:
5860 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5861 if (op == 7) {
5862 gen_neon_negl(cpu_V0, size);
5864 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5865 break;
5866 case 10:
5867 /* no-op */
5868 break;
5869 case 11:
5870 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5871 break;
5872 default:
5873 abort();
5875 neon_store_reg64(cpu_V0, rd + pass);
5879 break;
5880 default: /* 14 and 15 are RESERVED */
5881 return 1;
5884 } else { /* size == 3 */
5885 if (!u) {
5886 /* Extract. */
5887 imm = (insn >> 8) & 0xf;
5889 if (imm > 7 && !q)
5890 return 1;
5892 if (q && ((rd | rn | rm) & 1)) {
5893 return 1;
5896 if (imm == 0) {
5897 neon_load_reg64(cpu_V0, rn);
5898 if (q) {
5899 neon_load_reg64(cpu_V1, rn + 1);
5901 } else if (imm == 8) {
5902 neon_load_reg64(cpu_V0, rn + 1);
5903 if (q) {
5904 neon_load_reg64(cpu_V1, rm);
5906 } else if (q) {
5907 tmp64 = tcg_temp_new_i64();
5908 if (imm < 8) {
5909 neon_load_reg64(cpu_V0, rn);
5910 neon_load_reg64(tmp64, rn + 1);
5911 } else {
5912 neon_load_reg64(cpu_V0, rn + 1);
5913 neon_load_reg64(tmp64, rm);
5915 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5916 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5917 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5918 if (imm < 8) {
5919 neon_load_reg64(cpu_V1, rm);
5920 } else {
5921 neon_load_reg64(cpu_V1, rm + 1);
5922 imm -= 8;
5924 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5925 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5926 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5927 tcg_temp_free_i64(tmp64);
5928 } else {
5929 /* BUGFIX */
5930 neon_load_reg64(cpu_V0, rn);
5931 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5932 neon_load_reg64(cpu_V1, rm);
5933 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5934 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5936 neon_store_reg64(cpu_V0, rd);
5937 if (q) {
5938 neon_store_reg64(cpu_V1, rd + 1);
5940 } else if ((insn & (1 << 11)) == 0) {
5941 /* Two register misc. */
5942 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5943 size = (insn >> 18) & 3;
5944 /* UNDEF for unknown op values and bad op-size combinations */
5945 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5946 return 1;
5948 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5949 q && ((rm | rd) & 1)) {
5950 return 1;
5952 switch (op) {
5953 case NEON_2RM_VREV64:
5954 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5955 tmp = neon_load_reg(rm, pass * 2);
5956 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5957 switch (size) {
5958 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5959 case 1: gen_swap_half(tmp); break;
5960 case 2: /* no-op */ break;
5961 default: abort();
5963 neon_store_reg(rd, pass * 2 + 1, tmp);
5964 if (size == 2) {
5965 neon_store_reg(rd, pass * 2, tmp2);
5966 } else {
5967 switch (size) {
5968 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5969 case 1: gen_swap_half(tmp2); break;
5970 default: abort();
5972 neon_store_reg(rd, pass * 2, tmp2);
5975 break;
5976 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5977 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5978 for (pass = 0; pass < q + 1; pass++) {
5979 tmp = neon_load_reg(rm, pass * 2);
5980 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5981 tmp = neon_load_reg(rm, pass * 2 + 1);
5982 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5983 switch (size) {
5984 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5985 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5986 case 2: tcg_gen_add_i64(CPU_V001); break;
5987 default: abort();
5989 if (op >= NEON_2RM_VPADAL) {
5990 /* Accumulate. */
5991 neon_load_reg64(cpu_V1, rd + pass);
5992 gen_neon_addl(size);
5994 neon_store_reg64(cpu_V0, rd + pass);
5996 break;
5997 case NEON_2RM_VTRN:
5998 if (size == 2) {
5999 int n;
6000 for (n = 0; n < (q ? 4 : 2); n += 2) {
6001 tmp = neon_load_reg(rm, n);
6002 tmp2 = neon_load_reg(rd, n + 1);
6003 neon_store_reg(rm, n, tmp2);
6004 neon_store_reg(rd, n + 1, tmp);
6006 } else {
6007 goto elementwise;
6009 break;
6010 case NEON_2RM_VUZP:
6011 if (gen_neon_unzip(rd, rm, size, q)) {
6012 return 1;
6014 break;
6015 case NEON_2RM_VZIP:
6016 if (gen_neon_zip(rd, rm, size, q)) {
6017 return 1;
6019 break;
6020 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6021 /* also VQMOVUN; op field and mnemonics don't line up */
6022 if (rm & 1) {
6023 return 1;
6025 TCGV_UNUSED(tmp2);
6026 for (pass = 0; pass < 2; pass++) {
6027 neon_load_reg64(cpu_V0, rm + pass);
6028 tmp = tcg_temp_new_i32();
6029 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6030 tmp, cpu_V0);
6031 if (pass == 0) {
6032 tmp2 = tmp;
6033 } else {
6034 neon_store_reg(rd, 0, tmp2);
6035 neon_store_reg(rd, 1, tmp);
6038 break;
6039 case NEON_2RM_VSHLL:
6040 if (q || (rd & 1)) {
6041 return 1;
6043 tmp = neon_load_reg(rm, 0);
6044 tmp2 = neon_load_reg(rm, 1);
6045 for (pass = 0; pass < 2; pass++) {
6046 if (pass == 1)
6047 tmp = tmp2;
6048 gen_neon_widen(cpu_V0, tmp, size, 1);
6049 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6050 neon_store_reg64(cpu_V0, rd + pass);
6052 break;
6053 case NEON_2RM_VCVT_F16_F32:
6054 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6055 q || (rm & 1)) {
6056 return 1;
6058 tmp = tcg_temp_new_i32();
6059 tmp2 = tcg_temp_new_i32();
6060 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6061 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6062 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6063 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6064 tcg_gen_shli_i32(tmp2, tmp2, 16);
6065 tcg_gen_or_i32(tmp2, tmp2, tmp);
6066 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6067 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6068 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6069 neon_store_reg(rd, 0, tmp2);
6070 tmp2 = tcg_temp_new_i32();
6071 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6072 tcg_gen_shli_i32(tmp2, tmp2, 16);
6073 tcg_gen_or_i32(tmp2, tmp2, tmp);
6074 neon_store_reg(rd, 1, tmp2);
6075 tcg_temp_free_i32(tmp);
6076 break;
6077 case NEON_2RM_VCVT_F32_F16:
6078 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6079 q || (rd & 1)) {
6080 return 1;
6082 tmp3 = tcg_temp_new_i32();
6083 tmp = neon_load_reg(rm, 0);
6084 tmp2 = neon_load_reg(rm, 1);
6085 tcg_gen_ext16u_i32(tmp3, tmp);
6086 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6087 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6088 tcg_gen_shri_i32(tmp3, tmp, 16);
6089 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6090 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6091 tcg_temp_free_i32(tmp);
6092 tcg_gen_ext16u_i32(tmp3, tmp2);
6093 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6094 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6095 tcg_gen_shri_i32(tmp3, tmp2, 16);
6096 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6097 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6098 tcg_temp_free_i32(tmp2);
6099 tcg_temp_free_i32(tmp3);
6100 break;
6101 default:
6102 elementwise:
6103 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6104 if (neon_2rm_is_float_op(op)) {
6105 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6106 neon_reg_offset(rm, pass));
6107 TCGV_UNUSED(tmp);
6108 } else {
6109 tmp = neon_load_reg(rm, pass);
6111 switch (op) {
6112 case NEON_2RM_VREV32:
6113 switch (size) {
6114 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6115 case 1: gen_swap_half(tmp); break;
6116 default: abort();
6118 break;
6119 case NEON_2RM_VREV16:
6120 gen_rev16(tmp);
6121 break;
6122 case NEON_2RM_VCLS:
6123 switch (size) {
6124 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6125 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6126 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6127 default: abort();
6129 break;
6130 case NEON_2RM_VCLZ:
6131 switch (size) {
6132 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6133 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6134 case 2: gen_helper_clz(tmp, tmp); break;
6135 default: abort();
6137 break;
6138 case NEON_2RM_VCNT:
6139 gen_helper_neon_cnt_u8(tmp, tmp);
6140 break;
6141 case NEON_2RM_VMVN:
6142 tcg_gen_not_i32(tmp, tmp);
6143 break;
6144 case NEON_2RM_VQABS:
6145 switch (size) {
6146 case 0:
6147 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6148 break;
6149 case 1:
6150 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6151 break;
6152 case 2:
6153 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6154 break;
6155 default: abort();
6157 break;
6158 case NEON_2RM_VQNEG:
6159 switch (size) {
6160 case 0:
6161 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6162 break;
6163 case 1:
6164 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6165 break;
6166 case 2:
6167 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6168 break;
6169 default: abort();
6171 break;
6172 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6173 tmp2 = tcg_const_i32(0);
6174 switch(size) {
6175 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6176 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6177 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6178 default: abort();
6180 tcg_temp_free(tmp2);
6181 if (op == NEON_2RM_VCLE0) {
6182 tcg_gen_not_i32(tmp, tmp);
6184 break;
6185 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6186 tmp2 = tcg_const_i32(0);
6187 switch(size) {
6188 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6189 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6190 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6191 default: abort();
6193 tcg_temp_free(tmp2);
6194 if (op == NEON_2RM_VCLT0) {
6195 tcg_gen_not_i32(tmp, tmp);
6197 break;
6198 case NEON_2RM_VCEQ0:
6199 tmp2 = tcg_const_i32(0);
6200 switch(size) {
6201 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6202 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6203 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6204 default: abort();
6206 tcg_temp_free(tmp2);
6207 break;
6208 case NEON_2RM_VABS:
6209 switch(size) {
6210 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6211 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6212 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6213 default: abort();
6215 break;
6216 case NEON_2RM_VNEG:
6217 tmp2 = tcg_const_i32(0);
6218 gen_neon_rsb(size, tmp, tmp2);
6219 tcg_temp_free(tmp2);
6220 break;
6221 case NEON_2RM_VCGT0_F:
6223 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6224 tmp2 = tcg_const_i32(0);
6225 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6226 tcg_temp_free(tmp2);
6227 tcg_temp_free_ptr(fpstatus);
6228 break;
6230 case NEON_2RM_VCGE0_F:
6232 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6233 tmp2 = tcg_const_i32(0);
6234 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6235 tcg_temp_free(tmp2);
6236 tcg_temp_free_ptr(fpstatus);
6237 break;
6239 case NEON_2RM_VCEQ0_F:
6241 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6242 tmp2 = tcg_const_i32(0);
6243 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6244 tcg_temp_free(tmp2);
6245 tcg_temp_free_ptr(fpstatus);
6246 break;
6248 case NEON_2RM_VCLE0_F:
6250 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6251 tmp2 = tcg_const_i32(0);
6252 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6253 tcg_temp_free(tmp2);
6254 tcg_temp_free_ptr(fpstatus);
6255 break;
6257 case NEON_2RM_VCLT0_F:
6259 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6260 tmp2 = tcg_const_i32(0);
6261 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6262 tcg_temp_free(tmp2);
6263 tcg_temp_free_ptr(fpstatus);
6264 break;
6266 case NEON_2RM_VABS_F:
6267 gen_vfp_abs(0);
6268 break;
6269 case NEON_2RM_VNEG_F:
6270 gen_vfp_neg(0);
6271 break;
6272 case NEON_2RM_VSWP:
6273 tmp2 = neon_load_reg(rd, pass);
6274 neon_store_reg(rm, pass, tmp2);
6275 break;
6276 case NEON_2RM_VTRN:
6277 tmp2 = neon_load_reg(rd, pass);
6278 switch (size) {
6279 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6280 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6281 default: abort();
6283 neon_store_reg(rm, pass, tmp2);
6284 break;
6285 case NEON_2RM_VRECPE:
6286 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6287 break;
6288 case NEON_2RM_VRSQRTE:
6289 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6290 break;
6291 case NEON_2RM_VRECPE_F:
6292 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6293 break;
6294 case NEON_2RM_VRSQRTE_F:
6295 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6296 break;
6297 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6298 gen_vfp_sito(0, 1);
6299 break;
6300 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6301 gen_vfp_uito(0, 1);
6302 break;
6303 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6304 gen_vfp_tosiz(0, 1);
6305 break;
6306 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6307 gen_vfp_touiz(0, 1);
6308 break;
6309 default:
6310 /* Reserved op values were caught by the
6311 * neon_2rm_sizes[] check earlier.
6313 abort();
6315 if (neon_2rm_is_float_op(op)) {
6316 tcg_gen_st_f32(cpu_F0s, cpu_env,
6317 neon_reg_offset(rd, pass));
6318 } else {
6319 neon_store_reg(rd, pass, tmp);
6322 break;
6324 } else if ((insn & (1 << 10)) == 0) {
6325 /* VTBL, VTBX. */
6326 int n = ((insn >> 8) & 3) + 1;
6327 if ((rn + n) > 32) {
6328 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6329 * helper function running off the end of the register file.
6331 return 1;
6333 n <<= 3;
6334 if (insn & (1 << 6)) {
6335 tmp = neon_load_reg(rd, 0);
6336 } else {
6337 tmp = tcg_temp_new_i32();
6338 tcg_gen_movi_i32(tmp, 0);
6340 tmp2 = neon_load_reg(rm, 0);
6341 tmp4 = tcg_const_i32(rn);
6342 tmp5 = tcg_const_i32(n);
6343 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
6344 tcg_temp_free_i32(tmp);
6345 if (insn & (1 << 6)) {
6346 tmp = neon_load_reg(rd, 1);
6347 } else {
6348 tmp = tcg_temp_new_i32();
6349 tcg_gen_movi_i32(tmp, 0);
6351 tmp3 = neon_load_reg(rm, 1);
6352 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
6353 tcg_temp_free_i32(tmp5);
6354 tcg_temp_free_i32(tmp4);
6355 neon_store_reg(rd, 0, tmp2);
6356 neon_store_reg(rd, 1, tmp3);
6357 tcg_temp_free_i32(tmp);
6358 } else if ((insn & 0x380) == 0) {
6359 /* VDUP */
6360 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6361 return 1;
6363 if (insn & (1 << 19)) {
6364 tmp = neon_load_reg(rm, 1);
6365 } else {
6366 tmp = neon_load_reg(rm, 0);
6368 if (insn & (1 << 16)) {
6369 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6370 } else if (insn & (1 << 17)) {
6371 if ((insn >> 18) & 1)
6372 gen_neon_dup_high16(tmp);
6373 else
6374 gen_neon_dup_low16(tmp);
6376 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6377 tmp2 = tcg_temp_new_i32();
6378 tcg_gen_mov_i32(tmp2, tmp);
6379 neon_store_reg(rd, pass, tmp2);
6381 tcg_temp_free_i32(tmp);
6382 } else {
6383 return 1;
6387 return 0;
6390 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
6392 int crn = (insn >> 16) & 0xf;
6393 int crm = insn & 0xf;
6394 int op1 = (insn >> 21) & 7;
6395 int op2 = (insn >> 5) & 7;
6396 int rt = (insn >> 12) & 0xf;
6397 TCGv tmp;
6399 /* Minimal set of debug registers, since we don't support debug */
6400 if (op1 == 0 && crn == 0 && op2 == 0) {
6401 switch (crm) {
6402 case 0:
6403 /* DBGDIDR: just RAZ. In particular this means the
6404 * "debug architecture version" bits will read as
6405 * a reserved value, which should cause Linux to
6406 * not try to use the debug hardware.
6408 tmp = tcg_const_i32(0);
6409 store_reg(s, rt, tmp);
6410 return 0;
6411 case 1:
6412 case 2:
6413 /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
6414 * don't implement memory mapped debug components
6416 if (ENABLE_ARCH_7) {
6417 tmp = tcg_const_i32(0);
6418 store_reg(s, rt, tmp);
6419 return 0;
6421 break;
6422 default:
6423 break;
6427 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6428 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6429 /* TEECR */
6430 if (IS_USER(s))
6431 return 1;
6432 tmp = load_cpu_field(teecr);
6433 store_reg(s, rt, tmp);
6434 return 0;
6436 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6437 /* TEEHBR */
6438 if (IS_USER(s) && (env->teecr & 1))
6439 return 1;
6440 tmp = load_cpu_field(teehbr);
6441 store_reg(s, rt, tmp);
6442 return 0;
6445 return 1;
6448 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
6450 int crn = (insn >> 16) & 0xf;
6451 int crm = insn & 0xf;
6452 int op1 = (insn >> 21) & 7;
6453 int op2 = (insn >> 5) & 7;
6454 int rt = (insn >> 12) & 0xf;
6455 TCGv tmp;
6457 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6458 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
6459 /* TEECR */
6460 if (IS_USER(s))
6461 return 1;
6462 tmp = load_reg(s, rt);
6463 gen_helper_set_teecr(cpu_env, tmp);
6464 tcg_temp_free_i32(tmp);
6465 return 0;
6467 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
6468 /* TEEHBR */
6469 if (IS_USER(s) && (env->teecr & 1))
6470 return 1;
6471 tmp = load_reg(s, rt);
6472 store_cpu_field(tmp, teehbr);
6473 return 0;
6476 return 1;
6479 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
6481 int cpnum;
6483 cpnum = (insn >> 8) & 0xf;
6484 if (arm_feature(env, ARM_FEATURE_XSCALE)
6485 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6486 return 1;
6488 switch (cpnum) {
6489 case 0:
6490 case 1:
6491 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6492 return disas_iwmmxt_insn(env, s, insn);
6493 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6494 return disas_dsp_insn(env, s, insn);
6496 return 1;
6497 case 10:
6498 case 11:
6499 return disas_vfp_insn (env, s, insn);
6500 case 14:
6501 /* Coprocessors 7-15 are architecturally reserved by ARM.
6502 Unfortunately Intel decided to ignore this. */
6503 if (arm_feature(env, ARM_FEATURE_XSCALE))
6504 goto board;
6505 if (insn & (1 << 20))
6506 return disas_cp14_read(env, s, insn);
6507 else
6508 return disas_cp14_write(env, s, insn);
6509 case 15:
6510 return disas_cp15_insn (env, s, insn);
6511 default:
6512 board:
6513 /* Unknown coprocessor. See if the board has hooked it. */
6514 return disas_cp_insn (env, s, insn);
6519 /* Store a 64-bit value to a register pair. Clobbers val. */
6520 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6522 TCGv tmp;
6523 tmp = tcg_temp_new_i32();
6524 tcg_gen_trunc_i64_i32(tmp, val);
6525 store_reg(s, rlow, tmp);
6526 tmp = tcg_temp_new_i32();
6527 tcg_gen_shri_i64(val, val, 32);
6528 tcg_gen_trunc_i64_i32(tmp, val);
6529 store_reg(s, rhigh, tmp);
6532 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6533 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6535 TCGv_i64 tmp;
6536 TCGv tmp2;
6538 /* Load value and extend to 64 bits. */
6539 tmp = tcg_temp_new_i64();
6540 tmp2 = load_reg(s, rlow);
6541 tcg_gen_extu_i32_i64(tmp, tmp2);
6542 tcg_temp_free_i32(tmp2);
6543 tcg_gen_add_i64(val, val, tmp);
6544 tcg_temp_free_i64(tmp);
6547 /* load and add a 64-bit value from a register pair. */
6548 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6550 TCGv_i64 tmp;
6551 TCGv tmpl;
6552 TCGv tmph;
6554 /* Load 64-bit value rd:rn. */
6555 tmpl = load_reg(s, rlow);
6556 tmph = load_reg(s, rhigh);
6557 tmp = tcg_temp_new_i64();
6558 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6559 tcg_temp_free_i32(tmpl);
6560 tcg_temp_free_i32(tmph);
6561 tcg_gen_add_i64(val, val, tmp);
6562 tcg_temp_free_i64(tmp);
6565 /* Set N and Z flags from a 64-bit value. */
6566 static void gen_logicq_cc(TCGv_i64 val)
6568 TCGv tmp = tcg_temp_new_i32();
6569 gen_helper_logicq_cc(tmp, val);
6570 gen_logic_CC(tmp);
6571 tcg_temp_free_i32(tmp);
6574 /* Load/Store exclusive instructions are implemented by remembering
6575 the value/address loaded, and seeing if these are the same
6576 when the store is performed. This should be is sufficient to implement
6577 the architecturally mandated semantics, and avoids having to monitor
6578 regular stores.
6580 In system emulation mode only one CPU will be running at once, so
6581 this sequence is effectively atomic. In user emulation mode we
6582 throw an exception and handle the atomic operation elsewhere. */
6583 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6584 TCGv addr, int size)
6586 TCGv tmp;
6588 switch (size) {
6589 case 0:
6590 tmp = gen_ld8u(addr, IS_USER(s));
6591 break;
6592 case 1:
6593 tmp = gen_ld16u(addr, IS_USER(s));
6594 break;
6595 case 2:
6596 case 3:
6597 tmp = gen_ld32(addr, IS_USER(s));
6598 break;
6599 default:
6600 abort();
6602 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6603 store_reg(s, rt, tmp);
6604 if (size == 3) {
6605 TCGv tmp2 = tcg_temp_new_i32();
6606 tcg_gen_addi_i32(tmp2, addr, 4);
6607 tmp = gen_ld32(tmp2, IS_USER(s));
6608 tcg_temp_free_i32(tmp2);
6609 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6610 store_reg(s, rt2, tmp);
6612 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6615 static void gen_clrex(DisasContext *s)
6617 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6620 #ifdef CONFIG_USER_ONLY
6621 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6622 TCGv addr, int size)
6624 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6625 tcg_gen_movi_i32(cpu_exclusive_info,
6626 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6627 gen_exception_insn(s, 4, EXCP_STREX);
6629 #else
6630 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6631 TCGv addr, int size)
6633 TCGv tmp;
6634 int done_label;
6635 int fail_label;
6637 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6638 [addr] = {Rt};
6639 {Rd} = 0;
6640 } else {
6641 {Rd} = 1;
6642 } */
6643 fail_label = gen_new_label();
6644 done_label = gen_new_label();
6645 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6646 switch (size) {
6647 case 0:
6648 tmp = gen_ld8u(addr, IS_USER(s));
6649 break;
6650 case 1:
6651 tmp = gen_ld16u(addr, IS_USER(s));
6652 break;
6653 case 2:
6654 case 3:
6655 tmp = gen_ld32(addr, IS_USER(s));
6656 break;
6657 default:
6658 abort();
6660 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6661 tcg_temp_free_i32(tmp);
6662 if (size == 3) {
6663 TCGv tmp2 = tcg_temp_new_i32();
6664 tcg_gen_addi_i32(tmp2, addr, 4);
6665 tmp = gen_ld32(tmp2, IS_USER(s));
6666 tcg_temp_free_i32(tmp2);
6667 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6668 tcg_temp_free_i32(tmp);
6670 tmp = load_reg(s, rt);
6671 switch (size) {
6672 case 0:
6673 gen_st8(tmp, addr, IS_USER(s));
6674 break;
6675 case 1:
6676 gen_st16(tmp, addr, IS_USER(s));
6677 break;
6678 case 2:
6679 case 3:
6680 gen_st32(tmp, addr, IS_USER(s));
6681 break;
6682 default:
6683 abort();
6685 if (size == 3) {
6686 tcg_gen_addi_i32(addr, addr, 4);
6687 tmp = load_reg(s, rt2);
6688 gen_st32(tmp, addr, IS_USER(s));
6690 tcg_gen_movi_i32(cpu_R[rd], 0);
6691 tcg_gen_br(done_label);
6692 gen_set_label(fail_label);
6693 tcg_gen_movi_i32(cpu_R[rd], 1);
6694 gen_set_label(done_label);
6695 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6697 #endif
6699 static void disas_arm_insn(CPUState * env, DisasContext *s)
6701 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6702 TCGv tmp;
6703 TCGv tmp2;
6704 TCGv tmp3;
6705 TCGv addr;
6706 TCGv_i64 tmp64;
6708 insn = ldl_code(s->pc);
6709 s->pc += 4;
6711 /* M variants do not implement ARM mode. */
6712 if (IS_M(env))
6713 goto illegal_op;
6714 cond = insn >> 28;
6715 if (cond == 0xf){
6716 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6717 * choose to UNDEF. In ARMv5 and above the space is used
6718 * for miscellaneous unconditional instructions.
6720 ARCH(5);
6722 /* Unconditional instructions. */
6723 if (((insn >> 25) & 7) == 1) {
6724 /* NEON Data processing. */
6725 if (!arm_feature(env, ARM_FEATURE_NEON))
6726 goto illegal_op;
6728 if (disas_neon_data_insn(env, s, insn))
6729 goto illegal_op;
6730 return;
6732 if ((insn & 0x0f100000) == 0x04000000) {
6733 /* NEON load/store. */
6734 if (!arm_feature(env, ARM_FEATURE_NEON))
6735 goto illegal_op;
6737 if (disas_neon_ls_insn(env, s, insn))
6738 goto illegal_op;
6739 return;
6741 if (((insn & 0x0f30f000) == 0x0510f000) ||
6742 ((insn & 0x0f30f010) == 0x0710f000)) {
6743 if ((insn & (1 << 22)) == 0) {
6744 /* PLDW; v7MP */
6745 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6746 goto illegal_op;
6749 /* Otherwise PLD; v5TE+ */
6750 ARCH(5TE);
6751 return;
6753 if (((insn & 0x0f70f000) == 0x0450f000) ||
6754 ((insn & 0x0f70f010) == 0x0650f000)) {
6755 ARCH(7);
6756 return; /* PLI; V7 */
6758 if (((insn & 0x0f700000) == 0x04100000) ||
6759 ((insn & 0x0f700010) == 0x06100000)) {
6760 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6761 goto illegal_op;
6763 return; /* v7MP: Unallocated memory hint: must NOP */
6766 if ((insn & 0x0ffffdff) == 0x01010000) {
6767 ARCH(6);
6768 /* setend */
6769 if (insn & (1 << 9)) {
6770 /* BE8 mode not implemented. */
6771 goto illegal_op;
6773 return;
6774 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6775 switch ((insn >> 4) & 0xf) {
6776 case 1: /* clrex */
6777 ARCH(6K);
6778 gen_clrex(s);
6779 return;
6780 case 4: /* dsb */
6781 case 5: /* dmb */
6782 case 6: /* isb */
6783 ARCH(7);
6784 /* We don't emulate caches so these are a no-op. */
6785 return;
6786 default:
6787 goto illegal_op;
6789 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6790 /* srs */
6791 int32_t offset;
6792 if (IS_USER(s))
6793 goto illegal_op;
6794 ARCH(6);
6795 op1 = (insn & 0x1f);
6796 addr = tcg_temp_new_i32();
6797 tmp = tcg_const_i32(op1);
6798 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6799 tcg_temp_free_i32(tmp);
6800 i = (insn >> 23) & 3;
6801 switch (i) {
6802 case 0: offset = -4; break; /* DA */
6803 case 1: offset = 0; break; /* IA */
6804 case 2: offset = -8; break; /* DB */
6805 case 3: offset = 4; break; /* IB */
6806 default: abort();
6808 if (offset)
6809 tcg_gen_addi_i32(addr, addr, offset);
6810 tmp = load_reg(s, 14);
6811 gen_st32(tmp, addr, 0);
6812 tmp = load_cpu_field(spsr);
6813 tcg_gen_addi_i32(addr, addr, 4);
6814 gen_st32(tmp, addr, 0);
6815 if (insn & (1 << 21)) {
6816 /* Base writeback. */
6817 switch (i) {
6818 case 0: offset = -8; break;
6819 case 1: offset = 4; break;
6820 case 2: offset = -4; break;
6821 case 3: offset = 0; break;
6822 default: abort();
6824 if (offset)
6825 tcg_gen_addi_i32(addr, addr, offset);
6826 tmp = tcg_const_i32(op1);
6827 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6828 tcg_temp_free_i32(tmp);
6829 tcg_temp_free_i32(addr);
6830 } else {
6831 tcg_temp_free_i32(addr);
6833 return;
6834 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6835 /* rfe */
6836 int32_t offset;
6837 if (IS_USER(s))
6838 goto illegal_op;
6839 ARCH(6);
6840 rn = (insn >> 16) & 0xf;
6841 addr = load_reg(s, rn);
6842 i = (insn >> 23) & 3;
6843 switch (i) {
6844 case 0: offset = -4; break; /* DA */
6845 case 1: offset = 0; break; /* IA */
6846 case 2: offset = -8; break; /* DB */
6847 case 3: offset = 4; break; /* IB */
6848 default: abort();
6850 if (offset)
6851 tcg_gen_addi_i32(addr, addr, offset);
6852 /* Load PC into tmp and CPSR into tmp2. */
6853 tmp = gen_ld32(addr, 0);
6854 tcg_gen_addi_i32(addr, addr, 4);
6855 tmp2 = gen_ld32(addr, 0);
6856 if (insn & (1 << 21)) {
6857 /* Base writeback. */
6858 switch (i) {
6859 case 0: offset = -8; break;
6860 case 1: offset = 4; break;
6861 case 2: offset = -4; break;
6862 case 3: offset = 0; break;
6863 default: abort();
6865 if (offset)
6866 tcg_gen_addi_i32(addr, addr, offset);
6867 store_reg(s, rn, addr);
6868 } else {
6869 tcg_temp_free_i32(addr);
6871 gen_rfe(s, tmp, tmp2);
6872 return;
6873 } else if ((insn & 0x0e000000) == 0x0a000000) {
6874 /* branch link and change to thumb (blx <offset>) */
6875 int32_t offset;
6877 val = (uint32_t)s->pc;
6878 tmp = tcg_temp_new_i32();
6879 tcg_gen_movi_i32(tmp, val);
6880 store_reg(s, 14, tmp);
6881 /* Sign-extend the 24-bit offset */
6882 offset = (((int32_t)insn) << 8) >> 8;
6883 /* offset * 4 + bit24 * 2 + (thumb bit) */
6884 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6885 /* pipeline offset */
6886 val += 4;
6887 /* protected by ARCH(5); above, near the start of uncond block */
6888 gen_bx_im(s, val);
6889 return;
6890 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6891 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6892 /* iWMMXt register transfer. */
6893 if (env->cp15.c15_cpar & (1 << 1))
6894 if (!disas_iwmmxt_insn(env, s, insn))
6895 return;
6897 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6898 /* Coprocessor double register transfer. */
6899 ARCH(5TE);
6900 } else if ((insn & 0x0f000010) == 0x0e000010) {
6901 /* Additional coprocessor register transfer. */
6902 } else if ((insn & 0x0ff10020) == 0x01000000) {
6903 uint32_t mask;
6904 uint32_t val;
6905 /* cps (privileged) */
6906 if (IS_USER(s))
6907 return;
6908 mask = val = 0;
6909 if (insn & (1 << 19)) {
6910 if (insn & (1 << 8))
6911 mask |= CPSR_A;
6912 if (insn & (1 << 7))
6913 mask |= CPSR_I;
6914 if (insn & (1 << 6))
6915 mask |= CPSR_F;
6916 if (insn & (1 << 18))
6917 val |= mask;
6919 if (insn & (1 << 17)) {
6920 mask |= CPSR_M;
6921 val |= (insn & 0x1f);
6923 if (mask) {
6924 gen_set_psr_im(s, mask, 0, val);
6926 return;
6928 goto illegal_op;
6930 if (cond != 0xe) {
6931 /* if not always execute, we generate a conditional jump to
6932 next instruction */
6933 s->condlabel = gen_new_label();
6934 gen_test_cc(cond ^ 1, s->condlabel);
6935 s->condjmp = 1;
6937 if ((insn & 0x0f900000) == 0x03000000) {
6938 if ((insn & (1 << 21)) == 0) {
6939 ARCH(6T2);
6940 rd = (insn >> 12) & 0xf;
6941 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6942 if ((insn & (1 << 22)) == 0) {
6943 /* MOVW */
6944 tmp = tcg_temp_new_i32();
6945 tcg_gen_movi_i32(tmp, val);
6946 } else {
6947 /* MOVT */
6948 tmp = load_reg(s, rd);
6949 tcg_gen_ext16u_i32(tmp, tmp);
6950 tcg_gen_ori_i32(tmp, tmp, val << 16);
6952 store_reg(s, rd, tmp);
6953 } else {
6954 if (((insn >> 12) & 0xf) != 0xf)
6955 goto illegal_op;
6956 if (((insn >> 16) & 0xf) == 0) {
6957 gen_nop_hint(s, insn & 0xff);
6958 } else {
6959 /* CPSR = immediate */
6960 val = insn & 0xff;
6961 shift = ((insn >> 8) & 0xf) * 2;
6962 if (shift)
6963 val = (val >> shift) | (val << (32 - shift));
6964 i = ((insn & (1 << 22)) != 0);
6965 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6966 goto illegal_op;
6969 } else if ((insn & 0x0f900000) == 0x01000000
6970 && (insn & 0x00000090) != 0x00000090) {
6971 /* miscellaneous instructions */
6972 op1 = (insn >> 21) & 3;
6973 sh = (insn >> 4) & 0xf;
6974 rm = insn & 0xf;
6975 switch (sh) {
6976 case 0x0: /* move program status register */
6977 if (op1 & 1) {
6978 /* PSR = reg */
6979 tmp = load_reg(s, rm);
6980 i = ((op1 & 2) != 0);
6981 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6982 goto illegal_op;
6983 } else {
6984 /* reg = PSR */
6985 rd = (insn >> 12) & 0xf;
6986 if (op1 & 2) {
6987 if (IS_USER(s))
6988 goto illegal_op;
6989 tmp = load_cpu_field(spsr);
6990 } else {
6991 tmp = tcg_temp_new_i32();
6992 gen_helper_cpsr_read(tmp);
6994 store_reg(s, rd, tmp);
6996 break;
6997 case 0x1:
6998 if (op1 == 1) {
6999 /* branch/exchange thumb (bx). */
7000 ARCH(4T);
7001 tmp = load_reg(s, rm);
7002 gen_bx(s, tmp);
7003 } else if (op1 == 3) {
7004 /* clz */
7005 ARCH(5);
7006 rd = (insn >> 12) & 0xf;
7007 tmp = load_reg(s, rm);
7008 gen_helper_clz(tmp, tmp);
7009 store_reg(s, rd, tmp);
7010 } else {
7011 goto illegal_op;
7013 break;
7014 case 0x2:
7015 if (op1 == 1) {
7016 ARCH(5J); /* bxj */
7017 /* Trivial implementation equivalent to bx. */
7018 tmp = load_reg(s, rm);
7019 gen_bx(s, tmp);
7020 } else {
7021 goto illegal_op;
7023 break;
7024 case 0x3:
7025 if (op1 != 1)
7026 goto illegal_op;
7028 ARCH(5);
7029 /* branch link/exchange thumb (blx) */
7030 tmp = load_reg(s, rm);
7031 tmp2 = tcg_temp_new_i32();
7032 tcg_gen_movi_i32(tmp2, s->pc);
7033 store_reg(s, 14, tmp2);
7034 gen_bx(s, tmp);
7035 break;
7036 case 0x5: /* saturating add/subtract */
7037 ARCH(5TE);
7038 rd = (insn >> 12) & 0xf;
7039 rn = (insn >> 16) & 0xf;
7040 tmp = load_reg(s, rm);
7041 tmp2 = load_reg(s, rn);
7042 if (op1 & 2)
7043 gen_helper_double_saturate(tmp2, tmp2);
7044 if (op1 & 1)
7045 gen_helper_sub_saturate(tmp, tmp, tmp2);
7046 else
7047 gen_helper_add_saturate(tmp, tmp, tmp2);
7048 tcg_temp_free_i32(tmp2);
7049 store_reg(s, rd, tmp);
7050 break;
7051 case 7:
7052 /* SMC instruction (op1 == 3)
7053 and undefined instructions (op1 == 0 || op1 == 2)
7054 will trap */
7055 if (op1 != 1) {
7056 goto illegal_op;
7058 /* bkpt */
7059 ARCH(5);
7060 gen_exception_insn(s, 4, EXCP_BKPT);
7061 break;
7062 case 0x8: /* signed multiply */
7063 case 0xa:
7064 case 0xc:
7065 case 0xe:
7066 ARCH(5TE);
7067 rs = (insn >> 8) & 0xf;
7068 rn = (insn >> 12) & 0xf;
7069 rd = (insn >> 16) & 0xf;
7070 if (op1 == 1) {
7071 /* (32 * 16) >> 16 */
7072 tmp = load_reg(s, rm);
7073 tmp2 = load_reg(s, rs);
7074 if (sh & 4)
7075 tcg_gen_sari_i32(tmp2, tmp2, 16);
7076 else
7077 gen_sxth(tmp2);
7078 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7079 tcg_gen_shri_i64(tmp64, tmp64, 16);
7080 tmp = tcg_temp_new_i32();
7081 tcg_gen_trunc_i64_i32(tmp, tmp64);
7082 tcg_temp_free_i64(tmp64);
7083 if ((sh & 2) == 0) {
7084 tmp2 = load_reg(s, rn);
7085 gen_helper_add_setq(tmp, tmp, tmp2);
7086 tcg_temp_free_i32(tmp2);
7088 store_reg(s, rd, tmp);
7089 } else {
7090 /* 16 * 16 */
7091 tmp = load_reg(s, rm);
7092 tmp2 = load_reg(s, rs);
7093 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7094 tcg_temp_free_i32(tmp2);
7095 if (op1 == 2) {
7096 tmp64 = tcg_temp_new_i64();
7097 tcg_gen_ext_i32_i64(tmp64, tmp);
7098 tcg_temp_free_i32(tmp);
7099 gen_addq(s, tmp64, rn, rd);
7100 gen_storeq_reg(s, rn, rd, tmp64);
7101 tcg_temp_free_i64(tmp64);
7102 } else {
7103 if (op1 == 0) {
7104 tmp2 = load_reg(s, rn);
7105 gen_helper_add_setq(tmp, tmp, tmp2);
7106 tcg_temp_free_i32(tmp2);
7108 store_reg(s, rd, tmp);
7111 break;
7112 default:
7113 goto illegal_op;
7115 } else if (((insn & 0x0e000000) == 0 &&
7116 (insn & 0x00000090) != 0x90) ||
7117 ((insn & 0x0e000000) == (1 << 25))) {
7118 int set_cc, logic_cc, shiftop;
7120 op1 = (insn >> 21) & 0xf;
7121 set_cc = (insn >> 20) & 1;
7122 logic_cc = table_logic_cc[op1] & set_cc;
7124 /* data processing instruction */
7125 if (insn & (1 << 25)) {
7126 /* immediate operand */
7127 val = insn & 0xff;
7128 shift = ((insn >> 8) & 0xf) * 2;
7129 if (shift) {
7130 val = (val >> shift) | (val << (32 - shift));
7132 tmp2 = tcg_temp_new_i32();
7133 tcg_gen_movi_i32(tmp2, val);
7134 if (logic_cc && shift) {
7135 gen_set_CF_bit31(tmp2);
7137 } else {
7138 /* register */
7139 rm = (insn) & 0xf;
7140 tmp2 = load_reg(s, rm);
7141 shiftop = (insn >> 5) & 3;
7142 if (!(insn & (1 << 4))) {
7143 shift = (insn >> 7) & 0x1f;
7144 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7145 } else {
7146 rs = (insn >> 8) & 0xf;
7147 tmp = load_reg(s, rs);
7148 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7151 if (op1 != 0x0f && op1 != 0x0d) {
7152 rn = (insn >> 16) & 0xf;
7153 tmp = load_reg(s, rn);
7154 } else {
7155 TCGV_UNUSED(tmp);
7157 rd = (insn >> 12) & 0xf;
7158 switch(op1) {
7159 case 0x00:
7160 tcg_gen_and_i32(tmp, tmp, tmp2);
7161 if (logic_cc) {
7162 gen_logic_CC(tmp);
7164 store_reg_bx(env, s, rd, tmp);
7165 break;
7166 case 0x01:
7167 tcg_gen_xor_i32(tmp, tmp, tmp2);
7168 if (logic_cc) {
7169 gen_logic_CC(tmp);
7171 store_reg_bx(env, s, rd, tmp);
7172 break;
7173 case 0x02:
7174 if (set_cc && rd == 15) {
7175 /* SUBS r15, ... is used for exception return. */
7176 if (IS_USER(s)) {
7177 goto illegal_op;
7179 gen_helper_sub_cc(tmp, tmp, tmp2);
7180 gen_exception_return(s, tmp);
7181 } else {
7182 if (set_cc) {
7183 gen_helper_sub_cc(tmp, tmp, tmp2);
7184 } else {
7185 tcg_gen_sub_i32(tmp, tmp, tmp2);
7187 store_reg_bx(env, s, rd, tmp);
7189 break;
7190 case 0x03:
7191 if (set_cc) {
7192 gen_helper_sub_cc(tmp, tmp2, tmp);
7193 } else {
7194 tcg_gen_sub_i32(tmp, tmp2, tmp);
7196 store_reg_bx(env, s, rd, tmp);
7197 break;
7198 case 0x04:
7199 if (set_cc) {
7200 gen_helper_add_cc(tmp, tmp, tmp2);
7201 } else {
7202 tcg_gen_add_i32(tmp, tmp, tmp2);
7204 store_reg_bx(env, s, rd, tmp);
7205 break;
7206 case 0x05:
7207 if (set_cc) {
7208 gen_helper_adc_cc(tmp, tmp, tmp2);
7209 } else {
7210 gen_add_carry(tmp, tmp, tmp2);
7212 store_reg_bx(env, s, rd, tmp);
7213 break;
7214 case 0x06:
7215 if (set_cc) {
7216 gen_helper_sbc_cc(tmp, tmp, tmp2);
7217 } else {
7218 gen_sub_carry(tmp, tmp, tmp2);
7220 store_reg_bx(env, s, rd, tmp);
7221 break;
7222 case 0x07:
7223 if (set_cc) {
7224 gen_helper_sbc_cc(tmp, tmp2, tmp);
7225 } else {
7226 gen_sub_carry(tmp, tmp2, tmp);
7228 store_reg_bx(env, s, rd, tmp);
7229 break;
7230 case 0x08:
7231 if (set_cc) {
7232 tcg_gen_and_i32(tmp, tmp, tmp2);
7233 gen_logic_CC(tmp);
7235 tcg_temp_free_i32(tmp);
7236 break;
7237 case 0x09:
7238 if (set_cc) {
7239 tcg_gen_xor_i32(tmp, tmp, tmp2);
7240 gen_logic_CC(tmp);
7242 tcg_temp_free_i32(tmp);
7243 break;
7244 case 0x0a:
7245 if (set_cc) {
7246 gen_helper_sub_cc(tmp, tmp, tmp2);
7248 tcg_temp_free_i32(tmp);
7249 break;
7250 case 0x0b:
7251 if (set_cc) {
7252 gen_helper_add_cc(tmp, tmp, tmp2);
7254 tcg_temp_free_i32(tmp);
7255 break;
7256 case 0x0c:
7257 tcg_gen_or_i32(tmp, tmp, tmp2);
7258 if (logic_cc) {
7259 gen_logic_CC(tmp);
7261 store_reg_bx(env, s, rd, tmp);
7262 break;
7263 case 0x0d:
7264 if (logic_cc && rd == 15) {
7265 /* MOVS r15, ... is used for exception return. */
7266 if (IS_USER(s)) {
7267 goto illegal_op;
7269 gen_exception_return(s, tmp2);
7270 } else {
7271 if (logic_cc) {
7272 gen_logic_CC(tmp2);
7274 store_reg_bx(env, s, rd, tmp2);
7276 break;
7277 case 0x0e:
7278 tcg_gen_andc_i32(tmp, tmp, tmp2);
7279 if (logic_cc) {
7280 gen_logic_CC(tmp);
7282 store_reg_bx(env, s, rd, tmp);
7283 break;
7284 default:
7285 case 0x0f:
7286 tcg_gen_not_i32(tmp2, tmp2);
7287 if (logic_cc) {
7288 gen_logic_CC(tmp2);
7290 store_reg_bx(env, s, rd, tmp2);
7291 break;
7293 if (op1 != 0x0f && op1 != 0x0d) {
7294 tcg_temp_free_i32(tmp2);
7296 } else {
7297 /* other instructions */
7298 op1 = (insn >> 24) & 0xf;
7299 switch(op1) {
7300 case 0x0:
7301 case 0x1:
7302 /* multiplies, extra load/stores */
7303 sh = (insn >> 5) & 3;
7304 if (sh == 0) {
7305 if (op1 == 0x0) {
7306 rd = (insn >> 16) & 0xf;
7307 rn = (insn >> 12) & 0xf;
7308 rs = (insn >> 8) & 0xf;
7309 rm = (insn) & 0xf;
7310 op1 = (insn >> 20) & 0xf;
7311 switch (op1) {
7312 case 0: case 1: case 2: case 3: case 6:
7313 /* 32 bit mul */
7314 tmp = load_reg(s, rs);
7315 tmp2 = load_reg(s, rm);
7316 tcg_gen_mul_i32(tmp, tmp, tmp2);
7317 tcg_temp_free_i32(tmp2);
7318 if (insn & (1 << 22)) {
7319 /* Subtract (mls) */
7320 ARCH(6T2);
7321 tmp2 = load_reg(s, rn);
7322 tcg_gen_sub_i32(tmp, tmp2, tmp);
7323 tcg_temp_free_i32(tmp2);
7324 } else if (insn & (1 << 21)) {
7325 /* Add */
7326 tmp2 = load_reg(s, rn);
7327 tcg_gen_add_i32(tmp, tmp, tmp2);
7328 tcg_temp_free_i32(tmp2);
7330 if (insn & (1 << 20))
7331 gen_logic_CC(tmp);
7332 store_reg(s, rd, tmp);
7333 break;
7334 case 4:
7335 /* 64 bit mul double accumulate (UMAAL) */
7336 ARCH(6);
7337 tmp = load_reg(s, rs);
7338 tmp2 = load_reg(s, rm);
7339 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7340 gen_addq_lo(s, tmp64, rn);
7341 gen_addq_lo(s, tmp64, rd);
7342 gen_storeq_reg(s, rn, rd, tmp64);
7343 tcg_temp_free_i64(tmp64);
7344 break;
7345 case 8: case 9: case 10: case 11:
7346 case 12: case 13: case 14: case 15:
7347 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7348 tmp = load_reg(s, rs);
7349 tmp2 = load_reg(s, rm);
7350 if (insn & (1 << 22)) {
7351 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7352 } else {
7353 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7355 if (insn & (1 << 21)) { /* mult accumulate */
7356 gen_addq(s, tmp64, rn, rd);
7358 if (insn & (1 << 20)) {
7359 gen_logicq_cc(tmp64);
7361 gen_storeq_reg(s, rn, rd, tmp64);
7362 tcg_temp_free_i64(tmp64);
7363 break;
7364 default:
7365 goto illegal_op;
7367 } else {
7368 rn = (insn >> 16) & 0xf;
7369 rd = (insn >> 12) & 0xf;
7370 if (insn & (1 << 23)) {
7371 /* load/store exclusive */
7372 op1 = (insn >> 21) & 0x3;
7373 if (op1)
7374 ARCH(6K);
7375 else
7376 ARCH(6);
7377 addr = tcg_temp_local_new_i32();
7378 load_reg_var(s, addr, rn);
7379 if (insn & (1 << 20)) {
7380 switch (op1) {
7381 case 0: /* ldrex */
7382 gen_load_exclusive(s, rd, 15, addr, 2);
7383 break;
7384 case 1: /* ldrexd */
7385 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7386 break;
7387 case 2: /* ldrexb */
7388 gen_load_exclusive(s, rd, 15, addr, 0);
7389 break;
7390 case 3: /* ldrexh */
7391 gen_load_exclusive(s, rd, 15, addr, 1);
7392 break;
7393 default:
7394 abort();
7396 } else {
7397 rm = insn & 0xf;
7398 switch (op1) {
7399 case 0: /* strex */
7400 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7401 break;
7402 case 1: /* strexd */
7403 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7404 break;
7405 case 2: /* strexb */
7406 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7407 break;
7408 case 3: /* strexh */
7409 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7410 break;
7411 default:
7412 abort();
7415 tcg_temp_free(addr);
7416 } else {
7417 /* SWP instruction */
7418 rm = (insn) & 0xf;
7420 /* ??? This is not really atomic. However we know
7421 we never have multiple CPUs running in parallel,
7422 so it is good enough. */
7423 addr = load_reg(s, rn);
7424 tmp = load_reg(s, rm);
7425 if (insn & (1 << 22)) {
7426 tmp2 = gen_ld8u(addr, IS_USER(s));
7427 gen_st8(tmp, addr, IS_USER(s));
7428 } else {
7429 tmp2 = gen_ld32(addr, IS_USER(s));
7430 gen_st32(tmp, addr, IS_USER(s));
7432 tcg_temp_free_i32(addr);
7433 store_reg(s, rd, tmp2);
7436 } else {
7437 int address_offset;
7438 int load;
7439 /* Misc load/store */
7440 rn = (insn >> 16) & 0xf;
7441 rd = (insn >> 12) & 0xf;
7442 addr = load_reg(s, rn);
7443 if (insn & (1 << 24))
7444 gen_add_datah_offset(s, insn, 0, addr);
7445 address_offset = 0;
7446 if (insn & (1 << 20)) {
7447 /* load */
7448 switch(sh) {
7449 case 1:
7450 tmp = gen_ld16u(addr, IS_USER(s));
7451 break;
7452 case 2:
7453 tmp = gen_ld8s(addr, IS_USER(s));
7454 break;
7455 default:
7456 case 3:
7457 tmp = gen_ld16s(addr, IS_USER(s));
7458 break;
7460 load = 1;
7461 } else if (sh & 2) {
7462 ARCH(5TE);
7463 /* doubleword */
7464 if (sh & 1) {
7465 /* store */
7466 tmp = load_reg(s, rd);
7467 gen_st32(tmp, addr, IS_USER(s));
7468 tcg_gen_addi_i32(addr, addr, 4);
7469 tmp = load_reg(s, rd + 1);
7470 gen_st32(tmp, addr, IS_USER(s));
7471 load = 0;
7472 } else {
7473 /* load */
7474 tmp = gen_ld32(addr, IS_USER(s));
7475 store_reg(s, rd, tmp);
7476 tcg_gen_addi_i32(addr, addr, 4);
7477 tmp = gen_ld32(addr, IS_USER(s));
7478 rd++;
7479 load = 1;
7481 address_offset = -4;
7482 } else {
7483 /* store */
7484 tmp = load_reg(s, rd);
7485 gen_st16(tmp, addr, IS_USER(s));
7486 load = 0;
7488 /* Perform base writeback before the loaded value to
7489 ensure correct behavior with overlapping index registers.
7490 ldrd with base writeback is is undefined if the
7491 destination and index registers overlap. */
7492 if (!(insn & (1 << 24))) {
7493 gen_add_datah_offset(s, insn, address_offset, addr);
7494 store_reg(s, rn, addr);
7495 } else if (insn & (1 << 21)) {
7496 if (address_offset)
7497 tcg_gen_addi_i32(addr, addr, address_offset);
7498 store_reg(s, rn, addr);
7499 } else {
7500 tcg_temp_free_i32(addr);
7502 if (load) {
7503 /* Complete the load. */
7504 store_reg(s, rd, tmp);
7507 break;
7508 case 0x4:
7509 case 0x5:
7510 goto do_ldst;
7511 case 0x6:
7512 case 0x7:
7513 if (insn & (1 << 4)) {
7514 ARCH(6);
7515 /* Armv6 Media instructions. */
7516 rm = insn & 0xf;
7517 rn = (insn >> 16) & 0xf;
7518 rd = (insn >> 12) & 0xf;
7519 rs = (insn >> 8) & 0xf;
7520 switch ((insn >> 23) & 3) {
7521 case 0: /* Parallel add/subtract. */
7522 op1 = (insn >> 20) & 7;
7523 tmp = load_reg(s, rn);
7524 tmp2 = load_reg(s, rm);
7525 sh = (insn >> 5) & 7;
7526 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7527 goto illegal_op;
7528 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7529 tcg_temp_free_i32(tmp2);
7530 store_reg(s, rd, tmp);
7531 break;
7532 case 1:
7533 if ((insn & 0x00700020) == 0) {
7534 /* Halfword pack. */
7535 tmp = load_reg(s, rn);
7536 tmp2 = load_reg(s, rm);
7537 shift = (insn >> 7) & 0x1f;
7538 if (insn & (1 << 6)) {
7539 /* pkhtb */
7540 if (shift == 0)
7541 shift = 31;
7542 tcg_gen_sari_i32(tmp2, tmp2, shift);
7543 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7544 tcg_gen_ext16u_i32(tmp2, tmp2);
7545 } else {
7546 /* pkhbt */
7547 if (shift)
7548 tcg_gen_shli_i32(tmp2, tmp2, shift);
7549 tcg_gen_ext16u_i32(tmp, tmp);
7550 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7552 tcg_gen_or_i32(tmp, tmp, tmp2);
7553 tcg_temp_free_i32(tmp2);
7554 store_reg(s, rd, tmp);
7555 } else if ((insn & 0x00200020) == 0x00200000) {
7556 /* [us]sat */
7557 tmp = load_reg(s, rm);
7558 shift = (insn >> 7) & 0x1f;
7559 if (insn & (1 << 6)) {
7560 if (shift == 0)
7561 shift = 31;
7562 tcg_gen_sari_i32(tmp, tmp, shift);
7563 } else {
7564 tcg_gen_shli_i32(tmp, tmp, shift);
7566 sh = (insn >> 16) & 0x1f;
7567 tmp2 = tcg_const_i32(sh);
7568 if (insn & (1 << 22))
7569 gen_helper_usat(tmp, tmp, tmp2);
7570 else
7571 gen_helper_ssat(tmp, tmp, tmp2);
7572 tcg_temp_free_i32(tmp2);
7573 store_reg(s, rd, tmp);
7574 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7575 /* [us]sat16 */
7576 tmp = load_reg(s, rm);
7577 sh = (insn >> 16) & 0x1f;
7578 tmp2 = tcg_const_i32(sh);
7579 if (insn & (1 << 22))
7580 gen_helper_usat16(tmp, tmp, tmp2);
7581 else
7582 gen_helper_ssat16(tmp, tmp, tmp2);
7583 tcg_temp_free_i32(tmp2);
7584 store_reg(s, rd, tmp);
7585 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7586 /* Select bytes. */
7587 tmp = load_reg(s, rn);
7588 tmp2 = load_reg(s, rm);
7589 tmp3 = tcg_temp_new_i32();
7590 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7591 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7592 tcg_temp_free_i32(tmp3);
7593 tcg_temp_free_i32(tmp2);
7594 store_reg(s, rd, tmp);
7595 } else if ((insn & 0x000003e0) == 0x00000060) {
7596 tmp = load_reg(s, rm);
7597 shift = (insn >> 10) & 3;
7598 /* ??? In many cases it's not necessary to do a
7599 rotate, a shift is sufficient. */
7600 if (shift != 0)
7601 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7602 op1 = (insn >> 20) & 7;
7603 switch (op1) {
7604 case 0: gen_sxtb16(tmp); break;
7605 case 2: gen_sxtb(tmp); break;
7606 case 3: gen_sxth(tmp); break;
7607 case 4: gen_uxtb16(tmp); break;
7608 case 6: gen_uxtb(tmp); break;
7609 case 7: gen_uxth(tmp); break;
7610 default: goto illegal_op;
7612 if (rn != 15) {
7613 tmp2 = load_reg(s, rn);
7614 if ((op1 & 3) == 0) {
7615 gen_add16(tmp, tmp2);
7616 } else {
7617 tcg_gen_add_i32(tmp, tmp, tmp2);
7618 tcg_temp_free_i32(tmp2);
7621 store_reg(s, rd, tmp);
7622 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7623 /* rev */
7624 tmp = load_reg(s, rm);
7625 if (insn & (1 << 22)) {
7626 if (insn & (1 << 7)) {
7627 gen_revsh(tmp);
7628 } else {
7629 ARCH(6T2);
7630 gen_helper_rbit(tmp, tmp);
7632 } else {
7633 if (insn & (1 << 7))
7634 gen_rev16(tmp);
7635 else
7636 tcg_gen_bswap32_i32(tmp, tmp);
7638 store_reg(s, rd, tmp);
7639 } else {
7640 goto illegal_op;
7642 break;
7643 case 2: /* Multiplies (Type 3). */
7644 switch ((insn >> 20) & 0x7) {
7645 case 5:
7646 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7647 /* op2 not 00x or 11x : UNDEF */
7648 goto illegal_op;
7650 /* Signed multiply most significant [accumulate].
7651 (SMMUL, SMMLA, SMMLS) */
7652 tmp = load_reg(s, rm);
7653 tmp2 = load_reg(s, rs);
7654 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7656 if (rd != 15) {
7657 tmp = load_reg(s, rd);
7658 if (insn & (1 << 6)) {
7659 tmp64 = gen_subq_msw(tmp64, tmp);
7660 } else {
7661 tmp64 = gen_addq_msw(tmp64, tmp);
7664 if (insn & (1 << 5)) {
7665 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7667 tcg_gen_shri_i64(tmp64, tmp64, 32);
7668 tmp = tcg_temp_new_i32();
7669 tcg_gen_trunc_i64_i32(tmp, tmp64);
7670 tcg_temp_free_i64(tmp64);
7671 store_reg(s, rn, tmp);
7672 break;
7673 case 0:
7674 case 4:
7675 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7676 if (insn & (1 << 7)) {
7677 goto illegal_op;
7679 tmp = load_reg(s, rm);
7680 tmp2 = load_reg(s, rs);
7681 if (insn & (1 << 5))
7682 gen_swap_half(tmp2);
7683 gen_smul_dual(tmp, tmp2);
7684 if (insn & (1 << 6)) {
7685 /* This subtraction cannot overflow. */
7686 tcg_gen_sub_i32(tmp, tmp, tmp2);
7687 } else {
7688 /* This addition cannot overflow 32 bits;
7689 * however it may overflow considered as a signed
7690 * operation, in which case we must set the Q flag.
7692 gen_helper_add_setq(tmp, tmp, tmp2);
7694 tcg_temp_free_i32(tmp2);
7695 if (insn & (1 << 22)) {
7696 /* smlald, smlsld */
7697 tmp64 = tcg_temp_new_i64();
7698 tcg_gen_ext_i32_i64(tmp64, tmp);
7699 tcg_temp_free_i32(tmp);
7700 gen_addq(s, tmp64, rd, rn);
7701 gen_storeq_reg(s, rd, rn, tmp64);
7702 tcg_temp_free_i64(tmp64);
7703 } else {
7704 /* smuad, smusd, smlad, smlsd */
7705 if (rd != 15)
7707 tmp2 = load_reg(s, rd);
7708 gen_helper_add_setq(tmp, tmp, tmp2);
7709 tcg_temp_free_i32(tmp2);
7711 store_reg(s, rn, tmp);
7713 break;
7714 case 1:
7715 case 3:
7716 /* SDIV, UDIV */
7717 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7718 goto illegal_op;
7720 if (((insn >> 5) & 7) || (rd != 15)) {
7721 goto illegal_op;
7723 tmp = load_reg(s, rm);
7724 tmp2 = load_reg(s, rs);
7725 if (insn & (1 << 21)) {
7726 gen_helper_udiv(tmp, tmp, tmp2);
7727 } else {
7728 gen_helper_sdiv(tmp, tmp, tmp2);
7730 tcg_temp_free_i32(tmp2);
7731 store_reg(s, rn, tmp);
7732 break;
7733 default:
7734 goto illegal_op;
7736 break;
7737 case 3:
7738 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7739 switch (op1) {
7740 case 0: /* Unsigned sum of absolute differences. */
7741 ARCH(6);
7742 tmp = load_reg(s, rm);
7743 tmp2 = load_reg(s, rs);
7744 gen_helper_usad8(tmp, tmp, tmp2);
7745 tcg_temp_free_i32(tmp2);
7746 if (rd != 15) {
7747 tmp2 = load_reg(s, rd);
7748 tcg_gen_add_i32(tmp, tmp, tmp2);
7749 tcg_temp_free_i32(tmp2);
7751 store_reg(s, rn, tmp);
7752 break;
7753 case 0x20: case 0x24: case 0x28: case 0x2c:
7754 /* Bitfield insert/clear. */
7755 ARCH(6T2);
7756 shift = (insn >> 7) & 0x1f;
7757 i = (insn >> 16) & 0x1f;
7758 i = i + 1 - shift;
7759 if (rm == 15) {
7760 tmp = tcg_temp_new_i32();
7761 tcg_gen_movi_i32(tmp, 0);
7762 } else {
7763 tmp = load_reg(s, rm);
7765 if (i != 32) {
7766 tmp2 = load_reg(s, rd);
7767 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7768 tcg_temp_free_i32(tmp2);
7770 store_reg(s, rd, tmp);
7771 break;
7772 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7773 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7774 ARCH(6T2);
7775 tmp = load_reg(s, rm);
7776 shift = (insn >> 7) & 0x1f;
7777 i = ((insn >> 16) & 0x1f) + 1;
7778 if (shift + i > 32)
7779 goto illegal_op;
7780 if (i < 32) {
7781 if (op1 & 0x20) {
7782 gen_ubfx(tmp, shift, (1u << i) - 1);
7783 } else {
7784 gen_sbfx(tmp, shift, i);
7787 store_reg(s, rd, tmp);
7788 break;
7789 default:
7790 goto illegal_op;
7792 break;
7794 break;
7796 do_ldst:
7797 /* Check for undefined extension instructions
7798 * per the ARM Bible IE:
7799 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7801 sh = (0xf << 20) | (0xf << 4);
7802 if (op1 == 0x7 && ((insn & sh) == sh))
7804 goto illegal_op;
7806 /* load/store byte/word */
7807 rn = (insn >> 16) & 0xf;
7808 rd = (insn >> 12) & 0xf;
7809 tmp2 = load_reg(s, rn);
7810 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7811 if (insn & (1 << 24))
7812 gen_add_data_offset(s, insn, tmp2);
7813 if (insn & (1 << 20)) {
7814 /* load */
7815 if (insn & (1 << 22)) {
7816 tmp = gen_ld8u(tmp2, i);
7817 } else {
7818 tmp = gen_ld32(tmp2, i);
7820 } else {
7821 /* store */
7822 tmp = load_reg(s, rd);
7823 if (insn & (1 << 22))
7824 gen_st8(tmp, tmp2, i);
7825 else
7826 gen_st32(tmp, tmp2, i);
7828 if (!(insn & (1 << 24))) {
7829 gen_add_data_offset(s, insn, tmp2);
7830 store_reg(s, rn, tmp2);
7831 } else if (insn & (1 << 21)) {
7832 store_reg(s, rn, tmp2);
7833 } else {
7834 tcg_temp_free_i32(tmp2);
7836 if (insn & (1 << 20)) {
7837 /* Complete the load. */
7838 store_reg_from_load(env, s, rd, tmp);
7840 break;
7841 case 0x08:
7842 case 0x09:
7844 int j, n, user, loaded_base;
7845 TCGv loaded_var;
7846 /* load/store multiple words */
7847 /* XXX: store correct base if write back */
7848 user = 0;
7849 if (insn & (1 << 22)) {
7850 if (IS_USER(s))
7851 goto illegal_op; /* only usable in supervisor mode */
7853 if ((insn & (1 << 15)) == 0)
7854 user = 1;
7856 rn = (insn >> 16) & 0xf;
7857 addr = load_reg(s, rn);
7859 /* compute total size */
7860 loaded_base = 0;
7861 TCGV_UNUSED(loaded_var);
7862 n = 0;
7863 for(i=0;i<16;i++) {
7864 if (insn & (1 << i))
7865 n++;
7867 /* XXX: test invalid n == 0 case ? */
7868 if (insn & (1 << 23)) {
7869 if (insn & (1 << 24)) {
7870 /* pre increment */
7871 tcg_gen_addi_i32(addr, addr, 4);
7872 } else {
7873 /* post increment */
7875 } else {
7876 if (insn & (1 << 24)) {
7877 /* pre decrement */
7878 tcg_gen_addi_i32(addr, addr, -(n * 4));
7879 } else {
7880 /* post decrement */
7881 if (n != 1)
7882 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7885 j = 0;
7886 for(i=0;i<16;i++) {
7887 if (insn & (1 << i)) {
7888 if (insn & (1 << 20)) {
7889 /* load */
7890 tmp = gen_ld32(addr, IS_USER(s));
7891 if (user) {
7892 tmp2 = tcg_const_i32(i);
7893 gen_helper_set_user_reg(tmp2, tmp);
7894 tcg_temp_free_i32(tmp2);
7895 tcg_temp_free_i32(tmp);
7896 } else if (i == rn) {
7897 loaded_var = tmp;
7898 loaded_base = 1;
7899 } else {
7900 store_reg_from_load(env, s, i, tmp);
7902 } else {
7903 /* store */
7904 if (i == 15) {
7905 /* special case: r15 = PC + 8 */
7906 val = (long)s->pc + 4;
7907 tmp = tcg_temp_new_i32();
7908 tcg_gen_movi_i32(tmp, val);
7909 } else if (user) {
7910 tmp = tcg_temp_new_i32();
7911 tmp2 = tcg_const_i32(i);
7912 gen_helper_get_user_reg(tmp, tmp2);
7913 tcg_temp_free_i32(tmp2);
7914 } else {
7915 tmp = load_reg(s, i);
7917 gen_st32(tmp, addr, IS_USER(s));
7919 j++;
7920 /* no need to add after the last transfer */
7921 if (j != n)
7922 tcg_gen_addi_i32(addr, addr, 4);
7925 if (insn & (1 << 21)) {
7926 /* write back */
7927 if (insn & (1 << 23)) {
7928 if (insn & (1 << 24)) {
7929 /* pre increment */
7930 } else {
7931 /* post increment */
7932 tcg_gen_addi_i32(addr, addr, 4);
7934 } else {
7935 if (insn & (1 << 24)) {
7936 /* pre decrement */
7937 if (n != 1)
7938 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7939 } else {
7940 /* post decrement */
7941 tcg_gen_addi_i32(addr, addr, -(n * 4));
7944 store_reg(s, rn, addr);
7945 } else {
7946 tcg_temp_free_i32(addr);
7948 if (loaded_base) {
7949 store_reg(s, rn, loaded_var);
7951 if ((insn & (1 << 22)) && !user) {
7952 /* Restore CPSR from SPSR. */
7953 tmp = load_cpu_field(spsr);
7954 gen_set_cpsr(tmp, 0xffffffff);
7955 tcg_temp_free_i32(tmp);
7956 s->is_jmp = DISAS_UPDATE;
7959 break;
7960 case 0xa:
7961 case 0xb:
7963 int32_t offset;
7965 /* branch (and link) */
7966 val = (int32_t)s->pc;
7967 if (insn & (1 << 24)) {
7968 tmp = tcg_temp_new_i32();
7969 tcg_gen_movi_i32(tmp, val);
7970 store_reg(s, 14, tmp);
7972 offset = (((int32_t)insn << 8) >> 8);
7973 val += (offset << 2) + 4;
7974 gen_jmp(s, val);
7976 break;
7977 case 0xc:
7978 case 0xd:
7979 case 0xe:
7980 /* Coprocessor. */
7981 if (disas_coproc_insn(env, s, insn))
7982 goto illegal_op;
7983 break;
7984 case 0xf:
7985 /* swi */
7986 gen_set_pc_im(s->pc);
7987 s->is_jmp = DISAS_SWI;
7988 break;
7989 default:
7990 illegal_op:
7991 gen_exception_insn(s, 4, EXCP_UDEF);
7992 break;
/* Predicate: does the Thumb-2 data-processing opcode OP belong to the
   logical group (and/bic/orr/orn/eor/...)?  Opcodes 0..7 are logical;
   8 and above are arithmetic.  */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
8004 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8005 then set condition code flags based on the result of the operation.
8006 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8007 to the high bit of T1.
8008 Returns zero if the opcode is valid. */
8010 static int
8011 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
8013 int logic_cc;
8015 logic_cc = 0;
8016 switch (op) {
8017 case 0: /* and */
8018 tcg_gen_and_i32(t0, t0, t1);
8019 logic_cc = conds;
8020 break;
8021 case 1: /* bic */
8022 tcg_gen_andc_i32(t0, t0, t1);
8023 logic_cc = conds;
8024 break;
8025 case 2: /* orr */
8026 tcg_gen_or_i32(t0, t0, t1);
8027 logic_cc = conds;
8028 break;
8029 case 3: /* orn */
8030 tcg_gen_orc_i32(t0, t0, t1);
8031 logic_cc = conds;
8032 break;
8033 case 4: /* eor */
8034 tcg_gen_xor_i32(t0, t0, t1);
8035 logic_cc = conds;
8036 break;
8037 case 8: /* add */
8038 if (conds)
8039 gen_helper_add_cc(t0, t0, t1);
8040 else
8041 tcg_gen_add_i32(t0, t0, t1);
8042 break;
8043 case 10: /* adc */
8044 if (conds)
8045 gen_helper_adc_cc(t0, t0, t1);
8046 else
8047 gen_adc(t0, t1);
8048 break;
8049 case 11: /* sbc */
8050 if (conds)
8051 gen_helper_sbc_cc(t0, t0, t1);
8052 else
8053 gen_sub_carry(t0, t0, t1);
8054 break;
8055 case 13: /* sub */
8056 if (conds)
8057 gen_helper_sub_cc(t0, t0, t1);
8058 else
8059 tcg_gen_sub_i32(t0, t0, t1);
8060 break;
8061 case 14: /* rsb */
8062 if (conds)
8063 gen_helper_sub_cc(t0, t1, t0);
8064 else
8065 tcg_gen_sub_i32(t0, t1, t0);
8066 break;
8067 default: /* 5, 6, 7, 9, 12, 15. */
8068 return 1;
8070 if (logic_cc) {
8071 gen_logic_CC(t0);
8072 if (shifter_out)
8073 gen_set_CF_bit31(t1);
8075 return 0;
8078 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8079 is not legal. */
8080 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
8082 uint32_t insn, imm, shift, offset;
8083 uint32_t rd, rn, rm, rs;
8084 TCGv tmp;
8085 TCGv tmp2;
8086 TCGv tmp3;
8087 TCGv addr;
8088 TCGv_i64 tmp64;
8089 int op;
8090 int shiftop;
8091 int conds;
8092 int logic_cc;
8094 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8095 || arm_feature (env, ARM_FEATURE_M))) {
8096 /* Thumb-1 cores may need to treat bl and blx as a pair of
8097 16-bit instructions to get correct prefetch abort behavior. */
8098 insn = insn_hw1;
8099 if ((insn & (1 << 12)) == 0) {
8100 ARCH(5);
8101 /* Second half of blx. */
8102 offset = ((insn & 0x7ff) << 1);
8103 tmp = load_reg(s, 14);
8104 tcg_gen_addi_i32(tmp, tmp, offset);
8105 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8107 tmp2 = tcg_temp_new_i32();
8108 tcg_gen_movi_i32(tmp2, s->pc | 1);
8109 store_reg(s, 14, tmp2);
8110 gen_bx(s, tmp);
8111 return 0;
8113 if (insn & (1 << 11)) {
8114 /* Second half of bl. */
8115 offset = ((insn & 0x7ff) << 1) | 1;
8116 tmp = load_reg(s, 14);
8117 tcg_gen_addi_i32(tmp, tmp, offset);
8119 tmp2 = tcg_temp_new_i32();
8120 tcg_gen_movi_i32(tmp2, s->pc | 1);
8121 store_reg(s, 14, tmp2);
8122 gen_bx(s, tmp);
8123 return 0;
8125 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8126 /* Instruction spans a page boundary. Implement it as two
8127 16-bit instructions in case the second half causes an
8128 prefetch abort. */
8129 offset = ((int32_t)insn << 21) >> 9;
8130 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8131 return 0;
8133 /* Fall through to 32-bit decode. */
8136 insn = lduw_code(s->pc);
8137 s->pc += 2;
8138 insn |= (uint32_t)insn_hw1 << 16;
8140 if ((insn & 0xf800e800) != 0xf000e800) {
8141 ARCH(6T2);
8144 rn = (insn >> 16) & 0xf;
8145 rs = (insn >> 12) & 0xf;
8146 rd = (insn >> 8) & 0xf;
8147 rm = insn & 0xf;
8148 switch ((insn >> 25) & 0xf) {
8149 case 0: case 1: case 2: case 3:
8150 /* 16-bit instructions. Should never happen. */
8151 abort();
8152 case 4:
8153 if (insn & (1 << 22)) {
8154 /* Other load/store, table branch. */
8155 if (insn & 0x01200000) {
8156 /* Load/store doubleword. */
8157 if (rn == 15) {
8158 addr = tcg_temp_new_i32();
8159 tcg_gen_movi_i32(addr, s->pc & ~3);
8160 } else {
8161 addr = load_reg(s, rn);
8163 offset = (insn & 0xff) * 4;
8164 if ((insn & (1 << 23)) == 0)
8165 offset = -offset;
8166 if (insn & (1 << 24)) {
8167 tcg_gen_addi_i32(addr, addr, offset);
8168 offset = 0;
8170 if (insn & (1 << 20)) {
8171 /* ldrd */
8172 tmp = gen_ld32(addr, IS_USER(s));
8173 store_reg(s, rs, tmp);
8174 tcg_gen_addi_i32(addr, addr, 4);
8175 tmp = gen_ld32(addr, IS_USER(s));
8176 store_reg(s, rd, tmp);
8177 } else {
8178 /* strd */
8179 tmp = load_reg(s, rs);
8180 gen_st32(tmp, addr, IS_USER(s));
8181 tcg_gen_addi_i32(addr, addr, 4);
8182 tmp = load_reg(s, rd);
8183 gen_st32(tmp, addr, IS_USER(s));
8185 if (insn & (1 << 21)) {
8186 /* Base writeback. */
8187 if (rn == 15)
8188 goto illegal_op;
8189 tcg_gen_addi_i32(addr, addr, offset - 4);
8190 store_reg(s, rn, addr);
8191 } else {
8192 tcg_temp_free_i32(addr);
8194 } else if ((insn & (1 << 23)) == 0) {
8195 /* Load/store exclusive word. */
8196 addr = tcg_temp_local_new();
8197 load_reg_var(s, addr, rn);
8198 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8199 if (insn & (1 << 20)) {
8200 gen_load_exclusive(s, rs, 15, addr, 2);
8201 } else {
8202 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8204 tcg_temp_free(addr);
8205 } else if ((insn & (1 << 6)) == 0) {
8206 /* Table Branch. */
8207 if (rn == 15) {
8208 addr = tcg_temp_new_i32();
8209 tcg_gen_movi_i32(addr, s->pc);
8210 } else {
8211 addr = load_reg(s, rn);
8213 tmp = load_reg(s, rm);
8214 tcg_gen_add_i32(addr, addr, tmp);
8215 if (insn & (1 << 4)) {
8216 /* tbh */
8217 tcg_gen_add_i32(addr, addr, tmp);
8218 tcg_temp_free_i32(tmp);
8219 tmp = gen_ld16u(addr, IS_USER(s));
8220 } else { /* tbb */
8221 tcg_temp_free_i32(tmp);
8222 tmp = gen_ld8u(addr, IS_USER(s));
8224 tcg_temp_free_i32(addr);
8225 tcg_gen_shli_i32(tmp, tmp, 1);
8226 tcg_gen_addi_i32(tmp, tmp, s->pc);
8227 store_reg(s, 15, tmp);
8228 } else {
8229 /* Load/store exclusive byte/halfword/doubleword. */
8230 ARCH(7);
8231 op = (insn >> 4) & 0x3;
8232 if (op == 2) {
8233 goto illegal_op;
8235 addr = tcg_temp_local_new();
8236 load_reg_var(s, addr, rn);
8237 if (insn & (1 << 20)) {
8238 gen_load_exclusive(s, rs, rd, addr, op);
8239 } else {
8240 gen_store_exclusive(s, rm, rs, rd, addr, op);
8242 tcg_temp_free(addr);
8244 } else {
8245 /* Load/store multiple, RFE, SRS. */
8246 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8247 /* Not available in user mode. */
8248 if (IS_USER(s))
8249 goto illegal_op;
8250 if (insn & (1 << 20)) {
8251 /* rfe */
8252 addr = load_reg(s, rn);
8253 if ((insn & (1 << 24)) == 0)
8254 tcg_gen_addi_i32(addr, addr, -8);
8255 /* Load PC into tmp and CPSR into tmp2. */
8256 tmp = gen_ld32(addr, 0);
8257 tcg_gen_addi_i32(addr, addr, 4);
8258 tmp2 = gen_ld32(addr, 0);
8259 if (insn & (1 << 21)) {
8260 /* Base writeback. */
8261 if (insn & (1 << 24)) {
8262 tcg_gen_addi_i32(addr, addr, 4);
8263 } else {
8264 tcg_gen_addi_i32(addr, addr, -4);
8266 store_reg(s, rn, addr);
8267 } else {
8268 tcg_temp_free_i32(addr);
8270 gen_rfe(s, tmp, tmp2);
8271 } else {
8272 /* srs */
8273 op = (insn & 0x1f);
8274 addr = tcg_temp_new_i32();
8275 tmp = tcg_const_i32(op);
8276 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8277 tcg_temp_free_i32(tmp);
8278 if ((insn & (1 << 24)) == 0) {
8279 tcg_gen_addi_i32(addr, addr, -8);
8281 tmp = load_reg(s, 14);
8282 gen_st32(tmp, addr, 0);
8283 tcg_gen_addi_i32(addr, addr, 4);
8284 tmp = tcg_temp_new_i32();
8285 gen_helper_cpsr_read(tmp);
8286 gen_st32(tmp, addr, 0);
8287 if (insn & (1 << 21)) {
8288 if ((insn & (1 << 24)) == 0) {
8289 tcg_gen_addi_i32(addr, addr, -4);
8290 } else {
8291 tcg_gen_addi_i32(addr, addr, 4);
8293 tmp = tcg_const_i32(op);
8294 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8295 tcg_temp_free_i32(tmp);
8296 } else {
8297 tcg_temp_free_i32(addr);
8300 } else {
8301 int i, loaded_base = 0;
8302 TCGv loaded_var;
8303 /* Load/store multiple. */
8304 addr = load_reg(s, rn);
8305 offset = 0;
8306 for (i = 0; i < 16; i++) {
8307 if (insn & (1 << i))
8308 offset += 4;
8310 if (insn & (1 << 24)) {
8311 tcg_gen_addi_i32(addr, addr, -offset);
8314 TCGV_UNUSED(loaded_var);
8315 for (i = 0; i < 16; i++) {
8316 if ((insn & (1 << i)) == 0)
8317 continue;
8318 if (insn & (1 << 20)) {
8319 /* Load. */
8320 tmp = gen_ld32(addr, IS_USER(s));
8321 if (i == 15) {
8322 gen_bx(s, tmp);
8323 } else if (i == rn) {
8324 loaded_var = tmp;
8325 loaded_base = 1;
8326 } else {
8327 store_reg(s, i, tmp);
8329 } else {
8330 /* Store. */
8331 tmp = load_reg(s, i);
8332 gen_st32(tmp, addr, IS_USER(s));
8334 tcg_gen_addi_i32(addr, addr, 4);
8336 if (loaded_base) {
8337 store_reg(s, rn, loaded_var);
8339 if (insn & (1 << 21)) {
8340 /* Base register writeback. */
8341 if (insn & (1 << 24)) {
8342 tcg_gen_addi_i32(addr, addr, -offset);
8344 /* Fault if writeback register is in register list. */
8345 if (insn & (1 << rn))
8346 goto illegal_op;
8347 store_reg(s, rn, addr);
8348 } else {
8349 tcg_temp_free_i32(addr);
8353 break;
8354 case 5:
8356 op = (insn >> 21) & 0xf;
8357 if (op == 6) {
8358 /* Halfword pack. */
8359 tmp = load_reg(s, rn);
8360 tmp2 = load_reg(s, rm);
8361 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8362 if (insn & (1 << 5)) {
8363 /* pkhtb */
8364 if (shift == 0)
8365 shift = 31;
8366 tcg_gen_sari_i32(tmp2, tmp2, shift);
8367 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8368 tcg_gen_ext16u_i32(tmp2, tmp2);
8369 } else {
8370 /* pkhbt */
8371 if (shift)
8372 tcg_gen_shli_i32(tmp2, tmp2, shift);
8373 tcg_gen_ext16u_i32(tmp, tmp);
8374 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8376 tcg_gen_or_i32(tmp, tmp, tmp2);
8377 tcg_temp_free_i32(tmp2);
8378 store_reg(s, rd, tmp);
8379 } else {
8380 /* Data processing register constant shift. */
8381 if (rn == 15) {
8382 tmp = tcg_temp_new_i32();
8383 tcg_gen_movi_i32(tmp, 0);
8384 } else {
8385 tmp = load_reg(s, rn);
8387 tmp2 = load_reg(s, rm);
8389 shiftop = (insn >> 4) & 3;
8390 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8391 conds = (insn & (1 << 20)) != 0;
8392 logic_cc = (conds && thumb2_logic_op(op));
8393 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8394 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8395 goto illegal_op;
8396 tcg_temp_free_i32(tmp2);
8397 if (rd != 15) {
8398 store_reg(s, rd, tmp);
8399 } else {
8400 tcg_temp_free_i32(tmp);
8403 break;
8404 case 13: /* Misc data processing. */
8405 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8406 if (op < 4 && (insn & 0xf000) != 0xf000)
8407 goto illegal_op;
8408 switch (op) {
8409 case 0: /* Register controlled shift. */
8410 tmp = load_reg(s, rn);
8411 tmp2 = load_reg(s, rm);
8412 if ((insn & 0x70) != 0)
8413 goto illegal_op;
8414 op = (insn >> 21) & 3;
8415 logic_cc = (insn & (1 << 20)) != 0;
8416 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8417 if (logic_cc)
8418 gen_logic_CC(tmp);
8419 store_reg_bx(env, s, rd, tmp);
8420 break;
8421 case 1: /* Sign/zero extend. */
8422 tmp = load_reg(s, rm);
8423 shift = (insn >> 4) & 3;
8424 /* ??? In many cases it's not necessary to do a
8425 rotate, a shift is sufficient. */
8426 if (shift != 0)
8427 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8428 op = (insn >> 20) & 7;
8429 switch (op) {
8430 case 0: gen_sxth(tmp); break;
8431 case 1: gen_uxth(tmp); break;
8432 case 2: gen_sxtb16(tmp); break;
8433 case 3: gen_uxtb16(tmp); break;
8434 case 4: gen_sxtb(tmp); break;
8435 case 5: gen_uxtb(tmp); break;
8436 default: goto illegal_op;
8438 if (rn != 15) {
8439 tmp2 = load_reg(s, rn);
8440 if ((op >> 1) == 1) {
8441 gen_add16(tmp, tmp2);
8442 } else {
8443 tcg_gen_add_i32(tmp, tmp, tmp2);
8444 tcg_temp_free_i32(tmp2);
8447 store_reg(s, rd, tmp);
8448 break;
8449 case 2: /* SIMD add/subtract. */
8450 op = (insn >> 20) & 7;
8451 shift = (insn >> 4) & 7;
8452 if ((op & 3) == 3 || (shift & 3) == 3)
8453 goto illegal_op;
8454 tmp = load_reg(s, rn);
8455 tmp2 = load_reg(s, rm);
8456 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8457 tcg_temp_free_i32(tmp2);
8458 store_reg(s, rd, tmp);
8459 break;
8460 case 3: /* Other data processing. */
8461 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8462 if (op < 4) {
8463 /* Saturating add/subtract. */
8464 tmp = load_reg(s, rn);
8465 tmp2 = load_reg(s, rm);
8466 if (op & 1)
8467 gen_helper_double_saturate(tmp, tmp);
8468 if (op & 2)
8469 gen_helper_sub_saturate(tmp, tmp2, tmp);
8470 else
8471 gen_helper_add_saturate(tmp, tmp, tmp2);
8472 tcg_temp_free_i32(tmp2);
8473 } else {
8474 tmp = load_reg(s, rn);
8475 switch (op) {
8476 case 0x0a: /* rbit */
8477 gen_helper_rbit(tmp, tmp);
8478 break;
8479 case 0x08: /* rev */
8480 tcg_gen_bswap32_i32(tmp, tmp);
8481 break;
8482 case 0x09: /* rev16 */
8483 gen_rev16(tmp);
8484 break;
8485 case 0x0b: /* revsh */
8486 gen_revsh(tmp);
8487 break;
8488 case 0x10: /* sel */
8489 tmp2 = load_reg(s, rm);
8490 tmp3 = tcg_temp_new_i32();
8491 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
8492 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8493 tcg_temp_free_i32(tmp3);
8494 tcg_temp_free_i32(tmp2);
8495 break;
8496 case 0x18: /* clz */
8497 gen_helper_clz(tmp, tmp);
8498 break;
8499 default:
8500 goto illegal_op;
8503 store_reg(s, rd, tmp);
8504 break;
8505 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8506 op = (insn >> 4) & 0xf;
8507 tmp = load_reg(s, rn);
8508 tmp2 = load_reg(s, rm);
8509 switch ((insn >> 20) & 7) {
8510 case 0: /* 32 x 32 -> 32 */
8511 tcg_gen_mul_i32(tmp, tmp, tmp2);
8512 tcg_temp_free_i32(tmp2);
8513 if (rs != 15) {
8514 tmp2 = load_reg(s, rs);
8515 if (op)
8516 tcg_gen_sub_i32(tmp, tmp2, tmp);
8517 else
8518 tcg_gen_add_i32(tmp, tmp, tmp2);
8519 tcg_temp_free_i32(tmp2);
8521 break;
8522 case 1: /* 16 x 16 -> 32 */
8523 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8524 tcg_temp_free_i32(tmp2);
8525 if (rs != 15) {
8526 tmp2 = load_reg(s, rs);
8527 gen_helper_add_setq(tmp, tmp, tmp2);
8528 tcg_temp_free_i32(tmp2);
8530 break;
8531 case 2: /* Dual multiply add. */
8532 case 4: /* Dual multiply subtract. */
8533 if (op)
8534 gen_swap_half(tmp2);
8535 gen_smul_dual(tmp, tmp2);
8536 if (insn & (1 << 22)) {
8537 /* This subtraction cannot overflow. */
8538 tcg_gen_sub_i32(tmp, tmp, tmp2);
8539 } else {
8540 /* This addition cannot overflow 32 bits;
8541 * however it may overflow considered as a signed
8542 * operation, in which case we must set the Q flag.
8544 gen_helper_add_setq(tmp, tmp, tmp2);
8546 tcg_temp_free_i32(tmp2);
8547 if (rs != 15)
8549 tmp2 = load_reg(s, rs);
8550 gen_helper_add_setq(tmp, tmp, tmp2);
8551 tcg_temp_free_i32(tmp2);
8553 break;
8554 case 3: /* 32 * 16 -> 32msb */
8555 if (op)
8556 tcg_gen_sari_i32(tmp2, tmp2, 16);
8557 else
8558 gen_sxth(tmp2);
8559 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8560 tcg_gen_shri_i64(tmp64, tmp64, 16);
8561 tmp = tcg_temp_new_i32();
8562 tcg_gen_trunc_i64_i32(tmp, tmp64);
8563 tcg_temp_free_i64(tmp64);
8564 if (rs != 15)
8566 tmp2 = load_reg(s, rs);
8567 gen_helper_add_setq(tmp, tmp, tmp2);
8568 tcg_temp_free_i32(tmp2);
8570 break;
8571 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8572 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8573 if (rs != 15) {
8574 tmp = load_reg(s, rs);
8575 if (insn & (1 << 20)) {
8576 tmp64 = gen_addq_msw(tmp64, tmp);
8577 } else {
8578 tmp64 = gen_subq_msw(tmp64, tmp);
8581 if (insn & (1 << 4)) {
8582 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8584 tcg_gen_shri_i64(tmp64, tmp64, 32);
8585 tmp = tcg_temp_new_i32();
8586 tcg_gen_trunc_i64_i32(tmp, tmp64);
8587 tcg_temp_free_i64(tmp64);
8588 break;
8589 case 7: /* Unsigned sum of absolute differences. */
8590 gen_helper_usad8(tmp, tmp, tmp2);
8591 tcg_temp_free_i32(tmp2);
8592 if (rs != 15) {
8593 tmp2 = load_reg(s, rs);
8594 tcg_gen_add_i32(tmp, tmp, tmp2);
8595 tcg_temp_free_i32(tmp2);
8597 break;
8599 store_reg(s, rd, tmp);
8600 break;
8601 case 6: case 7: /* 64-bit multiply, Divide. */
8602 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8603 tmp = load_reg(s, rn);
8604 tmp2 = load_reg(s, rm);
8605 if ((op & 0x50) == 0x10) {
8606 /* sdiv, udiv */
8607 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8608 goto illegal_op;
8610 if (op & 0x20)
8611 gen_helper_udiv(tmp, tmp, tmp2);
8612 else
8613 gen_helper_sdiv(tmp, tmp, tmp2);
8614 tcg_temp_free_i32(tmp2);
8615 store_reg(s, rd, tmp);
8616 } else if ((op & 0xe) == 0xc) {
8617 /* Dual multiply accumulate long. */
8618 if (op & 1)
8619 gen_swap_half(tmp2);
8620 gen_smul_dual(tmp, tmp2);
8621 if (op & 0x10) {
8622 tcg_gen_sub_i32(tmp, tmp, tmp2);
8623 } else {
8624 tcg_gen_add_i32(tmp, tmp, tmp2);
8626 tcg_temp_free_i32(tmp2);
8627 /* BUGFIX */
8628 tmp64 = tcg_temp_new_i64();
8629 tcg_gen_ext_i32_i64(tmp64, tmp);
8630 tcg_temp_free_i32(tmp);
8631 gen_addq(s, tmp64, rs, rd);
8632 gen_storeq_reg(s, rs, rd, tmp64);
8633 tcg_temp_free_i64(tmp64);
8634 } else {
8635 if (op & 0x20) {
8636 /* Unsigned 64-bit multiply */
8637 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8638 } else {
8639 if (op & 8) {
8640 /* smlalxy */
8641 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8642 tcg_temp_free_i32(tmp2);
8643 tmp64 = tcg_temp_new_i64();
8644 tcg_gen_ext_i32_i64(tmp64, tmp);
8645 tcg_temp_free_i32(tmp);
8646 } else {
8647 /* Signed 64-bit multiply */
8648 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8651 if (op & 4) {
8652 /* umaal */
8653 gen_addq_lo(s, tmp64, rs);
8654 gen_addq_lo(s, tmp64, rd);
8655 } else if (op & 0x40) {
8656 /* 64-bit accumulate. */
8657 gen_addq(s, tmp64, rs, rd);
8659 gen_storeq_reg(s, rs, rd, tmp64);
8660 tcg_temp_free_i64(tmp64);
8662 break;
8664 break;
8665 case 6: case 7: case 14: case 15:
8666 /* Coprocessor. */
8667 if (((insn >> 24) & 3) == 3) {
8668 /* Translate into the equivalent ARM encoding. */
8669 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
8670 if (disas_neon_data_insn(env, s, insn))
8671 goto illegal_op;
8672 } else {
8673 if (insn & (1 << 28))
8674 goto illegal_op;
8675 if (disas_coproc_insn (env, s, insn))
8676 goto illegal_op;
8678 break;
8679 case 8: case 9: case 10: case 11:
8680 if (insn & (1 << 15)) {
8681 /* Branches, misc control. */
8682 if (insn & 0x5000) {
8683 /* Unconditional branch. */
8684 /* signextend(hw1[10:0]) -> offset[:12]. */
8685 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8686 /* hw1[10:0] -> offset[11:1]. */
8687 offset |= (insn & 0x7ff) << 1;
8688 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8689 offset[24:22] already have the same value because of the
8690 sign extension above. */
8691 offset ^= ((~insn) & (1 << 13)) << 10;
8692 offset ^= ((~insn) & (1 << 11)) << 11;
8694 if (insn & (1 << 14)) {
8695 /* Branch and link. */
8696 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8699 offset += s->pc;
8700 if (insn & (1 << 12)) {
8701 /* b/bl */
8702 gen_jmp(s, offset);
8703 } else {
8704 /* blx */
8705 offset &= ~(uint32_t)2;
8706 /* thumb2 bx, no need to check */
8707 gen_bx_im(s, offset);
8709 } else if (((insn >> 23) & 7) == 7) {
8710 /* Misc control */
8711 if (insn & (1 << 13))
8712 goto illegal_op;
8714 if (insn & (1 << 26)) {
8715 /* Secure monitor call (v6Z) */
8716 goto illegal_op; /* not implemented. */
8717 } else {
8718 op = (insn >> 20) & 7;
8719 switch (op) {
8720 case 0: /* msr cpsr. */
8721 if (IS_M(env)) {
8722 tmp = load_reg(s, rn);
8723 addr = tcg_const_i32(insn & 0xff);
8724 gen_helper_v7m_msr(cpu_env, addr, tmp);
8725 tcg_temp_free_i32(addr);
8726 tcg_temp_free_i32(tmp);
8727 gen_lookup_tb(s);
8728 break;
8730 /* fall through */
8731 case 1: /* msr spsr. */
8732 if (IS_M(env))
8733 goto illegal_op;
8734 tmp = load_reg(s, rn);
8735 if (gen_set_psr(s,
8736 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8737 op == 1, tmp))
8738 goto illegal_op;
8739 break;
8740 case 2: /* cps, nop-hint. */
8741 if (((insn >> 8) & 7) == 0) {
8742 gen_nop_hint(s, insn & 0xff);
8744 /* Implemented as NOP in user mode. */
8745 if (IS_USER(s))
8746 break;
8747 offset = 0;
8748 imm = 0;
8749 if (insn & (1 << 10)) {
8750 if (insn & (1 << 7))
8751 offset |= CPSR_A;
8752 if (insn & (1 << 6))
8753 offset |= CPSR_I;
8754 if (insn & (1 << 5))
8755 offset |= CPSR_F;
8756 if (insn & (1 << 9))
8757 imm = CPSR_A | CPSR_I | CPSR_F;
8759 if (insn & (1 << 8)) {
8760 offset |= 0x1f;
8761 imm |= (insn & 0x1f);
8763 if (offset) {
8764 gen_set_psr_im(s, offset, 0, imm);
8766 break;
8767 case 3: /* Special control operations. */
8768 ARCH(7);
8769 op = (insn >> 4) & 0xf;
8770 switch (op) {
8771 case 2: /* clrex */
8772 gen_clrex(s);
8773 break;
8774 case 4: /* dsb */
8775 case 5: /* dmb */
8776 case 6: /* isb */
8777 /* These execute as NOPs. */
8778 break;
8779 default:
8780 goto illegal_op;
8782 break;
8783 case 4: /* bxj */
8784 /* Trivial implementation equivalent to bx. */
8785 tmp = load_reg(s, rn);
8786 gen_bx(s, tmp);
8787 break;
8788 case 5: /* Exception return. */
8789 if (IS_USER(s)) {
8790 goto illegal_op;
8792 if (rn != 14 || rd != 15) {
8793 goto illegal_op;
8795 tmp = load_reg(s, rn);
8796 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8797 gen_exception_return(s, tmp);
8798 break;
8799 case 6: /* mrs cpsr. */
8800 tmp = tcg_temp_new_i32();
8801 if (IS_M(env)) {
8802 addr = tcg_const_i32(insn & 0xff);
8803 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8804 tcg_temp_free_i32(addr);
8805 } else {
8806 gen_helper_cpsr_read(tmp);
8808 store_reg(s, rd, tmp);
8809 break;
8810 case 7: /* mrs spsr. */
8811 /* Not accessible in user mode. */
8812 if (IS_USER(s) || IS_M(env))
8813 goto illegal_op;
8814 tmp = load_cpu_field(spsr);
8815 store_reg(s, rd, tmp);
8816 break;
8819 } else {
8820 /* Conditional branch. */
8821 op = (insn >> 22) & 0xf;
8822 /* Generate a conditional jump to next instruction. */
8823 s->condlabel = gen_new_label();
8824 gen_test_cc(op ^ 1, s->condlabel);
8825 s->condjmp = 1;
8827 /* offset[11:1] = insn[10:0] */
8828 offset = (insn & 0x7ff) << 1;
8829 /* offset[17:12] = insn[21:16]. */
8830 offset |= (insn & 0x003f0000) >> 4;
8831 /* offset[31:20] = insn[26]. */
8832 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8833 /* offset[18] = insn[13]. */
8834 offset |= (insn & (1 << 13)) << 5;
8835 /* offset[19] = insn[11]. */
8836 offset |= (insn & (1 << 11)) << 8;
8838 /* jump to the offset */
8839 gen_jmp(s, s->pc + offset);
8841 } else {
8842 /* Data processing immediate. */
8843 if (insn & (1 << 25)) {
8844 if (insn & (1 << 24)) {
8845 if (insn & (1 << 20))
8846 goto illegal_op;
8847 /* Bitfield/Saturate. */
8848 op = (insn >> 21) & 7;
8849 imm = insn & 0x1f;
8850 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8851 if (rn == 15) {
8852 tmp = tcg_temp_new_i32();
8853 tcg_gen_movi_i32(tmp, 0);
8854 } else {
8855 tmp = load_reg(s, rn);
8857 switch (op) {
8858 case 2: /* Signed bitfield extract. */
8859 imm++;
8860 if (shift + imm > 32)
8861 goto illegal_op;
8862 if (imm < 32)
8863 gen_sbfx(tmp, shift, imm);
8864 break;
8865 case 6: /* Unsigned bitfield extract. */
8866 imm++;
8867 if (shift + imm > 32)
8868 goto illegal_op;
8869 if (imm < 32)
8870 gen_ubfx(tmp, shift, (1u << imm) - 1);
8871 break;
8872 case 3: /* Bitfield insert/clear. */
8873 if (imm < shift)
8874 goto illegal_op;
8875 imm = imm + 1 - shift;
8876 if (imm != 32) {
8877 tmp2 = load_reg(s, rd);
8878 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8879 tcg_temp_free_i32(tmp2);
8881 break;
8882 case 7:
8883 goto illegal_op;
8884 default: /* Saturate. */
8885 if (shift) {
8886 if (op & 1)
8887 tcg_gen_sari_i32(tmp, tmp, shift);
8888 else
8889 tcg_gen_shli_i32(tmp, tmp, shift);
8891 tmp2 = tcg_const_i32(imm);
8892 if (op & 4) {
8893 /* Unsigned. */
8894 if ((op & 1) && shift == 0)
8895 gen_helper_usat16(tmp, tmp, tmp2);
8896 else
8897 gen_helper_usat(tmp, tmp, tmp2);
8898 } else {
8899 /* Signed. */
8900 if ((op & 1) && shift == 0)
8901 gen_helper_ssat16(tmp, tmp, tmp2);
8902 else
8903 gen_helper_ssat(tmp, tmp, tmp2);
8905 tcg_temp_free_i32(tmp2);
8906 break;
8908 store_reg(s, rd, tmp);
8909 } else {
8910 imm = ((insn & 0x04000000) >> 15)
8911 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8912 if (insn & (1 << 22)) {
8913 /* 16-bit immediate. */
8914 imm |= (insn >> 4) & 0xf000;
8915 if (insn & (1 << 23)) {
8916 /* movt */
8917 tmp = load_reg(s, rd);
8918 tcg_gen_ext16u_i32(tmp, tmp);
8919 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8920 } else {
8921 /* movw */
8922 tmp = tcg_temp_new_i32();
8923 tcg_gen_movi_i32(tmp, imm);
8925 } else {
8926 /* Add/sub 12-bit immediate. */
8927 if (rn == 15) {
8928 offset = s->pc & ~(uint32_t)3;
8929 if (insn & (1 << 23))
8930 offset -= imm;
8931 else
8932 offset += imm;
8933 tmp = tcg_temp_new_i32();
8934 tcg_gen_movi_i32(tmp, offset);
8935 } else {
8936 tmp = load_reg(s, rn);
8937 if (insn & (1 << 23))
8938 tcg_gen_subi_i32(tmp, tmp, imm);
8939 else
8940 tcg_gen_addi_i32(tmp, tmp, imm);
8943 store_reg(s, rd, tmp);
8945 } else {
8946 int shifter_out = 0;
8947 /* modified 12-bit immediate. */
8948 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8949 imm = (insn & 0xff);
8950 switch (shift) {
8951 case 0: /* XY */
8952 /* Nothing to do. */
8953 break;
8954 case 1: /* 00XY00XY */
8955 imm |= imm << 16;
8956 break;
8957 case 2: /* XY00XY00 */
8958 imm |= imm << 16;
8959 imm <<= 8;
8960 break;
8961 case 3: /* XYXYXYXY */
8962 imm |= imm << 16;
8963 imm |= imm << 8;
8964 break;
8965 default: /* Rotated constant. */
8966 shift = (shift << 1) | (imm >> 7);
8967 imm |= 0x80;
8968 imm = imm << (32 - shift);
8969 shifter_out = 1;
8970 break;
8972 tmp2 = tcg_temp_new_i32();
8973 tcg_gen_movi_i32(tmp2, imm);
8974 rn = (insn >> 16) & 0xf;
8975 if (rn == 15) {
8976 tmp = tcg_temp_new_i32();
8977 tcg_gen_movi_i32(tmp, 0);
8978 } else {
8979 tmp = load_reg(s, rn);
8981 op = (insn >> 21) & 0xf;
8982 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8983 shifter_out, tmp, tmp2))
8984 goto illegal_op;
8985 tcg_temp_free_i32(tmp2);
8986 rd = (insn >> 8) & 0xf;
8987 if (rd != 15) {
8988 store_reg(s, rd, tmp);
8989 } else {
8990 tcg_temp_free_i32(tmp);
8994 break;
8995 case 12: /* Load/store single data item. */
8997 int postinc = 0;
8998 int writeback = 0;
8999 int user;
9000 if ((insn & 0x01100000) == 0x01000000) {
9001 if (disas_neon_ls_insn(env, s, insn))
9002 goto illegal_op;
9003 break;
9005 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9006 if (rs == 15) {
9007 if (!(insn & (1 << 20))) {
9008 goto illegal_op;
9010 if (op != 2) {
9011 /* Byte or halfword load space with dest == r15 : memory hints.
9012 * Catch them early so we don't emit pointless addressing code.
9013 * This space is a mix of:
9014 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9015 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9016 * cores)
9017 * unallocated hints, which must be treated as NOPs
9018 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9019 * which is easiest for the decoding logic
9020 * Some space which must UNDEF
9022 int op1 = (insn >> 23) & 3;
9023 int op2 = (insn >> 6) & 0x3f;
9024 if (op & 2) {
9025 goto illegal_op;
9027 if (rn == 15) {
9028 /* UNPREDICTABLE or unallocated hint */
9029 return 0;
9031 if (op1 & 1) {
9032 return 0; /* PLD* or unallocated hint */
9034 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
9035 return 0; /* PLD* or unallocated hint */
9037 /* UNDEF space, or an UNPREDICTABLE */
9038 return 1;
9041 user = IS_USER(s);
9042 if (rn == 15) {
9043 addr = tcg_temp_new_i32();
9044 /* PC relative. */
9045 /* s->pc has already been incremented by 4. */
9046 imm = s->pc & 0xfffffffc;
9047 if (insn & (1 << 23))
9048 imm += insn & 0xfff;
9049 else
9050 imm -= insn & 0xfff;
9051 tcg_gen_movi_i32(addr, imm);
9052 } else {
9053 addr = load_reg(s, rn);
9054 if (insn & (1 << 23)) {
9055 /* Positive offset. */
9056 imm = insn & 0xfff;
9057 tcg_gen_addi_i32(addr, addr, imm);
9058 } else {
9059 imm = insn & 0xff;
9060 switch ((insn >> 8) & 0xf) {
9061 case 0x0: /* Shifted Register. */
9062 shift = (insn >> 4) & 0xf;
9063 if (shift > 3) {
9064 tcg_temp_free_i32(addr);
9065 goto illegal_op;
9067 tmp = load_reg(s, rm);
9068 if (shift)
9069 tcg_gen_shli_i32(tmp, tmp, shift);
9070 tcg_gen_add_i32(addr, addr, tmp);
9071 tcg_temp_free_i32(tmp);
9072 break;
9073 case 0xc: /* Negative offset. */
9074 tcg_gen_addi_i32(addr, addr, -imm);
9075 break;
9076 case 0xe: /* User privilege. */
9077 tcg_gen_addi_i32(addr, addr, imm);
9078 user = 1;
9079 break;
9080 case 0x9: /* Post-decrement. */
9081 imm = -imm;
9082 /* Fall through. */
9083 case 0xb: /* Post-increment. */
9084 postinc = 1;
9085 writeback = 1;
9086 break;
9087 case 0xd: /* Pre-decrement. */
9088 imm = -imm;
9089 /* Fall through. */
9090 case 0xf: /* Pre-increment. */
9091 tcg_gen_addi_i32(addr, addr, imm);
9092 writeback = 1;
9093 break;
9094 default:
9095 tcg_temp_free_i32(addr);
9096 goto illegal_op;
9100 if (insn & (1 << 20)) {
9101 /* Load. */
9102 switch (op) {
9103 case 0: tmp = gen_ld8u(addr, user); break;
9104 case 4: tmp = gen_ld8s(addr, user); break;
9105 case 1: tmp = gen_ld16u(addr, user); break;
9106 case 5: tmp = gen_ld16s(addr, user); break;
9107 case 2: tmp = gen_ld32(addr, user); break;
9108 default:
9109 tcg_temp_free_i32(addr);
9110 goto illegal_op;
9112 if (rs == 15) {
9113 gen_bx(s, tmp);
9114 } else {
9115 store_reg(s, rs, tmp);
9117 } else {
9118 /* Store. */
9119 tmp = load_reg(s, rs);
9120 switch (op) {
9121 case 0: gen_st8(tmp, addr, user); break;
9122 case 1: gen_st16(tmp, addr, user); break;
9123 case 2: gen_st32(tmp, addr, user); break;
9124 default:
9125 tcg_temp_free_i32(addr);
9126 goto illegal_op;
9129 if (postinc)
9130 tcg_gen_addi_i32(addr, addr, imm);
9131 if (writeback) {
9132 store_reg(s, rn, addr);
9133 } else {
9134 tcg_temp_free_i32(addr);
9137 break;
9138 default:
9139 goto illegal_op;
9141 return 0;
9142 illegal_op:
9143 return 1;
/*
 * Decode and translate a single 16-bit Thumb instruction.
 *
 * Reads the halfword at s->pc via lduw_code(), advances s->pc by 2, and
 * emits the equivalent TCG ops.  Dispatch is on insn[15:12].  32-bit
 * Thumb-2 encodings (opcode groups 14 with bit 11 set, and 15) are
 * forwarded to disas_thumb2_insn().  Undefined encodings fall through to
 * the undef/undef32 labels at the bottom, which raise EXCP_UDEF.
 *
 * Temp-ownership convention used throughout: store_reg() consumes its TCGv
 * argument, so a temp is either stored exactly once or explicitly freed
 * with tcg_temp_free_i32() — never both.
 */
9146 static void disas_thumb_insn(CPUState *env, DisasContext *s)
9148 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9149 int32_t offset;
9150 int i;
9151 TCGv tmp;
9152 TCGv tmp2;
9153 TCGv addr;
/* Inside an IT block: emit a conditional skip over this insn unless the
   condition is AL.  s->condlabel is resolved by the caller after the insn. */
9155 if (s->condexec_mask) {
9156 cond = s->condexec_cond;
9157 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9158 s->condlabel = gen_new_label();
9159 gen_test_cc(cond ^ 1, s->condlabel);
9160 s->condjmp = 1;
9164 insn = lduw_code(s->pc);
9165 s->pc += 2;
9167 switch (insn >> 12) {
9168 case 0: case 1:
/* Shift by immediate, or add/subtract (register or 3-bit immediate).
   Inside an IT block the flag-setting helper variants are not used. */
9170 rd = insn & 7;
9171 op = (insn >> 11) & 3;
9172 if (op == 3) {
9173 /* add/subtract */
9174 rn = (insn >> 3) & 7;
9175 tmp = load_reg(s, rn);
9176 if (insn & (1 << 10)) {
9177 /* immediate */
9178 tmp2 = tcg_temp_new_i32();
9179 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9180 } else {
9181 /* reg */
9182 rm = (insn >> 6) & 7;
9183 tmp2 = load_reg(s, rm);
9185 if (insn & (1 << 9)) {
9186 if (s->condexec_mask)
9187 tcg_gen_sub_i32(tmp, tmp, tmp2);
9188 else
9189 gen_helper_sub_cc(tmp, tmp, tmp2);
9190 } else {
9191 if (s->condexec_mask)
9192 tcg_gen_add_i32(tmp, tmp, tmp2);
9193 else
9194 gen_helper_add_cc(tmp, tmp, tmp2);
9196 tcg_temp_free_i32(tmp2);
9197 store_reg(s, rd, tmp);
9198 } else {
9199 /* shift immediate */
9200 rm = (insn >> 3) & 7;
9201 shift = (insn >> 6) & 0x1f;
9202 tmp = load_reg(s, rm);
9203 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9204 if (!s->condexec_mask)
9205 gen_logic_CC(tmp);
9206 store_reg(s, rd, tmp);
9208 break;
9209 case 2: case 3:
9210 /* arithmetic large immediate */
9211 op = (insn >> 11) & 3;
9212 rd = (insn >> 8) & 0x7;
9213 if (op == 0) { /* mov */
9214 tmp = tcg_temp_new_i32();
9215 tcg_gen_movi_i32(tmp, insn & 0xff);
9216 if (!s->condexec_mask)
9217 gen_logic_CC(tmp);
9218 store_reg(s, rd, tmp);
9219 } else {
9220 tmp = load_reg(s, rd);
9221 tmp2 = tcg_temp_new_i32();
9222 tcg_gen_movi_i32(tmp2, insn & 0xff);
9223 switch (op) {
9224 case 1: /* cmp */
/* cmp has no destination: both temps are freed, nothing stored. */
9225 gen_helper_sub_cc(tmp, tmp, tmp2);
9226 tcg_temp_free_i32(tmp);
9227 tcg_temp_free_i32(tmp2);
9228 break;
9229 case 2: /* add */
9230 if (s->condexec_mask)
9231 tcg_gen_add_i32(tmp, tmp, tmp2);
9232 else
9233 gen_helper_add_cc(tmp, tmp, tmp2);
9234 tcg_temp_free_i32(tmp2);
9235 store_reg(s, rd, tmp);
9236 break;
9237 case 3: /* sub */
9238 if (s->condexec_mask)
9239 tcg_gen_sub_i32(tmp, tmp, tmp2);
9240 else
9241 gen_helper_sub_cc(tmp, tmp, tmp2);
9242 tcg_temp_free_i32(tmp2);
9243 store_reg(s, rd, tmp);
9244 break;
9247 break;
9248 case 4:
9249 if (insn & (1 << 11)) {
9250 rd = (insn >> 8) & 7;
9251 /* load pc-relative. Bit 1 of PC is ignored. */
9252 val = s->pc + 2 + ((insn & 0xff) * 4);
9253 val &= ~(uint32_t)2;
9254 addr = tcg_temp_new_i32();
9255 tcg_gen_movi_i32(addr, val);
9256 tmp = gen_ld32(addr, IS_USER(s));
9257 tcg_temp_free_i32(addr);
9258 store_reg(s, rd, tmp);
9259 break;
9261 if (insn & (1 << 10)) {
9262 /* data processing extended or blx */
/* High-register forms: rd gets its top bit from insn[7], rm is full 4 bits. */
9263 rd = (insn & 7) | ((insn >> 4) & 8);
9264 rm = (insn >> 3) & 0xf;
9265 op = (insn >> 8) & 3;
9266 switch (op) {
9267 case 0: /* add */
9268 tmp = load_reg(s, rd);
9269 tmp2 = load_reg(s, rm);
9270 tcg_gen_add_i32(tmp, tmp, tmp2);
9271 tcg_temp_free_i32(tmp2);
9272 store_reg(s, rd, tmp);
9273 break;
9274 case 1: /* cmp */
9275 tmp = load_reg(s, rd);
9276 tmp2 = load_reg(s, rm);
9277 gen_helper_sub_cc(tmp, tmp, tmp2);
9278 tcg_temp_free_i32(tmp2);
9279 tcg_temp_free_i32(tmp);
9280 break;
9281 case 2: /* mov/cpy */
9282 tmp = load_reg(s, rm);
9283 store_reg(s, rd, tmp);
9284 break;
9285 case 3:/* branch [and link] exchange thumb register */
9286 tmp = load_reg(s, rm);
9287 if (insn & (1 << 7)) {
9288 ARCH(5);
/* blx: save return address (current pc, Thumb bit set) in lr first. */
9289 val = (uint32_t)s->pc | 1;
9290 tmp2 = tcg_temp_new_i32();
9291 tcg_gen_movi_i32(tmp2, val);
9292 store_reg(s, 14, tmp2);
9294 /* already thumb, no need to check */
9295 gen_bx(s, tmp);
9296 break;
9298 break;
9301 /* data processing register */
9302 rd = insn & 7;
9303 rm = (insn >> 3) & 7;
9304 op = (insn >> 6) & 0xf;
/* From here down, val==1 means "result lives in tmp2 and is stored to rm"
   (shift/rotate and mvn forms); val==0 means result is in tmp, stored to rd.
   rd is forced to 16 for the compare/test ops that write no register. */
9305 if (op == 2 || op == 3 || op == 4 || op == 7) {
9306 /* the shift/rotate ops want the operands backwards */
9307 val = rm;
9308 rm = rd;
9309 rd = val;
9310 val = 1;
9311 } else {
9312 val = 0;
9315 if (op == 9) { /* neg */
9316 tmp = tcg_temp_new_i32();
9317 tcg_gen_movi_i32(tmp, 0);
9318 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9319 tmp = load_reg(s, rd);
9320 } else {
9321 TCGV_UNUSED(tmp);
9324 tmp2 = load_reg(s, rm);
9325 switch (op) {
9326 case 0x0: /* and */
9327 tcg_gen_and_i32(tmp, tmp, tmp2);
9328 if (!s->condexec_mask)
9329 gen_logic_CC(tmp);
9330 break;
9331 case 0x1: /* eor */
9332 tcg_gen_xor_i32(tmp, tmp, tmp2);
9333 if (!s->condexec_mask)
9334 gen_logic_CC(tmp);
9335 break;
9336 case 0x2: /* lsl */
9337 if (s->condexec_mask) {
9338 gen_helper_shl(tmp2, tmp2, tmp);
9339 } else {
9340 gen_helper_shl_cc(tmp2, tmp2, tmp);
9341 gen_logic_CC(tmp2);
9343 break;
9344 case 0x3: /* lsr */
9345 if (s->condexec_mask) {
9346 gen_helper_shr(tmp2, tmp2, tmp);
9347 } else {
9348 gen_helper_shr_cc(tmp2, tmp2, tmp);
9349 gen_logic_CC(tmp2);
9351 break;
9352 case 0x4: /* asr */
9353 if (s->condexec_mask) {
9354 gen_helper_sar(tmp2, tmp2, tmp);
9355 } else {
9356 gen_helper_sar_cc(tmp2, tmp2, tmp);
9357 gen_logic_CC(tmp2);
9359 break;
9360 case 0x5: /* adc */
9361 if (s->condexec_mask)
9362 gen_adc(tmp, tmp2);
9363 else
9364 gen_helper_adc_cc(tmp, tmp, tmp2);
9365 break;
9366 case 0x6: /* sbc */
9367 if (s->condexec_mask)
9368 gen_sub_carry(tmp, tmp, tmp2);
9369 else
9370 gen_helper_sbc_cc(tmp, tmp, tmp2);
9371 break;
9372 case 0x7: /* ror */
9373 if (s->condexec_mask) {
/* tcg rotate is modulo 32; mask the shift count to match ARM semantics. */
9374 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9375 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9376 } else {
9377 gen_helper_ror_cc(tmp2, tmp2, tmp);
9378 gen_logic_CC(tmp2);
9380 break;
9381 case 0x8: /* tst */
9382 tcg_gen_and_i32(tmp, tmp, tmp2);
9383 gen_logic_CC(tmp);
9384 rd = 16;
9385 break;
9386 case 0x9: /* neg */
9387 if (s->condexec_mask)
9388 tcg_gen_neg_i32(tmp, tmp2);
9389 else
9390 gen_helper_sub_cc(tmp, tmp, tmp2);
9391 break;
9392 case 0xa: /* cmp */
9393 gen_helper_sub_cc(tmp, tmp, tmp2);
9394 rd = 16;
9395 break;
9396 case 0xb: /* cmn */
9397 gen_helper_add_cc(tmp, tmp, tmp2);
9398 rd = 16;
9399 break;
9400 case 0xc: /* orr */
9401 tcg_gen_or_i32(tmp, tmp, tmp2);
9402 if (!s->condexec_mask)
9403 gen_logic_CC(tmp);
9404 break;
9405 case 0xd: /* mul */
9406 tcg_gen_mul_i32(tmp, tmp, tmp2);
9407 if (!s->condexec_mask)
9408 gen_logic_CC(tmp);
9409 break;
9410 case 0xe: /* bic */
9411 tcg_gen_andc_i32(tmp, tmp, tmp2);
9412 if (!s->condexec_mask)
9413 gen_logic_CC(tmp);
9414 break;
9415 case 0xf: /* mvn */
9416 tcg_gen_not_i32(tmp2, tmp2);
9417 if (!s->condexec_mask)
9418 gen_logic_CC(tmp2);
9419 val = 1;
9420 rm = rd;
9421 break;
/* Write back the result per the val/rd protocol established above;
   rd==16 (tst/cmp/cmn) only set flags, so both temps are freed. */
9423 if (rd != 16) {
9424 if (val) {
9425 store_reg(s, rm, tmp2);
9426 if (op != 0xf)
9427 tcg_temp_free_i32(tmp);
9428 } else {
9429 store_reg(s, rd, tmp);
9430 tcg_temp_free_i32(tmp2);
9432 } else {
9433 tcg_temp_free_i32(tmp);
9434 tcg_temp_free_i32(tmp2);
9436 break;
9438 case 5:
9439 /* load/store register offset. */
9440 rd = insn & 7;
9441 rn = (insn >> 3) & 7;
9442 rm = (insn >> 6) & 7;
9443 op = (insn >> 9) & 7;
9444 addr = load_reg(s, rn);
9445 tmp = load_reg(s, rm);
9446 tcg_gen_add_i32(addr, addr, tmp);
9447 tcg_temp_free_i32(tmp);
9449 if (op < 3) /* store */
9450 tmp = load_reg(s, rd);
9452 switch (op) {
9453 case 0: /* str */
9454 gen_st32(tmp, addr, IS_USER(s));
9455 break;
9456 case 1: /* strh */
9457 gen_st16(tmp, addr, IS_USER(s));
9458 break;
9459 case 2: /* strb */
9460 gen_st8(tmp, addr, IS_USER(s));
9461 break;
9462 case 3: /* ldrsb */
9463 tmp = gen_ld8s(addr, IS_USER(s));
9464 break;
9465 case 4: /* ldr */
9466 tmp = gen_ld32(addr, IS_USER(s));
9467 break;
9468 case 5: /* ldrh */
9469 tmp = gen_ld16u(addr, IS_USER(s));
9470 break;
9471 case 6: /* ldrb */
9472 tmp = gen_ld8u(addr, IS_USER(s));
9473 break;
9474 case 7: /* ldrsh */
9475 tmp = gen_ld16s(addr, IS_USER(s));
9476 break;
9478 if (op >= 3) /* load */
9479 store_reg(s, rd, tmp);
9480 tcg_temp_free_i32(addr);
9481 break;
9483 case 6:
9484 /* load/store word immediate offset */
9485 rd = insn & 7;
9486 rn = (insn >> 3) & 7;
9487 addr = load_reg(s, rn);
9488 val = (insn >> 4) & 0x7c;
9489 tcg_gen_addi_i32(addr, addr, val);
9491 if (insn & (1 << 11)) {
9492 /* load */
9493 tmp = gen_ld32(addr, IS_USER(s));
9494 store_reg(s, rd, tmp);
9495 } else {
9496 /* store */
9497 tmp = load_reg(s, rd);
9498 gen_st32(tmp, addr, IS_USER(s));
9500 tcg_temp_free_i32(addr);
9501 break;
9503 case 7:
9504 /* load/store byte immediate offset */
9505 rd = insn & 7;
9506 rn = (insn >> 3) & 7;
9507 addr = load_reg(s, rn);
9508 val = (insn >> 6) & 0x1f;
9509 tcg_gen_addi_i32(addr, addr, val);
9511 if (insn & (1 << 11)) {
9512 /* load */
9513 tmp = gen_ld8u(addr, IS_USER(s));
9514 store_reg(s, rd, tmp);
9515 } else {
9516 /* store */
9517 tmp = load_reg(s, rd);
9518 gen_st8(tmp, addr, IS_USER(s));
9520 tcg_temp_free_i32(addr);
9521 break;
9523 case 8:
9524 /* load/store halfword immediate offset */
9525 rd = insn & 7;
9526 rn = (insn >> 3) & 7;
9527 addr = load_reg(s, rn);
9528 val = (insn >> 5) & 0x3e;
9529 tcg_gen_addi_i32(addr, addr, val);
9531 if (insn & (1 << 11)) {
9532 /* load */
9533 tmp = gen_ld16u(addr, IS_USER(s));
9534 store_reg(s, rd, tmp);
9535 } else {
9536 /* store */
9537 tmp = load_reg(s, rd);
9538 gen_st16(tmp, addr, IS_USER(s));
9540 tcg_temp_free_i32(addr);
9541 break;
9543 case 9:
9544 /* load/store from stack */
9545 rd = (insn >> 8) & 7;
9546 addr = load_reg(s, 13);
9547 val = (insn & 0xff) * 4;
9548 tcg_gen_addi_i32(addr, addr, val);
9550 if (insn & (1 << 11)) {
9551 /* load */
9552 tmp = gen_ld32(addr, IS_USER(s));
9553 store_reg(s, rd, tmp);
9554 } else {
9555 /* store */
9556 tmp = load_reg(s, rd);
9557 gen_st32(tmp, addr, IS_USER(s));
9559 tcg_temp_free_i32(addr);
9560 break;
9562 case 10:
9563 /* add to high reg */
9564 rd = (insn >> 8) & 7;
9565 if (insn & (1 << 11)) {
9566 /* SP */
9567 tmp = load_reg(s, 13);
9568 } else {
9569 /* PC. bit 1 is ignored. */
9570 tmp = tcg_temp_new_i32();
9571 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9573 val = (insn & 0xff) * 4;
9574 tcg_gen_addi_i32(tmp, tmp, val);
9575 store_reg(s, rd, tmp);
9576 break;
9578 case 11:
9579 /* misc */
9580 op = (insn >> 8) & 0xf;
9581 switch (op) {
9582 case 0:
9583 /* adjust stack pointer */
9584 tmp = load_reg(s, 13);
9585 val = (insn & 0x7f) * 4;
9586 if (insn & (1 << 7))
9587 val = -(int32_t)val;
9588 tcg_gen_addi_i32(tmp, tmp, val);
9589 store_reg(s, 13, tmp);
9590 break;
9592 case 2: /* sign/zero extend. */
9593 ARCH(6);
9594 rd = insn & 7;
9595 rm = (insn >> 3) & 7;
9596 tmp = load_reg(s, rm);
9597 switch ((insn >> 6) & 3) {
9598 case 0: gen_sxth(tmp); break;
9599 case 1: gen_sxtb(tmp); break;
9600 case 2: gen_uxth(tmp); break;
9601 case 3: gen_uxtb(tmp); break;
9603 store_reg(s, rd, tmp);
9604 break;
9605 case 4: case 5: case 0xc: case 0xd:
9606 /* push/pop */
/* First compute the total transfer size so push can pre-decrement. */
9607 addr = load_reg(s, 13);
9608 if (insn & (1 << 8))
9609 offset = 4;
9610 else
9611 offset = 0;
9612 for (i = 0; i < 8; i++) {
9613 if (insn & (1 << i))
9614 offset += 4;
9616 if ((insn & (1 << 11)) == 0) {
9617 tcg_gen_addi_i32(addr, addr, -offset);
9619 for (i = 0; i < 8; i++) {
9620 if (insn & (1 << i)) {
9621 if (insn & (1 << 11)) {
9622 /* pop */
9623 tmp = gen_ld32(addr, IS_USER(s));
9624 store_reg(s, i, tmp);
9625 } else {
9626 /* push */
9627 tmp = load_reg(s, i);
9628 gen_st32(tmp, addr, IS_USER(s));
9630 /* advance to the next address. */
9631 tcg_gen_addi_i32(addr, addr, 4);
9634 TCGV_UNUSED(tmp);
9635 if (insn & (1 << 8)) {
9636 if (insn & (1 << 11)) {
9637 /* pop pc */
9638 tmp = gen_ld32(addr, IS_USER(s));
9639 /* don't set the pc until the rest of the instruction
9640 has completed */
9641 } else {
9642 /* push lr */
9643 tmp = load_reg(s, 14);
9644 gen_st32(tmp, addr, IS_USER(s));
9646 tcg_gen_addi_i32(addr, addr, 4);
9648 if ((insn & (1 << 11)) == 0) {
9649 tcg_gen_addi_i32(addr, addr, -offset);
9651 /* write back the new stack pointer */
9652 store_reg(s, 13, addr);
9653 /* set the new PC value */
9654 if ((insn & 0x0900) == 0x0900) {
9655 store_reg_from_load(env, s, 15, tmp);
9657 break;
9659 case 1: case 3: case 9: case 11: /* czb */
/* cbz/cbnz: compare register against zero and branch; never inside IT. */
9660 rm = insn & 7;
9661 tmp = load_reg(s, rm);
9662 s->condlabel = gen_new_label();
9663 s->condjmp = 1;
9664 if (insn & (1 << 11))
9665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9666 else
9667 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9668 tcg_temp_free_i32(tmp);
9669 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9670 val = (uint32_t)s->pc + 2;
9671 val += offset;
9672 gen_jmp(s, val);
9673 break;
9675 case 15: /* IT, nop-hint. */
9676 if ((insn & 0xf) == 0) {
9677 gen_nop_hint(s, (insn >> 4) & 0xf);
9678 break;
9680 /* If Then. */
9681 s->condexec_cond = (insn >> 4) & 0xe;
9682 s->condexec_mask = insn & 0x1f;
9683 /* No actual code generated for this insn, just setup state. */
9684 break;
9686 case 0xe: /* bkpt */
9687 ARCH(5);
9688 gen_exception_insn(s, 2, EXCP_BKPT);
9689 break;
9691 case 0xa: /* rev */
9692 ARCH(6);
9693 rn = (insn >> 3) & 0x7;
9694 rd = insn & 0x7;
9695 tmp = load_reg(s, rn);
9696 switch ((insn >> 6) & 3) {
9697 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9698 case 1: gen_rev16(tmp); break;
9699 case 3: gen_revsh(tmp); break;
9700 default: goto illegal_op;
9702 store_reg(s, rd, tmp);
9703 break;
9705 case 6: /* cps */
9706 ARCH(6);
9707 if (IS_USER(s))
9708 break;
9709 if (IS_M(env)) {
/* M-profile: CPS maps onto PRIMASK/FAULTMASK special registers. */
9710 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9711 /* PRIMASK */
9712 if (insn & 1) {
9713 addr = tcg_const_i32(16);
9714 gen_helper_v7m_msr(cpu_env, addr, tmp);
9715 tcg_temp_free_i32(addr);
9717 /* FAULTMASK */
9718 if (insn & 2) {
9719 addr = tcg_const_i32(17);
9720 gen_helper_v7m_msr(cpu_env, addr, tmp);
9721 tcg_temp_free_i32(addr);
9723 tcg_temp_free_i32(tmp);
9724 gen_lookup_tb(s);
9725 } else {
9726 if (insn & (1 << 4))
9727 shift = CPSR_A | CPSR_I | CPSR_F;
9728 else
9729 shift = 0;
9730 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9732 break;
9734 default:
9735 goto undef;
9737 break;
9739 case 12:
9741 /* load/store multiple */
/* If the base register is in a load list, defer its writeback so the
   other loads use the original base value. */
9742 TCGv loaded_var;
9743 TCGV_UNUSED(loaded_var);
9744 rn = (insn >> 8) & 0x7;
9745 addr = load_reg(s, rn);
9746 for (i = 0; i < 8; i++) {
9747 if (insn & (1 << i)) {
9748 if (insn & (1 << 11)) {
9749 /* load */
9750 tmp = gen_ld32(addr, IS_USER(s));
9751 if (i == rn) {
9752 loaded_var = tmp;
9753 } else {
9754 store_reg(s, i, tmp);
9756 } else {
9757 /* store */
9758 tmp = load_reg(s, i);
9759 gen_st32(tmp, addr, IS_USER(s));
9761 /* advance to the next address */
9762 tcg_gen_addi_i32(addr, addr, 4);
9765 if ((insn & (1 << rn)) == 0) {
9766 /* base reg not in list: base register writeback */
9767 store_reg(s, rn, addr);
9768 } else {
9769 /* base reg in list: if load, complete it now */
9770 if (insn & (1 << 11)) {
9771 store_reg(s, rn, loaded_var);
9773 tcg_temp_free_i32(addr);
9775 break;
9777 case 13:
9778 /* conditional branch or swi */
9779 cond = (insn >> 8) & 0xf;
9780 if (cond == 0xe)
9781 goto undef;
9783 if (cond == 0xf) {
9784 /* swi */
9785 gen_set_pc_im(s->pc);
9786 s->is_jmp = DISAS_SWI;
9787 break;
9789 /* generate a conditional jump to next instruction */
9790 s->condlabel = gen_new_label();
9791 gen_test_cc(cond ^ 1, s->condlabel);
9792 s->condjmp = 1;
9794 /* jump to the offset */
9795 val = (uint32_t)s->pc + 2;
9796 offset = ((int32_t)insn << 24) >> 24;
9797 val += offset << 1;
9798 gen_jmp(s, val);
9799 break;
9801 case 14:
9802 if (insn & (1 << 11)) {
/* First half of a 32-bit Thumb-2 instruction. */
9803 if (disas_thumb2_insn(env, s, insn))
9804 goto undef32;
9805 break;
9807 /* unconditional branch */
9808 val = (uint32_t)s->pc;
9809 offset = ((int32_t)insn << 21) >> 21;
9810 val += (offset << 1) + 2;
9811 gen_jmp(s, val);
9812 break;
9814 case 15:
9815 if (disas_thumb2_insn(env, s, insn))
9816 goto undef32;
9817 break;
9819 return;
9820 undef32:
/* Undefined 32-bit encoding: exception PC must back up by 4 bytes. */
9821 gen_exception_insn(s, 4, EXCP_UDEF);
9822 return;
9823 illegal_op:
9824 undef:
9825 gen_exception_insn(s, 2, EXCP_UDEF);
9828 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9829 basic block 'tb'. If search_pc is TRUE, also generate PC
9830 information for each intermediate instruction. */
9831 static inline void gen_intermediate_code_internal(CPUState *env,
9832 TranslationBlock *tb,
9833 int search_pc)
9835 DisasContext dc1, *dc = &dc1;
9836 CPUBreakpoint *bp;
9837 uint16_t *gen_opc_end;
9838 int j, lj;
9839 target_ulong pc_start;
9840 uint32_t next_page_start;
9841 int num_insns;
9842 int max_insns;
9844 /* generate intermediate code */
9845 pc_start = tb->pc;
9847 dc->tb = tb;
9849 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9851 dc->is_jmp = DISAS_NEXT;
9852 dc->pc = pc_start;
9853 dc->singlestep_enabled = env->singlestep_enabled;
9854 dc->condjmp = 0;
9855 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9856 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9857 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9858 #if !defined(CONFIG_USER_ONLY)
9859 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9860 #endif
9861 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9862 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9863 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9864 cpu_F0s = tcg_temp_new_i32();
9865 cpu_F1s = tcg_temp_new_i32();
9866 cpu_F0d = tcg_temp_new_i64();
9867 cpu_F1d = tcg_temp_new_i64();
9868 cpu_V0 = cpu_F0d;
9869 cpu_V1 = cpu_F1d;
9870 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9871 cpu_M0 = tcg_temp_new_i64();
9872 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9873 lj = -1;
9874 num_insns = 0;
9875 max_insns = tb->cflags & CF_COUNT_MASK;
9876 if (max_insns == 0)
9877 max_insns = CF_COUNT_MASK;
9879 gen_icount_start();
9881 tcg_clear_temp_count();
9883 /* A note on handling of the condexec (IT) bits:
9885 * We want to avoid the overhead of having to write the updated condexec
9886 * bits back to the CPUState for every instruction in an IT block. So:
9887 * (1) if the condexec bits are not already zero then we write
9888 * zero back into the CPUState now. This avoids complications trying
9889 * to do it at the end of the block. (For example if we don't do this
9890 * it's hard to identify whether we can safely skip writing condexec
9891 * at the end of the TB, which we definitely want to do for the case
9892 * where a TB doesn't do anything with the IT state at all.)
9893 * (2) if we are going to leave the TB then we call gen_set_condexec()
9894 * which will write the correct value into CPUState if zero is wrong.
9895 * This is done both for leaving the TB at the end, and for leaving
9896 * it because of an exception we know will happen, which is done in
9897 * gen_exception_insn(). The latter is necessary because we need to
9898 * leave the TB with the PC/IT state just prior to execution of the
9899 * instruction which caused the exception.
9900 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9901 * then the CPUState will be wrong and we need to reset it.
9902 * This is handled in the same way as restoration of the
9903 * PC in these situations: we will be called again with search_pc=1
9904 * and generate a mapping of the condexec bits for each PC in
9905 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9906 * this to restore the condexec bits.
9908 * Note that there are no instructions which can read the condexec
9909 * bits, and none which can write non-static values to them, so
9910 * we don't need to care about whether CPUState is correct in the
9911 * middle of a TB.
9914 /* Reset the conditional execution bits immediately. This avoids
9915 complications trying to do it at the end of the block. */
9916 if (dc->condexec_mask || dc->condexec_cond)
9918 TCGv tmp = tcg_temp_new_i32();
9919 tcg_gen_movi_i32(tmp, 0);
9920 store_cpu_field(tmp, condexec_bits);
9922 do {
9923 #ifdef CONFIG_USER_ONLY
9924 /* Intercept jump to the magic kernel page. */
9925 if (dc->pc >= 0xffff0000) {
9926 /* We always get here via a jump, so know we are not in a
9927 conditional execution block. */
9928 gen_exception(EXCP_KERNEL_TRAP);
9929 dc->is_jmp = DISAS_UPDATE;
9930 break;
9932 #else
9933 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9934 /* We always get here via a jump, so know we are not in a
9935 conditional execution block. */
9936 gen_exception(EXCP_EXCEPTION_EXIT);
9937 dc->is_jmp = DISAS_UPDATE;
9938 break;
9940 #endif
9942 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9943 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9944 if (bp->pc == dc->pc) {
9945 gen_exception_insn(dc, 0, EXCP_DEBUG);
9946 /* Advance PC so that clearing the breakpoint will
9947 invalidate this TB. */
9948 dc->pc += 2;
9949 goto done_generating;
9950 break;
9954 if (search_pc) {
9955 j = gen_opc_ptr - gen_opc_buf;
9956 if (lj < j) {
9957 lj++;
9958 while (lj < j)
9959 gen_opc_instr_start[lj++] = 0;
9961 gen_opc_pc[lj] = dc->pc;
9962 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9963 gen_opc_instr_start[lj] = 1;
9964 gen_opc_icount[lj] = num_insns;
9967 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9968 gen_io_start();
9970 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9971 tcg_gen_debug_insn_start(dc->pc);
9974 if (dc->thumb) {
9975 disas_thumb_insn(env, dc);
9976 if (dc->condexec_mask) {
9977 dc->condexec_cond = (dc->condexec_cond & 0xe)
9978 | ((dc->condexec_mask >> 4) & 1);
9979 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9980 if (dc->condexec_mask == 0) {
9981 dc->condexec_cond = 0;
9984 } else {
9985 disas_arm_insn(env, dc);
9988 if (dc->condjmp && !dc->is_jmp) {
9989 gen_set_label(dc->condlabel);
9990 dc->condjmp = 0;
9993 if (tcg_check_temp_count()) {
9994 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9997 /* Translation stops when a conditional branch is encountered.
9998 * Otherwise the subsequent code could get translated several times.
9999 * Also stop translation when a page boundary is reached. This
10000 * ensures prefetch aborts occur at the right place. */
10001 num_insns ++;
10002 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
10003 !env->singlestep_enabled &&
10004 !singlestep &&
10005 dc->pc < next_page_start &&
10006 num_insns < max_insns);
10008 if (tb->cflags & CF_LAST_IO) {
10009 if (dc->condjmp) {
10010 /* FIXME: This can theoretically happen with self-modifying
10011 code. */
10012 cpu_abort(env, "IO on conditional branch instruction");
10014 gen_io_end();
10017 /* At this stage dc->condjmp will only be set when the skipped
10018 instruction was a conditional branch or trap, and the PC has
10019 already been written. */
10020 if (unlikely(env->singlestep_enabled)) {
10021 /* Make sure the pc is updated, and raise a debug exception. */
10022 if (dc->condjmp) {
10023 gen_set_condexec(dc);
10024 if (dc->is_jmp == DISAS_SWI) {
10025 gen_exception(EXCP_SWI);
10026 } else {
10027 gen_exception(EXCP_DEBUG);
10029 gen_set_label(dc->condlabel);
10031 if (dc->condjmp || !dc->is_jmp) {
10032 gen_set_pc_im(dc->pc);
10033 dc->condjmp = 0;
10035 gen_set_condexec(dc);
10036 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
10037 gen_exception(EXCP_SWI);
10038 } else {
10039 /* FIXME: Single stepping a WFI insn will not halt
10040 the CPU. */
10041 gen_exception(EXCP_DEBUG);
10043 } else {
10044 /* While branches must always occur at the end of an IT block,
10045 there are a few other things that can cause us to terminate
10046 the TB in the middel of an IT block:
10047 - Exception generating instructions (bkpt, swi, undefined).
10048 - Page boundaries.
10049 - Hardware watchpoints.
10050 Hardware breakpoints have already been handled and skip this code.
10052 gen_set_condexec(dc);
10053 switch(dc->is_jmp) {
10054 case DISAS_NEXT:
10055 gen_goto_tb(dc, 1, dc->pc);
10056 break;
10057 default:
10058 case DISAS_JUMP:
10059 case DISAS_UPDATE:
10060 /* indicate that the hash table must be used to find the next TB */
10061 tcg_gen_exit_tb(0);
10062 break;
10063 case DISAS_TB_JUMP:
10064 /* nothing more to generate */
10065 break;
10066 case DISAS_WFI:
10067 gen_helper_wfi();
10068 break;
10069 case DISAS_SWI:
10070 gen_exception(EXCP_SWI);
10071 break;
10073 if (dc->condjmp) {
10074 gen_set_label(dc->condlabel);
10075 gen_set_condexec(dc);
10076 gen_goto_tb(dc, 1, dc->pc);
10077 dc->condjmp = 0;
10081 done_generating:
10082 gen_icount_end(tb, num_insns);
10083 *gen_opc_ptr = INDEX_op_end;
10085 #ifdef DEBUG_DISAS
10086 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10087 qemu_log("----------------\n");
10088 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10089 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
10090 qemu_log("\n");
10092 #endif
10093 if (search_pc) {
10094 j = gen_opc_ptr - gen_opc_buf;
10095 lj++;
10096 while (lj <= j)
10097 gen_opc_instr_start[lj++] = 0;
10098 } else {
10099 tb->size = dc->pc - pc_start;
10100 tb->icount = num_insns;
10104 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
10106 gen_intermediate_code_internal(env, tb, 0);
10109 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
10111 gen_intermediate_code_internal(env, tb, 1);
/* Printable names for the ARM processor mode, indexed by the low four
   bits of the PSR mode field; "???" marks reserved/unused encodings.
   Both the array and the pointed-to strings are const so the whole
   table can live in read-only storage. */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
10119 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
10120 int flags)
10122 int i;
10123 #if 0
10124 union {
10125 uint32_t i;
10126 float s;
10127 } s0, s1;
10128 CPU_DoubleU d;
10129 /* ??? This assumes float64 and double have the same layout.
10130 Oh well, it's only debug dumps. */
10131 union {
10132 float64 f64;
10133 double d;
10134 } d0;
10135 #endif
10136 uint32_t psr;
10138 for(i=0;i<16;i++) {
10139 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10140 if ((i % 4) == 3)
10141 cpu_fprintf(f, "\n");
10142 else
10143 cpu_fprintf(f, " ");
10145 psr = cpsr_read(env);
10146 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10147 psr,
10148 psr & (1 << 31) ? 'N' : '-',
10149 psr & (1 << 30) ? 'Z' : '-',
10150 psr & (1 << 29) ? 'C' : '-',
10151 psr & (1 << 28) ? 'V' : '-',
10152 psr & CPSR_T ? 'T' : 'A',
10153 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10155 #if 0
10156 for (i = 0; i < 16; i++) {
10157 d.d = env->vfp.regs[i];
10158 s0.i = d.l.lower;
10159 s1.i = d.l.upper;
10160 d0.f64 = d.d;
10161 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
10162 i * 2, (int)s0.i, s0.s,
10163 i * 2 + 1, (int)s1.i, s1.s,
10164 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
10165 d0.d);
10167 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10168 #endif
10171 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
10173 env->regs[15] = gen_opc_pc[pc_pos];
10174 env->condexec_bits = gen_opc_condexec_bits[pc_pos];