target-arm: Set carry flag correctly for Thumb2 ORNS
[qemu/ar7.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
53 /* Thumb-2 conditional execution bits. */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 int vfp_enabled;
63 int vec_len;
64 int vec_stride;
65 } DisasContext;
67 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
69 #if defined(CONFIG_USER_ONLY)
70 #define IS_USER(s) 1
71 #else
72 #define IS_USER(s) (s->user)
73 #endif
75 /* These instructions trap after executing, so defer them until after the
76 conditional execution state has been updated. */
77 #define DISAS_WFI 4
78 #define DISAS_SWI 5
80 static TCGv_ptr cpu_env;
81 /* We reuse the same 64-bit temporaries for efficiency. */
82 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
83 static TCGv_i32 cpu_R[16];
84 static TCGv_i32 cpu_exclusive_addr;
85 static TCGv_i32 cpu_exclusive_val;
86 static TCGv_i32 cpu_exclusive_high;
87 #ifdef CONFIG_USER_ONLY
88 static TCGv_i32 cpu_exclusive_test;
89 static TCGv_i32 cpu_exclusive_info;
90 #endif
92 /* FIXME: These should be removed. */
93 static TCGv cpu_F0s, cpu_F1s;
94 static TCGv_i64 cpu_F0d, cpu_F1d;
96 #include "gen-icount.h"
98 static const char *regnames[] =
99 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
100 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
102 /* initialize TCG globals. */
103 void arm_translate_init(void)
105 int i;
107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
109 for (i = 0; i < 16; i++) {
110 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUState, regs[i]),
112 regnames[i]);
114 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUState, exclusive_addr), "exclusive_addr");
116 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, exclusive_val), "exclusive_val");
118 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, exclusive_high), "exclusive_high");
120 #ifdef CONFIG_USER_ONLY
121 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, exclusive_test), "exclusive_test");
123 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, exclusive_info), "exclusive_info");
125 #endif
127 #define GEN_HELPER 2
128 #include "helpers.h"
131 static int num_temps;
133 /* Allocate a temporary variable. */
134 static TCGv_i32 new_tmp(void)
136 num_temps++;
137 return tcg_temp_new_i32();
140 /* Release a temporary variable. */
141 static void dead_tmp(TCGv tmp)
143 tcg_temp_free(tmp);
144 num_temps--;
147 static inline TCGv load_cpu_offset(int offset)
149 TCGv tmp = new_tmp();
150 tcg_gen_ld_i32(tmp, cpu_env, offset);
151 return tmp;
154 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
156 static inline void store_cpu_offset(TCGv var, int offset)
158 tcg_gen_st_i32(var, cpu_env, offset);
159 dead_tmp(var);
162 #define store_cpu_field(var, name) \
163 store_cpu_offset(var, offsetof(CPUState, name))
165 /* Set a variable to the value of a CPU register. */
166 static void load_reg_var(DisasContext *s, TCGv var, int reg)
168 if (reg == 15) {
169 uint32_t addr;
170 /* normally, since we updated PC, we need only to add one insn (the PC reads as current insn + 8 in ARM state, + 4 in Thumb) */
171 if (s->thumb)
172 addr = (long)s->pc + 2;
173 else
174 addr = (long)s->pc + 4;
175 tcg_gen_movi_i32(var, addr);
176 } else {
177 tcg_gen_mov_i32(var, cpu_R[reg]);
181 /* Create a new temporary and set it to the value of a CPU register. */
182 static inline TCGv load_reg(DisasContext *s, int reg)
184 TCGv tmp = new_tmp();
185 load_reg_var(s, tmp, reg);
186 return tmp;
189 /* Set a CPU register. The source must be a temporary and will be
190 marked as dead. */
191 static void store_reg(DisasContext *s, int reg, TCGv var)
193 if (reg == 15) {
194 tcg_gen_andi_i32(var, var, ~1);
195 s->is_jmp = DISAS_JUMP;
197 tcg_gen_mov_i32(cpu_R[reg], var);
198 dead_tmp(var);
201 /* Value extensions. */
202 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
203 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
204 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
205 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
207 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
208 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
211 static inline void gen_set_cpsr(TCGv var, uint32_t mask)
213 TCGv tmp_mask = tcg_const_i32(mask);
214 gen_helper_cpsr_write(var, tmp_mask);
215 tcg_temp_free_i32(tmp_mask);
217 /* Set NZCV flags from the high 4 bits of var. */
218 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
220 static void gen_exception(int excp)
222 TCGv tmp = new_tmp();
223 tcg_gen_movi_i32(tmp, excp);
224 gen_helper_exception(tmp);
225 dead_tmp(tmp);
228 static void gen_smul_dual(TCGv a, TCGv b)
230 TCGv tmp1 = new_tmp();
231 TCGv tmp2 = new_tmp();
232 tcg_gen_ext16s_i32(tmp1, a);
233 tcg_gen_ext16s_i32(tmp2, b);
234 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
235 dead_tmp(tmp2);
236 tcg_gen_sari_i32(a, a, 16);
237 tcg_gen_sari_i32(b, b, 16);
238 tcg_gen_mul_i32(b, b, a);
239 tcg_gen_mov_i32(a, tmp1);
240 dead_tmp(tmp1);
243 /* Byteswap each halfword. */
244 static void gen_rev16(TCGv var)
246 TCGv tmp = new_tmp();
247 tcg_gen_shri_i32(tmp, var, 8);
248 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
249 tcg_gen_shli_i32(var, var, 8);
250 tcg_gen_andi_i32(var, var, 0xff00ff00);
251 tcg_gen_or_i32(var, var, tmp);
252 dead_tmp(tmp);
255 /* Byteswap low halfword and sign extend. */
256 static void gen_revsh(TCGv var)
258 tcg_gen_ext16u_i32(var, var);
259 tcg_gen_bswap16_i32(var, var);
260 tcg_gen_ext16s_i32(var, var);
263 /* Unsigned bitfield extract. */
264 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
266 if (shift)
267 tcg_gen_shri_i32(var, var, shift);
268 tcg_gen_andi_i32(var, var, mask);
271 /* Signed bitfield extract. */
272 static void gen_sbfx(TCGv var, int shift, int width)
274 uint32_t signbit;
276 if (shift)
277 tcg_gen_sari_i32(var, var, shift);
278 if (shift + width < 32) {
279 signbit = 1u << (width - 1);
280 tcg_gen_andi_i32(var, var, (1u << width) - 1);
281 tcg_gen_xori_i32(var, var, signbit);
282 tcg_gen_subi_i32(var, var, signbit);
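/* Example: with shift = 4 and width = 8, an input of 0x00000abc becomes
   0xab after the arithmetic shift and mask; XORing with the sign bit (0x80)
   and then subtracting it yields 0xffffffab, i.e. the field value
   sign-extended to 32 bits. */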
286 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
287 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
289 tcg_gen_andi_i32(val, val, mask);
290 tcg_gen_shli_i32(val, val, shift);
291 tcg_gen_andi_i32(base, base, ~(mask << shift));
292 tcg_gen_or_i32(dest, base, val);
295 /* Return (b << 32) + a. Mark inputs as dead. */
296 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
300 tcg_gen_extu_i32_i64(tmp64, b);
301 dead_tmp(b);
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_add_i64(a, tmp64, a);
305 tcg_temp_free_i64(tmp64);
306 return a;
309 /* Return (b << 32) - a. Mark inputs as dead. */
310 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
312 TCGv_i64 tmp64 = tcg_temp_new_i64();
314 tcg_gen_extu_i32_i64(tmp64, b);
315 dead_tmp(b);
316 tcg_gen_shli_i64(tmp64, tmp64, 32);
317 tcg_gen_sub_i64(a, tmp64, a);
319 tcg_temp_free_i64(tmp64);
320 return a;
323 /* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
325 /* 32x32->64 multiply. Marks inputs as dead. */
326 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 tcg_temp_free_i64(tmp2);
337 return tmp1;
340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_temp_free_i64(tmp2);
351 return tmp1;
354 /* Swap low and high halfwords. */
355 static void gen_swap_half(TCGv var)
357 TCGv tmp = new_tmp();
358 tcg_gen_shri_i32(tmp, var, 16);
359 tcg_gen_shli_i32(var, var, 16);
360 tcg_gen_or_i32(var, var, tmp);
361 dead_tmp(tmp);
364 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
365 tmp = (t0 ^ t1) & 0x8000;
366 t0 &= ~0x8000;
367 t1 &= ~0x8000;
368 t0 = (t0 + t1) ^ tmp;
371 static void gen_add16(TCGv t0, TCGv t1)
373 TCGv tmp = new_tmp();
374 tcg_gen_xor_i32(tmp, t0, t1);
375 tcg_gen_andi_i32(tmp, tmp, 0x8000);
376 tcg_gen_andi_i32(t0, t0, ~0x8000);
377 tcg_gen_andi_i32(t1, t1, ~0x8000);
378 tcg_gen_add_i32(t0, t0, t1);
379 tcg_gen_xor_i32(t0, t0, tmp);
380 dead_tmp(tmp);
381 dead_tmp(t1);
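/* Clearing bit 15 of both operands keeps the low halfword addition from
   carrying into the high halfword; XORing the saved bit back in restores
   the correct bit 15 of the result. */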
384 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
386 /* Set CF to the top bit of var. */
387 static void gen_set_CF_bit31(TCGv var)
389 TCGv tmp = new_tmp();
390 tcg_gen_shri_i32(tmp, var, 31);
391 gen_set_CF(tmp);
392 dead_tmp(tmp);
395 /* Set N and Z flags from var. */
396 static inline void gen_logic_CC(TCGv var)
398 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
399 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
402 /* T0 += T1 + CF. */
403 static void gen_adc(TCGv t0, TCGv t1)
405 TCGv tmp;
406 tcg_gen_add_i32(t0, t0, t1);
407 tmp = load_cpu_field(CF);
408 tcg_gen_add_i32(t0, t0, tmp);
409 dead_tmp(tmp);
412 /* dest = T0 + T1 + CF. */
413 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
415 TCGv tmp;
416 tcg_gen_add_i32(dest, t0, t1);
417 tmp = load_cpu_field(CF);
418 tcg_gen_add_i32(dest, dest, tmp);
419 dead_tmp(tmp);
422 /* dest = T0 - T1 + CF - 1. */
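/* This matches the ARM SBC/RSC semantics: the carry flag is an inverted
   borrow, so Rn - Op2 - NOT(C) is evaluated as Rn - Op2 + C - 1. */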
423 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
425 TCGv tmp;
426 tcg_gen_sub_i32(dest, t0, t1);
427 tmp = load_cpu_field(CF);
428 tcg_gen_add_i32(dest, dest, tmp);
429 tcg_gen_subi_i32(dest, dest, 1);
430 dead_tmp(tmp);
433 /* FIXME: Implement this natively. */
434 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
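/* Set the carry flag to bit <shift> of var, i.e. the last bit shifted out
   by the immediate shift being emulated.  For example, for LSL #3 the
   caller passes 32 - 3 = 29, so C is taken from bit 29 of the unshifted
   value. */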
436 static void shifter_out_im(TCGv var, int shift)
438 TCGv tmp = new_tmp();
439 if (shift == 0) {
440 tcg_gen_andi_i32(tmp, var, 1);
441 } else {
442 tcg_gen_shri_i32(tmp, var, shift);
443 if (shift != 31)
444 tcg_gen_andi_i32(tmp, tmp, 1);
446 gen_set_CF(tmp);
447 dead_tmp(tmp);
450 /* Shift by immediate. Includes special handling for shift == 0. */
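/* In the ARM immediate shift encoding, a shift amount of 0 means LSR #32,
   ASR #32 and RRX for the LSR, ASR and ROR shift types respectively, hence
   the special cases below. */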
451 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
453 switch (shiftop) {
454 case 0: /* LSL */
455 if (shift != 0) {
456 if (flags)
457 shifter_out_im(var, 32 - shift);
458 tcg_gen_shli_i32(var, var, shift);
460 break;
461 case 1: /* LSR */
462 if (shift == 0) {
463 if (flags) {
464 tcg_gen_shri_i32(var, var, 31);
465 gen_set_CF(var);
467 tcg_gen_movi_i32(var, 0);
468 } else {
469 if (flags)
470 shifter_out_im(var, shift - 1);
471 tcg_gen_shri_i32(var, var, shift);
473 break;
474 case 2: /* ASR */
475 if (shift == 0)
476 shift = 32;
477 if (flags)
478 shifter_out_im(var, shift - 1);
479 if (shift == 32)
480 shift = 31;
481 tcg_gen_sari_i32(var, var, shift);
482 break;
483 case 3: /* ROR/RRX */
484 if (shift != 0) {
485 if (flags)
486 shifter_out_im(var, shift - 1);
487 tcg_gen_rotri_i32(var, var, shift); break;
488 } else {
489 TCGv tmp = load_cpu_field(CF);
490 if (flags)
491 shifter_out_im(var, 0);
492 tcg_gen_shri_i32(var, var, 1);
493 tcg_gen_shli_i32(tmp, tmp, 31);
494 tcg_gen_or_i32(var, var, tmp);
495 dead_tmp(tmp);
500 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
501 TCGv shift, int flags)
503 if (flags) {
504 switch (shiftop) {
505 case 0: gen_helper_shl_cc(var, var, shift); break;
506 case 1: gen_helper_shr_cc(var, var, shift); break;
507 case 2: gen_helper_sar_cc(var, var, shift); break;
508 case 3: gen_helper_ror_cc(var, var, shift); break;
510 } else {
511 switch (shiftop) {
512 case 0: gen_helper_shl(var, var, shift); break;
513 case 1: gen_helper_shr(var, var, shift); break;
514 case 2: gen_helper_sar(var, var, shift); break;
515 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
516 tcg_gen_rotr_i32(var, var, shift); break;
519 dead_tmp(shift);
522 #define PAS_OP(pfx) \
523 switch (op2) { \
524 case 0: gen_pas_helper(glue(pfx,add16)); break; \
525 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
526 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
527 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
528 case 4: gen_pas_helper(glue(pfx,add8)); break; \
529 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
531 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
533 TCGv_ptr tmp;
535 switch (op1) {
536 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
537 case 1:
538 tmp = tcg_temp_new_ptr();
539 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
540 PAS_OP(s)
541 tcg_temp_free_ptr(tmp);
542 break;
543 case 5:
544 tmp = tcg_temp_new_ptr();
545 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
546 PAS_OP(u)
547 tcg_temp_free_ptr(tmp);
548 break;
549 #undef gen_pas_helper
550 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
551 case 2:
552 PAS_OP(q);
553 break;
554 case 3:
555 PAS_OP(sh);
556 break;
557 case 6:
558 PAS_OP(uq);
559 break;
560 case 7:
561 PAS_OP(uh);
562 break;
563 #undef gen_pas_helper
566 #undef PAS_OP
568 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
569 #define PAS_OP(pfx) \
570 switch (op1) { \
571 case 0: gen_pas_helper(glue(pfx,add8)); break; \
572 case 1: gen_pas_helper(glue(pfx,add16)); break; \
573 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
574 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
575 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
576 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
578 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
580 TCGv_ptr tmp;
582 switch (op2) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 0:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 tcg_temp_free_ptr(tmp);
589 break;
590 case 4:
591 tmp = tcg_temp_new_ptr();
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(u)
594 tcg_temp_free_ptr(tmp);
595 break;
596 #undef gen_pas_helper
597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 1:
599 PAS_OP(q);
600 break;
601 case 2:
602 PAS_OP(sh);
603 break;
604 case 5:
605 PAS_OP(uq);
606 break;
607 case 6:
608 PAS_OP(uh);
609 break;
610 #undef gen_pas_helper
613 #undef PAS_OP
615 static void gen_test_cc(int cc, int label)
617 TCGv tmp;
618 TCGv tmp2;
619 int inv;
621 switch (cc) {
622 case 0: /* eq: Z */
623 tmp = load_cpu_field(ZF);
624 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
625 break;
626 case 1: /* ne: !Z */
627 tmp = load_cpu_field(ZF);
628 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
629 break;
630 case 2: /* cs: C */
631 tmp = load_cpu_field(CF);
632 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
633 break;
634 case 3: /* cc: !C */
635 tmp = load_cpu_field(CF);
636 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
637 break;
638 case 4: /* mi: N */
639 tmp = load_cpu_field(NF);
640 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
641 break;
642 case 5: /* pl: !N */
643 tmp = load_cpu_field(NF);
644 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
645 break;
646 case 6: /* vs: V */
647 tmp = load_cpu_field(VF);
648 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
649 break;
650 case 7: /* vc: !V */
651 tmp = load_cpu_field(VF);
652 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
653 break;
654 case 8: /* hi: C && !Z */
655 inv = gen_new_label();
656 tmp = load_cpu_field(CF);
657 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
658 dead_tmp(tmp);
659 tmp = load_cpu_field(ZF);
660 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
661 gen_set_label(inv);
662 break;
663 case 9: /* ls: !C || Z */
664 tmp = load_cpu_field(CF);
665 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
666 dead_tmp(tmp);
667 tmp = load_cpu_field(ZF);
668 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
669 break;
670 case 10: /* ge: N == V -> N ^ V == 0 */
671 tmp = load_cpu_field(VF);
672 tmp2 = load_cpu_field(NF);
673 tcg_gen_xor_i32(tmp, tmp, tmp2);
674 dead_tmp(tmp2);
675 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
676 break;
677 case 11: /* lt: N != V -> N ^ V != 0 */
678 tmp = load_cpu_field(VF);
679 tmp2 = load_cpu_field(NF);
680 tcg_gen_xor_i32(tmp, tmp, tmp2);
681 dead_tmp(tmp2);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 12: /* gt: !Z && N == V */
685 inv = gen_new_label();
686 tmp = load_cpu_field(ZF);
687 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
688 dead_tmp(tmp);
689 tmp = load_cpu_field(VF);
690 tmp2 = load_cpu_field(NF);
691 tcg_gen_xor_i32(tmp, tmp, tmp2);
692 dead_tmp(tmp2);
693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
694 gen_set_label(inv);
695 break;
696 case 13: /* le: Z || N != V */
697 tmp = load_cpu_field(ZF);
698 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
699 dead_tmp(tmp);
700 tmp = load_cpu_field(VF);
701 tmp2 = load_cpu_field(NF);
702 tcg_gen_xor_i32(tmp, tmp, tmp2);
703 dead_tmp(tmp2);
704 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
705 break;
706 default:
707 fprintf(stderr, "Bad condition code 0x%x\n", cc);
708 abort();
710 dead_tmp(tmp);
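/* For each ARM data-processing opcode, a 1 entry means the operation is a
   logical one: its flag-setting form takes C from the shifter carry-out
   (and leaves V untouched) rather than from an arithmetic carry.  E.g.
   opcode 0xc (ORR) is logical while opcode 0x4 (ADD) is not. */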
713 static const uint8_t table_logic_cc[16] = {
714 1, /* and */
715 1, /* xor */
716 0, /* sub */
717 0, /* rsb */
718 0, /* add */
719 0, /* adc */
720 0, /* sbc */
721 0, /* rsc */
722 1, /* andl */
723 1, /* xorl */
724 0, /* cmp */
725 0, /* cmn */
726 1, /* orr */
727 1, /* mov */
728 1, /* bic */
729 1, /* mvn */
732 /* Set PC and Thumb state from an immediate address. */
733 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
735 TCGv tmp;
737 s->is_jmp = DISAS_UPDATE;
738 if (s->thumb != (addr & 1)) {
739 tmp = new_tmp();
740 tcg_gen_movi_i32(tmp, addr & 1);
741 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
742 dead_tmp(tmp);
744 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
747 /* Set PC and Thumb state from var. var is marked as dead. */
748 static inline void gen_bx(DisasContext *s, TCGv var)
750 s->is_jmp = DISAS_UPDATE;
751 tcg_gen_andi_i32(cpu_R[15], var, ~1);
752 tcg_gen_andi_i32(var, var, 1);
753 store_cpu_field(var, thumb);
756 /* Variant of store_reg which uses branch&exchange logic when storing
757 to r15 in ARM architecture v7 and above. The source must be a temporary
758 and will be marked as dead. */
759 static inline void store_reg_bx(CPUState *env, DisasContext *s,
760 int reg, TCGv var)
762 if (reg == 15 && ENABLE_ARCH_7) {
763 gen_bx(s, var);
764 } else {
765 store_reg(s, reg, var);
769 static inline TCGv gen_ld8s(TCGv addr, int index)
771 TCGv tmp = new_tmp();
772 tcg_gen_qemu_ld8s(tmp, addr, index);
773 return tmp;
775 static inline TCGv gen_ld8u(TCGv addr, int index)
777 TCGv tmp = new_tmp();
778 tcg_gen_qemu_ld8u(tmp, addr, index);
779 return tmp;
781 static inline TCGv gen_ld16s(TCGv addr, int index)
783 TCGv tmp = new_tmp();
784 tcg_gen_qemu_ld16s(tmp, addr, index);
785 return tmp;
787 static inline TCGv gen_ld16u(TCGv addr, int index)
789 TCGv tmp = new_tmp();
790 tcg_gen_qemu_ld16u(tmp, addr, index);
791 return tmp;
793 static inline TCGv gen_ld32(TCGv addr, int index)
795 TCGv tmp = new_tmp();
796 tcg_gen_qemu_ld32u(tmp, addr, index);
797 return tmp;
799 static inline TCGv_i64 gen_ld64(TCGv addr, int index)
801 TCGv_i64 tmp = tcg_temp_new_i64();
802 tcg_gen_qemu_ld64(tmp, addr, index);
803 return tmp;
805 static inline void gen_st8(TCGv val, TCGv addr, int index)
807 tcg_gen_qemu_st8(val, addr, index);
808 dead_tmp(val);
810 static inline void gen_st16(TCGv val, TCGv addr, int index)
812 tcg_gen_qemu_st16(val, addr, index);
813 dead_tmp(val);
815 static inline void gen_st32(TCGv val, TCGv addr, int index)
817 tcg_gen_qemu_st32(val, addr, index);
818 dead_tmp(val);
820 static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
822 tcg_gen_qemu_st64(val, addr, index);
823 tcg_temp_free_i64(val);
826 static inline void gen_set_pc_im(uint32_t val)
828 tcg_gen_movi_i32(cpu_R[15], val);
831 /* Force a TB lookup after an instruction that changes the CPU state. */
832 static inline void gen_lookup_tb(DisasContext *s)
834 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
835 s->is_jmp = DISAS_UPDATE;
838 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
839 TCGv var)
841 int val, rm, shift, shiftop;
842 TCGv offset;
844 if (!(insn & (1 << 25))) {
845 /* immediate */
846 val = insn & 0xfff;
847 if (!(insn & (1 << 23)))
848 val = -val;
849 if (val != 0)
850 tcg_gen_addi_i32(var, var, val);
851 } else {
852 /* shift/register */
853 rm = (insn) & 0xf;
854 shift = (insn >> 7) & 0x1f;
855 shiftop = (insn >> 5) & 3;
856 offset = load_reg(s, rm);
857 gen_arm_shift_im(offset, shiftop, shift, 0);
858 if (!(insn & (1 << 23)))
859 tcg_gen_sub_i32(var, var, offset);
860 else
861 tcg_gen_add_i32(var, var, offset);
862 dead_tmp(offset);
866 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
867 int extra, TCGv var)
869 int val, rm;
870 TCGv offset;
872 if (insn & (1 << 22)) {
873 /* immediate */
874 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
875 if (!(insn & (1 << 23)))
876 val = -val;
877 val += extra;
878 if (val != 0)
879 tcg_gen_addi_i32(var, var, val);
880 } else {
881 /* register */
882 if (extra)
883 tcg_gen_addi_i32(var, var, extra);
884 rm = (insn) & 0xf;
885 offset = load_reg(s, rm);
886 if (!(insn & (1 << 23)))
887 tcg_gen_sub_i32(var, var, offset);
888 else
889 tcg_gen_add_i32(var, var, offset);
890 dead_tmp(offset);
894 #define VFP_OP2(name) \
895 static inline void gen_vfp_##name(int dp) \
897 if (dp) \
898 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
899 else \
900 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
903 VFP_OP2(add)
904 VFP_OP2(sub)
905 VFP_OP2(mul)
906 VFP_OP2(div)
908 #undef VFP_OP2
910 static inline void gen_vfp_abs(int dp)
912 if (dp)
913 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
914 else
915 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
918 static inline void gen_vfp_neg(int dp)
920 if (dp)
921 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
922 else
923 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
926 static inline void gen_vfp_sqrt(int dp)
928 if (dp)
929 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
930 else
931 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
934 static inline void gen_vfp_cmp(int dp)
936 if (dp)
937 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
938 else
939 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
942 static inline void gen_vfp_cmpe(int dp)
944 if (dp)
945 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
946 else
947 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
950 static inline void gen_vfp_F1_ld0(int dp)
952 if (dp)
953 tcg_gen_movi_i64(cpu_F1d, 0);
954 else
955 tcg_gen_movi_i32(cpu_F1s, 0);
958 static inline void gen_vfp_uito(int dp)
960 if (dp)
961 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
962 else
963 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
966 static inline void gen_vfp_sito(int dp)
968 if (dp)
969 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
970 else
971 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
974 static inline void gen_vfp_toui(int dp)
976 if (dp)
977 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
978 else
979 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
982 static inline void gen_vfp_touiz(int dp)
984 if (dp)
985 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
986 else
987 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
990 static inline void gen_vfp_tosi(int dp)
992 if (dp)
993 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
994 else
995 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
998 static inline void gen_vfp_tosiz(int dp)
1000 if (dp)
1001 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1002 else
1003 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1006 #define VFP_GEN_FIX(name) \
1007 static inline void gen_vfp_##name(int dp, int shift) \
1009 TCGv tmp_shift = tcg_const_i32(shift); \
1010 if (dp) \
1011 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
1012 else \
1013 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
1014 tcg_temp_free_i32(tmp_shift); \
1016 VFP_GEN_FIX(tosh)
1017 VFP_GEN_FIX(tosl)
1018 VFP_GEN_FIX(touh)
1019 VFP_GEN_FIX(toul)
1020 VFP_GEN_FIX(shto)
1021 VFP_GEN_FIX(slto)
1022 VFP_GEN_FIX(uhto)
1023 VFP_GEN_FIX(ulto)
1024 #undef VFP_GEN_FIX
1026 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
1028 if (dp)
1029 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
1030 else
1031 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
1034 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
1036 if (dp)
1037 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
1038 else
1039 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
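/* Return the offset of a VFP register within CPUARMState.  An odd-numbered
   single-precision register occupies the upper word of the containing
   double register, an even-numbered one the lower word. */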
1042 static inline long
1043 vfp_reg_offset (int dp, int reg)
1045 if (dp)
1046 return offsetof(CPUARMState, vfp.regs[reg]);
1047 else if (reg & 1) {
1048 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1049 + offsetof(CPU_DoubleU, l.upper);
1050 } else {
1051 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1052 + offsetof(CPU_DoubleU, l.lower);
1056 /* Return the offset of a 32-bit piece of a NEON register.
1057 zero is the least significant end of the register. */
1058 static inline long
1059 neon_reg_offset (int reg, int n)
1061 int sreg;
1062 sreg = reg * 2 + n;
1063 return vfp_reg_offset(0, sreg);
1066 static TCGv neon_load_reg(int reg, int pass)
1068 TCGv tmp = new_tmp();
1069 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1070 return tmp;
1073 static void neon_store_reg(int reg, int pass, TCGv var)
1075 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1076 dead_tmp(var);
1079 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1081 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1084 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1086 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1089 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1090 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1091 #define tcg_gen_st_f32 tcg_gen_st_i32
1092 #define tcg_gen_st_f64 tcg_gen_st_i64
1094 static inline void gen_mov_F0_vreg(int dp, int reg)
1096 if (dp)
1097 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1098 else
1099 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1102 static inline void gen_mov_F1_vreg(int dp, int reg)
1104 if (dp)
1105 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1106 else
1107 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1110 static inline void gen_mov_vreg_F0(int dp, int reg)
1112 if (dp)
1113 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1114 else
1115 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1118 #define ARM_CP_RW_BIT (1 << 20)
1120 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1122 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1125 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1127 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1130 static inline TCGv iwmmxt_load_creg(int reg)
1132 TCGv var = new_tmp();
1133 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1134 return var;
1137 static inline void iwmmxt_store_creg(int reg, TCGv var)
1139 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1140 dead_tmp(var);
1143 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1145 iwmmxt_store_reg(cpu_M0, rn);
1148 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1150 iwmmxt_load_reg(cpu_M0, rn);
1153 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1155 iwmmxt_load_reg(cpu_V1, rn);
1156 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1159 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1161 iwmmxt_load_reg(cpu_V1, rn);
1162 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1165 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1167 iwmmxt_load_reg(cpu_V1, rn);
1168 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1171 #define IWMMXT_OP(name) \
1172 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1174 iwmmxt_load_reg(cpu_V1, rn); \
1175 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1178 #define IWMMXT_OP_ENV(name) \
1179 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1181 iwmmxt_load_reg(cpu_V1, rn); \
1182 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1185 #define IWMMXT_OP_ENV_SIZE(name) \
1186 IWMMXT_OP_ENV(name##b) \
1187 IWMMXT_OP_ENV(name##w) \
1188 IWMMXT_OP_ENV(name##l)
1190 #define IWMMXT_OP_ENV1(name) \
1191 static inline void gen_op_iwmmxt_##name##_M0(void) \
1193 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1196 IWMMXT_OP(maddsq)
1197 IWMMXT_OP(madduq)
1198 IWMMXT_OP(sadb)
1199 IWMMXT_OP(sadw)
1200 IWMMXT_OP(mulslw)
1201 IWMMXT_OP(mulshw)
1202 IWMMXT_OP(mululw)
1203 IWMMXT_OP(muluhw)
1204 IWMMXT_OP(macsw)
1205 IWMMXT_OP(macuw)
1207 IWMMXT_OP_ENV_SIZE(unpackl)
1208 IWMMXT_OP_ENV_SIZE(unpackh)
1210 IWMMXT_OP_ENV1(unpacklub)
1211 IWMMXT_OP_ENV1(unpackluw)
1212 IWMMXT_OP_ENV1(unpacklul)
1213 IWMMXT_OP_ENV1(unpackhub)
1214 IWMMXT_OP_ENV1(unpackhuw)
1215 IWMMXT_OP_ENV1(unpackhul)
1216 IWMMXT_OP_ENV1(unpacklsb)
1217 IWMMXT_OP_ENV1(unpacklsw)
1218 IWMMXT_OP_ENV1(unpacklsl)
1219 IWMMXT_OP_ENV1(unpackhsb)
1220 IWMMXT_OP_ENV1(unpackhsw)
1221 IWMMXT_OP_ENV1(unpackhsl)
1223 IWMMXT_OP_ENV_SIZE(cmpeq)
1224 IWMMXT_OP_ENV_SIZE(cmpgtu)
1225 IWMMXT_OP_ENV_SIZE(cmpgts)
1227 IWMMXT_OP_ENV_SIZE(mins)
1228 IWMMXT_OP_ENV_SIZE(minu)
1229 IWMMXT_OP_ENV_SIZE(maxs)
1230 IWMMXT_OP_ENV_SIZE(maxu)
1232 IWMMXT_OP_ENV_SIZE(subn)
1233 IWMMXT_OP_ENV_SIZE(addn)
1234 IWMMXT_OP_ENV_SIZE(subu)
1235 IWMMXT_OP_ENV_SIZE(addu)
1236 IWMMXT_OP_ENV_SIZE(subs)
1237 IWMMXT_OP_ENV_SIZE(adds)
1239 IWMMXT_OP_ENV(avgb0)
1240 IWMMXT_OP_ENV(avgb1)
1241 IWMMXT_OP_ENV(avgw0)
1242 IWMMXT_OP_ENV(avgw1)
1244 IWMMXT_OP(msadb)
1246 IWMMXT_OP_ENV(packuw)
1247 IWMMXT_OP_ENV(packul)
1248 IWMMXT_OP_ENV(packuq)
1249 IWMMXT_OP_ENV(packsw)
1250 IWMMXT_OP_ENV(packsl)
1251 IWMMXT_OP_ENV(packsq)
1253 static void gen_op_iwmmxt_set_mup(void)
1255 TCGv tmp;
1256 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1257 tcg_gen_ori_i32(tmp, tmp, 2);
1258 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1261 static void gen_op_iwmmxt_set_cup(void)
1263 TCGv tmp;
1264 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1265 tcg_gen_ori_i32(tmp, tmp, 1);
1266 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1269 static void gen_op_iwmmxt_setpsr_nz(void)
1271 TCGv tmp = new_tmp();
1272 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1273 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1276 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1280 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1283 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1285 int rd;
1286 uint32_t offset;
1287 TCGv tmp;
1289 rd = (insn >> 16) & 0xf;
1290 tmp = load_reg(s, rd);
1292 offset = (insn & 0xff) << ((insn >> 7) & 2);
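/* (insn >> 7) & 2 evaluates to 0 or 2, so the 8-bit immediate is scaled by
   1 or 4 depending on insn bit 8. */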
1293 if (insn & (1 << 24)) {
1294 /* Pre indexed */
1295 if (insn & (1 << 23))
1296 tcg_gen_addi_i32(tmp, tmp, offset);
1297 else
1298 tcg_gen_addi_i32(tmp, tmp, -offset);
1299 tcg_gen_mov_i32(dest, tmp);
1300 if (insn & (1 << 21))
1301 store_reg(s, rd, tmp);
1302 else
1303 dead_tmp(tmp);
1304 } else if (insn & (1 << 21)) {
1305 /* Post indexed */
1306 tcg_gen_mov_i32(dest, tmp);
1307 if (insn & (1 << 23))
1308 tcg_gen_addi_i32(tmp, tmp, offset);
1309 else
1310 tcg_gen_addi_i32(tmp, tmp, -offset);
1311 store_reg(s, rd, tmp);
1312 } else if (!(insn & (1 << 23)))
1313 return 1;
1314 return 0;
1317 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1319 int rd = (insn >> 0) & 0xf;
1320 TCGv tmp;
1322 if (insn & (1 << 8)) {
1323 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1324 return 1;
1325 } else {
1326 tmp = iwmmxt_load_creg(rd);
1328 } else {
1329 tmp = new_tmp();
1330 iwmmxt_load_reg(cpu_V0, rd);
1331 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1333 tcg_gen_andi_i32(tmp, tmp, mask);
1334 tcg_gen_mov_i32(dest, tmp);
1335 dead_tmp(tmp);
1336 return 0;
1339 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1340 (i.e. an undefined instruction). */
1341 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1343 int rd, wrd;
1344 int rdhi, rdlo, rd0, rd1, i;
1345 TCGv addr;
1346 TCGv tmp, tmp2, tmp3;
1348 if ((insn & 0x0e000e00) == 0x0c000000) {
1349 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1350 wrd = insn & 0xf;
1351 rdlo = (insn >> 12) & 0xf;
1352 rdhi = (insn >> 16) & 0xf;
1353 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1354 iwmmxt_load_reg(cpu_V0, wrd);
1355 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1356 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1357 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1358 } else { /* TMCRR */
1359 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1360 iwmmxt_store_reg(cpu_V0, wrd);
1361 gen_op_iwmmxt_set_mup();
1363 return 0;
1366 wrd = (insn >> 12) & 0xf;
1367 addr = new_tmp();
1368 if (gen_iwmmxt_address(s, insn, addr)) {
1369 dead_tmp(addr);
1370 return 1;
1372 if (insn & ARM_CP_RW_BIT) {
1373 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1374 tmp = new_tmp();
1375 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1376 iwmmxt_store_creg(wrd, tmp);
1377 } else {
1378 i = 1;
1379 if (insn & (1 << 8)) {
1380 if (insn & (1 << 22)) { /* WLDRD */
1381 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1382 i = 0;
1383 } else { /* WLDRW wRd */
1384 tmp = gen_ld32(addr, IS_USER(s));
1386 } else {
1387 if (insn & (1 << 22)) { /* WLDRH */
1388 tmp = gen_ld16u(addr, IS_USER(s));
1389 } else { /* WLDRB */
1390 tmp = gen_ld8u(addr, IS_USER(s));
1393 if (i) {
1394 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1395 dead_tmp(tmp);
1397 gen_op_iwmmxt_movq_wRn_M0(wrd);
1399 } else {
1400 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1401 tmp = iwmmxt_load_creg(wrd);
1402 gen_st32(tmp, addr, IS_USER(s));
1403 } else {
1404 gen_op_iwmmxt_movq_M0_wRn(wrd);
1405 tmp = new_tmp();
1406 if (insn & (1 << 8)) {
1407 if (insn & (1 << 22)) { /* WSTRD */
1408 dead_tmp(tmp);
1409 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1410 } else { /* WSTRW wRd */
1411 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1412 gen_st32(tmp, addr, IS_USER(s));
1414 } else {
1415 if (insn & (1 << 22)) { /* WSTRH */
1416 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1417 gen_st16(tmp, addr, IS_USER(s));
1418 } else { /* WSTRB */
1419 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1420 gen_st8(tmp, addr, IS_USER(s));
1425 dead_tmp(addr);
1426 return 0;
1429 if ((insn & 0x0f000000) != 0x0e000000)
1430 return 1;
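/* The switch key below packs insn bits [23:20] into bits [11:8] and insn
   bits [11:4] into bits [7:0] of the case value. */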
1432 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1433 case 0x000: /* WOR */
1434 wrd = (insn >> 12) & 0xf;
1435 rd0 = (insn >> 0) & 0xf;
1436 rd1 = (insn >> 16) & 0xf;
1437 gen_op_iwmmxt_movq_M0_wRn(rd0);
1438 gen_op_iwmmxt_orq_M0_wRn(rd1);
1439 gen_op_iwmmxt_setpsr_nz();
1440 gen_op_iwmmxt_movq_wRn_M0(wrd);
1441 gen_op_iwmmxt_set_mup();
1442 gen_op_iwmmxt_set_cup();
1443 break;
1444 case 0x011: /* TMCR */
1445 if (insn & 0xf)
1446 return 1;
1447 rd = (insn >> 12) & 0xf;
1448 wrd = (insn >> 16) & 0xf;
1449 switch (wrd) {
1450 case ARM_IWMMXT_wCID:
1451 case ARM_IWMMXT_wCASF:
1452 break;
1453 case ARM_IWMMXT_wCon:
1454 gen_op_iwmmxt_set_cup();
1455 /* Fall through. */
1456 case ARM_IWMMXT_wCSSF:
1457 tmp = iwmmxt_load_creg(wrd);
1458 tmp2 = load_reg(s, rd);
1459 tcg_gen_andc_i32(tmp, tmp, tmp2);
1460 dead_tmp(tmp2);
1461 iwmmxt_store_creg(wrd, tmp);
1462 break;
1463 case ARM_IWMMXT_wCGR0:
1464 case ARM_IWMMXT_wCGR1:
1465 case ARM_IWMMXT_wCGR2:
1466 case ARM_IWMMXT_wCGR3:
1467 gen_op_iwmmxt_set_cup();
1468 tmp = load_reg(s, rd);
1469 iwmmxt_store_creg(wrd, tmp);
1470 break;
1471 default:
1472 return 1;
1474 break;
1475 case 0x100: /* WXOR */
1476 wrd = (insn >> 12) & 0xf;
1477 rd0 = (insn >> 0) & 0xf;
1478 rd1 = (insn >> 16) & 0xf;
1479 gen_op_iwmmxt_movq_M0_wRn(rd0);
1480 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1481 gen_op_iwmmxt_setpsr_nz();
1482 gen_op_iwmmxt_movq_wRn_M0(wrd);
1483 gen_op_iwmmxt_set_mup();
1484 gen_op_iwmmxt_set_cup();
1485 break;
1486 case 0x111: /* TMRC */
1487 if (insn & 0xf)
1488 return 1;
1489 rd = (insn >> 12) & 0xf;
1490 wrd = (insn >> 16) & 0xf;
1491 tmp = iwmmxt_load_creg(wrd);
1492 store_reg(s, rd, tmp);
1493 break;
1494 case 0x300: /* WANDN */
1495 wrd = (insn >> 12) & 0xf;
1496 rd0 = (insn >> 0) & 0xf;
1497 rd1 = (insn >> 16) & 0xf;
1498 gen_op_iwmmxt_movq_M0_wRn(rd0);
1499 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1500 gen_op_iwmmxt_andq_M0_wRn(rd1);
1501 gen_op_iwmmxt_setpsr_nz();
1502 gen_op_iwmmxt_movq_wRn_M0(wrd);
1503 gen_op_iwmmxt_set_mup();
1504 gen_op_iwmmxt_set_cup();
1505 break;
1506 case 0x200: /* WAND */
1507 wrd = (insn >> 12) & 0xf;
1508 rd0 = (insn >> 0) & 0xf;
1509 rd1 = (insn >> 16) & 0xf;
1510 gen_op_iwmmxt_movq_M0_wRn(rd0);
1511 gen_op_iwmmxt_andq_M0_wRn(rd1);
1512 gen_op_iwmmxt_setpsr_nz();
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x810: case 0xa10: /* WMADD */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 0) & 0xf;
1520 rd1 = (insn >> 16) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1530 wrd = (insn >> 12) & 0xf;
1531 rd0 = (insn >> 16) & 0xf;
1532 rd1 = (insn >> 0) & 0xf;
1533 gen_op_iwmmxt_movq_M0_wRn(rd0);
1534 switch ((insn >> 22) & 3) {
1535 case 0:
1536 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1537 break;
1538 case 1:
1539 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1540 break;
1541 case 2:
1542 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1543 break;
1544 case 3:
1545 return 1;
1547 gen_op_iwmmxt_movq_wRn_M0(wrd);
1548 gen_op_iwmmxt_set_mup();
1549 gen_op_iwmmxt_set_cup();
1550 break;
1551 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1552 wrd = (insn >> 12) & 0xf;
1553 rd0 = (insn >> 16) & 0xf;
1554 rd1 = (insn >> 0) & 0xf;
1555 gen_op_iwmmxt_movq_M0_wRn(rd0);
1556 switch ((insn >> 22) & 3) {
1557 case 0:
1558 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1559 break;
1560 case 1:
1561 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1562 break;
1563 case 2:
1564 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1565 break;
1566 case 3:
1567 return 1;
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 16) & 0xf;
1576 rd1 = (insn >> 0) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 if (insn & (1 << 22))
1579 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1580 else
1581 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1582 if (!(insn & (1 << 20)))
1583 gen_op_iwmmxt_addl_M0_wRn(wrd);
1584 gen_op_iwmmxt_movq_wRn_M0(wrd);
1585 gen_op_iwmmxt_set_mup();
1586 break;
1587 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 16) & 0xf;
1590 rd1 = (insn >> 0) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 if (insn & (1 << 21)) {
1593 if (insn & (1 << 20))
1594 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1595 else
1596 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1597 } else {
1598 if (insn & (1 << 20))
1599 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1600 else
1601 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 break;
1606 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1607 wrd = (insn >> 12) & 0xf;
1608 rd0 = (insn >> 16) & 0xf;
1609 rd1 = (insn >> 0) & 0xf;
1610 gen_op_iwmmxt_movq_M0_wRn(rd0);
1611 if (insn & (1 << 21))
1612 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1613 else
1614 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1615 if (!(insn & (1 << 20))) {
1616 iwmmxt_load_reg(cpu_V1, wrd);
1617 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 break;
1622 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 switch ((insn >> 22) & 3) {
1628 case 0:
1629 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1630 break;
1631 case 1:
1632 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1633 break;
1634 case 2:
1635 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1636 break;
1637 case 3:
1638 return 1;
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1645 wrd = (insn >> 12) & 0xf;
1646 rd0 = (insn >> 16) & 0xf;
1647 rd1 = (insn >> 0) & 0xf;
1648 gen_op_iwmmxt_movq_M0_wRn(rd0);
1649 if (insn & (1 << 22)) {
1650 if (insn & (1 << 20))
1651 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1654 } else {
1655 if (insn & (1 << 20))
1656 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1657 else
1658 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1660 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1663 break;
1664 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 16) & 0xf;
1667 rd1 = (insn >> 0) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1670 tcg_gen_andi_i32(tmp, tmp, 7);
1671 iwmmxt_load_reg(cpu_V1, rd1);
1672 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1673 dead_tmp(tmp);
1674 gen_op_iwmmxt_movq_wRn_M0(wrd);
1675 gen_op_iwmmxt_set_mup();
1676 break;
1677 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1678 if (((insn >> 6) & 3) == 3)
1679 return 1;
1680 rd = (insn >> 12) & 0xf;
1681 wrd = (insn >> 16) & 0xf;
1682 tmp = load_reg(s, rd);
1683 gen_op_iwmmxt_movq_M0_wRn(wrd);
1684 switch ((insn >> 6) & 3) {
1685 case 0:
1686 tmp2 = tcg_const_i32(0xff);
1687 tmp3 = tcg_const_i32((insn & 7) << 3);
1688 break;
1689 case 1:
1690 tmp2 = tcg_const_i32(0xffff);
1691 tmp3 = tcg_const_i32((insn & 3) << 4);
1692 break;
1693 case 2:
1694 tmp2 = tcg_const_i32(0xffffffff);
1695 tmp3 = tcg_const_i32((insn & 1) << 5);
1696 break;
1697 default:
1698 TCGV_UNUSED(tmp2);
1699 TCGV_UNUSED(tmp3);
1701 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1702 tcg_temp_free(tmp3);
1703 tcg_temp_free(tmp2);
1704 dead_tmp(tmp);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 if (rd == 15 || ((insn >> 22) & 3) == 3)
1712 return 1;
1713 gen_op_iwmmxt_movq_M0_wRn(wrd);
1714 tmp = new_tmp();
1715 switch ((insn >> 22) & 3) {
1716 case 0:
1717 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1718 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1719 if (insn & 8) {
1720 tcg_gen_ext8s_i32(tmp, tmp);
1721 } else {
1722 tcg_gen_andi_i32(tmp, tmp, 0xff);
1724 break;
1725 case 1:
1726 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1727 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1728 if (insn & 8) {
1729 tcg_gen_ext16s_i32(tmp, tmp);
1730 } else {
1731 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1733 break;
1734 case 2:
1735 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1736 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1737 break;
1739 store_reg(s, rd, tmp);
1740 break;
1741 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1742 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1743 return 1;
1744 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1745 switch ((insn >> 22) & 3) {
1746 case 0:
1747 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1748 break;
1749 case 1:
1750 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1751 break;
1752 case 2:
1753 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1754 break;
1756 tcg_gen_shli_i32(tmp, tmp, 28);
1757 gen_set_nzcv(tmp);
1758 dead_tmp(tmp);
1759 break;
1760 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1761 if (((insn >> 6) & 3) == 3)
1762 return 1;
1763 rd = (insn >> 12) & 0xf;
1764 wrd = (insn >> 16) & 0xf;
1765 tmp = load_reg(s, rd);
1766 switch ((insn >> 6) & 3) {
1767 case 0:
1768 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1769 break;
1770 case 1:
1771 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1772 break;
1773 case 2:
1774 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1775 break;
1777 dead_tmp(tmp);
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 break;
1781 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1782 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1783 return 1;
1784 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1785 tmp2 = new_tmp();
1786 tcg_gen_mov_i32(tmp2, tmp);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 for (i = 0; i < 7; i ++) {
1790 tcg_gen_shli_i32(tmp2, tmp2, 4);
1791 tcg_gen_and_i32(tmp, tmp, tmp2);
1793 break;
1794 case 1:
1795 for (i = 0; i < 3; i ++) {
1796 tcg_gen_shli_i32(tmp2, tmp2, 8);
1797 tcg_gen_and_i32(tmp, tmp, tmp2);
1799 break;
1800 case 2:
1801 tcg_gen_shli_i32(tmp2, tmp2, 16);
1802 tcg_gen_and_i32(tmp, tmp, tmp2);
1803 break;
1805 gen_set_nzcv(tmp);
1806 dead_tmp(tmp2);
1807 dead_tmp(tmp);
1808 break;
1809 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 gen_op_iwmmxt_movq_M0_wRn(rd0);
1813 switch ((insn >> 22) & 3) {
1814 case 0:
1815 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1816 break;
1817 case 1:
1818 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1819 break;
1820 case 2:
1821 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1822 break;
1823 case 3:
1824 return 1;
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1830 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1831 return 1;
1832 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1833 tmp2 = new_tmp();
1834 tcg_gen_mov_i32(tmp2, tmp);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 for (i = 0; i < 7; i ++) {
1838 tcg_gen_shli_i32(tmp2, tmp2, 4);
1839 tcg_gen_or_i32(tmp, tmp, tmp2);
1841 break;
1842 case 1:
1843 for (i = 0; i < 3; i ++) {
1844 tcg_gen_shli_i32(tmp2, tmp2, 8);
1845 tcg_gen_or_i32(tmp, tmp, tmp2);
1847 break;
1848 case 2:
1849 tcg_gen_shli_i32(tmp2, tmp2, 16);
1850 tcg_gen_or_i32(tmp, tmp, tmp2);
1851 break;
1853 gen_set_nzcv(tmp);
1854 dead_tmp(tmp2);
1855 dead_tmp(tmp);
1856 break;
1857 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1858 rd = (insn >> 12) & 0xf;
1859 rd0 = (insn >> 16) & 0xf;
1860 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1861 return 1;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 tmp = new_tmp();
1864 switch ((insn >> 22) & 3) {
1865 case 0:
1866 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1867 break;
1868 case 1:
1869 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1870 break;
1871 case 2:
1872 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1873 break;
1875 store_reg(s, rd, tmp);
1876 break;
1877 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1878 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 16) & 0xf;
1881 rd1 = (insn >> 0) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 switch ((insn >> 22) & 3) {
1884 case 0:
1885 if (insn & (1 << 21))
1886 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1887 else
1888 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1889 break;
1890 case 1:
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 if (insn & (1 << 21))
1898 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1899 else
1900 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1910 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
1916 if (insn & (1 << 21))
1917 gen_op_iwmmxt_unpacklsb_M0();
1918 else
1919 gen_op_iwmmxt_unpacklub_M0();
1920 break;
1921 case 1:
1922 if (insn & (1 << 21))
1923 gen_op_iwmmxt_unpacklsw_M0();
1924 else
1925 gen_op_iwmmxt_unpackluw_M0();
1926 break;
1927 case 2:
1928 if (insn & (1 << 21))
1929 gen_op_iwmmxt_unpacklsl_M0();
1930 else
1931 gen_op_iwmmxt_unpacklul_M0();
1932 break;
1933 case 3:
1934 return 1;
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1941 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1942 wrd = (insn >> 12) & 0xf;
1943 rd0 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_unpackhsb_M0();
1949 else
1950 gen_op_iwmmxt_unpackhub_M0();
1951 break;
1952 case 1:
1953 if (insn & (1 << 21))
1954 gen_op_iwmmxt_unpackhsw_M0();
1955 else
1956 gen_op_iwmmxt_unpackhuw_M0();
1957 break;
1958 case 2:
1959 if (insn & (1 << 21))
1960 gen_op_iwmmxt_unpackhsl_M0();
1961 else
1962 gen_op_iwmmxt_unpackhul_M0();
1963 break;
1964 case 3:
1965 return 1;
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 gen_op_iwmmxt_set_cup();
1970 break;
1971 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1972 case 0x214: case 0x614: case 0xa14: case 0xe14:
1973 if (((insn >> 22) & 3) == 0)
1974 return 1;
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 gen_op_iwmmxt_movq_M0_wRn(rd0);
1978 tmp = new_tmp();
1979 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1980 dead_tmp(tmp);
1981 return 1;
1983 switch ((insn >> 22) & 3) {
1984 case 1:
1985 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1986 break;
1987 case 2:
1988 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1989 break;
1990 case 3:
1991 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1992 break;
1994 dead_tmp(tmp);
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2000 case 0x014: case 0x414: case 0x814: case 0xc14:
2001 if (((insn >> 22) & 3) == 0)
2002 return 1;
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 gen_op_iwmmxt_movq_M0_wRn(rd0);
2006 tmp = new_tmp();
2007 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2008 dead_tmp(tmp);
2009 return 1;
2011 switch ((insn >> 22) & 3) {
2012 case 1:
2013 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2014 break;
2015 case 2:
2016 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2017 break;
2018 case 3:
2019 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2020 break;
2022 dead_tmp(tmp);
2023 gen_op_iwmmxt_movq_wRn_M0(wrd);
2024 gen_op_iwmmxt_set_mup();
2025 gen_op_iwmmxt_set_cup();
2026 break;
2027 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2028 case 0x114: case 0x514: case 0x914: case 0xd14:
2029 if (((insn >> 22) & 3) == 0)
2030 return 1;
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 16) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 tmp = new_tmp();
2035 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2036 dead_tmp(tmp);
2037 return 1;
2039 switch ((insn >> 22) & 3) {
2040 case 1:
2041 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2042 break;
2043 case 2:
2044 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2045 break;
2046 case 3:
2047 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2048 break;
2050 dead_tmp(tmp);
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2056 case 0x314: case 0x714: case 0xb14: case 0xf14:
2057 if (((insn >> 22) & 3) == 0)
2058 return 1;
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 tmp = new_tmp();
2063 switch ((insn >> 22) & 3) {
2064 case 1:
2065 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2066 dead_tmp(tmp);
2067 return 1;
2069 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2070 break;
2071 case 2:
2072 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2073 dead_tmp(tmp);
2074 return 1;
2076 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2077 break;
2078 case 3:
2079 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2080 dead_tmp(tmp);
2081 return 1;
2083 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2084 break;
2086 dead_tmp(tmp);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2092 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 if (insn & (1 << 21))
2100 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2101 else
2102 gen_op_iwmmxt_minub_M0_wRn(rd1);
2103 break;
2104 case 1:
2105 if (insn & (1 << 21))
2106 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2107 else
2108 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2109 break;
2110 case 2:
2111 if (insn & (1 << 21))
2112 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_minul_M0_wRn(rd1);
2115 break;
2116 case 3:
2117 return 1;
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2123 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 if (insn & (1 << 21))
2131 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2132 else
2133 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2134 break;
2135 case 1:
2136 if (insn & (1 << 21))
2137 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2140 break;
2141 case 2:
2142 if (insn & (1 << 21))
2143 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2144 else
2145 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2146 break;
2147 case 3:
2148 return 1;
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2154 case 0x402: case 0x502: case 0x602: case 0x702:
2155 wrd = (insn >> 12) & 0xf;
2156 rd0 = (insn >> 16) & 0xf;
2157 rd1 = (insn >> 0) & 0xf;
2158 gen_op_iwmmxt_movq_M0_wRn(rd0);
2159 tmp = tcg_const_i32((insn >> 20) & 3);
2160 iwmmxt_load_reg(cpu_V1, rd1);
2161 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2162 tcg_temp_free(tmp);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 break;
2166 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2167 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2168 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2169 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2170 wrd = (insn >> 12) & 0xf;
2171 rd0 = (insn >> 16) & 0xf;
2172 rd1 = (insn >> 0) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 switch ((insn >> 20) & 0xf) {
2175 case 0x0:
2176 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2177 break;
2178 case 0x1:
2179 gen_op_iwmmxt_subub_M0_wRn(rd1);
2180 break;
2181 case 0x3:
2182 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2183 break;
2184 case 0x4:
2185 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2186 break;
2187 case 0x5:
2188 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2189 break;
2190 case 0x7:
2191 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2192 break;
2193 case 0x8:
2194 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2195 break;
2196 case 0x9:
2197 gen_op_iwmmxt_subul_M0_wRn(rd1);
2198 break;
2199 case 0xb:
2200 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2201 break;
2202 default:
2203 return 1;
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2210 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2211 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2212 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
2216 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2217 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2218 tcg_temp_free(tmp);
2219 gen_op_iwmmxt_movq_wRn_M0(wrd);
2220 gen_op_iwmmxt_set_mup();
2221 gen_op_iwmmxt_set_cup();
2222 break;
2223 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2224 case 0x418: case 0x518: case 0x618: case 0x718:
2225 case 0x818: case 0x918: case 0xa18: case 0xb18:
2226 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 rd1 = (insn >> 0) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 switch ((insn >> 20) & 0xf) {
2232 case 0x0:
2233 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2234 break;
2235 case 0x1:
2236 gen_op_iwmmxt_addub_M0_wRn(rd1);
2237 break;
2238 case 0x3:
2239 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2240 break;
2241 case 0x4:
2242 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2243 break;
2244 case 0x5:
2245 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2246 break;
2247 case 0x7:
2248 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2249 break;
2250 case 0x8:
2251 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2252 break;
2253 case 0x9:
2254 gen_op_iwmmxt_addul_M0_wRn(rd1);
2255 break;
2256 case 0xb:
2257 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2258 break;
2259 default:
2260 return 1;
2262 gen_op_iwmmxt_movq_wRn_M0(wrd);
2263 gen_op_iwmmxt_set_mup();
2264 gen_op_iwmmxt_set_cup();
2265 break;
2266 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2267 case 0x408: case 0x508: case 0x608: case 0x708:
2268 case 0x808: case 0x908: case 0xa08: case 0xb08:
2269 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2270 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2271 return 1;
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 22) & 3) {
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_packul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 if (insn & (1 << 21))
2291 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2292 else
2293 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2294 break;
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x201: case 0x203: case 0x205: case 0x207:
2301 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2302 case 0x211: case 0x213: case 0x215: case 0x217:
2303 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2304 wrd = (insn >> 5) & 0xf;
2305 rd0 = (insn >> 12) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 if (rd0 == 0xf || rd1 == 0xf)
2308 return 1;
2309 gen_op_iwmmxt_movq_M0_wRn(wrd);
2310 tmp = load_reg(s, rd0);
2311 tmp2 = load_reg(s, rd1);
2312 switch ((insn >> 16) & 0xf) {
2313 case 0x0: /* TMIA */
2314 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2315 break;
2316 case 0x8: /* TMIAPH */
2317 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2318 break;
2319 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2320 if (insn & (1 << 16))
2321 tcg_gen_shri_i32(tmp, tmp, 16);
2322 if (insn & (1 << 17))
2323 tcg_gen_shri_i32(tmp2, tmp2, 16);
2324 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2325 break;
2326 default:
2327 dead_tmp(tmp2);
2328 dead_tmp(tmp);
2329 return 1;
2331 dead_tmp(tmp2);
2332 dead_tmp(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 break;
2336 default:
2337 return 1;
2340 return 0;
2343 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2344 (i.e. an undefined instruction). */
2345 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2347 int acc, rd0, rd1, rdhi, rdlo;
2348 TCGv tmp, tmp2;
2350 if ((insn & 0x0ff00f10) == 0x0e200010) {
2351 /* Multiply with Internal Accumulate Format */
2352 rd0 = (insn >> 12) & 0xf;
2353 rd1 = insn & 0xf;
2354 acc = (insn >> 5) & 7;
2356 if (acc != 0)
2357 return 1;
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* MIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* MIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: /* MIABB */
2369 case 0xd: /* MIABT */
2370 case 0xe: /* MIATB */
2371 case 0xf: /* MIATT */
2372 if (insn & (1 << 16))
2373 tcg_gen_shri_i32(tmp, tmp, 16);
2374 if (insn & (1 << 17))
2375 tcg_gen_shri_i32(tmp2, tmp2, 16);
2376 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2377 break;
2378 default:
2379 return 1;
2381 dead_tmp(tmp2);
2382 dead_tmp(tmp);
2384 gen_op_iwmmxt_movq_wRn_M0(acc);
2385 return 0;
2388 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2389 /* Internal Accumulator Access Format */
2390 rdhi = (insn >> 16) & 0xf;
2391 rdlo = (insn >> 12) & 0xf;
2392 acc = insn & 7;
2394 if (acc != 0)
2395 return 1;
2397 if (insn & ARM_CP_RW_BIT) { /* MRA */
2398 iwmmxt_load_reg(cpu_V0, acc);
2399 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2400 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2401 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2402 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
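/* Note: the mask above keeps only bits 39:32 of the 40-bit XScale
 * accumulator in rdhi. */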
2403 } else { /* MAR */
2404 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2405 iwmmxt_store_reg(cpu_V0, acc);
2407 return 0;
2410 return 1;
2413 /* Disassemble system coprocessor instruction. Return nonzero if
2414 instruction is not defined. */
2415 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2417 TCGv tmp, tmp2;
2418 uint32_t rd = (insn >> 12) & 0xf;
2419 uint32_t cp = (insn >> 8) & 0xf;
2420 if (IS_USER(s)) {
2421 return 1;
2424 if (insn & ARM_CP_RW_BIT) {
2425 if (!env->cp[cp].cp_read)
2426 return 1;
2427 gen_set_pc_im(s->pc);
2428 tmp = new_tmp();
2429 tmp2 = tcg_const_i32(insn);
2430 gen_helper_get_cp(tmp, cpu_env, tmp2);
2431 tcg_temp_free(tmp2);
2432 store_reg(s, rd, tmp);
2433 } else {
2434 if (!env->cp[cp].cp_write)
2435 return 1;
2436 gen_set_pc_im(s->pc);
2437 tmp = load_reg(s, rd);
2438 tmp2 = tcg_const_i32(insn);
2439 gen_helper_set_cp(cpu_env, tmp2, tmp);
2440 tcg_temp_free(tmp2);
2441 dead_tmp(tmp);
2443 return 0;
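/* Return nonzero if a cp15 access from user mode should be allowed: only
 * the c13 TLS registers (one read/write, one read-only) and the c7 barrier
 * encodings (ISB, DSB, DMB) are permitted. */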
2446 static int cp15_user_ok(uint32_t insn)
2448 int cpn = (insn >> 16) & 0xf;
2449 int cpm = insn & 0xf;
2450 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2452 if (cpn == 13 && cpm == 0) {
2453 /* TLS register. */
2454 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2455 return 1;
2457 if (cpn == 7) {
2458 /* ISB, DSB, DMB. */
2459 if ((cpm == 5 && op == 4)
2460 || (cpm == 10 && (op == 4 || op == 5)))
2461 return 1;
2463 return 0;
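/* Handle a load from or store to one of the cp15 c13 TLS registers.
 * Returns nonzero if the access was handled here, zero if the caller
 * should fall through to the generic cp15 handling. */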
2466 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2468 TCGv tmp;
2469 int cpn = (insn >> 16) & 0xf;
2470 int cpm = insn & 0xf;
2471 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2473 if (!arm_feature(env, ARM_FEATURE_V6K))
2474 return 0;
2476 if (!(cpn == 13 && cpm == 0))
2477 return 0;
2479 if (insn & ARM_CP_RW_BIT) {
2480 switch (op) {
2481 case 2:
2482 tmp = load_cpu_field(cp15.c13_tls1);
2483 break;
2484 case 3:
2485 tmp = load_cpu_field(cp15.c13_tls2);
2486 break;
2487 case 4:
2488 tmp = load_cpu_field(cp15.c13_tls3);
2489 break;
2490 default:
2491 return 0;
2493 store_reg(s, rd, tmp);
2495 } else {
2496 tmp = load_reg(s, rd);
2497 switch (op) {
2498 case 2:
2499 store_cpu_field(tmp, cp15.c13_tls1);
2500 break;
2501 case 3:
2502 store_cpu_field(tmp, cp15.c13_tls2);
2503 break;
2504 case 4:
2505 store_cpu_field(tmp, cp15.c13_tls3);
2506 break;
2507 default:
2508 dead_tmp(tmp);
2509 return 0;
2512 return 1;
2515 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2516 instruction is not defined. */
2517 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2519 uint32_t rd;
2520 TCGv tmp, tmp2;
2522 /* M profile cores use memory mapped registers instead of cp15. */
2523 if (arm_feature(env, ARM_FEATURE_M))
2524 return 1;
2526 if ((insn & (1 << 25)) == 0) {
2527 if (insn & (1 << 20)) {
2528 /* mrrc */
2529 return 1;
2531 /* mcrr. Used for block cache operations, so implement as no-op. */
2532 return 0;
2534 if ((insn & (1 << 4)) == 0) {
2535 /* cdp */
2536 return 1;
2538 if (IS_USER(s) && !cp15_user_ok(insn)) {
2539 return 1;
2542 /* Pre-v7 versions of the architecture implemented WFI via coprocessor
2543 * instructions rather than a separate instruction.
2545 if ((insn & 0x0fff0fff) == 0x0e070f90) {
2546 /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
2547 * In v7, this must NOP.
2549 if (!arm_feature(env, ARM_FEATURE_V7)) {
2550 /* Wait for interrupt. */
2551 gen_set_pc_im(s->pc);
2552 s->is_jmp = DISAS_WFI;
2554 return 0;
2557 if ((insn & 0x0fff0fff) == 0x0e070f58) {
2558 /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
2559 * so this is slightly over-broad.
2561 if (!arm_feature(env, ARM_FEATURE_V6)) {
2562 /* Wait for interrupt. */
2563 gen_set_pc_im(s->pc);
2564 s->is_jmp = DISAS_WFI;
2565 return 0;
2567 /* Otherwise fall through to handle via helper function.
2568 * In particular, on v7 and some v6 cores this is one of
2569 * the VA-PA registers.
2573 rd = (insn >> 12) & 0xf;
2575 if (cp15_tls_load_store(env, s, insn, rd))
2576 return 0;
2578 tmp2 = tcg_const_i32(insn);
2579 if (insn & ARM_CP_RW_BIT) {
2580 tmp = new_tmp();
2581 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2582 /* If the destination register is r15 then set the condition codes. */
2583 if (rd != 15)
2584 store_reg(s, rd, tmp);
2585 else
2586 dead_tmp(tmp);
2587 } else {
2588 tmp = load_reg(s, rd);
2589 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2590 dead_tmp(tmp);
2591 /* Normally we would always end the TB here, but Linux
2592 * arch/arm/mach-pxa/sleep.S expects two instructions following
2593 * an MMU enable to execute from cache. Imitate this behaviour. */
2594 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2595 (insn & 0x0fff0fff) != 0x0e010f10)
2596 gen_lookup_tb(s);
2598 tcg_temp_free_i32(tmp2);
2599 return 0;
2602 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2603 #define VFP_SREG(insn, bigbit, smallbit) \
2604 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2605 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2606 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2607 reg = (((insn) >> (bigbit)) & 0x0f) \
2608 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2609 } else { \
2610 if (insn & (1 << (smallbit))) \
2611 return 1; \
2612 reg = ((insn) >> (bigbit)) & 0x0f; \
2613 }} while (0)
2615 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2616 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2617 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2618 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2619 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2620 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
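/* For example, on a VFP3 core VFP_DREG_D gives the full double register
 * number d = insn[15:12] | (insn[22] << 4), while the single precision
 * VFP_SREG_D gives s = (insn[15:12] << 1) | insn[22]: the "small" bit is
 * the top bit of a D register number but the bottom bit of an S register
 * number. */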
2622 /* Move between integer and VFP cores. */
2623 static TCGv gen_vfp_mrs(void)
2625 TCGv tmp = new_tmp();
2626 tcg_gen_mov_i32(tmp, cpu_F0s);
2627 return tmp;
2630 static void gen_vfp_msr(TCGv tmp)
2632 tcg_gen_mov_i32(cpu_F0s, tmp);
2633 dead_tmp(tmp);
2636 static void gen_neon_dup_u8(TCGv var, int shift)
2638 TCGv tmp = new_tmp();
2639 if (shift)
2640 tcg_gen_shri_i32(var, var, shift);
2641 tcg_gen_ext8u_i32(var, var);
2642 tcg_gen_shli_i32(tmp, var, 8);
2643 tcg_gen_or_i32(var, var, tmp);
2644 tcg_gen_shli_i32(tmp, var, 16);
2645 tcg_gen_or_i32(var, var, tmp);
2646 dead_tmp(tmp);
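/* e.g. gen_neon_dup_u8(var, 0) turns 0x000000ab into 0xabababab. */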
2649 static void gen_neon_dup_low16(TCGv var)
2651 TCGv tmp = new_tmp();
2652 tcg_gen_ext16u_i32(var, var);
2653 tcg_gen_shli_i32(tmp, var, 16);
2654 tcg_gen_or_i32(var, var, tmp);
2655 dead_tmp(tmp);
2658 static void gen_neon_dup_high16(TCGv var)
2660 TCGv tmp = new_tmp();
2661 tcg_gen_andi_i32(var, var, 0xffff0000);
2662 tcg_gen_shri_i32(tmp, var, 16);
2663 tcg_gen_or_i32(var, var, tmp);
2664 dead_tmp(tmp);
2667 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2668 (i.e. an undefined instruction). */
2669 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2671 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2672 int dp, veclen;
2673 TCGv addr;
2674 TCGv tmp;
2675 TCGv tmp2;
2677 if (!arm_feature(env, ARM_FEATURE_VFP))
2678 return 1;
2680 if (!s->vfp_enabled) {
2681 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2682 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2683 return 1;
2684 rn = (insn >> 16) & 0xf;
2685 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2686 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2687 return 1;
2689 dp = ((insn & 0xf00) == 0xb00);
2690 switch ((insn >> 24) & 0xf) {
2691 case 0xe:
2692 if (insn & (1 << 4)) {
2693 /* single register transfer */
2694 rd = (insn >> 12) & 0xf;
2695 if (dp) {
2696 int size;
2697 int pass;
2699 VFP_DREG_N(rn, insn);
2700 if (insn & 0xf)
2701 return 1;
2702 if (insn & 0x00c00060
2703 && !arm_feature(env, ARM_FEATURE_NEON))
2704 return 1;
2706 pass = (insn >> 21) & 1;
2707 if (insn & (1 << 22)) {
2708 size = 0;
2709 offset = ((insn >> 5) & 3) * 8;
2710 } else if (insn & (1 << 5)) {
2711 size = 1;
2712 offset = (insn & (1 << 6)) ? 16 : 0;
2713 } else {
2714 size = 2;
2715 offset = 0;
2717 if (insn & ARM_CP_RW_BIT) {
2718 /* vfp->arm */
2719 tmp = neon_load_reg(rn, pass);
2720 switch (size) {
2721 case 0:
2722 if (offset)
2723 tcg_gen_shri_i32(tmp, tmp, offset);
2724 if (insn & (1 << 23))
2725 gen_uxtb(tmp);
2726 else
2727 gen_sxtb(tmp);
2728 break;
2729 case 1:
2730 if (insn & (1 << 23)) {
2731 if (offset) {
2732 tcg_gen_shri_i32(tmp, tmp, 16);
2733 } else {
2734 gen_uxth(tmp);
2736 } else {
2737 if (offset) {
2738 tcg_gen_sari_i32(tmp, tmp, 16);
2739 } else {
2740 gen_sxth(tmp);
2743 break;
2744 case 2:
2745 break;
2747 store_reg(s, rd, tmp);
2748 } else {
2749 /* arm->vfp */
2750 tmp = load_reg(s, rd);
2751 if (insn & (1 << 23)) {
2752 /* VDUP */
2753 if (size == 0) {
2754 gen_neon_dup_u8(tmp, 0);
2755 } else if (size == 1) {
2756 gen_neon_dup_low16(tmp);
2758 for (n = 0; n <= pass * 2; n++) {
2759 tmp2 = new_tmp();
2760 tcg_gen_mov_i32(tmp2, tmp);
2761 neon_store_reg(rn, n, tmp2);
2763 neon_store_reg(rn, n, tmp);
2764 } else {
2765 /* VMOV */
2766 switch (size) {
2767 case 0:
2768 tmp2 = neon_load_reg(rn, pass);
2769 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2770 dead_tmp(tmp2);
2771 break;
2772 case 1:
2773 tmp2 = neon_load_reg(rn, pass);
2774 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2775 dead_tmp(tmp2);
2776 break;
2777 case 2:
2778 break;
2780 neon_store_reg(rn, pass, tmp);
2783 } else { /* !dp */
2784 if ((insn & 0x6f) != 0x00)
2785 return 1;
2786 rn = VFP_SREG_N(insn);
2787 if (insn & ARM_CP_RW_BIT) {
2788 /* vfp->arm */
2789 if (insn & (1 << 21)) {
2790 /* system register */
2791 rn >>= 1;
2793 switch (rn) {
2794 case ARM_VFP_FPSID:
2795 /* VFP2 allows access to FPSID from userspace.
2796 VFP3 restricts all id registers to privileged
2797 accesses. */
2798 if (IS_USER(s)
2799 && arm_feature(env, ARM_FEATURE_VFP3))
2800 return 1;
2801 tmp = load_cpu_field(vfp.xregs[rn]);
2802 break;
2803 case ARM_VFP_FPEXC:
2804 if (IS_USER(s))
2805 return 1;
2806 tmp = load_cpu_field(vfp.xregs[rn]);
2807 break;
2808 case ARM_VFP_FPINST:
2809 case ARM_VFP_FPINST2:
2810 /* Not present in VFP3. */
2811 if (IS_USER(s)
2812 || arm_feature(env, ARM_FEATURE_VFP3))
2813 return 1;
2814 tmp = load_cpu_field(vfp.xregs[rn]);
2815 break;
2816 case ARM_VFP_FPSCR:
2817 if (rd == 15) {
2818 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2819 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2820 } else {
2821 tmp = new_tmp();
2822 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2824 break;
2825 case ARM_VFP_MVFR0:
2826 case ARM_VFP_MVFR1:
2827 if (IS_USER(s)
2828 || !arm_feature(env, ARM_FEATURE_VFP3))
2829 return 1;
2830 tmp = load_cpu_field(vfp.xregs[rn]);
2831 break;
2832 default:
2833 return 1;
2835 } else {
2836 gen_mov_F0_vreg(0, rn);
2837 tmp = gen_vfp_mrs();
2839 if (rd == 15) {
2840 /* Set the 4 flag bits in the CPSR. */
2841 gen_set_nzcv(tmp);
2842 dead_tmp(tmp);
2843 } else {
2844 store_reg(s, rd, tmp);
2846 } else {
2847 /* arm->vfp */
2848 tmp = load_reg(s, rd);
2849 if (insn & (1 << 21)) {
2850 rn >>= 1;
2851 /* system register */
2852 switch (rn) {
2853 case ARM_VFP_FPSID:
2854 case ARM_VFP_MVFR0:
2855 case ARM_VFP_MVFR1:
2856 /* Writes are ignored. */
2857 break;
2858 case ARM_VFP_FPSCR:
2859 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2860 dead_tmp(tmp);
2861 gen_lookup_tb(s);
2862 break;
2863 case ARM_VFP_FPEXC:
2864 if (IS_USER(s))
2865 return 1;
2866 /* TODO: VFP subarchitecture support.
2867 * For now, keep the EN bit only */
2868 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2869 store_cpu_field(tmp, vfp.xregs[rn]);
2870 gen_lookup_tb(s);
2871 break;
2872 case ARM_VFP_FPINST:
2873 case ARM_VFP_FPINST2:
2874 store_cpu_field(tmp, vfp.xregs[rn]);
2875 break;
2876 default:
2877 return 1;
2879 } else {
2880 gen_vfp_msr(tmp);
2881 gen_mov_vreg_F0(0, rn);
2885 } else {
2886 /* data processing */
2887 /* The opcode is in bits 23, 21, 20 and 6. */
2888 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
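/* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6]. */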
2889 if (dp) {
2890 if (op == 15) {
2891 /* rn is opcode */
2892 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2893 } else {
2894 /* rn is register number */
2895 VFP_DREG_N(rn, insn);
2898 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2899 /* Integer or single precision destination. */
2900 rd = VFP_SREG_D(insn);
2901 } else {
2902 VFP_DREG_D(rd, insn);
2904 if (op == 15 &&
2905 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2906 /* VCVT from int is always from S reg regardless of dp bit.
2907 * VCVT with immediate frac_bits has same format as SREG_M
2909 rm = VFP_SREG_M(insn);
2910 } else {
2911 VFP_DREG_M(rm, insn);
2913 } else {
2914 rn = VFP_SREG_N(insn);
2915 if (op == 15 && rn == 15) {
2916 /* Double precision destination. */
2917 VFP_DREG_D(rd, insn);
2918 } else {
2919 rd = VFP_SREG_D(insn);
2921 /* NB that we implicitly rely on the encoding for the frac_bits
2922 * in VCVT of fixed to float being the same as that of an SREG_M
2924 rm = VFP_SREG_M(insn);
2927 veclen = s->vec_len;
2928 if (op == 15 && rn > 3)
2929 veclen = 0;
2931 /* Shut up compiler warnings. */
2932 delta_m = 0;
2933 delta_d = 0;
2934 bank_mask = 0;
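/* VFP short vectors: a destination in bank 0 makes the operation scalar;
 * otherwise it is repeated veclen times, stepping the destination (and any
 * vector source) through its register bank by the configured stride and
 * wrapping within the bank. */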
2936 if (veclen > 0) {
2937 if (dp)
2938 bank_mask = 0xc;
2939 else
2940 bank_mask = 0x18;
2942 /* Figure out what type of vector operation this is. */
2943 if ((rd & bank_mask) == 0) {
2944 /* scalar */
2945 veclen = 0;
2946 } else {
2947 if (dp)
2948 delta_d = (s->vec_stride >> 1) + 1;
2949 else
2950 delta_d = s->vec_stride + 1;
2952 if ((rm & bank_mask) == 0) {
2953 /* mixed scalar/vector */
2954 delta_m = 0;
2955 } else {
2956 /* vector */
2957 delta_m = delta_d;
2962 /* Load the initial operands. */
2963 if (op == 15) {
2964 switch (rn) {
2965 case 16:
2966 case 17:
2967 /* Integer source */
2968 gen_mov_F0_vreg(0, rm);
2969 break;
2970 case 8:
2971 case 9:
2972 /* Compare */
2973 gen_mov_F0_vreg(dp, rd);
2974 gen_mov_F1_vreg(dp, rm);
2975 break;
2976 case 10:
2977 case 11:
2978 /* Compare with zero */
2979 gen_mov_F0_vreg(dp, rd);
2980 gen_vfp_F1_ld0(dp);
2981 break;
2982 case 20:
2983 case 21:
2984 case 22:
2985 case 23:
2986 case 28:
2987 case 29:
2988 case 30:
2989 case 31:
2990 /* Source and destination the same. */
2991 gen_mov_F0_vreg(dp, rd);
2992 break;
2993 default:
2994 /* One source operand. */
2995 gen_mov_F0_vreg(dp, rm);
2996 break;
2998 } else {
2999 /* Two source operands. */
3000 gen_mov_F0_vreg(dp, rn);
3001 gen_mov_F1_vreg(dp, rm);
3004 for (;;) {
3005 /* Perform the calculation. */
3006 switch (op) {
3007 case 0: /* mac: fd + (fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_mov_F1_vreg(dp, rd);
3010 gen_vfp_add(dp);
3011 break;
3012 case 1: /* nmac: fd - (fn * fm) */
3013 gen_vfp_mul(dp);
3014 gen_vfp_neg(dp);
3015 gen_mov_F1_vreg(dp, rd);
3016 gen_vfp_add(dp);
3017 break;
3018 case 2: /* msc: -fd + (fn * fm) */
3019 gen_vfp_mul(dp);
3020 gen_mov_F1_vreg(dp, rd);
3021 gen_vfp_sub(dp);
3022 break;
3023 case 3: /* nmsc: -fd - (fn * fm) */
3024 gen_vfp_mul(dp);
3025 gen_vfp_neg(dp);
3026 gen_mov_F1_vreg(dp, rd);
3027 gen_vfp_sub(dp);
3028 break;
3029 case 4: /* mul: fn * fm */
3030 gen_vfp_mul(dp);
3031 break;
3032 case 5: /* nmul: -(fn * fm) */
3033 gen_vfp_mul(dp);
3034 gen_vfp_neg(dp);
3035 break;
3036 case 6: /* add: fn + fm */
3037 gen_vfp_add(dp);
3038 break;
3039 case 7: /* sub: fn - fm */
3040 gen_vfp_sub(dp);
3041 break;
3042 case 8: /* div: fn / fm */
3043 gen_vfp_div(dp);
3044 break;
3045 case 14: /* fconst */
3046 if (!arm_feature(env, ARM_FEATURE_VFP3))
3047 return 1;
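/* VFP3 VMOV immediate: the 8-bit constant insn[19:16]:insn[3:0] is expanded
 * below into a full float value; insn[19] supplies the sign, insn[18] is
 * inverted and replicated to form the top exponent bits, and the remaining
 * bits become the exponent LSBs and the fraction MSBs. */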
3049 n = (insn << 12) & 0x80000000;
3050 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3051 if (dp) {
3052 if (i & 0x40)
3053 i |= 0x3f80;
3054 else
3055 i |= 0x4000;
3056 n |= i << 16;
3057 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3058 } else {
3059 if (i & 0x40)
3060 i |= 0x780;
3061 else
3062 i |= 0x800;
3063 n |= i << 19;
3064 tcg_gen_movi_i32(cpu_F0s, n);
3066 break;
3067 case 15: /* extension space */
3068 switch (rn) {
3069 case 0: /* cpy */
3070 /* no-op */
3071 break;
3072 case 1: /* abs */
3073 gen_vfp_abs(dp);
3074 break;
3075 case 2: /* neg */
3076 gen_vfp_neg(dp);
3077 break;
3078 case 3: /* sqrt */
3079 gen_vfp_sqrt(dp);
3080 break;
3081 case 4: /* vcvtb.f32.f16 */
3082 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3083 return 1;
3084 tmp = gen_vfp_mrs();
3085 tcg_gen_ext16u_i32(tmp, tmp);
3086 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3087 dead_tmp(tmp);
3088 break;
3089 case 5: /* vcvtt.f32.f16 */
3090 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3091 return 1;
3092 tmp = gen_vfp_mrs();
3093 tcg_gen_shri_i32(tmp, tmp, 16);
3094 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3095 dead_tmp(tmp);
3096 break;
3097 case 6: /* vcvtb.f16.f32 */
3098 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3099 return 1;
3100 tmp = new_tmp();
3101 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3102 gen_mov_F0_vreg(0, rd);
3103 tmp2 = gen_vfp_mrs();
3104 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3105 tcg_gen_or_i32(tmp, tmp, tmp2);
3106 dead_tmp(tmp2);
3107 gen_vfp_msr(tmp);
3108 break;
3109 case 7: /* vcvtt.f16.f32 */
3110 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3111 return 1;
3112 tmp = new_tmp();
3113 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3114 tcg_gen_shli_i32(tmp, tmp, 16);
3115 gen_mov_F0_vreg(0, rd);
3116 tmp2 = gen_vfp_mrs();
3117 tcg_gen_ext16u_i32(tmp2, tmp2);
3118 tcg_gen_or_i32(tmp, tmp, tmp2);
3119 dead_tmp(tmp2);
3120 gen_vfp_msr(tmp);
3121 break;
3122 case 8: /* cmp */
3123 gen_vfp_cmp(dp);
3124 break;
3125 case 9: /* cmpe */
3126 gen_vfp_cmpe(dp);
3127 break;
3128 case 10: /* cmpz */
3129 gen_vfp_cmp(dp);
3130 break;
3131 case 11: /* cmpez */
3132 gen_vfp_F1_ld0(dp);
3133 gen_vfp_cmpe(dp);
3134 break;
3135 case 15: /* single<->double conversion */
3136 if (dp)
3137 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3138 else
3139 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3140 break;
3141 case 16: /* fuito */
3142 gen_vfp_uito(dp);
3143 break;
3144 case 17: /* fsito */
3145 gen_vfp_sito(dp);
3146 break;
3147 case 20: /* fshto */
3148 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 return 1;
3150 gen_vfp_shto(dp, 16 - rm);
3151 break;
3152 case 21: /* fslto */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
3155 gen_vfp_slto(dp, 32 - rm);
3156 break;
3157 case 22: /* fuhto */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
3160 gen_vfp_uhto(dp, 16 - rm);
3161 break;
3162 case 23: /* fulto */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
3165 gen_vfp_ulto(dp, 32 - rm);
3166 break;
3167 case 24: /* ftoui */
3168 gen_vfp_toui(dp);
3169 break;
3170 case 25: /* ftouiz */
3171 gen_vfp_touiz(dp);
3172 break;
3173 case 26: /* ftosi */
3174 gen_vfp_tosi(dp);
3175 break;
3176 case 27: /* ftosiz */
3177 gen_vfp_tosiz(dp);
3178 break;
3179 case 28: /* ftosh */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_tosh(dp, 16 - rm);
3183 break;
3184 case 29: /* ftosl */
3185 if (!arm_feature(env, ARM_FEATURE_VFP3))
3186 return 1;
3187 gen_vfp_tosl(dp, 32 - rm);
3188 break;
3189 case 30: /* ftouh */
3190 if (!arm_feature(env, ARM_FEATURE_VFP3))
3191 return 1;
3192 gen_vfp_touh(dp, 16 - rm);
3193 break;
3194 case 31: /* ftoul */
3195 if (!arm_feature(env, ARM_FEATURE_VFP3))
3196 return 1;
3197 gen_vfp_toul(dp, 32 - rm);
3198 break;
3199 default: /* undefined */
3200 printf ("rn:%d\n", rn);
3201 return 1;
3203 break;
3204 default: /* undefined */
3205 printf ("op:%d\n", op);
3206 return 1;
3209 /* Write back the result. */
3210 if (op == 15 && (rn >= 8 && rn <= 11))
3211 ; /* Comparison, do nothing. */
3212 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3213 /* VCVT double to int: always integer result. */
3214 gen_mov_vreg_F0(0, rd);
3215 else if (op == 15 && rn == 15)
3216 /* conversion */
3217 gen_mov_vreg_F0(!dp, rd);
3218 else
3219 gen_mov_vreg_F0(dp, rd);
3221 /* break out of the loop if we have finished */
3222 if (veclen == 0)
3223 break;
3225 if (op == 15 && delta_m == 0) {
3226 /* single source one-many */
3227 while (veclen--) {
3228 rd = ((rd + delta_d) & (bank_mask - 1))
3229 | (rd & bank_mask);
3230 gen_mov_vreg_F0(dp, rd);
3232 break;
3234 /* Set up the next operands. */
3235 veclen--;
3236 rd = ((rd + delta_d) & (bank_mask - 1))
3237 | (rd & bank_mask);
3239 if (op == 15) {
3240 /* One source operand. */
3241 rm = ((rm + delta_m) & (bank_mask - 1))
3242 | (rm & bank_mask);
3243 gen_mov_F0_vreg(dp, rm);
3244 } else {
3245 /* Two source operands. */
3246 rn = ((rn + delta_d) & (bank_mask - 1))
3247 | (rn & bank_mask);
3248 gen_mov_F0_vreg(dp, rn);
3249 if (delta_m) {
3250 rm = ((rm + delta_m) & (bank_mask - 1))
3251 | (rm & bank_mask);
3252 gen_mov_F1_vreg(dp, rm);
3257 break;
3258 case 0xc:
3259 case 0xd:
3260 if ((insn & 0x03e00000) == 0x00400000) {
3261 /* two-register transfer */
3262 rn = (insn >> 16) & 0xf;
3263 rd = (insn >> 12) & 0xf;
3264 if (dp) {
3265 VFP_DREG_M(rm, insn);
3266 } else {
3267 rm = VFP_SREG_M(insn);
3270 if (insn & ARM_CP_RW_BIT) {
3271 /* vfp->arm */
3272 if (dp) {
3273 gen_mov_F0_vreg(0, rm * 2);
3274 tmp = gen_vfp_mrs();
3275 store_reg(s, rd, tmp);
3276 gen_mov_F0_vreg(0, rm * 2 + 1);
3277 tmp = gen_vfp_mrs();
3278 store_reg(s, rn, tmp);
3279 } else {
3280 gen_mov_F0_vreg(0, rm);
3281 tmp = gen_vfp_mrs();
3282 store_reg(s, rd, tmp);
3283 gen_mov_F0_vreg(0, rm + 1);
3284 tmp = gen_vfp_mrs();
3285 store_reg(s, rn, tmp);
3287 } else {
3288 /* arm->vfp */
3289 if (dp) {
3290 tmp = load_reg(s, rd);
3291 gen_vfp_msr(tmp);
3292 gen_mov_vreg_F0(0, rm * 2);
3293 tmp = load_reg(s, rn);
3294 gen_vfp_msr(tmp);
3295 gen_mov_vreg_F0(0, rm * 2 + 1);
3296 } else {
3297 tmp = load_reg(s, rd);
3298 gen_vfp_msr(tmp);
3299 gen_mov_vreg_F0(0, rm);
3300 tmp = load_reg(s, rn);
3301 gen_vfp_msr(tmp);
3302 gen_mov_vreg_F0(0, rm + 1);
3305 } else {
3306 /* Load/store */
3307 rn = (insn >> 16) & 0xf;
3308 if (dp)
3309 VFP_DREG_D(rd, insn);
3310 else
3311 rd = VFP_SREG_D(insn);
3312 if (s->thumb && rn == 15) {
3313 addr = new_tmp();
3314 tcg_gen_movi_i32(addr, s->pc & ~2);
3315 } else {
3316 addr = load_reg(s, rn);
3318 if ((insn & 0x01200000) == 0x01000000) {
3319 /* Single load/store */
3320 offset = (insn & 0xff) << 2;
3321 if ((insn & (1 << 23)) == 0)
3322 offset = -offset;
3323 tcg_gen_addi_i32(addr, addr, offset);
3324 if (insn & (1 << 20)) {
3325 gen_vfp_ld(s, dp, addr);
3326 gen_mov_vreg_F0(dp, rd);
3327 } else {
3328 gen_mov_F0_vreg(dp, rd);
3329 gen_vfp_st(s, dp, addr);
3331 dead_tmp(addr);
3332 } else {
3333 /* load/store multiple */
3334 if (dp)
3335 n = (insn >> 1) & 0x7f;
3336 else
3337 n = insn & 0xff;
3339 if (insn & (1 << 24)) /* pre-decrement */
3340 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3342 if (dp)
3343 offset = 8;
3344 else
3345 offset = 4;
3346 for (i = 0; i < n; i++) {
3347 if (insn & ARM_CP_RW_BIT) {
3348 /* load */
3349 gen_vfp_ld(s, dp, addr);
3350 gen_mov_vreg_F0(dp, rd + i);
3351 } else {
3352 /* store */
3353 gen_mov_F0_vreg(dp, rd + i);
3354 gen_vfp_st(s, dp, addr);
3356 tcg_gen_addi_i32(addr, addr, offset);
3358 if (insn & (1 << 21)) {
3359 /* writeback */
3360 if (insn & (1 << 24))
3361 offset = -offset * n;
3362 else if (dp && (insn & 1))
3363 offset = 4;
3364 else
3365 offset = 0;
3367 if (offset != 0)
3368 tcg_gen_addi_i32(addr, addr, offset);
3369 store_reg(s, rn, addr);
3370 } else {
3371 dead_tmp(addr);
3375 break;
3376 default:
3377 /* Should never happen. */
3378 return 1;
3380 return 0;
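/* Emit a jump to a fixed destination: if it lies in the same guest page as
 * this TB we can use a direct (chainable) goto_tb exit, otherwise we must
 * update the PC and exit to the main loop. */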
3383 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3385 TranslationBlock *tb;
3387 tb = s->tb;
3388 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3389 tcg_gen_goto_tb(n);
3390 gen_set_pc_im(dest);
3391 tcg_gen_exit_tb((long)tb + n);
3392 } else {
3393 gen_set_pc_im(dest);
3394 tcg_gen_exit_tb(0);
3398 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3400 if (unlikely(s->singlestep_enabled)) {
3401 /* An indirect jump so that we still trigger the debug exception. */
3402 if (s->thumb)
3403 dest |= 1;
3404 gen_bx_im(s, dest);
3405 } else {
3406 gen_goto_tb(s, 0, dest);
3407 s->is_jmp = DISAS_TB_JUMP;
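/* Take the top halfword (x/y set) or the sign-extended bottom halfword of
 * each operand and multiply them; used for the SMULxy-style signed 16x16
 * multiplies. */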
3411 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3413 if (x)
3414 tcg_gen_sari_i32(t0, t0, 16);
3415 else
3416 gen_sxth(t0);
3417 if (y)
3418 tcg_gen_sari_i32(t1, t1, 16);
3419 else
3420 gen_sxth(t1);
3421 tcg_gen_mul_i32(t0, t0, t1);
3424 /* Return the mask of PSR bits set by a MSR instruction. */
3425 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3426 uint32_t mask;
3428 mask = 0;
3429 if (flags & (1 << 0))
3430 mask |= 0xff;
3431 if (flags & (1 << 1))
3432 mask |= 0xff00;
3433 if (flags & (1 << 2))
3434 mask |= 0xff0000;
3435 if (flags & (1 << 3))
3436 mask |= 0xff000000;
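/* e.g. flags == 9 selects bytes 0 and 3, giving mask == 0xff0000ff before
 * the feature and privilege filtering below. */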
3438 /* Mask out undefined bits. */
3439 mask &= ~CPSR_RESERVED;
3440 if (!arm_feature(env, ARM_FEATURE_V6))
3441 mask &= ~(CPSR_E | CPSR_GE);
3442 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3443 mask &= ~CPSR_IT;
3444 /* Mask out execution state bits. */
3445 if (!spsr)
3446 mask &= ~CPSR_EXEC;
3447 /* Mask out privileged bits. */
3448 if (IS_USER(s))
3449 mask &= CPSR_USER;
3450 return mask;
3453 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3454 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3456 TCGv tmp;
3457 if (spsr) {
3458 /* ??? This is also undefined in system mode. */
3459 if (IS_USER(s))
3460 return 1;
3462 tmp = load_cpu_field(spsr);
3463 tcg_gen_andi_i32(tmp, tmp, ~mask);
3464 tcg_gen_andi_i32(t0, t0, mask);
3465 tcg_gen_or_i32(tmp, tmp, t0);
3466 store_cpu_field(tmp, spsr);
3467 } else {
3468 gen_set_cpsr(t0, mask);
3470 dead_tmp(t0);
3471 gen_lookup_tb(s);
3472 return 0;
3475 /* Returns nonzero if access to the PSR is not permitted. */
3476 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3478 TCGv tmp;
3479 tmp = new_tmp();
3480 tcg_gen_movi_i32(tmp, val);
3481 return gen_set_psr(s, mask, spsr, tmp);
3484 /* Generate an old-style exception return. Marks pc as dead. */
3485 static void gen_exception_return(DisasContext *s, TCGv pc)
3487 TCGv tmp;
3488 store_reg(s, 15, pc);
3489 tmp = load_cpu_field(spsr);
3490 gen_set_cpsr(tmp, 0xffffffff);
3491 dead_tmp(tmp);
3492 s->is_jmp = DISAS_UPDATE;
3495 /* Generate a v6 exception return. Marks both values as dead. */
3496 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3498 gen_set_cpsr(cpsr, 0xffffffff);
3499 dead_tmp(cpsr);
3500 store_reg(s, 15, pc);
3501 s->is_jmp = DISAS_UPDATE;
3504 static inline void
3505 gen_set_condexec (DisasContext *s)
3507 if (s->condexec_mask) {
3508 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3509 TCGv tmp = new_tmp();
3510 tcg_gen_movi_i32(tmp, val);
3511 store_cpu_field(tmp, condexec_bits);
3515 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3517 gen_set_condexec(s);
3518 gen_set_pc_im(s->pc - offset);
3519 gen_exception(excp);
3520 s->is_jmp = DISAS_JUMP;
3523 static void gen_nop_hint(DisasContext *s, int val)
3525 switch (val) {
3526 case 3: /* wfi */
3527 gen_set_pc_im(s->pc);
3528 s->is_jmp = DISAS_WFI;
3529 break;
3530 case 2: /* wfe */
3531 case 4: /* sev */
3532 /* TODO: Implement SEV and WFE. May help SMP performance. */
3533 default: /* nop */
3534 break;
3538 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3540 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3542 switch (size) {
3543 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3544 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3545 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3546 default: return 1;
3548 return 0;
3551 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3553 switch (size) {
3554 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3555 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3556 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3557 default: return;
3561 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3562 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3563 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3564 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3565 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
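/* (For pairwise ops the translator loads the two elements of each pair into
 * separate 32-bit temporaries itself, so at 32-bit element size the
 * operation reduces to a plain two-operand max/min.) */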
3567 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3568 switch ((size << 1) | u) { \
3569 case 0: \
3570 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3571 break; \
3572 case 1: \
3573 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3574 break; \
3575 case 2: \
3576 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3577 break; \
3578 case 3: \
3579 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3580 break; \
3581 case 4: \
3582 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3583 break; \
3584 case 5: \
3585 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3586 break; \
3587 default: return 1; \
3588 }} while (0)
3590 #define GEN_NEON_INTEGER_OP(name) do { \
3591 switch ((size << 1) | u) { \
3592 case 0: \
3593 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3594 break; \
3595 case 1: \
3596 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3597 break; \
3598 case 2: \
3599 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3600 break; \
3601 case 3: \
3602 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3603 break; \
3604 case 4: \
3605 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3606 break; \
3607 case 5: \
3608 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3609 break; \
3610 default: return 1; \
3611 }} while (0)
3613 static TCGv neon_load_scratch(int scratch)
3615 TCGv tmp = new_tmp();
3616 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3617 return tmp;
3620 static void neon_store_scratch(int scratch, TCGv var)
3622 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3623 dead_tmp(var);
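/* Fetch the scalar operand for a NEON "by scalar" operation: for 16-bit
 * scalars reg[2:0] selects the D register and reg[4:3] the element, which
 * is then broadcast across the 32-bit temporary; for 32-bit scalars
 * reg[3:0] selects the register and reg[4] the element. */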
3626 static inline TCGv neon_get_scalar(int size, int reg)
3628 TCGv tmp;
3629 if (size == 1) {
3630 tmp = neon_load_reg(reg & 7, reg >> 4);
3631 if (reg & 8) {
3632 gen_neon_dup_high16(tmp);
3633 } else {
3634 gen_neon_dup_low16(tmp);
3636 } else {
3637 tmp = neon_load_reg(reg & 15, reg >> 4);
3639 return tmp;
3642 static int gen_neon_unzip(int rd, int rm, int size, int q)
3644 TCGv tmp, tmp2;
3645 if (size == 3 || (!q && size == 2)) {
3646 return 1;
3648 tmp = tcg_const_i32(rd);
3649 tmp2 = tcg_const_i32(rm);
3650 if (q) {
3651 switch (size) {
3652 case 0:
3653 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3654 break;
3655 case 1:
3656 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3657 break;
3658 case 2:
3659 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3660 break;
3661 default:
3662 abort();
3664 } else {
3665 switch (size) {
3666 case 0:
3667 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3668 break;
3669 case 1:
3670 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3671 break;
3672 default:
3673 abort();
3676 tcg_temp_free_i32(tmp);
3677 tcg_temp_free_i32(tmp2);
3678 return 0;
3681 static int gen_neon_zip(int rd, int rm, int size, int q)
3683 TCGv tmp, tmp2;
3684 if (size == 3 || (!q && size == 2)) {
3685 return 1;
3687 tmp = tcg_const_i32(rd);
3688 tmp2 = tcg_const_i32(rm);
3689 if (q) {
3690 switch (size) {
3691 case 0:
3692 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3693 break;
3694 case 1:
3695 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3696 break;
3697 case 2:
3698 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3699 break;
3700 default:
3701 abort();
3703 } else {
3704 switch (size) {
3705 case 0:
3706 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3707 break;
3708 case 1:
3709 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3710 break;
3711 default:
3712 abort();
3715 tcg_temp_free_i32(tmp);
3716 tcg_temp_free_i32(tmp2);
3717 return 0;
3720 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3722 TCGv rd, tmp;
3724 rd = new_tmp();
3725 tmp = new_tmp();
3727 tcg_gen_shli_i32(rd, t0, 8);
3728 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3729 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3730 tcg_gen_or_i32(rd, rd, tmp);
3732 tcg_gen_shri_i32(t1, t1, 8);
3733 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3734 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3735 tcg_gen_or_i32(t1, t1, tmp);
3736 tcg_gen_mov_i32(t0, rd);
3738 dead_tmp(tmp);
3739 dead_tmp(rd);
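/* e.g. bytes t0 = a3:a2:a1:a0, t1 = b3:b2:b1:b0 become t0 = a2:b2:a0:b0 and
 * t1 = a3:b3:a1:b1 (one 32-bit chunk of the VTRN.8 byte transpose). */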
3742 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3744 TCGv rd, tmp;
3746 rd = new_tmp();
3747 tmp = new_tmp();
3749 tcg_gen_shli_i32(rd, t0, 16);
3750 tcg_gen_andi_i32(tmp, t1, 0xffff);
3751 tcg_gen_or_i32(rd, rd, tmp);
3752 tcg_gen_shri_i32(t1, t1, 16);
3753 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3754 tcg_gen_or_i32(t1, t1, tmp);
3755 tcg_gen_mov_i32(t0, rd);
3757 dead_tmp(tmp);
3758 dead_tmp(rd);
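/* Register count, interleave factor and register spacing for the NEON
 * "load/store multiple structures" forms, indexed by the type field
 * insn[11:8]; values above 10 are rejected as undefined. */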
3762 static struct {
3763 int nregs;
3764 int interleave;
3765 int spacing;
3766 } neon_ls_element_type[11] = {
3767 {4, 4, 1},
3768 {4, 4, 2},
3769 {4, 1, 1},
3770 {4, 2, 1},
3771 {3, 3, 1},
3772 {3, 3, 2},
3773 {3, 1, 1},
3774 {1, 1, 1},
3775 {2, 2, 1},
3776 {2, 2, 2},
3777 {2, 1, 1}
3780 /* Translate a NEON load/store element instruction. Return nonzero if the
3781 instruction is invalid. */
3782 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3784 int rd, rn, rm;
3785 int op;
3786 int nregs;
3787 int interleave;
3788 int spacing;
3789 int stride;
3790 int size;
3791 int reg;
3792 int pass;
3793 int load;
3794 int shift;
3795 int n;
3796 TCGv addr;
3797 TCGv tmp;
3798 TCGv tmp2;
3799 TCGv_i64 tmp64;
3801 if (!s->vfp_enabled)
3802 return 1;
3803 VFP_DREG_D(rd, insn);
3804 rn = (insn >> 16) & 0xf;
3805 rm = insn & 0xf;
3806 load = (insn & (1 << 21)) != 0;
3807 addr = new_tmp();
3808 if ((insn & (1 << 23)) == 0) {
3809 /* Load/store all elements. */
3810 op = (insn >> 8) & 0xf;
3811 size = (insn >> 6) & 3;
3812 if (op > 10)
3813 return 1;
3814 nregs = neon_ls_element_type[op].nregs;
3815 interleave = neon_ls_element_type[op].interleave;
3816 spacing = neon_ls_element_type[op].spacing;
3817 if (size == 3 && (interleave | spacing) != 1)
3818 return 1;
3819 load_reg_var(s, addr, rn);
3820 stride = (1 << size) * interleave;
3821 for (reg = 0; reg < nregs; reg++) {
3822 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3823 load_reg_var(s, addr, rn);
3824 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3825 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3826 load_reg_var(s, addr, rn);
3827 tcg_gen_addi_i32(addr, addr, 1 << size);
3829 if (size == 3) {
3830 if (load) {
3831 tmp64 = gen_ld64(addr, IS_USER(s));
3832 neon_store_reg64(tmp64, rd);
3833 tcg_temp_free_i64(tmp64);
3834 } else {
3835 tmp64 = tcg_temp_new_i64();
3836 neon_load_reg64(tmp64, rd);
3837 gen_st64(tmp64, addr, IS_USER(s));
3839 tcg_gen_addi_i32(addr, addr, stride);
3840 } else {
3841 for (pass = 0; pass < 2; pass++) {
3842 if (size == 2) {
3843 if (load) {
3844 tmp = gen_ld32(addr, IS_USER(s));
3845 neon_store_reg(rd, pass, tmp);
3846 } else {
3847 tmp = neon_load_reg(rd, pass);
3848 gen_st32(tmp, addr, IS_USER(s));
3850 tcg_gen_addi_i32(addr, addr, stride);
3851 } else if (size == 1) {
3852 if (load) {
3853 tmp = gen_ld16u(addr, IS_USER(s));
3854 tcg_gen_addi_i32(addr, addr, stride);
3855 tmp2 = gen_ld16u(addr, IS_USER(s));
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 tcg_gen_shli_i32(tmp2, tmp2, 16);
3858 tcg_gen_or_i32(tmp, tmp, tmp2);
3859 dead_tmp(tmp2);
3860 neon_store_reg(rd, pass, tmp);
3861 } else {
3862 tmp = neon_load_reg(rd, pass);
3863 tmp2 = new_tmp();
3864 tcg_gen_shri_i32(tmp2, tmp, 16);
3865 gen_st16(tmp, addr, IS_USER(s));
3866 tcg_gen_addi_i32(addr, addr, stride);
3867 gen_st16(tmp2, addr, IS_USER(s));
3868 tcg_gen_addi_i32(addr, addr, stride);
3870 } else /* size == 0 */ {
3871 if (load) {
3872 TCGV_UNUSED(tmp2);
3873 for (n = 0; n < 4; n++) {
3874 tmp = gen_ld8u(addr, IS_USER(s));
3875 tcg_gen_addi_i32(addr, addr, stride);
3876 if (n == 0) {
3877 tmp2 = tmp;
3878 } else {
3879 tcg_gen_shli_i32(tmp, tmp, n * 8);
3880 tcg_gen_or_i32(tmp2, tmp2, tmp);
3881 dead_tmp(tmp);
3884 neon_store_reg(rd, pass, tmp2);
3885 } else {
3886 tmp2 = neon_load_reg(rd, pass);
3887 for (n = 0; n < 4; n++) {
3888 tmp = new_tmp();
3889 if (n == 0) {
3890 tcg_gen_mov_i32(tmp, tmp2);
3891 } else {
3892 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3894 gen_st8(tmp, addr, IS_USER(s));
3895 tcg_gen_addi_i32(addr, addr, stride);
3897 dead_tmp(tmp2);
3902 rd += spacing;
3904 stride = nregs * 8;
3905 } else {
3906 size = (insn >> 10) & 3;
3907 if (size == 3) {
3908 /* Load single element to all lanes. */
3909 if (!load)
3910 return 1;
3911 size = (insn >> 6) & 3;
3912 nregs = ((insn >> 8) & 3) + 1;
3913 stride = (insn & (1 << 5)) ? 2 : 1;
3914 load_reg_var(s, addr, rn);
3915 for (reg = 0; reg < nregs; reg++) {
3916 switch (size) {
3917 case 0:
3918 tmp = gen_ld8u(addr, IS_USER(s));
3919 gen_neon_dup_u8(tmp, 0);
3920 break;
3921 case 1:
3922 tmp = gen_ld16u(addr, IS_USER(s));
3923 gen_neon_dup_low16(tmp);
3924 break;
3925 case 2:
3926 tmp = gen_ld32(addr, IS_USER(s));
3927 break;
3928 case 3:
3929 return 1;
3930 default: /* Avoid compiler warnings. */
3931 abort();
3933 tcg_gen_addi_i32(addr, addr, 1 << size);
3934 tmp2 = new_tmp();
3935 tcg_gen_mov_i32(tmp2, tmp);
3936 neon_store_reg(rd, 0, tmp2);
3937 neon_store_reg(rd, 1, tmp);
3938 rd += stride;
3940 stride = (1 << size) * nregs;
3941 } else {
3942 /* Single element. */
3943 pass = (insn >> 7) & 1;
3944 switch (size) {
3945 case 0:
3946 shift = ((insn >> 5) & 3) * 8;
3947 stride = 1;
3948 break;
3949 case 1:
3950 shift = ((insn >> 6) & 1) * 16;
3951 stride = (insn & (1 << 5)) ? 2 : 1;
3952 break;
3953 case 2:
3954 shift = 0;
3955 stride = (insn & (1 << 6)) ? 2 : 1;
3956 break;
3957 default:
3958 abort();
3960 nregs = ((insn >> 8) & 3) + 1;
3961 load_reg_var(s, addr, rn);
3962 for (reg = 0; reg < nregs; reg++) {
3963 if (load) {
3964 switch (size) {
3965 case 0:
3966 tmp = gen_ld8u(addr, IS_USER(s));
3967 break;
3968 case 1:
3969 tmp = gen_ld16u(addr, IS_USER(s));
3970 break;
3971 case 2:
3972 tmp = gen_ld32(addr, IS_USER(s));
3973 break;
3974 default: /* Avoid compiler warnings. */
3975 abort();
3977 if (size != 2) {
3978 tmp2 = neon_load_reg(rd, pass);
3979 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3980 dead_tmp(tmp2);
3982 neon_store_reg(rd, pass, tmp);
3983 } else { /* Store */
3984 tmp = neon_load_reg(rd, pass);
3985 if (shift)
3986 tcg_gen_shri_i32(tmp, tmp, shift);
3987 switch (size) {
3988 case 0:
3989 gen_st8(tmp, addr, IS_USER(s));
3990 break;
3991 case 1:
3992 gen_st16(tmp, addr, IS_USER(s));
3993 break;
3994 case 2:
3995 gen_st32(tmp, addr, IS_USER(s));
3996 break;
3999 rd += stride;
4000 tcg_gen_addi_i32(addr, addr, 1 << size);
4002 stride = nregs * (1 << size);
4005 dead_tmp(addr);
4006 if (rm != 15) {
4007 TCGv base;
4009 base = load_reg(s, rn);
4010 if (rm == 13) {
4011 tcg_gen_addi_i32(base, base, stride);
4012 } else {
4013 TCGv index;
4014 index = load_reg(s, rm);
4015 tcg_gen_add_i32(base, base, index);
4016 dead_tmp(index);
4018 store_reg(s, rn, base);
4020 return 0;
4023 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4024 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4026 tcg_gen_and_i32(t, t, c);
4027 tcg_gen_andc_i32(f, f, c);
4028 tcg_gen_or_i32(dest, t, f);
4031 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4033 switch (size) {
4034 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4035 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4036 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4037 default: abort();
4041 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4043 switch (size) {
4044 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4045 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4046 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4047 default: abort();
4051 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4053 switch (size) {
4054 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4055 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4056 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4057 default: abort();
4061 static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4063 switch (size) {
4064 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4065 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4066 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4067 default: abort();
4071 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4072 int q, int u)
4074 if (q) {
4075 if (u) {
4076 switch (size) {
4077 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4078 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4079 default: abort();
4081 } else {
4082 switch (size) {
4083 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4084 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4085 default: abort();
4088 } else {
4089 if (u) {
4090 switch (size) {
4091 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4092 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4093 default: abort();
4095 } else {
4096 switch (size) {
4097 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4098 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4099 default: abort();
4105 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4107 if (u) {
4108 switch (size) {
4109 case 0: gen_helper_neon_widen_u8(dest, src); break;
4110 case 1: gen_helper_neon_widen_u16(dest, src); break;
4111 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4112 default: abort();
4114 } else {
4115 switch (size) {
4116 case 0: gen_helper_neon_widen_s8(dest, src); break;
4117 case 1: gen_helper_neon_widen_s16(dest, src); break;
4118 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4119 default: abort();
4122 dead_tmp(src);
4125 static inline void gen_neon_addl(int size)
4127 switch (size) {
4128 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4129 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4130 case 2: tcg_gen_add_i64(CPU_V001); break;
4131 default: abort();
4135 static inline void gen_neon_subl(int size)
4137 switch (size) {
4138 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4139 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4140 case 2: tcg_gen_sub_i64(CPU_V001); break;
4141 default: abort();
4145 static inline void gen_neon_negl(TCGv_i64 var, int size)
4147 switch (size) {
4148 case 0: gen_helper_neon_negl_u16(var, var); break;
4149 case 1: gen_helper_neon_negl_u32(var, var); break;
4150 case 2: gen_helper_neon_negl_u64(var, var); break;
4151 default: abort();
4155 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4157 switch (size) {
4158 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4159 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4160 default: abort();
4164 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4166 TCGv_i64 tmp;
4168 switch ((size << 1) | u) {
4169 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4170 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4171 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4172 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4173 case 4:
4174 tmp = gen_muls_i64_i32(a, b);
4175 tcg_gen_mov_i64(dest, tmp);
4176 break;
4177 case 5:
4178 tmp = gen_mulu_i64_i32(a, b);
4179 tcg_gen_mov_i64(dest, tmp);
4180 break;
4181 default: abort();
4184 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4185 Don't forget to clean them now. */
4186 if (size < 2) {
4187 dead_tmp(a);
4188 dead_tmp(b);
4192 static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4194 if (op) {
4195 if (u) {
4196 gen_neon_unarrow_sats(size, dest, src);
4197 } else {
4198 gen_neon_narrow(size, dest, src);
4200 } else {
4201 if (u) {
4202 gen_neon_narrow_satu(size, dest, src);
4203 } else {
4204 gen_neon_narrow_sats(size, dest, src);
4209 /* Translate a NEON data processing instruction. Return nonzero if the
4210 instruction is invalid.
4211 We process data in a mixture of 32-bit and 64-bit chunks.
4212 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4214 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4216 int op;
4217 int q;
4218 int rd, rn, rm;
4219 int size;
4220 int shift;
4221 int pass;
4222 int count;
4223 int pairwise;
4224 int u;
4225 int n;
4226 uint32_t imm, mask;
4227 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4228 TCGv_i64 tmp64;
4230 if (!s->vfp_enabled)
4231 return 1;
4232 q = (insn & (1 << 6)) != 0;
4233 u = (insn >> 24) & 1;
4234 VFP_DREG_D(rd, insn);
4235 VFP_DREG_N(rn, insn);
4236 VFP_DREG_M(rm, insn);
4237 size = (insn >> 20) & 3;
4238 if ((insn & (1 << 23)) == 0) {
4239 /* Three register same length. */
4240 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4241 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4242 || op == 10 || op == 11 || op == 16)) {
4243 /* 64-bit element instructions. */
4244 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4245 neon_load_reg64(cpu_V0, rn + pass);
4246 neon_load_reg64(cpu_V1, rm + pass);
4247 switch (op) {
4248 case 1: /* VQADD */
4249 if (u) {
4250 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4251 cpu_V0, cpu_V1);
4252 } else {
4253 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4254 cpu_V0, cpu_V1);
4256 break;
4257 case 5: /* VQSUB */
4258 if (u) {
4259 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4260 cpu_V0, cpu_V1);
4261 } else {
4262 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4263 cpu_V0, cpu_V1);
4265 break;
4266 case 8: /* VSHL */
4267 if (u) {
4268 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4269 } else {
4270 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4272 break;
4273 case 9: /* VQSHL */
4274 if (u) {
4275 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4276 cpu_V1, cpu_V0);
4277 } else {
4278 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4279 cpu_V1, cpu_V0);
4281 break;
4282 case 10: /* VRSHL */
4283 if (u) {
4284 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4285 } else {
4286 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4288 break;
4289 case 11: /* VQRSHL */
4290 if (u) {
4291 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4292 cpu_V1, cpu_V0);
4293 } else {
4294 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4295 cpu_V1, cpu_V0);
4297 break;
4298 case 16:
4299 if (u) {
4300 tcg_gen_sub_i64(CPU_V001);
4301 } else {
4302 tcg_gen_add_i64(CPU_V001);
4304 break;
4305 default:
4306 abort();
4308 neon_store_reg64(cpu_V0, rd + pass);
4310 return 0;
4312 switch (op) {
4313 case 8: /* VSHL */
4314 case 9: /* VQSHL */
4315 case 10: /* VRSHL */
4316 case 11: /* VQRSHL */
4318 int rtmp;
4319 /* Shift instruction operands are reversed. */
4320 rtmp = rn;
4321 rn = rm;
4322 rm = rtmp;
4323 pairwise = 0;
4325 break;
4326 case 20: /* VPMAX */
4327 case 21: /* VPMIN */
4328 case 23: /* VPADD */
4329 pairwise = 1;
4330 break;
4331 case 26: /* VPADD (float) */
4332 pairwise = (u && size < 2);
4333 break;
4334 case 30: /* VPMIN/VPMAX (float) */
4335 pairwise = u;
4336 break;
4337 default:
4338 pairwise = 0;
4339 break;
4342 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4344 if (pairwise) {
4345 /* Pairwise. */
4346 if (q)
4347 n = (pass & 1) * 2;
4348 else
4349 n = 0;
4350 if (pass < q + 1) {
4351 tmp = neon_load_reg(rn, n);
4352 tmp2 = neon_load_reg(rn, n + 1);
4353 } else {
4354 tmp = neon_load_reg(rm, n);
4355 tmp2 = neon_load_reg(rm, n + 1);
4357 } else {
4358 /* Elementwise. */
4359 tmp = neon_load_reg(rn, pass);
4360 tmp2 = neon_load_reg(rm, pass);
4362 switch (op) {
4363 case 0: /* VHADD */
4364 GEN_NEON_INTEGER_OP(hadd);
4365 break;
4366 case 1: /* VQADD */
4367 GEN_NEON_INTEGER_OP_ENV(qadd);
4368 break;
4369 case 2: /* VRHADD */
4370 GEN_NEON_INTEGER_OP(rhadd);
4371 break;
4372 case 3: /* Logic ops. */
4373 switch ((u << 2) | size) {
4374 case 0: /* VAND */
4375 tcg_gen_and_i32(tmp, tmp, tmp2);
4376 break;
4377 case 1: /* BIC */
4378 tcg_gen_andc_i32(tmp, tmp, tmp2);
4379 break;
4380 case 2: /* VORR */
4381 tcg_gen_or_i32(tmp, tmp, tmp2);
4382 break;
4383 case 3: /* VORN */
4384 tcg_gen_orc_i32(tmp, tmp, tmp2);
4385 break;
4386 case 4: /* VEOR */
4387 tcg_gen_xor_i32(tmp, tmp, tmp2);
4388 break;
4389 case 5: /* VBSL */
4390 tmp3 = neon_load_reg(rd, pass);
4391 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4392 dead_tmp(tmp3);
4393 break;
4394 case 6: /* VBIT */
4395 tmp3 = neon_load_reg(rd, pass);
4396 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4397 dead_tmp(tmp3);
4398 break;
4399 case 7: /* VBIF */
4400 tmp3 = neon_load_reg(rd, pass);
4401 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4402 dead_tmp(tmp3);
4403 break;
4405 break;
4406 case 4: /* VHSUB */
4407 GEN_NEON_INTEGER_OP(hsub);
4408 break;
4409 case 5: /* VQSUB */
4410 GEN_NEON_INTEGER_OP_ENV(qsub);
4411 break;
4412 case 6: /* VCGT */
4413 GEN_NEON_INTEGER_OP(cgt);
4414 break;
4415 case 7: /* VCGE */
4416 GEN_NEON_INTEGER_OP(cge);
4417 break;
4418 case 8: /* VSHL */
4419 GEN_NEON_INTEGER_OP(shl);
4420 break;
4421 case 9: /* VQSHL */
4422 GEN_NEON_INTEGER_OP_ENV(qshl);
4423 break;
4424 case 10: /* VRSHL */
4425 GEN_NEON_INTEGER_OP(rshl);
4426 break;
4427 case 11: /* VQRSHL */
4428 GEN_NEON_INTEGER_OP_ENV(qrshl);
4429 break;
4430 case 12: /* VMAX */
4431 GEN_NEON_INTEGER_OP(max);
4432 break;
4433 case 13: /* VMIN */
4434 GEN_NEON_INTEGER_OP(min);
4435 break;
4436 case 14: /* VABD */
4437 GEN_NEON_INTEGER_OP(abd);
4438 break;
4439 case 15: /* VABA */
4440 GEN_NEON_INTEGER_OP(abd);
4441 dead_tmp(tmp2);
4442 tmp2 = neon_load_reg(rd, pass);
4443 gen_neon_add(size, tmp, tmp2);
4444 break;
4445 case 16:
4446 if (!u) { /* VADD */
4447 if (gen_neon_add(size, tmp, tmp2))
4448 return 1;
4449 } else { /* VSUB */
4450 switch (size) {
4451 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4452 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4453 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4454 default: return 1;
4457 break;
4458 case 17:
4459 if (!u) { /* VTST */
4460 switch (size) {
4461 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4462 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4463 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4464 default: return 1;
4466 } else { /* VCEQ */
4467 switch (size) {
4468 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4469 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4470 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4471 default: return 1;
4474 break;
4475 case 18: /* Multiply. */
4476 switch (size) {
4477 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4478 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4479 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4480 default: return 1;
4482 dead_tmp(tmp2);
4483 tmp2 = neon_load_reg(rd, pass);
4484 if (u) { /* VMLS */
4485 gen_neon_rsb(size, tmp, tmp2);
4486 } else { /* VMLA */
4487 gen_neon_add(size, tmp, tmp2);
4489 break;
4490 case 19: /* VMUL */
4491 if (u) { /* polynomial */
4492 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4493 } else { /* Integer */
4494 switch (size) {
4495 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4496 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4497 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4498 default: return 1;
4501 break;
4502 case 20: /* VPMAX */
4503 GEN_NEON_INTEGER_OP(pmax);
4504 break;
4505 case 21: /* VPMIN */
4506 GEN_NEON_INTEGER_OP(pmin);
4507 break;
4508            case 22: /* Multiply high. */
4509 if (!u) { /* VQDMULH */
4510 switch (size) {
4511 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4512 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4513 default: return 1;
4515            } else { /* VQRDMULH */
4516 switch (size) {
4517 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4518 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4519 default: return 1;
4522 break;
4523 case 23: /* VPADD */
4524 if (u)
4525 return 1;
4526 switch (size) {
4527 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4528 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4529 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4530 default: return 1;
4532 break;
4533            case 26: /* Floating point arithmetic. */
4534 switch ((u << 2) | size) {
4535 case 0: /* VADD */
4536 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4537 break;
4538 case 2: /* VSUB */
4539 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4540 break;
4541 case 4: /* VPADD */
4542 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4543 break;
4544 case 6: /* VABD */
4545 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4546 break;
4547 default:
4548 return 1;
4550 break;
4551 case 27: /* Float multiply. */
4552 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4553 if (!u) {
4554 dead_tmp(tmp2);
4555 tmp2 = neon_load_reg(rd, pass);
4556 if (size == 0) {
4557 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4558 } else {
4559 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4562 break;
4563 case 28: /* Float compare. */
4564 if (!u) {
4565 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4566 } else {
4567 if (size == 0)
4568 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4569 else
4570 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4572 break;
4573 case 29: /* Float compare absolute. */
4574 if (!u)
4575 return 1;
4576 if (size == 0)
4577 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4578 else
4579 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4580 break;
4581 case 30: /* Float min/max. */
4582 if (size == 0)
4583 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4584 else
4585 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4586 break;
4587 case 31:
4588 if (size == 0)
4589 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4590 else
4591 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4592 break;
4593 default:
4594 abort();
4596 dead_tmp(tmp2);
4598 /* Save the result. For elementwise operations we can put it
4599 straight into the destination register. For pairwise operations
4600 we have to be careful to avoid clobbering the source operands. */
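                /* Illustrative note, not part of the original source: for a
                   pairwise op such as VPADD d0, d1, d0 pass 0 reads d1 and
                   pass 1 reads d0, so writing the pass-0 result straight to
                   rd (== d0) would corrupt pass 1's input; it is parked in a
                   scratch slot instead and copied out after both passes. */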
4601 if (pairwise && rd == rm) {
4602 neon_store_scratch(pass, tmp);
4603 } else {
4604 neon_store_reg(rd, pass, tmp);
4607 } /* for pass */
4608 if (pairwise && rd == rm) {
4609 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4610 tmp = neon_load_scratch(pass);
4611 neon_store_reg(rd, pass, tmp);
4614 /* End of 3 register same size operations. */
4615 } else if (insn & (1 << 4)) {
4616 if ((insn & 0x00380080) != 0) {
4617 /* Two registers and shift. */
4618 op = (insn >> 8) & 0xf;
4619 if (insn & (1 << 7)) {
4620 /* 64-bit shift. */
4621 size = 3;
4622 } else {
4623 size = 2;
4624 while ((insn & (1 << (size + 19))) == 0)
4625 size--;
4627 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4628            /* To avoid excessive duplication of ops we implement shift
4629 by immediate using the variable shift operations. */
4630 if (op < 8) {
4631 /* Shift by immediate:
4632 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4633 /* Right shifts are encoded as N - shift, where N is the
4634 element size in bits. */
4635 if (op <= 4)
4636 shift = shift - (1 << (size + 3));
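                    /* Worked example, not part of the original source: with
                       16-bit elements (size == 1) an encoded shift field of
                       13 means a right shift by 16 - 13 = 3; the line above
                       turns that into 13 - (1 << 4) = -3, and the variable
                       shift helpers interpret a negative count as a right
                       shift, which is how VSHR reuses the VSHL code. */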
4637 if (size == 3) {
4638 count = q + 1;
4639 } else {
4640 count = q ? 4: 2;
4642 switch (size) {
4643 case 0:
4644 imm = (uint8_t) shift;
4645 imm |= imm << 8;
4646 imm |= imm << 16;
4647 break;
4648 case 1:
4649 imm = (uint16_t) shift;
4650 imm |= imm << 16;
4651 break;
4652 case 2:
4653 case 3:
4654 imm = shift;
4655 break;
4656 default:
4657 abort();
4660 for (pass = 0; pass < count; pass++) {
4661 if (size == 3) {
4662 neon_load_reg64(cpu_V0, rm + pass);
4663 tcg_gen_movi_i64(cpu_V1, imm);
4664 switch (op) {
4665 case 0: /* VSHR */
4666 case 1: /* VSRA */
4667 if (u)
4668 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4669 else
4670 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4671 break;
4672 case 2: /* VRSHR */
4673 case 3: /* VRSRA */
4674 if (u)
4675 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4676 else
4677 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4678 break;
4679 case 4: /* VSRI */
4680 if (!u)
4681 return 1;
4682 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4683 break;
4684 case 5: /* VSHL, VSLI */
4685 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4686 break;
4687 case 6: /* VQSHLU */
4688 if (u) {
4689 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
4690 cpu_V0, cpu_V1);
4691 } else {
4692 return 1;
4694 break;
4695 case 7: /* VQSHL */
4696 if (u) {
4697 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4698 cpu_V0, cpu_V1);
4699 } else {
4700 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4701 cpu_V0, cpu_V1);
4703 break;
4705 if (op == 1 || op == 3) {
4706 /* Accumulate. */
4707 neon_load_reg64(cpu_V1, rd + pass);
4708 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4709 } else if (op == 4 || (op == 5 && u)) {
4710 /* Insert */
4711 neon_load_reg64(cpu_V1, rd + pass);
4712 uint64_t mask;
4713 if (shift < -63 || shift > 63) {
4714 mask = 0;
4715 } else {
4716 if (op == 4) {
4717 mask = 0xffffffffffffffffull >> -shift;
4718 } else {
4719 mask = 0xffffffffffffffffull << shift;
4722 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
4723 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
4725 neon_store_reg64(cpu_V0, rd + pass);
4726 } else { /* size < 3 */
4727                    /* Operands in tmp and tmp2. */
4728 tmp = neon_load_reg(rm, pass);
4729 tmp2 = new_tmp();
4730 tcg_gen_movi_i32(tmp2, imm);
4731 switch (op) {
4732 case 0: /* VSHR */
4733 case 1: /* VSRA */
4734 GEN_NEON_INTEGER_OP(shl);
4735 break;
4736 case 2: /* VRSHR */
4737 case 3: /* VRSRA */
4738 GEN_NEON_INTEGER_OP(rshl);
4739 break;
4740 case 4: /* VSRI */
4741 if (!u)
4742 return 1;
4743 GEN_NEON_INTEGER_OP(shl);
4744 break;
4745 case 5: /* VSHL, VSLI */
4746 switch (size) {
4747 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4748 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4749 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4750 default: return 1;
4752 break;
4753 case 6: /* VQSHLU */
4754 if (!u) {
4755 return 1;
4757 switch (size) {
4758 case 0:
4759 gen_helper_neon_qshlu_s8(tmp, cpu_env,
4760 tmp, tmp2);
4761 break;
4762 case 1:
4763 gen_helper_neon_qshlu_s16(tmp, cpu_env,
4764 tmp, tmp2);
4765 break;
4766 case 2:
4767 gen_helper_neon_qshlu_s32(tmp, cpu_env,
4768 tmp, tmp2);
4769 break;
4770 default:
4771 return 1;
4773 break;
4774 case 7: /* VQSHL */
4775 GEN_NEON_INTEGER_OP_ENV(qshl);
4776 break;
4778 dead_tmp(tmp2);
4780 if (op == 1 || op == 3) {
4781 /* Accumulate. */
4782 tmp2 = neon_load_reg(rd, pass);
4783 gen_neon_add(size, tmp, tmp2);
4784 dead_tmp(tmp2);
4785 } else if (op == 4 || (op == 5 && u)) {
4786 /* Insert */
4787 switch (size) {
4788 case 0:
4789 if (op == 4)
4790 mask = 0xff >> -shift;
4791 else
4792 mask = (uint8_t)(0xff << shift);
4793 mask |= mask << 8;
4794 mask |= mask << 16;
4795 break;
4796 case 1:
4797 if (op == 4)
4798 mask = 0xffff >> -shift;
4799 else
4800 mask = (uint16_t)(0xffff << shift);
4801 mask |= mask << 16;
4802 break;
4803 case 2:
4804 if (shift < -31 || shift > 31) {
4805 mask = 0;
4806 } else {
4807 if (op == 4)
4808 mask = 0xffffffffu >> -shift;
4809 else
4810 mask = 0xffffffffu << shift;
4812 break;
4813 default:
4814 abort();
4816 tmp2 = neon_load_reg(rd, pass);
4817 tcg_gen_andi_i32(tmp, tmp, mask);
4818 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4819 tcg_gen_or_i32(tmp, tmp, tmp2);
4820 dead_tmp(tmp2);
4822 neon_store_reg(rd, pass, tmp);
4824 } /* for pass */
4825 } else if (op < 10) {
4826 /* Shift by immediate and narrow:
4827 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4828 int input_unsigned = (op == 8) ? !u : u;
4830 shift = shift - (1 << (size + 3));
4831 size++;
4832 if (size == 3) {
4833 tmp64 = tcg_const_i64(shift);
4834 neon_load_reg64(cpu_V0, rm);
4835 neon_load_reg64(cpu_V1, rm + 1);
4836 for (pass = 0; pass < 2; pass++) {
4837 TCGv_i64 in;
4838 if (pass == 0) {
4839 in = cpu_V0;
4840 } else {
4841 in = cpu_V1;
4843 if (q) {
4844 if (input_unsigned) {
4845 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
4846 } else {
4847 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
4849 } else {
4850 if (input_unsigned) {
4851 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
4852 } else {
4853 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
4856 tmp = new_tmp();
4857 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4858 neon_store_reg(rd, pass, tmp);
4859 } /* for pass */
4860 tcg_temp_free_i64(tmp64);
4861 } else {
4862 if (size == 1) {
4863 imm = (uint16_t)shift;
4864 imm |= imm << 16;
4865 } else {
4866 /* size == 2 */
4867 imm = (uint32_t)shift;
4869 tmp2 = tcg_const_i32(imm);
4870 tmp4 = neon_load_reg(rm + 1, 0);
4871 tmp5 = neon_load_reg(rm + 1, 1);
4872 for (pass = 0; pass < 2; pass++) {
4873 if (pass == 0) {
4874 tmp = neon_load_reg(rm, 0);
4875 } else {
4876 tmp = tmp4;
4878 gen_neon_shift_narrow(size, tmp, tmp2, q,
4879 input_unsigned);
4880 if (pass == 0) {
4881 tmp3 = neon_load_reg(rm, 1);
4882 } else {
4883 tmp3 = tmp5;
4885 gen_neon_shift_narrow(size, tmp3, tmp2, q,
4886 input_unsigned);
4887 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4888 dead_tmp(tmp);
4889 dead_tmp(tmp3);
4890 tmp = new_tmp();
4891 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
4892 neon_store_reg(rd, pass, tmp);
4893 } /* for pass */
4894 tcg_temp_free_i32(tmp2);
4896 } else if (op == 10) {
4897 /* VSHLL */
4898 if (q || size == 3)
4899 return 1;
4900 tmp = neon_load_reg(rm, 0);
4901 tmp2 = neon_load_reg(rm, 1);
4902 for (pass = 0; pass < 2; pass++) {
4903 if (pass == 1)
4904 tmp = tmp2;
4906 gen_neon_widen(cpu_V0, tmp, size, u);
4908 if (shift != 0) {
4909 /* The shift is less than the width of the source
4910 type, so we can just shift the whole register. */
4911 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4912 /* Widen the result of shift: we need to clear
4913 * the potential overflow bits resulting from
4914 * left bits of the narrow input appearing as
4915                                 * right bits of the left neighbour narrow
4916 * input. */
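                                /* Illustrative example, not part of the
                                 * original source: with size == 0 and
                                 * shift == 3 each byte sits in a 16-bit lane
                                 * and the 64-bit left shift pushes the top 3
                                 * bits of lane n into the bottom of lane
                                 * n + 1, so the mask built below is
                                 * 0x0007000700070007 and those stray low
                                 * bits are cleared from every lane. */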
4917 if (size < 2 || !u) {
4918 uint64_t imm64;
4919 if (size == 0) {
4920 imm = (0xffu >> (8 - shift));
4921 imm |= imm << 16;
4922 } else if (size == 1) {
4923 imm = 0xffff >> (16 - shift);
4924 } else {
4925 /* size == 2 */
4926 imm = 0xffffffff >> (32 - shift);
4928 if (size < 2) {
4929 imm64 = imm | (((uint64_t)imm) << 32);
4930 } else {
4931 imm64 = imm;
4933 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4936 neon_store_reg64(cpu_V0, rd + pass);
4938 } else if (op >= 14) {
4939 /* VCVT fixed-point. */
4940 /* We have already masked out the must-be-1 top bit of imm6,
4941 * hence this 32-shift where the ARM ARM has 64-imm6.
4943 shift = 32 - shift;
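            /* Worked example, not part of the original source: an imm6 field
               of 0b110100 (52) means 64 - 52 = 12 fraction bits per the ARM
               ARM; with the must-be-1 top bit already masked off the field
               reads as 20 here, and 32 - 20 gives the same 12. */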
4944 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4945 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4946 if (!(op & 1)) {
4947 if (u)
4948 gen_vfp_ulto(0, shift);
4949 else
4950 gen_vfp_slto(0, shift);
4951 } else {
4952 if (u)
4953 gen_vfp_toul(0, shift);
4954 else
4955 gen_vfp_tosl(0, shift);
4957 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4959 } else {
4960 return 1;
4962 } else { /* (insn & 0x00380080) == 0 */
4963 int invert;
4965 op = (insn >> 8) & 0xf;
4966 /* One register and immediate. */
4967 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4968 invert = (insn & (1 << 5)) != 0;
4969 switch (op) {
4970 case 0: case 1:
4971 /* no-op */
4972 break;
4973 case 2: case 3:
4974 imm <<= 8;
4975 break;
4976 case 4: case 5:
4977 imm <<= 16;
4978 break;
4979 case 6: case 7:
4980 imm <<= 24;
4981 break;
4982 case 8: case 9:
4983 imm |= imm << 16;
4984 break;
4985 case 10: case 11:
4986 imm = (imm << 8) | (imm << 24);
4987 break;
4988 case 12:
4989 imm = (imm << 8) | 0xff;
4990 break;
4991 case 13:
4992 imm = (imm << 16) | 0xffff;
4993 break;
4994 case 14:
4995 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4996 if (invert)
4997 imm = ~imm;
4998 break;
4999 case 15:
5000 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5001 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5002 break;
5004 if (invert)
5005 imm = ~imm;
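            /* Illustrative example, not part of the original source: op 13
               with an immediate byte of 0x5a expands to 0x005affff in every
               32-bit lane, and op 12 with the same byte gives 0x00005aff;
               the invert bit then complements the expanded value (op 14 with
               invert is special-cased in the store loop below). */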
5007 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5008 if (op & 1 && op < 12) {
5009 tmp = neon_load_reg(rd, pass);
5010 if (invert) {
5011 /* The immediate value has already been inverted, so
5012 BIC becomes AND. */
5013 tcg_gen_andi_i32(tmp, tmp, imm);
5014 } else {
5015 tcg_gen_ori_i32(tmp, tmp, imm);
5017 } else {
5018 /* VMOV, VMVN. */
5019 tmp = new_tmp();
5020 if (op == 14 && invert) {
5021 uint32_t val;
5022 val = 0;
5023 for (n = 0; n < 4; n++) {
5024 if (imm & (1 << (n + (pass & 1) * 4)))
5025 val |= 0xff << (n * 8);
5027 tcg_gen_movi_i32(tmp, val);
5028 } else {
5029 tcg_gen_movi_i32(tmp, imm);
5032 neon_store_reg(rd, pass, tmp);
5035 } else { /* (insn & 0x00800010 == 0x00800000) */
5036 if (size != 3) {
5037 op = (insn >> 8) & 0xf;
5038 if ((insn & (1 << 6)) == 0) {
5039 /* Three registers of different lengths. */
5040 int src1_wide;
5041 int src2_wide;
5042 int prewiden;
5043 /* prewiden, src1_wide, src2_wide */
5044 static const int neon_3reg_wide[16][3] = {
5045 {1, 0, 0}, /* VADDL */
5046 {1, 1, 0}, /* VADDW */
5047 {1, 0, 0}, /* VSUBL */
5048 {1, 1, 0}, /* VSUBW */
5049 {0, 1, 1}, /* VADDHN */
5050 {0, 0, 0}, /* VABAL */
5051 {0, 1, 1}, /* VSUBHN */
5052 {0, 0, 0}, /* VABDL */
5053 {0, 0, 0}, /* VMLAL */
5054 {0, 0, 0}, /* VQDMLAL */
5055 {0, 0, 0}, /* VMLSL */
5056 {0, 0, 0}, /* VQDMLSL */
5057 {0, 0, 0}, /* Integer VMULL */
5058 {0, 0, 0}, /* VQDMULL */
5059 {0, 0, 0} /* Polynomial VMULL */
5062 prewiden = neon_3reg_wide[op][0];
5063 src1_wide = neon_3reg_wide[op][1];
5064 src2_wide = neon_3reg_wide[op][2];
5066 if (size == 0 && (op == 9 || op == 11 || op == 13))
5067 return 1;
5069 /* Avoid overlapping operands. Wide source operands are
5070 always aligned so will never overlap with wide
5071 destinations in problematic ways. */
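            /* Illustrative note, not part of the original source: for a long
               op such as VADDL q0, d0, d1 the destination pair d0/d1 overlaps
               the narrow source d0; pass 0 would overwrite d0 before pass 1
               reads its high half, so that half is saved to a scratch slot
               first. */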
5072 if (rd == rm && !src2_wide) {
5073 tmp = neon_load_reg(rm, 1);
5074 neon_store_scratch(2, tmp);
5075 } else if (rd == rn && !src1_wide) {
5076 tmp = neon_load_reg(rn, 1);
5077 neon_store_scratch(2, tmp);
5079 TCGV_UNUSED(tmp3);
5080 for (pass = 0; pass < 2; pass++) {
5081 if (src1_wide) {
5082 neon_load_reg64(cpu_V0, rn + pass);
5083 TCGV_UNUSED(tmp);
5084 } else {
5085 if (pass == 1 && rd == rn) {
5086 tmp = neon_load_scratch(2);
5087 } else {
5088 tmp = neon_load_reg(rn, pass);
5090 if (prewiden) {
5091 gen_neon_widen(cpu_V0, tmp, size, u);
5094 if (src2_wide) {
5095 neon_load_reg64(cpu_V1, rm + pass);
5096 TCGV_UNUSED(tmp2);
5097 } else {
5098 if (pass == 1 && rd == rm) {
5099 tmp2 = neon_load_scratch(2);
5100 } else {
5101 tmp2 = neon_load_reg(rm, pass);
5103 if (prewiden) {
5104 gen_neon_widen(cpu_V1, tmp2, size, u);
5107 switch (op) {
5108 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5109 gen_neon_addl(size);
5110 break;
5111 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5112 gen_neon_subl(size);
5113 break;
5114 case 5: case 7: /* VABAL, VABDL */
5115 switch ((size << 1) | u) {
5116 case 0:
5117 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5118 break;
5119 case 1:
5120 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5121 break;
5122 case 2:
5123 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5124 break;
5125 case 3:
5126 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5127 break;
5128 case 4:
5129 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5130 break;
5131 case 5:
5132 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5133 break;
5134 default: abort();
5136 dead_tmp(tmp2);
5137 dead_tmp(tmp);
5138 break;
5139 case 8: case 9: case 10: case 11: case 12: case 13:
5140 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5141 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5142 break;
5143 case 14: /* Polynomial VMULL */
5144 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5145 dead_tmp(tmp2);
5146 dead_tmp(tmp);
5147 break;
5148 default: /* 15 is RESERVED. */
5149 return 1;
5151 if (op == 13) {
5152 /* VQDMULL */
5153 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5154 neon_store_reg64(cpu_V0, rd + pass);
5155 } else if (op == 5 || (op >= 8 && op <= 11)) {
5156 /* Accumulate. */
5157 neon_load_reg64(cpu_V1, rd + pass);
5158 switch (op) {
5159 case 10: /* VMLSL */
5160 gen_neon_negl(cpu_V0, size);
5161 /* Fall through */
5162 case 5: case 8: /* VABAL, VMLAL */
5163 gen_neon_addl(size);
5164 break;
5165 case 9: case 11: /* VQDMLAL, VQDMLSL */
5166 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5167 if (op == 11) {
5168 gen_neon_negl(cpu_V0, size);
5170 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5171 break;
5172 default:
5173 abort();
5175 neon_store_reg64(cpu_V0, rd + pass);
5176 } else if (op == 4 || op == 6) {
5177 /* Narrowing operation. */
5178 tmp = new_tmp();
5179 if (!u) {
5180 switch (size) {
5181 case 0:
5182 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5183 break;
5184 case 1:
5185 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5186 break;
5187 case 2:
5188 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5189 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5190 break;
5191 default: abort();
5193 } else {
5194 switch (size) {
5195 case 0:
5196 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5197 break;
5198 case 1:
5199 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5200 break;
5201 case 2:
5202 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5203 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5204 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5205 break;
5206 default: abort();
5209 if (pass == 0) {
5210 tmp3 = tmp;
5211 } else {
5212 neon_store_reg(rd, 0, tmp3);
5213 neon_store_reg(rd, 1, tmp);
5215 } else {
5216 /* Write back the result. */
5217 neon_store_reg64(cpu_V0, rd + pass);
5220 } else {
5221 /* Two registers and a scalar. */
5222 switch (op) {
5223 case 0: /* Integer VMLA scalar */
5224 case 1: /* Float VMLA scalar */
5225 case 4: /* Integer VMLS scalar */
5226 case 5: /* Floating point VMLS scalar */
5227 case 8: /* Integer VMUL scalar */
5228 case 9: /* Floating point VMUL scalar */
5229 case 12: /* VQDMULH scalar */
5230 case 13: /* VQRDMULH scalar */
5231 tmp = neon_get_scalar(size, rm);
5232 neon_store_scratch(0, tmp);
5233 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5234 tmp = neon_load_scratch(0);
5235 tmp2 = neon_load_reg(rn, pass);
5236 if (op == 12) {
5237 if (size == 1) {
5238 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5239 } else {
5240 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5242 } else if (op == 13) {
5243 if (size == 1) {
5244 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5245 } else {
5246 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5248 } else if (op & 1) {
5249 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5250 } else {
5251 switch (size) {
5252 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5253 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5254 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5255 default: return 1;
5258 dead_tmp(tmp2);
5259 if (op < 8) {
5260 /* Accumulate. */
5261 tmp2 = neon_load_reg(rd, pass);
5262 switch (op) {
5263 case 0:
5264 gen_neon_add(size, tmp, tmp2);
5265 break;
5266 case 1:
5267 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5268 break;
5269 case 4:
5270 gen_neon_rsb(size, tmp, tmp2);
5271 break;
5272 case 5:
5273 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5274 break;
5275 default:
5276 abort();
5278 dead_tmp(tmp2);
5280 neon_store_reg(rd, pass, tmp);
5282 break;
5283            case 2: /* VMLAL scalar */
5284 case 3: /* VQDMLAL scalar */
5285 case 6: /* VMLSL scalar */
5286 case 7: /* VQDMLSL scalar */
5287 case 10: /* VMULL scalar */
5288 case 11: /* VQDMULL scalar */
5289 if (size == 0 && (op == 3 || op == 7 || op == 11))
5290 return 1;
5292 tmp2 = neon_get_scalar(size, rm);
5293 /* We need a copy of tmp2 because gen_neon_mull
5294 * deletes it during pass 0. */
5295 tmp4 = new_tmp();
5296 tcg_gen_mov_i32(tmp4, tmp2);
5297 tmp3 = neon_load_reg(rn, 1);
5299 for (pass = 0; pass < 2; pass++) {
5300 if (pass == 0) {
5301 tmp = neon_load_reg(rn, 0);
5302 } else {
5303 tmp = tmp3;
5304 tmp2 = tmp4;
5306 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5307 if (op != 11) {
5308 neon_load_reg64(cpu_V1, rd + pass);
5310 switch (op) {
5311 case 6:
5312 gen_neon_negl(cpu_V0, size);
5313 /* Fall through */
5314 case 2:
5315 gen_neon_addl(size);
5316 break;
5317 case 3: case 7:
5318 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5319 if (op == 7) {
5320 gen_neon_negl(cpu_V0, size);
5322 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5323 break;
5324 case 10:
5325 /* no-op */
5326 break;
5327 case 11:
5328 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5329 break;
5330 default:
5331 abort();
5333 neon_store_reg64(cpu_V0, rd + pass);
5337 break;
5338 default: /* 14 and 15 are RESERVED */
5339 return 1;
5342 } else { /* size == 3 */
5343 if (!u) {
5344 /* Extract. */
5345 imm = (insn >> 8) & 0xf;
5347 if (imm > 7 && !q)
5348 return 1;
5350 if (imm == 0) {
5351 neon_load_reg64(cpu_V0, rn);
5352 if (q) {
5353 neon_load_reg64(cpu_V1, rn + 1);
5355 } else if (imm == 8) {
5356 neon_load_reg64(cpu_V0, rn + 1);
5357 if (q) {
5358 neon_load_reg64(cpu_V1, rm);
5360 } else if (q) {
5361 tmp64 = tcg_temp_new_i64();
5362 if (imm < 8) {
5363 neon_load_reg64(cpu_V0, rn);
5364 neon_load_reg64(tmp64, rn + 1);
5365 } else {
5366 neon_load_reg64(cpu_V0, rn + 1);
5367 neon_load_reg64(tmp64, rm);
5369 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5370 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5371 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5372 if (imm < 8) {
5373 neon_load_reg64(cpu_V1, rm);
5374 } else {
5375 neon_load_reg64(cpu_V1, rm + 1);
5376 imm -= 8;
5378 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5379 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5380 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5381 tcg_temp_free_i64(tmp64);
5382 } else {
5383 /* BUGFIX */
5384 neon_load_reg64(cpu_V0, rn);
5385 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5386 neon_load_reg64(cpu_V1, rm);
5387 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5388 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5390 neon_store_reg64(cpu_V0, rd);
5391 if (q) {
5392 neon_store_reg64(cpu_V1, rd + 1);
5394 } else if ((insn & (1 << 11)) == 0) {
5395 /* Two register misc. */
5396 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5397 size = (insn >> 18) & 3;
5398 switch (op) {
5399 case 0: /* VREV64 */
5400 if (size == 3)
5401 return 1;
5402 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5403 tmp = neon_load_reg(rm, pass * 2);
5404 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5405 switch (size) {
5406 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5407 case 1: gen_swap_half(tmp); break;
5408 case 2: /* no-op */ break;
5409 default: abort();
5411 neon_store_reg(rd, pass * 2 + 1, tmp);
5412 if (size == 2) {
5413 neon_store_reg(rd, pass * 2, tmp2);
5414 } else {
5415 switch (size) {
5416 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5417 case 1: gen_swap_half(tmp2); break;
5418 default: abort();
5420 neon_store_reg(rd, pass * 2, tmp2);
5423 break;
5424 case 4: case 5: /* VPADDL */
5425 case 12: case 13: /* VPADAL */
5426 if (size == 3)
5427 return 1;
5428 for (pass = 0; pass < q + 1; pass++) {
5429 tmp = neon_load_reg(rm, pass * 2);
5430 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5431 tmp = neon_load_reg(rm, pass * 2 + 1);
5432 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5433 switch (size) {
5434 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5435 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5436 case 2: tcg_gen_add_i64(CPU_V001); break;
5437 default: abort();
5439 if (op >= 12) {
5440 /* Accumulate. */
5441 neon_load_reg64(cpu_V1, rd + pass);
5442 gen_neon_addl(size);
5444 neon_store_reg64(cpu_V0, rd + pass);
5446 break;
5447 case 33: /* VTRN */
5448 if (size == 2) {
5449 for (n = 0; n < (q ? 4 : 2); n += 2) {
5450 tmp = neon_load_reg(rm, n);
5451 tmp2 = neon_load_reg(rd, n + 1);
5452 neon_store_reg(rm, n, tmp2);
5453 neon_store_reg(rd, n + 1, tmp);
5455 } else {
5456 goto elementwise;
5458 break;
5459 case 34: /* VUZP */
5460 if (gen_neon_unzip(rd, rm, size, q)) {
5461 return 1;
5463 break;
5464 case 35: /* VZIP */
5465 if (gen_neon_zip(rd, rm, size, q)) {
5466 return 1;
5468 break;
5469 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5470 if (size == 3)
5471 return 1;
5472 TCGV_UNUSED(tmp2);
5473 for (pass = 0; pass < 2; pass++) {
5474 neon_load_reg64(cpu_V0, rm + pass);
5475 tmp = new_tmp();
5476 gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
5477 if (pass == 0) {
5478 tmp2 = tmp;
5479 } else {
5480 neon_store_reg(rd, 0, tmp2);
5481 neon_store_reg(rd, 1, tmp);
5484 break;
5485 case 38: /* VSHLL */
5486 if (q || size == 3)
5487 return 1;
5488 tmp = neon_load_reg(rm, 0);
5489 tmp2 = neon_load_reg(rm, 1);
5490 for (pass = 0; pass < 2; pass++) {
5491 if (pass == 1)
5492 tmp = tmp2;
5493 gen_neon_widen(cpu_V0, tmp, size, 1);
5494 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5495 neon_store_reg64(cpu_V0, rd + pass);
5497 break;
5498 case 44: /* VCVT.F16.F32 */
5499 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5500 return 1;
5501 tmp = new_tmp();
5502 tmp2 = new_tmp();
5503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5504 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5505 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5506 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5507 tcg_gen_shli_i32(tmp2, tmp2, 16);
5508 tcg_gen_or_i32(tmp2, tmp2, tmp);
5509 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5510 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5511 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5512 neon_store_reg(rd, 0, tmp2);
5513 tmp2 = new_tmp();
5514 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5515 tcg_gen_shli_i32(tmp2, tmp2, 16);
5516 tcg_gen_or_i32(tmp2, tmp2, tmp);
5517 neon_store_reg(rd, 1, tmp2);
5518 dead_tmp(tmp);
5519 break;
5520 case 46: /* VCVT.F32.F16 */
5521 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5522 return 1;
5523 tmp3 = new_tmp();
5524 tmp = neon_load_reg(rm, 0);
5525 tmp2 = neon_load_reg(rm, 1);
5526 tcg_gen_ext16u_i32(tmp3, tmp);
5527 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5528 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5529 tcg_gen_shri_i32(tmp3, tmp, 16);
5530 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5531 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5532 dead_tmp(tmp);
5533 tcg_gen_ext16u_i32(tmp3, tmp2);
5534 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5535 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5536 tcg_gen_shri_i32(tmp3, tmp2, 16);
5537 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5538 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5539 dead_tmp(tmp2);
5540 dead_tmp(tmp3);
5541 break;
5542 default:
5543 elementwise:
5544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5545 if (op == 30 || op == 31 || op >= 58) {
5546 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5547 neon_reg_offset(rm, pass));
5548 TCGV_UNUSED(tmp);
5549 } else {
5550 tmp = neon_load_reg(rm, pass);
5552 switch (op) {
5553 case 1: /* VREV32 */
5554 switch (size) {
5555 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5556 case 1: gen_swap_half(tmp); break;
5557 default: return 1;
5559 break;
5560 case 2: /* VREV16 */
5561 if (size != 0)
5562 return 1;
5563 gen_rev16(tmp);
5564 break;
5565 case 8: /* CLS */
5566 switch (size) {
5567 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5568 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5569 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5570 default: return 1;
5572 break;
5573 case 9: /* CLZ */
5574 switch (size) {
5575 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5576 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5577 case 2: gen_helper_clz(tmp, tmp); break;
5578 default: return 1;
5580 break;
5581 case 10: /* CNT */
5582 if (size != 0)
5583 return 1;
5584 gen_helper_neon_cnt_u8(tmp, tmp);
5585 break;
5586 case 11: /* VNOT */
5587 if (size != 0)
5588 return 1;
5589 tcg_gen_not_i32(tmp, tmp);
5590 break;
5591 case 14: /* VQABS */
5592 switch (size) {
5593 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5594 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5595 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5596 default: return 1;
5598 break;
5599 case 15: /* VQNEG */
5600 switch (size) {
5601 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5602 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5603 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5604 default: return 1;
5606 break;
5607 case 16: case 19: /* VCGT #0, VCLE #0 */
5608 tmp2 = tcg_const_i32(0);
5609 switch(size) {
5610 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5611 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5612 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5613 default: return 1;
5615 tcg_temp_free(tmp2);
5616 if (op == 19)
5617 tcg_gen_not_i32(tmp, tmp);
5618 break;
5619 case 17: case 20: /* VCGE #0, VCLT #0 */
5620 tmp2 = tcg_const_i32(0);
5621 switch(size) {
5622 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5623 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5624 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5625 default: return 1;
5627 tcg_temp_free(tmp2);
5628 if (op == 20)
5629 tcg_gen_not_i32(tmp, tmp);
5630 break;
5631 case 18: /* VCEQ #0 */
5632 tmp2 = tcg_const_i32(0);
5633 switch(size) {
5634 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5635 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5636 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5637 default: return 1;
5639 tcg_temp_free(tmp2);
5640 break;
5641 case 22: /* VABS */
5642 switch(size) {
5643 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5644 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5645 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5646 default: return 1;
5648 break;
5649 case 23: /* VNEG */
5650 if (size == 3)
5651 return 1;
5652 tmp2 = tcg_const_i32(0);
5653 gen_neon_rsb(size, tmp, tmp2);
5654 tcg_temp_free(tmp2);
5655 break;
5656 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5657 tmp2 = tcg_const_i32(0);
5658 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5659 tcg_temp_free(tmp2);
5660 if (op == 27)
5661 tcg_gen_not_i32(tmp, tmp);
5662 break;
5663 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5664 tmp2 = tcg_const_i32(0);
5665 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5666 tcg_temp_free(tmp2);
5667 if (op == 28)
5668 tcg_gen_not_i32(tmp, tmp);
5669 break;
5670 case 26: /* Float VCEQ #0 */
5671 tmp2 = tcg_const_i32(0);
5672 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5673 tcg_temp_free(tmp2);
5674 break;
5675 case 30: /* Float VABS */
5676 gen_vfp_abs(0);
5677 break;
5678 case 31: /* Float VNEG */
5679 gen_vfp_neg(0);
5680 break;
5681 case 32: /* VSWP */
5682 tmp2 = neon_load_reg(rd, pass);
5683 neon_store_reg(rm, pass, tmp2);
5684 break;
5685 case 33: /* VTRN */
5686 tmp2 = neon_load_reg(rd, pass);
5687 switch (size) {
5688 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5689 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5690 case 2: abort();
5691 default: return 1;
5693 neon_store_reg(rm, pass, tmp2);
5694 break;
5695 case 56: /* Integer VRECPE */
5696 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5697 break;
5698 case 57: /* Integer VRSQRTE */
5699 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5700 break;
5701 case 58: /* Float VRECPE */
5702 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5703 break;
5704 case 59: /* Float VRSQRTE */
5705 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5706 break;
5707 case 60: /* VCVT.F32.S32 */
5708 gen_vfp_sito(0);
5709 break;
5710 case 61: /* VCVT.F32.U32 */
5711 gen_vfp_uito(0);
5712 break;
5713 case 62: /* VCVT.S32.F32 */
5714 gen_vfp_tosiz(0);
5715 break;
5716 case 63: /* VCVT.U32.F32 */
5717 gen_vfp_touiz(0);
5718 break;
5719 default:
5720 /* Reserved: 21, 29, 39-56 */
5721 return 1;
5723 if (op == 30 || op == 31 || op >= 58) {
5724 tcg_gen_st_f32(cpu_F0s, cpu_env,
5725 neon_reg_offset(rd, pass));
5726 } else {
5727 neon_store_reg(rd, pass, tmp);
5730 break;
5732 } else if ((insn & (1 << 10)) == 0) {
5733 /* VTBL, VTBX. */
5734 n = ((insn >> 5) & 0x18) + 8;
5735 if (insn & (1 << 6)) {
5736 tmp = neon_load_reg(rd, 0);
5737 } else {
5738 tmp = new_tmp();
5739 tcg_gen_movi_i32(tmp, 0);
5741 tmp2 = neon_load_reg(rm, 0);
5742 tmp4 = tcg_const_i32(rn);
5743 tmp5 = tcg_const_i32(n);
5744 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5745 dead_tmp(tmp);
5746 if (insn & (1 << 6)) {
5747 tmp = neon_load_reg(rd, 1);
5748 } else {
5749 tmp = new_tmp();
5750 tcg_gen_movi_i32(tmp, 0);
5752 tmp3 = neon_load_reg(rm, 1);
5753 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5754 tcg_temp_free_i32(tmp5);
5755 tcg_temp_free_i32(tmp4);
5756 neon_store_reg(rd, 0, tmp2);
5757 neon_store_reg(rd, 1, tmp3);
5758 dead_tmp(tmp);
5759 } else if ((insn & 0x380) == 0) {
5760 /* VDUP */
5761 if (insn & (1 << 19)) {
5762 tmp = neon_load_reg(rm, 1);
5763 } else {
5764 tmp = neon_load_reg(rm, 0);
5766 if (insn & (1 << 16)) {
5767 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5768 } else if (insn & (1 << 17)) {
5769 if ((insn >> 18) & 1)
5770 gen_neon_dup_high16(tmp);
5771 else
5772 gen_neon_dup_low16(tmp);
5774 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5775 tmp2 = new_tmp();
5776 tcg_gen_mov_i32(tmp2, tmp);
5777 neon_store_reg(rd, pass, tmp2);
5779 dead_tmp(tmp);
5780 } else {
5781 return 1;
5785 return 0;
5788 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5790 int crn = (insn >> 16) & 0xf;
5791 int crm = insn & 0xf;
5792 int op1 = (insn >> 21) & 7;
5793 int op2 = (insn >> 5) & 7;
5794 int rt = (insn >> 12) & 0xf;
5795 TCGv tmp;
5797 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5798 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5799 /* TEECR */
5800 if (IS_USER(s))
5801 return 1;
5802 tmp = load_cpu_field(teecr);
5803 store_reg(s, rt, tmp);
5804 return 0;
5806 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5807 /* TEEHBR */
5808 if (IS_USER(s) && (env->teecr & 1))
5809 return 1;
5810 tmp = load_cpu_field(teehbr);
5811 store_reg(s, rt, tmp);
5812 return 0;
5815 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5816 op1, crn, crm, op2);
5817 return 1;
5820 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5822 int crn = (insn >> 16) & 0xf;
5823 int crm = insn & 0xf;
5824 int op1 = (insn >> 21) & 7;
5825 int op2 = (insn >> 5) & 7;
5826 int rt = (insn >> 12) & 0xf;
5827 TCGv tmp;
5829 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5830 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5831 /* TEECR */
5832 if (IS_USER(s))
5833 return 1;
5834 tmp = load_reg(s, rt);
5835 gen_helper_set_teecr(cpu_env, tmp);
5836 dead_tmp(tmp);
5837 return 0;
5839 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5840 /* TEEHBR */
5841 if (IS_USER(s) && (env->teecr & 1))
5842 return 1;
5843 tmp = load_reg(s, rt);
5844 store_cpu_field(tmp, teehbr);
5845 return 0;
5848 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5849 op1, crn, crm, op2);
5850 return 1;
5853 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5855 int cpnum;
5857 cpnum = (insn >> 8) & 0xf;
5858 if (arm_feature(env, ARM_FEATURE_XSCALE)
5859 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5860 return 1;
5862 switch (cpnum) {
5863 case 0:
5864 case 1:
5865 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5866 return disas_iwmmxt_insn(env, s, insn);
5867 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5868 return disas_dsp_insn(env, s, insn);
5870 return 1;
5871 case 10:
5872 case 11:
5873 return disas_vfp_insn (env, s, insn);
5874 case 14:
5875 /* Coprocessors 7-15 are architecturally reserved by ARM.
5876 Unfortunately Intel decided to ignore this. */
5877 if (arm_feature(env, ARM_FEATURE_XSCALE))
5878 goto board;
5879 if (insn & (1 << 20))
5880 return disas_cp14_read(env, s, insn);
5881 else
5882 return disas_cp14_write(env, s, insn);
5883 case 15:
5884 return disas_cp15_insn (env, s, insn);
5885 default:
5886 board:
5887 /* Unknown coprocessor. See if the board has hooked it. */
5888 return disas_cp_insn (env, s, insn);
5893 /* Store a 64-bit value to a register pair. Clobbers val. */
5894 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5896 TCGv tmp;
5897 tmp = new_tmp();
5898 tcg_gen_trunc_i64_i32(tmp, val);
5899 store_reg(s, rlow, tmp);
5900 tmp = new_tmp();
5901 tcg_gen_shri_i64(val, val, 32);
5902 tcg_gen_trunc_i64_i32(tmp, val);
5903 store_reg(s, rhigh, tmp);
5906 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5907 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5909 TCGv_i64 tmp;
5910 TCGv tmp2;
5912 /* Load value and extend to 64 bits. */
5913 tmp = tcg_temp_new_i64();
5914 tmp2 = load_reg(s, rlow);
5915 tcg_gen_extu_i32_i64(tmp, tmp2);
5916 dead_tmp(tmp2);
5917 tcg_gen_add_i64(val, val, tmp);
5918 tcg_temp_free_i64(tmp);
5921 /* load and add a 64-bit value from a register pair. */
5922 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5924 TCGv_i64 tmp;
5925 TCGv tmpl;
5926 TCGv tmph;
5928 /* Load 64-bit value rd:rn. */
5929 tmpl = load_reg(s, rlow);
5930 tmph = load_reg(s, rhigh);
5931 tmp = tcg_temp_new_i64();
5932 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5933 dead_tmp(tmpl);
5934 dead_tmp(tmph);
5935 tcg_gen_add_i64(val, val, tmp);
5936 tcg_temp_free_i64(tmp);
5939 /* Set N and Z flags from a 64-bit value. */
5940 static void gen_logicq_cc(TCGv_i64 val)
5942 TCGv tmp = new_tmp();
5943 gen_helper_logicq_cc(tmp, val);
5944 gen_logic_CC(tmp);
5945 dead_tmp(tmp);
5948 /* Load/Store exclusive instructions are implemented by remembering
5949 the value/address loaded, and seeing if these are the same
5950    when the store is performed. This should be sufficient to implement
5951 the architecturally mandated semantics, and avoids having to monitor
5952 regular stores.
5954 In system emulation mode only one CPU will be running at once, so
5955 this sequence is effectively atomic. In user emulation mode we
5956 throw an exception and handle the atomic operation elsewhere. */
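/* Illustrative guest sequence, not part of the original source, showing the
   kind of loop these helpers are meant to support - an atomic increment:

       retry:  ldrex   r1, [r0]
               add     r1, r1, #1
               strex   r2, r1, [r0]
               cmp     r2, #0
               bne     retry

   gen_load_exclusive() records the address and loaded value; in the
   system-emulation path gen_store_exclusive() performs the store and writes
   0 to the status register only if both still match, otherwise it writes 1
   and the guest loop retries. */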
5957 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5958 TCGv addr, int size)
5960 TCGv tmp;
5962 switch (size) {
5963 case 0:
5964 tmp = gen_ld8u(addr, IS_USER(s));
5965 break;
5966 case 1:
5967 tmp = gen_ld16u(addr, IS_USER(s));
5968 break;
5969 case 2:
5970 case 3:
5971 tmp = gen_ld32(addr, IS_USER(s));
5972 break;
5973 default:
5974 abort();
5976 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5977 store_reg(s, rt, tmp);
5978 if (size == 3) {
5979 TCGv tmp2 = new_tmp();
5980 tcg_gen_addi_i32(tmp2, addr, 4);
5981 tmp = gen_ld32(tmp2, IS_USER(s));
5982 dead_tmp(tmp2);
5983 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5984 store_reg(s, rt2, tmp);
5986 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5989 static void gen_clrex(DisasContext *s)
5991 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5994 #ifdef CONFIG_USER_ONLY
5995 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5996 TCGv addr, int size)
5998 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5999 tcg_gen_movi_i32(cpu_exclusive_info,
6000 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6001 gen_exception_insn(s, 4, EXCP_STREX);
6003 #else
6004 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6005 TCGv addr, int size)
6007 TCGv tmp;
6008 int done_label;
6009 int fail_label;
6011 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6012 [addr] = {Rt};
6013 {Rd} = 0;
6014 } else {
6015 {Rd} = 1;
6016 } */
6017 fail_label = gen_new_label();
6018 done_label = gen_new_label();
6019 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6020 switch (size) {
6021 case 0:
6022 tmp = gen_ld8u(addr, IS_USER(s));
6023 break;
6024 case 1:
6025 tmp = gen_ld16u(addr, IS_USER(s));
6026 break;
6027 case 2:
6028 case 3:
6029 tmp = gen_ld32(addr, IS_USER(s));
6030 break;
6031 default:
6032 abort();
6034 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6035 dead_tmp(tmp);
6036 if (size == 3) {
6037 TCGv tmp2 = new_tmp();
6038 tcg_gen_addi_i32(tmp2, addr, 4);
6039 tmp = gen_ld32(tmp2, IS_USER(s));
6040 dead_tmp(tmp2);
6041 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6042 dead_tmp(tmp);
6044 tmp = load_reg(s, rt);
6045 switch (size) {
6046 case 0:
6047 gen_st8(tmp, addr, IS_USER(s));
6048 break;
6049 case 1:
6050 gen_st16(tmp, addr, IS_USER(s));
6051 break;
6052 case 2:
6053 case 3:
6054 gen_st32(tmp, addr, IS_USER(s));
6055 break;
6056 default:
6057 abort();
6059 if (size == 3) {
6060 tcg_gen_addi_i32(addr, addr, 4);
6061 tmp = load_reg(s, rt2);
6062 gen_st32(tmp, addr, IS_USER(s));
6064 tcg_gen_movi_i32(cpu_R[rd], 0);
6065 tcg_gen_br(done_label);
6066 gen_set_label(fail_label);
6067 tcg_gen_movi_i32(cpu_R[rd], 1);
6068 gen_set_label(done_label);
6069 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6071 #endif
6073 static void disas_arm_insn(CPUState * env, DisasContext *s)
6075 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6076 TCGv tmp;
6077 TCGv tmp2;
6078 TCGv tmp3;
6079 TCGv addr;
6080 TCGv_i64 tmp64;
6082 insn = ldl_code(s->pc);
6083 s->pc += 4;
6085 /* M variants do not implement ARM mode. */
6086 if (IS_M(env))
6087 goto illegal_op;
6088 cond = insn >> 28;
6089 if (cond == 0xf){
6090 /* Unconditional instructions. */
6091 if (((insn >> 25) & 7) == 1) {
6092 /* NEON Data processing. */
6093 if (!arm_feature(env, ARM_FEATURE_NEON))
6094 goto illegal_op;
6096 if (disas_neon_data_insn(env, s, insn))
6097 goto illegal_op;
6098 return;
6100 if ((insn & 0x0f100000) == 0x04000000) {
6101 /* NEON load/store. */
6102 if (!arm_feature(env, ARM_FEATURE_NEON))
6103 goto illegal_op;
6105 if (disas_neon_ls_insn(env, s, insn))
6106 goto illegal_op;
6107 return;
6109 if (((insn & 0x0f30f000) == 0x0510f000) ||
6110 ((insn & 0x0f30f010) == 0x0710f000)) {
6111 if ((insn & (1 << 22)) == 0) {
6112 /* PLDW; v7MP */
6113 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6114 goto illegal_op;
6117 /* Otherwise PLD; v5TE+ */
6118 return;
6120 if (((insn & 0x0f70f000) == 0x0450f000) ||
6121 ((insn & 0x0f70f010) == 0x0650f000)) {
6122 ARCH(7);
6123 return; /* PLI; V7 */
6125 if (((insn & 0x0f700000) == 0x04100000) ||
6126 ((insn & 0x0f700010) == 0x06100000)) {
6127 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6128 goto illegal_op;
6130 return; /* v7MP: Unallocated memory hint: must NOP */
6133 if ((insn & 0x0ffffdff) == 0x01010000) {
6134 ARCH(6);
6135 /* setend */
6136 if (insn & (1 << 9)) {
6137 /* BE8 mode not implemented. */
6138 goto illegal_op;
6140 return;
6141 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6142 switch ((insn >> 4) & 0xf) {
6143 case 1: /* clrex */
6144 ARCH(6K);
6145 gen_clrex(s);
6146 return;
6147 case 4: /* dsb */
6148 case 5: /* dmb */
6149 case 6: /* isb */
6150 ARCH(7);
6151 /* We don't emulate caches so these are a no-op. */
6152 return;
6153 default:
6154 goto illegal_op;
6156 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6157 /* srs */
6158 int32_t offset;
6159 if (IS_USER(s))
6160 goto illegal_op;
6161 ARCH(6);
6162 op1 = (insn & 0x1f);
6163 addr = new_tmp();
6164 tmp = tcg_const_i32(op1);
6165 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6166 tcg_temp_free_i32(tmp);
6167 i = (insn >> 23) & 3;
6168 switch (i) {
6169 case 0: offset = -4; break; /* DA */
6170 case 1: offset = 0; break; /* IA */
6171 case 2: offset = -8; break; /* DB */
6172 case 3: offset = 4; break; /* IB */
6173 default: abort();
6175 if (offset)
6176 tcg_gen_addi_i32(addr, addr, offset);
6177 tmp = load_reg(s, 14);
6178 gen_st32(tmp, addr, 0);
6179 tmp = load_cpu_field(spsr);
6180 tcg_gen_addi_i32(addr, addr, 4);
6181 gen_st32(tmp, addr, 0);
6182 if (insn & (1 << 21)) {
6183 /* Base writeback. */
6184 switch (i) {
6185 case 0: offset = -8; break;
6186 case 1: offset = 4; break;
6187 case 2: offset = -4; break;
6188 case 3: offset = 0; break;
6189 default: abort();
6191 if (offset)
6192 tcg_gen_addi_i32(addr, addr, offset);
6193 tmp = tcg_const_i32(op1);
6194 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6195 tcg_temp_free_i32(tmp);
6196 dead_tmp(addr);
6197 } else {
6198 dead_tmp(addr);
6200 return;
6201 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6202 /* rfe */
6203 int32_t offset;
6204 if (IS_USER(s))
6205 goto illegal_op;
6206 ARCH(6);
6207 rn = (insn >> 16) & 0xf;
6208 addr = load_reg(s, rn);
6209 i = (insn >> 23) & 3;
6210 switch (i) {
6211 case 0: offset = -4; break; /* DA */
6212 case 1: offset = 0; break; /* IA */
6213 case 2: offset = -8; break; /* DB */
6214 case 3: offset = 4; break; /* IB */
6215 default: abort();
6217 if (offset)
6218 tcg_gen_addi_i32(addr, addr, offset);
6219 /* Load PC into tmp and CPSR into tmp2. */
6220 tmp = gen_ld32(addr, 0);
6221 tcg_gen_addi_i32(addr, addr, 4);
6222 tmp2 = gen_ld32(addr, 0);
6223 if (insn & (1 << 21)) {
6224 /* Base writeback. */
6225 switch (i) {
6226 case 0: offset = -8; break;
6227 case 1: offset = 4; break;
6228 case 2: offset = -4; break;
6229 case 3: offset = 0; break;
6230 default: abort();
6232 if (offset)
6233 tcg_gen_addi_i32(addr, addr, offset);
6234 store_reg(s, rn, addr);
6235 } else {
6236 dead_tmp(addr);
6238 gen_rfe(s, tmp, tmp2);
6239 return;
6240 } else if ((insn & 0x0e000000) == 0x0a000000) {
6241 /* branch link and change to thumb (blx <offset>) */
6242 int32_t offset;
6244 val = (uint32_t)s->pc;
6245 tmp = new_tmp();
6246 tcg_gen_movi_i32(tmp, val);
6247 store_reg(s, 14, tmp);
6248 /* Sign-extend the 24-bit offset */
6249 offset = (((int32_t)insn) << 8) >> 8;
6250 /* offset * 4 + bit24 * 2 + (thumb bit) */
6251 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6252 /* pipeline offset */
6253 val += 4;
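            /* Illustrative note, not part of the original source: the target
               is Addr(BLX) + 8 + SignExtend(imm24) * 4 + H * 2, with bit 0
               set to enter Thumb; s->pc already points 4 bytes past the
               instruction, so the += 4 above supplies the rest of the
               pipeline offset. */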
6254 gen_bx_im(s, val);
6255 return;
6256 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6257 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6258 /* iWMMXt register transfer. */
6259 if (env->cp15.c15_cpar & (1 << 1))
6260 if (!disas_iwmmxt_insn(env, s, insn))
6261 return;
6263 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6264 /* Coprocessor double register transfer. */
6265 } else if ((insn & 0x0f000010) == 0x0e000010) {
6266 /* Additional coprocessor register transfer. */
6267 } else if ((insn & 0x0ff10020) == 0x01000000) {
6268 uint32_t mask;
6269 uint32_t val;
6270 /* cps (privileged) */
6271 if (IS_USER(s))
6272 return;
6273 mask = val = 0;
6274 if (insn & (1 << 19)) {
6275 if (insn & (1 << 8))
6276 mask |= CPSR_A;
6277 if (insn & (1 << 7))
6278 mask |= CPSR_I;
6279 if (insn & (1 << 6))
6280 mask |= CPSR_F;
6281 if (insn & (1 << 18))
6282 val |= mask;
6284 if (insn & (1 << 17)) {
6285 mask |= CPSR_M;
6286 val |= (insn & 0x1f);
6288 if (mask) {
6289 gen_set_psr_im(s, mask, 0, val);
6291 return;
6293 goto illegal_op;
6295 if (cond != 0xe) {
6296        /* If the condition is not 'always', generate a conditional
6297           jump to the next instruction */
6298 s->condlabel = gen_new_label();
6299 gen_test_cc(cond ^ 1, s->condlabel);
6300 s->condjmp = 1;
6302 if ((insn & 0x0f900000) == 0x03000000) {
6303 if ((insn & (1 << 21)) == 0) {
6304 ARCH(6T2);
6305 rd = (insn >> 12) & 0xf;
6306 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6307 if ((insn & (1 << 22)) == 0) {
6308 /* MOVW */
6309 tmp = new_tmp();
6310 tcg_gen_movi_i32(tmp, val);
6311 } else {
6312 /* MOVT */
6313 tmp = load_reg(s, rd);
6314 tcg_gen_ext16u_i32(tmp, tmp);
6315 tcg_gen_ori_i32(tmp, tmp, val << 16);
6317 store_reg(s, rd, tmp);
6318 } else {
6319 if (((insn >> 12) & 0xf) != 0xf)
6320 goto illegal_op;
6321 if (((insn >> 16) & 0xf) == 0) {
6322 gen_nop_hint(s, insn & 0xff);
6323 } else {
6324 /* CPSR = immediate */
6325 val = insn & 0xff;
6326 shift = ((insn >> 8) & 0xf) * 2;
6327 if (shift)
6328 val = (val >> shift) | (val << (32 - shift));
6329 i = ((insn & (1 << 22)) != 0);
6330 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6331 goto illegal_op;
6334 } else if ((insn & 0x0f900000) == 0x01000000
6335 && (insn & 0x00000090) != 0x00000090) {
6336 /* miscellaneous instructions */
6337 op1 = (insn >> 21) & 3;
6338 sh = (insn >> 4) & 0xf;
6339 rm = insn & 0xf;
6340 switch (sh) {
6341 case 0x0: /* move program status register */
6342 if (op1 & 1) {
6343 /* PSR = reg */
6344 tmp = load_reg(s, rm);
6345 i = ((op1 & 2) != 0);
6346 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6347 goto illegal_op;
6348 } else {
6349 /* reg = PSR */
6350 rd = (insn >> 12) & 0xf;
6351 if (op1 & 2) {
6352 if (IS_USER(s))
6353 goto illegal_op;
6354 tmp = load_cpu_field(spsr);
6355 } else {
6356 tmp = new_tmp();
6357 gen_helper_cpsr_read(tmp);
6359 store_reg(s, rd, tmp);
6361 break;
6362 case 0x1:
6363 if (op1 == 1) {
6364 /* branch/exchange thumb (bx). */
6365 tmp = load_reg(s, rm);
6366 gen_bx(s, tmp);
6367 } else if (op1 == 3) {
6368 /* clz */
6369 rd = (insn >> 12) & 0xf;
6370 tmp = load_reg(s, rm);
6371 gen_helper_clz(tmp, tmp);
6372 store_reg(s, rd, tmp);
6373 } else {
6374 goto illegal_op;
6376 break;
6377 case 0x2:
6378 if (op1 == 1) {
6379 ARCH(5J); /* bxj */
6380 /* Trivial implementation equivalent to bx. */
6381 tmp = load_reg(s, rm);
6382 gen_bx(s, tmp);
6383 } else {
6384 goto illegal_op;
6386 break;
6387 case 0x3:
6388 if (op1 != 1)
6389 goto illegal_op;
6391 /* branch link/exchange thumb (blx) */
6392 tmp = load_reg(s, rm);
6393 tmp2 = new_tmp();
6394 tcg_gen_movi_i32(tmp2, s->pc);
6395 store_reg(s, 14, tmp2);
6396 gen_bx(s, tmp);
6397 break;
6398 case 0x5: /* saturating add/subtract */
6399 rd = (insn >> 12) & 0xf;
6400 rn = (insn >> 16) & 0xf;
6401 tmp = load_reg(s, rm);
6402 tmp2 = load_reg(s, rn);
6403 if (op1 & 2)
6404 gen_helper_double_saturate(tmp2, tmp2);
6405 if (op1 & 1)
6406 gen_helper_sub_saturate(tmp, tmp, tmp2);
6407 else
6408 gen_helper_add_saturate(tmp, tmp, tmp2);
6409 dead_tmp(tmp2);
6410 store_reg(s, rd, tmp);
6411 break;
6412 case 7:
6413 /* SMC instruction (op1 == 3)
6414 and undefined instructions (op1 == 0 || op1 == 2)
6415 will trap */
6416 if (op1 != 1) {
6417 goto illegal_op;
6419 /* bkpt */
6420 gen_exception_insn(s, 4, EXCP_BKPT);
6421 break;
6422 case 0x8: /* signed multiply */
6423 case 0xa:
6424 case 0xc:
6425 case 0xe:
6426 rs = (insn >> 8) & 0xf;
6427 rn = (insn >> 12) & 0xf;
6428 rd = (insn >> 16) & 0xf;
6429 if (op1 == 1) {
6430 /* (32 * 16) >> 16 */
6431 tmp = load_reg(s, rm);
6432 tmp2 = load_reg(s, rs);
6433 if (sh & 4)
6434 tcg_gen_sari_i32(tmp2, tmp2, 16);
6435 else
6436 gen_sxth(tmp2);
6437 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6438 tcg_gen_shri_i64(tmp64, tmp64, 16);
6439 tmp = new_tmp();
6440 tcg_gen_trunc_i64_i32(tmp, tmp64);
6441 tcg_temp_free_i64(tmp64);
6442 if ((sh & 2) == 0) {
6443 tmp2 = load_reg(s, rn);
6444 gen_helper_add_setq(tmp, tmp, tmp2);
6445 dead_tmp(tmp2);
6447 store_reg(s, rd, tmp);
6448 } else {
6449 /* 16 * 16 */
6450 tmp = load_reg(s, rm);
6451 tmp2 = load_reg(s, rs);
6452 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6453 dead_tmp(tmp2);
6454 if (op1 == 2) {
6455 tmp64 = tcg_temp_new_i64();
6456 tcg_gen_ext_i32_i64(tmp64, tmp);
6457 dead_tmp(tmp);
6458 gen_addq(s, tmp64, rn, rd);
6459 gen_storeq_reg(s, rn, rd, tmp64);
6460 tcg_temp_free_i64(tmp64);
6461 } else {
6462 if (op1 == 0) {
6463 tmp2 = load_reg(s, rn);
6464 gen_helper_add_setq(tmp, tmp, tmp2);
6465 dead_tmp(tmp2);
6467 store_reg(s, rd, tmp);
6470 break;
6471 default:
6472 goto illegal_op;
6474 } else if (((insn & 0x0e000000) == 0 &&
6475 (insn & 0x00000090) != 0x90) ||
6476 ((insn & 0x0e000000) == (1 << 25))) {
6477 int set_cc, logic_cc, shiftop;
6479 op1 = (insn >> 21) & 0xf;
6480 set_cc = (insn >> 20) & 1;
6481 logic_cc = table_logic_cc[op1] & set_cc;
6483 /* data processing instruction */
6484 if (insn & (1 << 25)) {
6485 /* immediate operand */
6486 val = insn & 0xff;
6487 shift = ((insn >> 8) & 0xf) * 2;
6488 if (shift) {
6489 val = (val >> shift) | (val << (32 - shift));
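            /* Worked example, not part of the original source: imm8 = 0xff
               with a rotate field of 4 rotates right by 8, giving
               0xff000000; for the flag-setting logical ops the carry is then
               taken from bit 31 of this rotated value, which is what
               gen_set_CF_bit31() below does. */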
6491 tmp2 = new_tmp();
6492 tcg_gen_movi_i32(tmp2, val);
6493 if (logic_cc && shift) {
6494 gen_set_CF_bit31(tmp2);
6496 } else {
6497 /* register */
6498 rm = (insn) & 0xf;
6499 tmp2 = load_reg(s, rm);
6500 shiftop = (insn >> 5) & 3;
6501 if (!(insn & (1 << 4))) {
6502 shift = (insn >> 7) & 0x1f;
6503 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6504 } else {
6505 rs = (insn >> 8) & 0xf;
6506 tmp = load_reg(s, rs);
6507 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6510 if (op1 != 0x0f && op1 != 0x0d) {
6511 rn = (insn >> 16) & 0xf;
6512 tmp = load_reg(s, rn);
6513 } else {
6514 TCGV_UNUSED(tmp);
6516 rd = (insn >> 12) & 0xf;
6517 switch(op1) {
6518 case 0x00:
6519 tcg_gen_and_i32(tmp, tmp, tmp2);
6520 if (logic_cc) {
6521 gen_logic_CC(tmp);
6523 store_reg_bx(env, s, rd, tmp);
6524 break;
6525 case 0x01:
6526 tcg_gen_xor_i32(tmp, tmp, tmp2);
6527 if (logic_cc) {
6528 gen_logic_CC(tmp);
6530 store_reg_bx(env, s, rd, tmp);
6531 break;
6532 case 0x02:
6533 if (set_cc && rd == 15) {
6534 /* SUBS r15, ... is used for exception return. */
6535 if (IS_USER(s)) {
6536 goto illegal_op;
6538 gen_helper_sub_cc(tmp, tmp, tmp2);
6539 gen_exception_return(s, tmp);
6540 } else {
6541 if (set_cc) {
6542 gen_helper_sub_cc(tmp, tmp, tmp2);
6543 } else {
6544 tcg_gen_sub_i32(tmp, tmp, tmp2);
6546 store_reg_bx(env, s, rd, tmp);
6548 break;
6549 case 0x03:
6550 if (set_cc) {
6551 gen_helper_sub_cc(tmp, tmp2, tmp);
6552 } else {
6553 tcg_gen_sub_i32(tmp, tmp2, tmp);
6555 store_reg_bx(env, s, rd, tmp);
6556 break;
6557 case 0x04:
6558 if (set_cc) {
6559 gen_helper_add_cc(tmp, tmp, tmp2);
6560 } else {
6561 tcg_gen_add_i32(tmp, tmp, tmp2);
6563 store_reg_bx(env, s, rd, tmp);
6564 break;
6565 case 0x05:
6566 if (set_cc) {
6567 gen_helper_adc_cc(tmp, tmp, tmp2);
6568 } else {
6569 gen_add_carry(tmp, tmp, tmp2);
6571 store_reg_bx(env, s, rd, tmp);
6572 break;
6573 case 0x06:
6574 if (set_cc) {
6575 gen_helper_sbc_cc(tmp, tmp, tmp2);
6576 } else {
6577 gen_sub_carry(tmp, tmp, tmp2);
6579 store_reg_bx(env, s, rd, tmp);
6580 break;
6581 case 0x07:
6582 if (set_cc) {
6583 gen_helper_sbc_cc(tmp, tmp2, tmp);
6584 } else {
6585 gen_sub_carry(tmp, tmp2, tmp);
6587 store_reg_bx(env, s, rd, tmp);
6588 break;
6589 case 0x08:
6590 if (set_cc) {
6591 tcg_gen_and_i32(tmp, tmp, tmp2);
6592 gen_logic_CC(tmp);
6594 dead_tmp(tmp);
6595 break;
6596 case 0x09:
6597 if (set_cc) {
6598 tcg_gen_xor_i32(tmp, tmp, tmp2);
6599 gen_logic_CC(tmp);
6601 dead_tmp(tmp);
6602 break;
6603 case 0x0a:
6604 if (set_cc) {
6605 gen_helper_sub_cc(tmp, tmp, tmp2);
6607 dead_tmp(tmp);
6608 break;
6609 case 0x0b:
6610 if (set_cc) {
6611 gen_helper_add_cc(tmp, tmp, tmp2);
6613 dead_tmp(tmp);
6614 break;
6615 case 0x0c:
6616 tcg_gen_or_i32(tmp, tmp, tmp2);
6617 if (logic_cc) {
6618 gen_logic_CC(tmp);
6620 store_reg_bx(env, s, rd, tmp);
6621 break;
6622 case 0x0d:
6623 if (logic_cc && rd == 15) {
6624 /* MOVS r15, ... is used for exception return. */
6625 if (IS_USER(s)) {
6626 goto illegal_op;
6628 gen_exception_return(s, tmp2);
6629 } else {
6630 if (logic_cc) {
6631 gen_logic_CC(tmp2);
6633 store_reg_bx(env, s, rd, tmp2);
6635 break;
6636 case 0x0e:
6637 tcg_gen_andc_i32(tmp, tmp, tmp2);
6638 if (logic_cc) {
6639 gen_logic_CC(tmp);
6641 store_reg_bx(env, s, rd, tmp);
6642 break;
6643 default:
6644 case 0x0f:
6645 tcg_gen_not_i32(tmp2, tmp2);
6646 if (logic_cc) {
6647 gen_logic_CC(tmp2);
6649 store_reg_bx(env, s, rd, tmp2);
6650 break;
6652 if (op1 != 0x0f && op1 != 0x0d) {
6653 dead_tmp(tmp2);
6655 } else {
6656 /* other instructions */
6657 op1 = (insn >> 24) & 0xf;
6658 switch(op1) {
6659 case 0x0:
6660 case 0x1:
6661 /* multiplies, extra load/stores */
6662 sh = (insn >> 5) & 3;
6663 if (sh == 0) {
6664 if (op1 == 0x0) {
6665 rd = (insn >> 16) & 0xf;
6666 rn = (insn >> 12) & 0xf;
6667 rs = (insn >> 8) & 0xf;
6668 rm = (insn) & 0xf;
6669 op1 = (insn >> 20) & 0xf;
6670 switch (op1) {
6671 case 0: case 1: case 2: case 3: case 6:
6672 /* 32 bit mul */
6673 tmp = load_reg(s, rs);
6674 tmp2 = load_reg(s, rm);
6675 tcg_gen_mul_i32(tmp, tmp, tmp2);
6676 dead_tmp(tmp2);
6677 if (insn & (1 << 22)) {
6678 /* Subtract (mls) */
6679 ARCH(6T2);
6680 tmp2 = load_reg(s, rn);
6681 tcg_gen_sub_i32(tmp, tmp2, tmp);
6682 dead_tmp(tmp2);
6683 } else if (insn & (1 << 21)) {
6684 /* Add */
6685 tmp2 = load_reg(s, rn);
6686 tcg_gen_add_i32(tmp, tmp, tmp2);
6687 dead_tmp(tmp2);
6689 if (insn & (1 << 20))
6690 gen_logic_CC(tmp);
6691 store_reg(s, rd, tmp);
6692 break;
6693 case 4:
6694 /* 64 bit mul double accumulate (UMAAL) */
6695 ARCH(6);
6696 tmp = load_reg(s, rs);
6697 tmp2 = load_reg(s, rm);
6698 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6699 gen_addq_lo(s, tmp64, rn);
6700 gen_addq_lo(s, tmp64, rd);
6701 gen_storeq_reg(s, rn, rd, tmp64);
6702 tcg_temp_free_i64(tmp64);
6703 break;
6704 case 8: case 9: case 10: case 11:
6705 case 12: case 13: case 14: case 15:
6706 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6707 tmp = load_reg(s, rs);
6708 tmp2 = load_reg(s, rm);
6709 if (insn & (1 << 22)) {
6710 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6711 } else {
6712 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6714 if (insn & (1 << 21)) { /* mult accumulate */
6715 gen_addq(s, tmp64, rn, rd);
6717 if (insn & (1 << 20)) {
6718 gen_logicq_cc(tmp64);
6720 gen_storeq_reg(s, rn, rd, tmp64);
6721 tcg_temp_free_i64(tmp64);
6722 break;
6723 default:
6724 goto illegal_op;
6726 } else {
6727 rn = (insn >> 16) & 0xf;
6728 rd = (insn >> 12) & 0xf;
6729 if (insn & (1 << 23)) {
6730 /* load/store exclusive */
6731 op1 = (insn >> 21) & 0x3;
6732 if (op1)
6733 ARCH(6K);
6734 else
6735 ARCH(6);
6736 addr = tcg_temp_local_new_i32();
6737 load_reg_var(s, addr, rn);
6738 if (insn & (1 << 20)) {
6739 switch (op1) {
6740 case 0: /* ldrex */
6741 gen_load_exclusive(s, rd, 15, addr, 2);
6742 break;
6743 case 1: /* ldrexd */
6744 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6745 break;
6746 case 2: /* ldrexb */
6747 gen_load_exclusive(s, rd, 15, addr, 0);
6748 break;
6749 case 3: /* ldrexh */
6750 gen_load_exclusive(s, rd, 15, addr, 1);
6751 break;
6752 default:
6753 abort();
6755 } else {
6756 rm = insn & 0xf;
6757 switch (op1) {
6758 case 0: /* strex */
6759 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6760 break;
6761 case 1: /* strexd */
6762 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6763 break;
6764 case 2: /* strexb */
6765 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6766 break;
6767 case 3: /* strexh */
6768 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6769 break;
6770 default:
6771 abort();
6774 tcg_temp_free(addr);
6775 } else {
6776 /* SWP instruction */
6777 rm = (insn) & 0xf;
6779                     /* ??? This is not really atomic.  However, we know
6780 we never have multiple CPUs running in parallel,
6781 so it is good enough. */
6782 addr = load_reg(s, rn);
6783 tmp = load_reg(s, rm);
6784 if (insn & (1 << 22)) {
6785 tmp2 = gen_ld8u(addr, IS_USER(s));
6786 gen_st8(tmp, addr, IS_USER(s));
6787 } else {
6788 tmp2 = gen_ld32(addr, IS_USER(s));
6789 gen_st32(tmp, addr, IS_USER(s));
6791 dead_tmp(addr);
6792 store_reg(s, rd, tmp2);
6795 } else {
6796 int address_offset;
6797 int load;
6798 /* Misc load/store */
6799 rn = (insn >> 16) & 0xf;
6800 rd = (insn >> 12) & 0xf;
6801 addr = load_reg(s, rn);
6802 if (insn & (1 << 24))
6803 gen_add_datah_offset(s, insn, 0, addr);
6804 address_offset = 0;
6805 if (insn & (1 << 20)) {
6806 /* load */
6807 switch(sh) {
6808 case 1:
6809 tmp = gen_ld16u(addr, IS_USER(s));
6810 break;
6811 case 2:
6812 tmp = gen_ld8s(addr, IS_USER(s));
6813 break;
6814 default:
6815 case 3:
6816 tmp = gen_ld16s(addr, IS_USER(s));
6817 break;
6819 load = 1;
6820 } else if (sh & 2) {
6821 /* doubleword */
6822 if (sh & 1) {
6823 /* store */
6824 tmp = load_reg(s, rd);
6825 gen_st32(tmp, addr, IS_USER(s));
6826 tcg_gen_addi_i32(addr, addr, 4);
6827 tmp = load_reg(s, rd + 1);
6828 gen_st32(tmp, addr, IS_USER(s));
6829 load = 0;
6830 } else {
6831 /* load */
6832 tmp = gen_ld32(addr, IS_USER(s));
6833 store_reg(s, rd, tmp);
6834 tcg_gen_addi_i32(addr, addr, 4);
6835 tmp = gen_ld32(addr, IS_USER(s));
6836 rd++;
6837 load = 1;
6839 address_offset = -4;
6840 } else {
6841 /* store */
6842 tmp = load_reg(s, rd);
6843 gen_st16(tmp, addr, IS_USER(s));
6844 load = 0;
6846 /* Perform base writeback before the loaded value to
6847 ensure correct behavior with overlapping index registers.
6848                         ldrd with base writeback is undefined if the
6849 destination and index registers overlap. */
6850 if (!(insn & (1 << 24))) {
6851 gen_add_datah_offset(s, insn, address_offset, addr);
6852 store_reg(s, rn, addr);
6853 } else if (insn & (1 << 21)) {
6854 if (address_offset)
6855 tcg_gen_addi_i32(addr, addr, address_offset);
6856 store_reg(s, rn, addr);
6857 } else {
6858 dead_tmp(addr);
6860 if (load) {
6861 /* Complete the load. */
6862 store_reg(s, rd, tmp);
6865 break;
6866 case 0x4:
6867 case 0x5:
6868 goto do_ldst;
6869 case 0x6:
6870 case 0x7:
6871 if (insn & (1 << 4)) {
6872 ARCH(6);
6873 /* Armv6 Media instructions. */
6874 rm = insn & 0xf;
6875 rn = (insn >> 16) & 0xf;
6876 rd = (insn >> 12) & 0xf;
6877 rs = (insn >> 8) & 0xf;
6878 switch ((insn >> 23) & 3) {
6879 case 0: /* Parallel add/subtract. */
6880 op1 = (insn >> 20) & 7;
6881 tmp = load_reg(s, rn);
6882 tmp2 = load_reg(s, rm);
6883 sh = (insn >> 5) & 7;
6884 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6885 goto illegal_op;
6886 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6887 dead_tmp(tmp2);
6888 store_reg(s, rd, tmp);
6889 break;
6890 case 1:
6891 if ((insn & 0x00700020) == 0) {
6892 /* Halfword pack. */
6893 tmp = load_reg(s, rn);
6894 tmp2 = load_reg(s, rm);
6895 shift = (insn >> 7) & 0x1f;
6896 if (insn & (1 << 6)) {
6897 /* pkhtb */
6898 if (shift == 0)
6899 shift = 31;
6900 tcg_gen_sari_i32(tmp2, tmp2, shift);
6901 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6902 tcg_gen_ext16u_i32(tmp2, tmp2);
6903 } else {
6904 /* pkhbt */
6905 if (shift)
6906 tcg_gen_shli_i32(tmp2, tmp2, shift);
6907 tcg_gen_ext16u_i32(tmp, tmp);
6908 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6910 tcg_gen_or_i32(tmp, tmp, tmp2);
6911 dead_tmp(tmp2);
6912 store_reg(s, rd, tmp);
6913 } else if ((insn & 0x00200020) == 0x00200000) {
6914 /* [us]sat */
6915 tmp = load_reg(s, rm);
6916 shift = (insn >> 7) & 0x1f;
6917 if (insn & (1 << 6)) {
6918 if (shift == 0)
6919 shift = 31;
6920 tcg_gen_sari_i32(tmp, tmp, shift);
6921 } else {
6922 tcg_gen_shli_i32(tmp, tmp, shift);
6924 sh = (insn >> 16) & 0x1f;
6925 tmp2 = tcg_const_i32(sh);
6926 if (insn & (1 << 22))
6927 gen_helper_usat(tmp, tmp, tmp2);
6928 else
6929 gen_helper_ssat(tmp, tmp, tmp2);
6930 tcg_temp_free_i32(tmp2);
6931 store_reg(s, rd, tmp);
6932 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6933 /* [us]sat16 */
6934 tmp = load_reg(s, rm);
6935 sh = (insn >> 16) & 0x1f;
6936 tmp2 = tcg_const_i32(sh);
6937 if (insn & (1 << 22))
6938 gen_helper_usat16(tmp, tmp, tmp2);
6939 else
6940 gen_helper_ssat16(tmp, tmp, tmp2);
6941 tcg_temp_free_i32(tmp2);
6942 store_reg(s, rd, tmp);
6943 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6944 /* Select bytes. */
6945 tmp = load_reg(s, rn);
6946 tmp2 = load_reg(s, rm);
6947 tmp3 = new_tmp();
6948 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6949 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6950 dead_tmp(tmp3);
6951 dead_tmp(tmp2);
6952 store_reg(s, rd, tmp);
6953 } else if ((insn & 0x000003e0) == 0x00000060) {
6954 tmp = load_reg(s, rm);
6955 shift = (insn >> 10) & 3;
6956                         /* ??? In many cases it's not necessary to do a
6957 rotate, a shift is sufficient. */
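                             /* For example (illustrative): sxtb/uxtb with a
                                rotation of 8 only consume bits [15:8] of Rm,
                                so a plain right shift by 8 would give the same
                                result after the extension.  */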
6958 if (shift != 0)
6959 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6960 op1 = (insn >> 20) & 7;
6961 switch (op1) {
6962 case 0: gen_sxtb16(tmp); break;
6963 case 2: gen_sxtb(tmp); break;
6964 case 3: gen_sxth(tmp); break;
6965 case 4: gen_uxtb16(tmp); break;
6966 case 6: gen_uxtb(tmp); break;
6967 case 7: gen_uxth(tmp); break;
6968 default: goto illegal_op;
6970 if (rn != 15) {
6971 tmp2 = load_reg(s, rn);
6972 if ((op1 & 3) == 0) {
6973 gen_add16(tmp, tmp2);
6974 } else {
6975 tcg_gen_add_i32(tmp, tmp, tmp2);
6976 dead_tmp(tmp2);
6979 store_reg(s, rd, tmp);
6980 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6981 /* rev */
6982 tmp = load_reg(s, rm);
6983 if (insn & (1 << 22)) {
6984 if (insn & (1 << 7)) {
6985 gen_revsh(tmp);
6986 } else {
6987 ARCH(6T2);
6988 gen_helper_rbit(tmp, tmp);
6990 } else {
6991 if (insn & (1 << 7))
6992 gen_rev16(tmp);
6993 else
6994 tcg_gen_bswap32_i32(tmp, tmp);
6996 store_reg(s, rd, tmp);
6997 } else {
6998 goto illegal_op;
7000 break;
7001 case 2: /* Multiplies (Type 3). */
7002 tmp = load_reg(s, rm);
7003 tmp2 = load_reg(s, rs);
7004 if (insn & (1 << 20)) {
7005 /* Signed multiply most significant [accumulate].
7006 (SMMUL, SMMLA, SMMLS) */
7007 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7009 if (rd != 15) {
7010 tmp = load_reg(s, rd);
7011 if (insn & (1 << 6)) {
7012 tmp64 = gen_subq_msw(tmp64, tmp);
7013 } else {
7014 tmp64 = gen_addq_msw(tmp64, tmp);
7017 if (insn & (1 << 5)) {
7018 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7020 tcg_gen_shri_i64(tmp64, tmp64, 32);
7021 tmp = new_tmp();
7022 tcg_gen_trunc_i64_i32(tmp, tmp64);
7023 tcg_temp_free_i64(tmp64);
7024 store_reg(s, rn, tmp);
7025 } else {
7026 if (insn & (1 << 5))
7027 gen_swap_half(tmp2);
7028 gen_smul_dual(tmp, tmp2);
7029 /* This addition cannot overflow. */
7030 if (insn & (1 << 6)) {
7031 tcg_gen_sub_i32(tmp, tmp, tmp2);
7032 } else {
7033 tcg_gen_add_i32(tmp, tmp, tmp2);
7035 dead_tmp(tmp2);
7036 if (insn & (1 << 22)) {
7037 /* smlald, smlsld */
7038 tmp64 = tcg_temp_new_i64();
7039 tcg_gen_ext_i32_i64(tmp64, tmp);
7040 dead_tmp(tmp);
7041 gen_addq(s, tmp64, rd, rn);
7042 gen_storeq_reg(s, rd, rn, tmp64);
7043 tcg_temp_free_i64(tmp64);
7044 } else {
7045 /* smuad, smusd, smlad, smlsd */
7046 if (rd != 15)
7048 tmp2 = load_reg(s, rd);
7049 gen_helper_add_setq(tmp, tmp, tmp2);
7050 dead_tmp(tmp2);
7052 store_reg(s, rn, tmp);
7055 break;
7056 case 3:
7057 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7058 switch (op1) {
7059 case 0: /* Unsigned sum of absolute differences. */
7060 ARCH(6);
7061 tmp = load_reg(s, rm);
7062 tmp2 = load_reg(s, rs);
7063 gen_helper_usad8(tmp, tmp, tmp2);
7064 dead_tmp(tmp2);
7065 if (rd != 15) {
7066 tmp2 = load_reg(s, rd);
7067 tcg_gen_add_i32(tmp, tmp, tmp2);
7068 dead_tmp(tmp2);
7070 store_reg(s, rn, tmp);
7071 break;
7072 case 0x20: case 0x24: case 0x28: case 0x2c:
7073 /* Bitfield insert/clear. */
7074 ARCH(6T2);
7075 shift = (insn >> 7) & 0x1f;
7076 i = (insn >> 16) & 0x1f;
7077 i = i + 1 - shift;
7078 if (rm == 15) {
7079 tmp = new_tmp();
7080 tcg_gen_movi_i32(tmp, 0);
7081 } else {
7082 tmp = load_reg(s, rm);
7084 if (i != 32) {
7085 tmp2 = load_reg(s, rd);
7086 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7087 dead_tmp(tmp2);
7089 store_reg(s, rd, tmp);
7090 break;
7091 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7092 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7093 ARCH(6T2);
7094 tmp = load_reg(s, rm);
7095 shift = (insn >> 7) & 0x1f;
7096 i = ((insn >> 16) & 0x1f) + 1;
7097 if (shift + i > 32)
7098 goto illegal_op;
7099 if (i < 32) {
7100 if (op1 & 0x20) {
7101 gen_ubfx(tmp, shift, (1u << i) - 1);
7102 } else {
7103 gen_sbfx(tmp, shift, i);
7106 store_reg(s, rd, tmp);
7107 break;
7108 default:
7109 goto illegal_op;
7111 break;
7113 break;
7115 do_ldst:
7116 /* Check for undefined extension instructions
7117          * per the ARM Bible, i.e.:
7118 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
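          *  e.g. 0xe7f000f0 matches this pattern (illustrative example)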
7120 sh = (0xf << 20) | (0xf << 4);
7121 if (op1 == 0x7 && ((insn & sh) == sh))
7123 goto illegal_op;
7125 /* load/store byte/word */
7126 rn = (insn >> 16) & 0xf;
7127 rd = (insn >> 12) & 0xf;
7128 tmp2 = load_reg(s, rn);
7129 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7130 if (insn & (1 << 24))
7131 gen_add_data_offset(s, insn, tmp2);
7132 if (insn & (1 << 20)) {
7133 /* load */
7134 if (insn & (1 << 22)) {
7135 tmp = gen_ld8u(tmp2, i);
7136 } else {
7137 tmp = gen_ld32(tmp2, i);
7139 } else {
7140 /* store */
7141 tmp = load_reg(s, rd);
7142 if (insn & (1 << 22))
7143 gen_st8(tmp, tmp2, i);
7144 else
7145 gen_st32(tmp, tmp2, i);
7147 if (!(insn & (1 << 24))) {
7148 gen_add_data_offset(s, insn, tmp2);
7149 store_reg(s, rn, tmp2);
7150 } else if (insn & (1 << 21)) {
7151 store_reg(s, rn, tmp2);
7152 } else {
7153 dead_tmp(tmp2);
7155 if (insn & (1 << 20)) {
7156 /* Complete the load. */
7157 if (rd == 15)
7158 gen_bx(s, tmp);
7159 else
7160 store_reg(s, rd, tmp);
7162 break;
7163 case 0x08:
7164 case 0x09:
7166 int j, n, user, loaded_base;
7167 TCGv loaded_var;
7168 /* load/store multiple words */
7169 /* XXX: store correct base if write back */
7170 user = 0;
7171 if (insn & (1 << 22)) {
7172 if (IS_USER(s))
7173 goto illegal_op; /* only usable in supervisor mode */
7175 if ((insn & (1 << 15)) == 0)
7176 user = 1;
7178 rn = (insn >> 16) & 0xf;
7179 addr = load_reg(s, rn);
7181 /* compute total size */
7182 loaded_base = 0;
7183 TCGV_UNUSED(loaded_var);
7184 n = 0;
7185 for(i=0;i<16;i++) {
7186 if (insn & (1 << i))
7187 n++;
7189 /* XXX: test invalid n == 0 case ? */
7190 if (insn & (1 << 23)) {
7191 if (insn & (1 << 24)) {
7192 /* pre increment */
7193 tcg_gen_addi_i32(addr, addr, 4);
7194 } else {
7195 /* post increment */
7197 } else {
7198 if (insn & (1 << 24)) {
7199 /* pre decrement */
7200 tcg_gen_addi_i32(addr, addr, -(n * 4));
7201 } else {
7202 /* post decrement */
7203 if (n != 1)
7204 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7207 j = 0;
7208 for(i=0;i<16;i++) {
7209 if (insn & (1 << i)) {
7210 if (insn & (1 << 20)) {
7211 /* load */
7212 tmp = gen_ld32(addr, IS_USER(s));
7213 if (i == 15) {
7214 gen_bx(s, tmp);
7215 } else if (user) {
7216 tmp2 = tcg_const_i32(i);
7217 gen_helper_set_user_reg(tmp2, tmp);
7218 tcg_temp_free_i32(tmp2);
7219 dead_tmp(tmp);
7220 } else if (i == rn) {
7221 loaded_var = tmp;
7222 loaded_base = 1;
7223 } else {
7224 store_reg(s, i, tmp);
7226 } else {
7227 /* store */
7228 if (i == 15) {
7229 /* special case: r15 = PC + 8 */
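                            /* s->pc already points past this insn (address + 4),
                               so the +4 below yields the architectural PC + 8.  */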
7230 val = (long)s->pc + 4;
7231 tmp = new_tmp();
7232 tcg_gen_movi_i32(tmp, val);
7233 } else if (user) {
7234 tmp = new_tmp();
7235 tmp2 = tcg_const_i32(i);
7236 gen_helper_get_user_reg(tmp, tmp2);
7237 tcg_temp_free_i32(tmp2);
7238 } else {
7239 tmp = load_reg(s, i);
7241 gen_st32(tmp, addr, IS_USER(s));
7243 j++;
7244 /* no need to add after the last transfer */
7245 if (j != n)
7246 tcg_gen_addi_i32(addr, addr, 4);
7249 if (insn & (1 << 21)) {
7250 /* write back */
7251 if (insn & (1 << 23)) {
7252 if (insn & (1 << 24)) {
7253 /* pre increment */
7254 } else {
7255 /* post increment */
7256 tcg_gen_addi_i32(addr, addr, 4);
7258 } else {
7259 if (insn & (1 << 24)) {
7260 /* pre decrement */
7261 if (n != 1)
7262 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7263 } else {
7264 /* post decrement */
7265 tcg_gen_addi_i32(addr, addr, -(n * 4));
7268 store_reg(s, rn, addr);
7269 } else {
7270 dead_tmp(addr);
7272 if (loaded_base) {
7273 store_reg(s, rn, loaded_var);
7275 if ((insn & (1 << 22)) && !user) {
7276 /* Restore CPSR from SPSR. */
7277 tmp = load_cpu_field(spsr);
7278 gen_set_cpsr(tmp, 0xffffffff);
7279 dead_tmp(tmp);
7280 s->is_jmp = DISAS_UPDATE;
7283 break;
7284 case 0xa:
7285 case 0xb:
7287 int32_t offset;
7289 /* branch (and link) */
7290 val = (int32_t)s->pc;
7291 if (insn & (1 << 24)) {
7292 tmp = new_tmp();
7293 tcg_gen_movi_i32(tmp, val);
7294 store_reg(s, 14, tmp);
7296 offset = (((int32_t)insn << 8) >> 8);
7297 val += (offset << 2) + 4;
7298 gen_jmp(s, val);
7300 break;
7301 case 0xc:
7302 case 0xd:
7303 case 0xe:
7304 /* Coprocessor. */
7305 if (disas_coproc_insn(env, s, insn))
7306 goto illegal_op;
7307 break;
7308 case 0xf:
7309 /* swi */
7310 gen_set_pc_im(s->pc);
7311 s->is_jmp = DISAS_SWI;
7312 break;
7313 default:
7314 illegal_op:
7315 gen_exception_insn(s, 4, EXCP_UDEF);
7316 break;
7321 /* Return true if this is a Thumb-2 logical op. */
7322 static int
7323 thumb2_logic_op(int op)
7325 return (op < 8);
7328 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7329 then set condition code flags based on the result of the operation.
7330 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7331 to the high bit of T1.
7332 Returns zero if the opcode is valid. */
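/* Illustrative example: ORNS rd, rn, #const with a rotated modified immediate
   computes rd = rn | ~const; N and Z come from the result, while C comes from
   bit 31 of the expanded constant in t1 (the shifter carry-out), which is why
   gen_set_CF_bit31() is applied to t1 rather than to t0.  */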
7334 static int
7335 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7337 int logic_cc;
7339 logic_cc = 0;
7340 switch (op) {
7341 case 0: /* and */
7342 tcg_gen_and_i32(t0, t0, t1);
7343 logic_cc = conds;
7344 break;
7345 case 1: /* bic */
7346 tcg_gen_andc_i32(t0, t0, t1);
7347 logic_cc = conds;
7348 break;
7349 case 2: /* orr */
7350 tcg_gen_or_i32(t0, t0, t1);
7351 logic_cc = conds;
7352 break;
7353 case 3: /* orn */
7354 tcg_gen_orc_i32(t0, t0, t1);
7355 logic_cc = conds;
7356 break;
7357 case 4: /* eor */
7358 tcg_gen_xor_i32(t0, t0, t1);
7359 logic_cc = conds;
7360 break;
7361 case 8: /* add */
7362 if (conds)
7363 gen_helper_add_cc(t0, t0, t1);
7364 else
7365 tcg_gen_add_i32(t0, t0, t1);
7366 break;
7367 case 10: /* adc */
7368 if (conds)
7369 gen_helper_adc_cc(t0, t0, t1);
7370 else
7371 gen_adc(t0, t1);
7372 break;
7373 case 11: /* sbc */
7374 if (conds)
7375 gen_helper_sbc_cc(t0, t0, t1);
7376 else
7377 gen_sub_carry(t0, t0, t1);
7378 break;
7379 case 13: /* sub */
7380 if (conds)
7381 gen_helper_sub_cc(t0, t0, t1);
7382 else
7383 tcg_gen_sub_i32(t0, t0, t1);
7384 break;
7385 case 14: /* rsb */
7386 if (conds)
7387 gen_helper_sub_cc(t0, t1, t0);
7388 else
7389 tcg_gen_sub_i32(t0, t1, t0);
7390 break;
7391 default: /* 5, 6, 7, 9, 12, 15. */
7392 return 1;
7394 if (logic_cc) {
7395 gen_logic_CC(t0);
7396 if (shifter_out)
7397 gen_set_CF_bit31(t1);
7399 return 0;
7402 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7403 is not legal. */
7404 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7406 uint32_t insn, imm, shift, offset;
7407 uint32_t rd, rn, rm, rs;
7408 TCGv tmp;
7409 TCGv tmp2;
7410 TCGv tmp3;
7411 TCGv addr;
7412 TCGv_i64 tmp64;
7413 int op;
7414 int shiftop;
7415 int conds;
7416 int logic_cc;
7418 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7419 || arm_feature (env, ARM_FEATURE_M))) {
7420 /* Thumb-1 cores may need to treat bl and blx as a pair of
7421 16-bit instructions to get correct prefetch abort behavior. */
7422 insn = insn_hw1;
7423 if ((insn & (1 << 12)) == 0) {
7424 /* Second half of blx. */
7425 offset = ((insn & 0x7ff) << 1);
7426 tmp = load_reg(s, 14);
7427 tcg_gen_addi_i32(tmp, tmp, offset);
7428 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7430 tmp2 = new_tmp();
7431 tcg_gen_movi_i32(tmp2, s->pc | 1);
7432 store_reg(s, 14, tmp2);
7433 gen_bx(s, tmp);
7434 return 0;
7436 if (insn & (1 << 11)) {
7437 /* Second half of bl. */
7438 offset = ((insn & 0x7ff) << 1) | 1;
7439 tmp = load_reg(s, 14);
7440 tcg_gen_addi_i32(tmp, tmp, offset);
7442 tmp2 = new_tmp();
7443 tcg_gen_movi_i32(tmp2, s->pc | 1);
7444 store_reg(s, 14, tmp2);
7445 gen_bx(s, tmp);
7446 return 0;
7448 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7449 /* Instruction spans a page boundary. Implement it as two
7450                16-bit instructions in case the second half causes a
7451 prefetch abort. */
7452 offset = ((int32_t)insn << 21) >> 9;
7453 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7454 return 0;
7456 /* Fall through to 32-bit decode. */
7459 insn = lduw_code(s->pc);
7460 s->pc += 2;
7461 insn |= (uint32_t)insn_hw1 << 16;
7463 if ((insn & 0xf800e800) != 0xf000e800) {
7464 ARCH(6T2);
7467 rn = (insn >> 16) & 0xf;
7468 rs = (insn >> 12) & 0xf;
7469 rd = (insn >> 8) & 0xf;
7470 rm = insn & 0xf;
7471 switch ((insn >> 25) & 0xf) {
7472 case 0: case 1: case 2: case 3:
7473 /* 16-bit instructions. Should never happen. */
7474 abort();
7475 case 4:
7476 if (insn & (1 << 22)) {
7477 /* Other load/store, table branch. */
7478 if (insn & 0x01200000) {
7479 /* Load/store doubleword. */
7480 if (rn == 15) {
7481 addr = new_tmp();
7482 tcg_gen_movi_i32(addr, s->pc & ~3);
7483 } else {
7484 addr = load_reg(s, rn);
7486 offset = (insn & 0xff) * 4;
7487 if ((insn & (1 << 23)) == 0)
7488 offset = -offset;
7489 if (insn & (1 << 24)) {
7490 tcg_gen_addi_i32(addr, addr, offset);
7491 offset = 0;
7493 if (insn & (1 << 20)) {
7494 /* ldrd */
7495 tmp = gen_ld32(addr, IS_USER(s));
7496 store_reg(s, rs, tmp);
7497 tcg_gen_addi_i32(addr, addr, 4);
7498 tmp = gen_ld32(addr, IS_USER(s));
7499 store_reg(s, rd, tmp);
7500 } else {
7501 /* strd */
7502 tmp = load_reg(s, rs);
7503 gen_st32(tmp, addr, IS_USER(s));
7504 tcg_gen_addi_i32(addr, addr, 4);
7505 tmp = load_reg(s, rd);
7506 gen_st32(tmp, addr, IS_USER(s));
7508 if (insn & (1 << 21)) {
7509 /* Base writeback. */
7510 if (rn == 15)
7511 goto illegal_op;
7512 tcg_gen_addi_i32(addr, addr, offset - 4);
7513 store_reg(s, rn, addr);
7514 } else {
7515 dead_tmp(addr);
7517 } else if ((insn & (1 << 23)) == 0) {
7518 /* Load/store exclusive word. */
7519 addr = tcg_temp_local_new();
7520 load_reg_var(s, addr, rn);
7521 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7522 if (insn & (1 << 20)) {
7523 gen_load_exclusive(s, rs, 15, addr, 2);
7524 } else {
7525 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7527 tcg_temp_free(addr);
7528 } else if ((insn & (1 << 6)) == 0) {
7529 /* Table Branch. */
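                /* Destination = PC + 2 * entry, where the entry is a byte (tbb)
                   or halfword (tbh) loaded from Rn + Rm (Rn + 2*Rm for tbh);
                   Rn == r15 uses the current PC as the table base.  */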
7530 if (rn == 15) {
7531 addr = new_tmp();
7532 tcg_gen_movi_i32(addr, s->pc);
7533 } else {
7534 addr = load_reg(s, rn);
7536 tmp = load_reg(s, rm);
7537 tcg_gen_add_i32(addr, addr, tmp);
7538 if (insn & (1 << 4)) {
7539 /* tbh */
7540 tcg_gen_add_i32(addr, addr, tmp);
7541 dead_tmp(tmp);
7542 tmp = gen_ld16u(addr, IS_USER(s));
7543 } else { /* tbb */
7544 dead_tmp(tmp);
7545 tmp = gen_ld8u(addr, IS_USER(s));
7547 dead_tmp(addr);
7548 tcg_gen_shli_i32(tmp, tmp, 1);
7549 tcg_gen_addi_i32(tmp, tmp, s->pc);
7550 store_reg(s, 15, tmp);
7551 } else {
7552 /* Load/store exclusive byte/halfword/doubleword. */
7553 ARCH(7);
7554 op = (insn >> 4) & 0x3;
7555 if (op == 2) {
7556 goto illegal_op;
7558 addr = tcg_temp_local_new();
7559 load_reg_var(s, addr, rn);
7560 if (insn & (1 << 20)) {
7561 gen_load_exclusive(s, rs, rd, addr, op);
7562 } else {
7563 gen_store_exclusive(s, rm, rs, rd, addr, op);
7565 tcg_temp_free(addr);
7567 } else {
7568 /* Load/store multiple, RFE, SRS. */
7569 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7570 /* Not available in user mode. */
7571 if (IS_USER(s))
7572 goto illegal_op;
7573 if (insn & (1 << 20)) {
7574 /* rfe */
7575 addr = load_reg(s, rn);
7576 if ((insn & (1 << 24)) == 0)
7577 tcg_gen_addi_i32(addr, addr, -8);
7578 /* Load PC into tmp and CPSR into tmp2. */
7579 tmp = gen_ld32(addr, 0);
7580 tcg_gen_addi_i32(addr, addr, 4);
7581 tmp2 = gen_ld32(addr, 0);
7582 if (insn & (1 << 21)) {
7583 /* Base writeback. */
7584 if (insn & (1 << 24)) {
7585 tcg_gen_addi_i32(addr, addr, 4);
7586 } else {
7587 tcg_gen_addi_i32(addr, addr, -4);
7589 store_reg(s, rn, addr);
7590 } else {
7591 dead_tmp(addr);
7593 gen_rfe(s, tmp, tmp2);
7594 } else {
7595 /* srs */
7596 op = (insn & 0x1f);
7597 addr = new_tmp();
7598 tmp = tcg_const_i32(op);
7599 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7600 tcg_temp_free_i32(tmp);
7601 if ((insn & (1 << 24)) == 0) {
7602 tcg_gen_addi_i32(addr, addr, -8);
7604 tmp = load_reg(s, 14);
7605 gen_st32(tmp, addr, 0);
7606 tcg_gen_addi_i32(addr, addr, 4);
7607 tmp = new_tmp();
7608 gen_helper_cpsr_read(tmp);
7609 gen_st32(tmp, addr, 0);
7610 if (insn & (1 << 21)) {
7611 if ((insn & (1 << 24)) == 0) {
7612 tcg_gen_addi_i32(addr, addr, -4);
7613 } else {
7614 tcg_gen_addi_i32(addr, addr, 4);
7616 tmp = tcg_const_i32(op);
7617 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7618 tcg_temp_free_i32(tmp);
7619 } else {
7620 dead_tmp(addr);
7623 } else {
7624 int i;
7625 /* Load/store multiple. */
7626 addr = load_reg(s, rn);
7627 offset = 0;
7628 for (i = 0; i < 16; i++) {
7629 if (insn & (1 << i))
7630 offset += 4;
7632 if (insn & (1 << 24)) {
7633 tcg_gen_addi_i32(addr, addr, -offset);
7636 for (i = 0; i < 16; i++) {
7637 if ((insn & (1 << i)) == 0)
7638 continue;
7639 if (insn & (1 << 20)) {
7640 /* Load. */
7641 tmp = gen_ld32(addr, IS_USER(s));
7642 if (i == 15) {
7643 gen_bx(s, tmp);
7644 } else {
7645 store_reg(s, i, tmp);
7647 } else {
7648 /* Store. */
7649 tmp = load_reg(s, i);
7650 gen_st32(tmp, addr, IS_USER(s));
7652 tcg_gen_addi_i32(addr, addr, 4);
7654 if (insn & (1 << 21)) {
7655 /* Base register writeback. */
7656 if (insn & (1 << 24)) {
7657 tcg_gen_addi_i32(addr, addr, -offset);
7659 /* Fault if writeback register is in register list. */
7660 if (insn & (1 << rn))
7661 goto illegal_op;
7662 store_reg(s, rn, addr);
7663 } else {
7664 dead_tmp(addr);
7668 break;
7669 case 5:
7671 op = (insn >> 21) & 0xf;
7672 if (op == 6) {
7673 /* Halfword pack. */
7674 tmp = load_reg(s, rn);
7675 tmp2 = load_reg(s, rm);
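                /* Shift amount = imm3:imm2 (insn bits [14:12] and [7:6]).  */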
7676 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7677 if (insn & (1 << 5)) {
7678 /* pkhtb */
7679 if (shift == 0)
7680 shift = 31;
7681 tcg_gen_sari_i32(tmp2, tmp2, shift);
7682 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7683 tcg_gen_ext16u_i32(tmp2, tmp2);
7684 } else {
7685 /* pkhbt */
7686 if (shift)
7687 tcg_gen_shli_i32(tmp2, tmp2, shift);
7688 tcg_gen_ext16u_i32(tmp, tmp);
7689 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7691 tcg_gen_or_i32(tmp, tmp, tmp2);
7692 dead_tmp(tmp2);
7693 store_reg(s, rd, tmp);
7694 } else {
7695 /* Data processing register constant shift. */
7696 if (rn == 15) {
7697 tmp = new_tmp();
7698 tcg_gen_movi_i32(tmp, 0);
7699 } else {
7700 tmp = load_reg(s, rn);
7702 tmp2 = load_reg(s, rm);
7704 shiftop = (insn >> 4) & 3;
7705 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7706 conds = (insn & (1 << 20)) != 0;
7707 logic_cc = (conds && thumb2_logic_op(op));
7708 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7709 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7710 goto illegal_op;
7711 dead_tmp(tmp2);
7712 if (rd != 15) {
7713 store_reg(s, rd, tmp);
7714 } else {
7715 dead_tmp(tmp);
7718 break;
7719 case 13: /* Misc data processing. */
7720 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7721 if (op < 4 && (insn & 0xf000) != 0xf000)
7722 goto illegal_op;
7723 switch (op) {
7724 case 0: /* Register controlled shift. */
7725 tmp = load_reg(s, rn);
7726 tmp2 = load_reg(s, rm);
7727 if ((insn & 0x70) != 0)
7728 goto illegal_op;
7729 op = (insn >> 21) & 3;
7730 logic_cc = (insn & (1 << 20)) != 0;
7731 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7732 if (logic_cc)
7733 gen_logic_CC(tmp);
7734 store_reg_bx(env, s, rd, tmp);
7735 break;
7736 case 1: /* Sign/zero extend. */
7737 tmp = load_reg(s, rm);
7738 shift = (insn >> 4) & 3;
7739             /* ??? In many cases it's not necessary to do a
7740 rotate, a shift is sufficient. */
7741 if (shift != 0)
7742 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7743 op = (insn >> 20) & 7;
7744 switch (op) {
7745 case 0: gen_sxth(tmp); break;
7746 case 1: gen_uxth(tmp); break;
7747 case 2: gen_sxtb16(tmp); break;
7748 case 3: gen_uxtb16(tmp); break;
7749 case 4: gen_sxtb(tmp); break;
7750 case 5: gen_uxtb(tmp); break;
7751 default: goto illegal_op;
7753 if (rn != 15) {
7754 tmp2 = load_reg(s, rn);
7755 if ((op >> 1) == 1) {
7756 gen_add16(tmp, tmp2);
7757 } else {
7758 tcg_gen_add_i32(tmp, tmp, tmp2);
7759 dead_tmp(tmp2);
7762 store_reg(s, rd, tmp);
7763 break;
7764 case 2: /* SIMD add/subtract. */
7765 op = (insn >> 20) & 7;
7766 shift = (insn >> 4) & 7;
7767 if ((op & 3) == 3 || (shift & 3) == 3)
7768 goto illegal_op;
7769 tmp = load_reg(s, rn);
7770 tmp2 = load_reg(s, rm);
7771 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7772 dead_tmp(tmp2);
7773 store_reg(s, rd, tmp);
7774 break;
7775 case 3: /* Other data processing. */
7776 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7777 if (op < 4) {
7778 /* Saturating add/subtract. */
7779 tmp = load_reg(s, rn);
7780 tmp2 = load_reg(s, rm);
7781 if (op & 1)
7782 gen_helper_double_saturate(tmp, tmp);
7783 if (op & 2)
7784 gen_helper_sub_saturate(tmp, tmp2, tmp);
7785 else
7786 gen_helper_add_saturate(tmp, tmp, tmp2);
7787 dead_tmp(tmp2);
7788 } else {
7789 tmp = load_reg(s, rn);
7790 switch (op) {
7791 case 0x0a: /* rbit */
7792 gen_helper_rbit(tmp, tmp);
7793 break;
7794 case 0x08: /* rev */
7795 tcg_gen_bswap32_i32(tmp, tmp);
7796 break;
7797 case 0x09: /* rev16 */
7798 gen_rev16(tmp);
7799 break;
7800 case 0x0b: /* revsh */
7801 gen_revsh(tmp);
7802 break;
7803 case 0x10: /* sel */
7804 tmp2 = load_reg(s, rm);
7805 tmp3 = new_tmp();
7806 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7807 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7808 dead_tmp(tmp3);
7809 dead_tmp(tmp2);
7810 break;
7811 case 0x18: /* clz */
7812 gen_helper_clz(tmp, tmp);
7813 break;
7814 default:
7815 goto illegal_op;
7818 store_reg(s, rd, tmp);
7819 break;
7820 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7821 op = (insn >> 4) & 0xf;
7822 tmp = load_reg(s, rn);
7823 tmp2 = load_reg(s, rm);
7824 switch ((insn >> 20) & 7) {
7825 case 0: /* 32 x 32 -> 32 */
7826 tcg_gen_mul_i32(tmp, tmp, tmp2);
7827 dead_tmp(tmp2);
7828 if (rs != 15) {
7829 tmp2 = load_reg(s, rs);
7830 if (op)
7831 tcg_gen_sub_i32(tmp, tmp2, tmp);
7832 else
7833 tcg_gen_add_i32(tmp, tmp, tmp2);
7834 dead_tmp(tmp2);
7836 break;
7837 case 1: /* 16 x 16 -> 32 */
7838 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7839 dead_tmp(tmp2);
7840 if (rs != 15) {
7841 tmp2 = load_reg(s, rs);
7842 gen_helper_add_setq(tmp, tmp, tmp2);
7843 dead_tmp(tmp2);
7845 break;
7846 case 2: /* Dual multiply add. */
7847 case 4: /* Dual multiply subtract. */
7848 if (op)
7849 gen_swap_half(tmp2);
7850 gen_smul_dual(tmp, tmp2);
7851 /* This addition cannot overflow. */
7852 if (insn & (1 << 22)) {
7853 tcg_gen_sub_i32(tmp, tmp, tmp2);
7854 } else {
7855 tcg_gen_add_i32(tmp, tmp, tmp2);
7857 dead_tmp(tmp2);
7858 if (rs != 15)
7860 tmp2 = load_reg(s, rs);
7861 gen_helper_add_setq(tmp, tmp, tmp2);
7862 dead_tmp(tmp2);
7864 break;
7865 case 3: /* 32 * 16 -> 32msb */
7866 if (op)
7867 tcg_gen_sari_i32(tmp2, tmp2, 16);
7868 else
7869 gen_sxth(tmp2);
7870 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7871 tcg_gen_shri_i64(tmp64, tmp64, 16);
7872 tmp = new_tmp();
7873 tcg_gen_trunc_i64_i32(tmp, tmp64);
7874 tcg_temp_free_i64(tmp64);
7875 if (rs != 15)
7877 tmp2 = load_reg(s, rs);
7878 gen_helper_add_setq(tmp, tmp, tmp2);
7879 dead_tmp(tmp2);
7881 break;
7882 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7883 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7884 if (rs != 15) {
7885 tmp = load_reg(s, rs);
7886 if (insn & (1 << 20)) {
7887 tmp64 = gen_addq_msw(tmp64, tmp);
7888 } else {
7889 tmp64 = gen_subq_msw(tmp64, tmp);
7892 if (insn & (1 << 4)) {
7893 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7895 tcg_gen_shri_i64(tmp64, tmp64, 32);
7896 tmp = new_tmp();
7897 tcg_gen_trunc_i64_i32(tmp, tmp64);
7898 tcg_temp_free_i64(tmp64);
7899 break;
7900 case 7: /* Unsigned sum of absolute differences. */
7901 gen_helper_usad8(tmp, tmp, tmp2);
7902 dead_tmp(tmp2);
7903 if (rs != 15) {
7904 tmp2 = load_reg(s, rs);
7905 tcg_gen_add_i32(tmp, tmp, tmp2);
7906 dead_tmp(tmp2);
7908 break;
7910 store_reg(s, rd, tmp);
7911 break;
7912 case 6: case 7: /* 64-bit multiply, Divide. */
7913 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7914 tmp = load_reg(s, rn);
7915 tmp2 = load_reg(s, rm);
7916 if ((op & 0x50) == 0x10) {
7917 /* sdiv, udiv */
7918 if (!arm_feature(env, ARM_FEATURE_DIV))
7919 goto illegal_op;
7920 if (op & 0x20)
7921 gen_helper_udiv(tmp, tmp, tmp2);
7922 else
7923 gen_helper_sdiv(tmp, tmp, tmp2);
7924 dead_tmp(tmp2);
7925 store_reg(s, rd, tmp);
7926 } else if ((op & 0xe) == 0xc) {
7927 /* Dual multiply accumulate long. */
7928 if (op & 1)
7929 gen_swap_half(tmp2);
7930 gen_smul_dual(tmp, tmp2);
7931 if (op & 0x10) {
7932 tcg_gen_sub_i32(tmp, tmp, tmp2);
7933 } else {
7934 tcg_gen_add_i32(tmp, tmp, tmp2);
7936 dead_tmp(tmp2);
7937 /* BUGFIX */
7938 tmp64 = tcg_temp_new_i64();
7939 tcg_gen_ext_i32_i64(tmp64, tmp);
7940 dead_tmp(tmp);
7941 gen_addq(s, tmp64, rs, rd);
7942 gen_storeq_reg(s, rs, rd, tmp64);
7943 tcg_temp_free_i64(tmp64);
7944 } else {
7945 if (op & 0x20) {
7946 /* Unsigned 64-bit multiply */
7947 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7948 } else {
7949 if (op & 8) {
7950 /* smlalxy */
7951 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7952 dead_tmp(tmp2);
7953 tmp64 = tcg_temp_new_i64();
7954 tcg_gen_ext_i32_i64(tmp64, tmp);
7955 dead_tmp(tmp);
7956 } else {
7957 /* Signed 64-bit multiply */
7958 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7961 if (op & 4) {
7962 /* umaal */
7963 gen_addq_lo(s, tmp64, rs);
7964 gen_addq_lo(s, tmp64, rd);
7965 } else if (op & 0x40) {
7966 /* 64-bit accumulate. */
7967 gen_addq(s, tmp64, rs, rd);
7969 gen_storeq_reg(s, rs, rd, tmp64);
7970 tcg_temp_free_i64(tmp64);
7972 break;
7974 break;
7975 case 6: case 7: case 14: case 15:
7976 /* Coprocessor. */
7977 if (((insn >> 24) & 3) == 3) {
7978 /* Translate into the equivalent ARM encoding. */
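                /* The Thumb U bit (insn[28]) moves down to ARM insn[24] and the
                   top bits become 1111 001x, i.e. the unconditional ARM Neon
                   data-processing form.  */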
7979 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7980 if (disas_neon_data_insn(env, s, insn))
7981 goto illegal_op;
7982 } else {
7983 if (insn & (1 << 28))
7984 goto illegal_op;
7985 if (disas_coproc_insn (env, s, insn))
7986 goto illegal_op;
7988 break;
7989 case 8: case 9: case 10: case 11:
7990 if (insn & (1 << 15)) {
7991 /* Branches, misc control. */
7992 if (insn & 0x5000) {
7993 /* Unconditional branch. */
7994 /* signextend(hw1[10:0]) -> offset[:12]. */
7995 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7996                     /* hw2[10:0] -> offset[11:1].  */
7997 offset |= (insn & 0x7ff) << 1;
7998 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7999 offset[24:22] already have the same value because of the
8000 sign extension above. */
8001 offset ^= ((~insn) & (1 << 13)) << 10;
8002 offset ^= ((~insn) & (1 << 11)) << 11;
8004 if (insn & (1 << 14)) {
8005 /* Branch and link. */
8006 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8009 offset += s->pc;
8010 if (insn & (1 << 12)) {
8011 /* b/bl */
8012 gen_jmp(s, offset);
8013 } else {
8014 /* blx */
8015 offset &= ~(uint32_t)2;
8016 gen_bx_im(s, offset);
8018 } else if (((insn >> 23) & 7) == 7) {
8019 /* Misc control */
8020 if (insn & (1 << 13))
8021 goto illegal_op;
8023 if (insn & (1 << 26)) {
8024 /* Secure monitor call (v6Z) */
8025 goto illegal_op; /* not implemented. */
8026 } else {
8027 op = (insn >> 20) & 7;
8028 switch (op) {
8029 case 0: /* msr cpsr. */
8030 if (IS_M(env)) {
8031 tmp = load_reg(s, rn);
8032 addr = tcg_const_i32(insn & 0xff);
8033 gen_helper_v7m_msr(cpu_env, addr, tmp);
8034 tcg_temp_free_i32(addr);
8035 dead_tmp(tmp);
8036 gen_lookup_tb(s);
8037 break;
8039 /* fall through */
8040 case 1: /* msr spsr. */
8041 if (IS_M(env))
8042 goto illegal_op;
8043 tmp = load_reg(s, rn);
8044 if (gen_set_psr(s,
8045 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8046 op == 1, tmp))
8047 goto illegal_op;
8048 break;
8049 case 2: /* cps, nop-hint. */
8050 if (((insn >> 8) & 7) == 0) {
8051 gen_nop_hint(s, insn & 0xff);
8053 /* Implemented as NOP in user mode. */
8054 if (IS_USER(s))
8055 break;
8056 offset = 0;
8057 imm = 0;
8058 if (insn & (1 << 10)) {
8059 if (insn & (1 << 7))
8060 offset |= CPSR_A;
8061 if (insn & (1 << 6))
8062 offset |= CPSR_I;
8063 if (insn & (1 << 5))
8064 offset |= CPSR_F;
8065 if (insn & (1 << 9))
8066 imm = CPSR_A | CPSR_I | CPSR_F;
8068 if (insn & (1 << 8)) {
8069 offset |= 0x1f;
8070 imm |= (insn & 0x1f);
8072 if (offset) {
8073 gen_set_psr_im(s, offset, 0, imm);
8075 break;
8076 case 3: /* Special control operations. */
8077 ARCH(7);
8078 op = (insn >> 4) & 0xf;
8079 switch (op) {
8080 case 2: /* clrex */
8081 gen_clrex(s);
8082 break;
8083 case 4: /* dsb */
8084 case 5: /* dmb */
8085 case 6: /* isb */
8086 /* These execute as NOPs. */
8087 break;
8088 default:
8089 goto illegal_op;
8091 break;
8092 case 4: /* bxj */
8093 /* Trivial implementation equivalent to bx. */
8094 tmp = load_reg(s, rn);
8095 gen_bx(s, tmp);
8096 break;
8097 case 5: /* Exception return. */
8098 if (IS_USER(s)) {
8099 goto illegal_op;
8101 if (rn != 14 || rd != 15) {
8102 goto illegal_op;
8104 tmp = load_reg(s, rn);
8105 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8106 gen_exception_return(s, tmp);
8107 break;
8108 case 6: /* mrs cpsr. */
8109 tmp = new_tmp();
8110 if (IS_M(env)) {
8111 addr = tcg_const_i32(insn & 0xff);
8112 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8113 tcg_temp_free_i32(addr);
8114 } else {
8115 gen_helper_cpsr_read(tmp);
8117 store_reg(s, rd, tmp);
8118 break;
8119 case 7: /* mrs spsr. */
8120 /* Not accessible in user mode. */
8121 if (IS_USER(s) || IS_M(env))
8122 goto illegal_op;
8123 tmp = load_cpu_field(spsr);
8124 store_reg(s, rd, tmp);
8125 break;
8128 } else {
8129 /* Conditional branch. */
8130 op = (insn >> 22) & 0xf;
8131 /* Generate a conditional jump to next instruction. */
8132 s->condlabel = gen_new_label();
8133 gen_test_cc(op ^ 1, s->condlabel);
8134 s->condjmp = 1;
8136 /* offset[11:1] = insn[10:0] */
8137 offset = (insn & 0x7ff) << 1;
8138 /* offset[17:12] = insn[21:16]. */
8139 offset |= (insn & 0x003f0000) >> 4;
8140 /* offset[31:20] = insn[26]. */
8141 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8142 /* offset[18] = insn[13]. */
8143 offset |= (insn & (1 << 13)) << 5;
8144 /* offset[19] = insn[11]. */
8145 offset |= (insn & (1 << 11)) << 8;
8147 /* jump to the offset */
8148 gen_jmp(s, s->pc + offset);
8150 } else {
8151 /* Data processing immediate. */
8152 if (insn & (1 << 25)) {
8153 if (insn & (1 << 24)) {
8154 if (insn & (1 << 20))
8155 goto illegal_op;
8156 /* Bitfield/Saturate. */
8157 op = (insn >> 21) & 7;
8158 imm = insn & 0x1f;
8159 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8160 if (rn == 15) {
8161 tmp = new_tmp();
8162 tcg_gen_movi_i32(tmp, 0);
8163 } else {
8164 tmp = load_reg(s, rn);
8166 switch (op) {
8167 case 2: /* Signed bitfield extract. */
8168 imm++;
8169 if (shift + imm > 32)
8170 goto illegal_op;
8171 if (imm < 32)
8172 gen_sbfx(tmp, shift, imm);
8173 break;
8174 case 6: /* Unsigned bitfield extract. */
8175 imm++;
8176 if (shift + imm > 32)
8177 goto illegal_op;
8178 if (imm < 32)
8179 gen_ubfx(tmp, shift, (1u << imm) - 1);
8180 break;
8181 case 3: /* Bitfield insert/clear. */
8182 if (imm < shift)
8183 goto illegal_op;
8184 imm = imm + 1 - shift;
8185 if (imm != 32) {
8186 tmp2 = load_reg(s, rd);
8187 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8188 dead_tmp(tmp2);
8190 break;
8191 case 7:
8192 goto illegal_op;
8193 default: /* Saturate. */
8194 if (shift) {
8195 if (op & 1)
8196 tcg_gen_sari_i32(tmp, tmp, shift);
8197 else
8198 tcg_gen_shli_i32(tmp, tmp, shift);
8200 tmp2 = tcg_const_i32(imm);
8201 if (op & 4) {
8202 /* Unsigned. */
8203 if ((op & 1) && shift == 0)
8204 gen_helper_usat16(tmp, tmp, tmp2);
8205 else
8206 gen_helper_usat(tmp, tmp, tmp2);
8207 } else {
8208 /* Signed. */
8209 if ((op & 1) && shift == 0)
8210 gen_helper_ssat16(tmp, tmp, tmp2);
8211 else
8212 gen_helper_ssat(tmp, tmp, tmp2);
8214 tcg_temp_free_i32(tmp2);
8215 break;
8217 store_reg(s, rd, tmp);
8218 } else {
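                    /* i:imm3:imm8 (insn bits 26, [14:12], [7:0]) form the low
                       12 bits of the immediate.  */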
8219 imm = ((insn & 0x04000000) >> 15)
8220 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8221 if (insn & (1 << 22)) {
8222 /* 16-bit immediate. */
8223 imm |= (insn >> 4) & 0xf000;
8224 if (insn & (1 << 23)) {
8225 /* movt */
8226 tmp = load_reg(s, rd);
8227 tcg_gen_ext16u_i32(tmp, tmp);
8228 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8229 } else {
8230 /* movw */
8231 tmp = new_tmp();
8232 tcg_gen_movi_i32(tmp, imm);
8234 } else {
8235 /* Add/sub 12-bit immediate. */
8236 if (rn == 15) {
8237 offset = s->pc & ~(uint32_t)3;
8238 if (insn & (1 << 23))
8239 offset -= imm;
8240 else
8241 offset += imm;
8242 tmp = new_tmp();
8243 tcg_gen_movi_i32(tmp, offset);
8244 } else {
8245 tmp = load_reg(s, rn);
8246 if (insn & (1 << 23))
8247 tcg_gen_subi_i32(tmp, tmp, imm);
8248 else
8249 tcg_gen_addi_i32(tmp, tmp, imm);
8252 store_reg(s, rd, tmp);
8254 } else {
8255 int shifter_out = 0;
8256 /* modified 12-bit immediate. */
8257 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8258 imm = (insn & 0xff);
8259 switch (shift) {
8260 case 0: /* XY */
8261 /* Nothing to do. */
8262 break;
8263 case 1: /* 00XY00XY */
8264 imm |= imm << 16;
8265 break;
8266 case 2: /* XY00XY00 */
8267 imm |= imm << 16;
8268 imm <<= 8;
8269 break;
8270 case 3: /* XYXYXYXY */
8271 imm |= imm << 16;
8272 imm |= imm << 8;
8273 break;
8274 default: /* Rotated constant. */
8275 shift = (shift << 1) | (imm >> 7);
8276 imm |= 0x80;
8277 imm = imm << (32 - shift);
8278 shifter_out = 1;
8279 break;
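                /* Worked example (illustrative): i:imm3 = 0b0100, imm8 = 0x55
                   takes the rotated-constant path: shift becomes 8, imm becomes
                   0xd5, and the expanded constant is 0xd5 << 24 = 0xd5000000
                   (i.e. 0xd5 ror 8); its bit 31 is the carry-out used when
                   shifter_out is set.  */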
8281 tmp2 = new_tmp();
8282 tcg_gen_movi_i32(tmp2, imm);
8283 rn = (insn >> 16) & 0xf;
8284 if (rn == 15) {
8285 tmp = new_tmp();
8286 tcg_gen_movi_i32(tmp, 0);
8287 } else {
8288 tmp = load_reg(s, rn);
8290 op = (insn >> 21) & 0xf;
8291 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8292 shifter_out, tmp, tmp2))
8293 goto illegal_op;
8294 dead_tmp(tmp2);
8295 rd = (insn >> 8) & 0xf;
8296 if (rd != 15) {
8297 store_reg(s, rd, tmp);
8298 } else {
8299 dead_tmp(tmp);
8303 break;
8304 case 12: /* Load/store single data item. */
8306 int postinc = 0;
8307 int writeback = 0;
8308 int user;
8309 if ((insn & 0x01100000) == 0x01000000) {
8310 if (disas_neon_ls_insn(env, s, insn))
8311 goto illegal_op;
8312 break;
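            /* op<1:0> is the access size (insn[22:21]); op<2> is the sign bit
               (insn[24]).  */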
8314 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8315 if (rs == 15) {
8316 if (!(insn & (1 << 20))) {
8317 goto illegal_op;
8319 if (op != 2) {
8320                 /* Byte or halfword load space with dest == r15: memory hints.
8321 * Catch them early so we don't emit pointless addressing code.
8322 * This space is a mix of:
8323 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8324 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8325 * cores)
8326 * unallocated hints, which must be treated as NOPs
8327 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8328 * which is easiest for the decoding logic
8329 * Some space which must UNDEF
8331 int op1 = (insn >> 23) & 3;
8332 int op2 = (insn >> 6) & 0x3f;
8333 if (op & 2) {
8334 goto illegal_op;
8336 if (rn == 15) {
8337 /* UNPREDICTABLE or unallocated hint */
8338 return 0;
8340 if (op1 & 1) {
8341 return 0; /* PLD* or unallocated hint */
8343 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
8344 return 0; /* PLD* or unallocated hint */
8346 /* UNDEF space, or an UNPREDICTABLE */
8347 return 1;
8350 user = IS_USER(s);
8351 if (rn == 15) {
8352 addr = new_tmp();
8353 /* PC relative. */
8354 /* s->pc has already been incremented by 4. */
8355 imm = s->pc & 0xfffffffc;
8356 if (insn & (1 << 23))
8357 imm += insn & 0xfff;
8358 else
8359 imm -= insn & 0xfff;
8360 tcg_gen_movi_i32(addr, imm);
8361 } else {
8362 addr = load_reg(s, rn);
8363 if (insn & (1 << 23)) {
8364 /* Positive offset. */
8365 imm = insn & 0xfff;
8366 tcg_gen_addi_i32(addr, addr, imm);
8367 } else {
8368 imm = insn & 0xff;
8369 switch ((insn >> 8) & 7) {
8370 case 0: case 8: /* Shifted Register. */
8371 shift = (insn >> 4) & 0xf;
8372 if (shift > 3)
8373 goto illegal_op;
8374 tmp = load_reg(s, rm);
8375 if (shift)
8376 tcg_gen_shli_i32(tmp, tmp, shift);
8377 tcg_gen_add_i32(addr, addr, tmp);
8378 dead_tmp(tmp);
8379 break;
8380 case 4: /* Negative offset. */
8381 tcg_gen_addi_i32(addr, addr, -imm);
8382 break;
8383 case 6: /* User privilege. */
8384 tcg_gen_addi_i32(addr, addr, imm);
8385 user = 1;
8386 break;
8387 case 1: /* Post-decrement. */
8388 imm = -imm;
8389 /* Fall through. */
8390 case 3: /* Post-increment. */
8391 postinc = 1;
8392 writeback = 1;
8393 break;
8394 case 5: /* Pre-decrement. */
8395 imm = -imm;
8396 /* Fall through. */
8397 case 7: /* Pre-increment. */
8398 tcg_gen_addi_i32(addr, addr, imm);
8399 writeback = 1;
8400 break;
8401 default:
8402 goto illegal_op;
8406 if (insn & (1 << 20)) {
8407 /* Load. */
8408 switch (op) {
8409 case 0: tmp = gen_ld8u(addr, user); break;
8410 case 4: tmp = gen_ld8s(addr, user); break;
8411 case 1: tmp = gen_ld16u(addr, user); break;
8412 case 5: tmp = gen_ld16s(addr, user); break;
8413 case 2: tmp = gen_ld32(addr, user); break;
8414 default: goto illegal_op;
8416 if (rs == 15) {
8417 gen_bx(s, tmp);
8418 } else {
8419 store_reg(s, rs, tmp);
8421 } else {
8422 /* Store. */
8423 tmp = load_reg(s, rs);
8424 switch (op) {
8425 case 0: gen_st8(tmp, addr, user); break;
8426 case 1: gen_st16(tmp, addr, user); break;
8427 case 2: gen_st32(tmp, addr, user); break;
8428 default: goto illegal_op;
8431 if (postinc)
8432 tcg_gen_addi_i32(addr, addr, imm);
8433 if (writeback) {
8434 store_reg(s, rn, addr);
8435 } else {
8436 dead_tmp(addr);
8439 break;
8440 default:
8441 goto illegal_op;
8443 return 0;
8444 illegal_op:
8445 return 1;
8448 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8450 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8451 int32_t offset;
8452 int i;
8453 TCGv tmp;
8454 TCGv tmp2;
8455 TCGv addr;
8457 if (s->condexec_mask) {
8458 cond = s->condexec_cond;
8459 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8460 s->condlabel = gen_new_label();
8461 gen_test_cc(cond ^ 1, s->condlabel);
8462 s->condjmp = 1;
8466 insn = lduw_code(s->pc);
8467 s->pc += 2;
8469 switch (insn >> 12) {
8470 case 0: case 1:
8472 rd = insn & 7;
8473 op = (insn >> 11) & 3;
8474 if (op == 3) {
8475 /* add/subtract */
8476 rn = (insn >> 3) & 7;
8477 tmp = load_reg(s, rn);
8478 if (insn & (1 << 10)) {
8479 /* immediate */
8480 tmp2 = new_tmp();
8481 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8482 } else {
8483 /* reg */
8484 rm = (insn >> 6) & 7;
8485 tmp2 = load_reg(s, rm);
8487 if (insn & (1 << 9)) {
8488 if (s->condexec_mask)
8489 tcg_gen_sub_i32(tmp, tmp, tmp2);
8490 else
8491 gen_helper_sub_cc(tmp, tmp, tmp2);
8492 } else {
8493 if (s->condexec_mask)
8494 tcg_gen_add_i32(tmp, tmp, tmp2);
8495 else
8496 gen_helper_add_cc(tmp, tmp, tmp2);
8498 dead_tmp(tmp2);
8499 store_reg(s, rd, tmp);
8500 } else {
8501 /* shift immediate */
8502 rm = (insn >> 3) & 7;
8503 shift = (insn >> 6) & 0x1f;
8504 tmp = load_reg(s, rm);
8505 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8506 if (!s->condexec_mask)
8507 gen_logic_CC(tmp);
8508 store_reg(s, rd, tmp);
8510 break;
8511 case 2: case 3:
8512 /* arithmetic large immediate */
8513 op = (insn >> 11) & 3;
8514 rd = (insn >> 8) & 0x7;
8515 if (op == 0) { /* mov */
8516 tmp = new_tmp();
8517 tcg_gen_movi_i32(tmp, insn & 0xff);
8518 if (!s->condexec_mask)
8519 gen_logic_CC(tmp);
8520 store_reg(s, rd, tmp);
8521 } else {
8522 tmp = load_reg(s, rd);
8523 tmp2 = new_tmp();
8524 tcg_gen_movi_i32(tmp2, insn & 0xff);
8525 switch (op) {
8526 case 1: /* cmp */
8527 gen_helper_sub_cc(tmp, tmp, tmp2);
8528 dead_tmp(tmp);
8529 dead_tmp(tmp2);
8530 break;
8531 case 2: /* add */
8532 if (s->condexec_mask)
8533 tcg_gen_add_i32(tmp, tmp, tmp2);
8534 else
8535 gen_helper_add_cc(tmp, tmp, tmp2);
8536 dead_tmp(tmp2);
8537 store_reg(s, rd, tmp);
8538 break;
8539 case 3: /* sub */
8540 if (s->condexec_mask)
8541 tcg_gen_sub_i32(tmp, tmp, tmp2);
8542 else
8543 gen_helper_sub_cc(tmp, tmp, tmp2);
8544 dead_tmp(tmp2);
8545 store_reg(s, rd, tmp);
8546 break;
8549 break;
8550 case 4:
8551 if (insn & (1 << 11)) {
8552 rd = (insn >> 8) & 7;
8553 /* load pc-relative. Bit 1 of PC is ignored. */
8554 val = s->pc + 2 + ((insn & 0xff) * 4);
8555 val &= ~(uint32_t)2;
8556 addr = new_tmp();
8557 tcg_gen_movi_i32(addr, val);
8558 tmp = gen_ld32(addr, IS_USER(s));
8559 dead_tmp(addr);
8560 store_reg(s, rd, tmp);
8561 break;
8563 if (insn & (1 << 10)) {
8564 /* data processing extended or blx */
8565 rd = (insn & 7) | ((insn >> 4) & 8);
8566 rm = (insn >> 3) & 0xf;
8567 op = (insn >> 8) & 3;
8568 switch (op) {
8569 case 0: /* add */
8570 tmp = load_reg(s, rd);
8571 tmp2 = load_reg(s, rm);
8572 tcg_gen_add_i32(tmp, tmp, tmp2);
8573 dead_tmp(tmp2);
8574 store_reg(s, rd, tmp);
8575 break;
8576 case 1: /* cmp */
8577 tmp = load_reg(s, rd);
8578 tmp2 = load_reg(s, rm);
8579 gen_helper_sub_cc(tmp, tmp, tmp2);
8580 dead_tmp(tmp2);
8581 dead_tmp(tmp);
8582 break;
8583 case 2: /* mov/cpy */
8584 tmp = load_reg(s, rm);
8585 store_reg(s, rd, tmp);
8586 break;
8587             case 3: /* branch [and link] exchange thumb register */
8588 tmp = load_reg(s, rm);
8589 if (insn & (1 << 7)) {
8590 val = (uint32_t)s->pc | 1;
8591 tmp2 = new_tmp();
8592 tcg_gen_movi_i32(tmp2, val);
8593 store_reg(s, 14, tmp2);
8595 gen_bx(s, tmp);
8596 break;
8598 break;
8601 /* data processing register */
8602 rd = insn & 7;
8603 rm = (insn >> 3) & 7;
8604 op = (insn >> 6) & 0xf;
8605 if (op == 2 || op == 3 || op == 4 || op == 7) {
8606 /* the shift/rotate ops want the operands backwards */
8607 val = rm;
8608 rm = rd;
8609 rd = val;
8610 val = 1;
8611 } else {
8612 val = 0;
8615 if (op == 9) { /* neg */
8616 tmp = new_tmp();
8617 tcg_gen_movi_i32(tmp, 0);
8618 } else if (op != 0xf) { /* mvn doesn't read its first operand */
8619 tmp = load_reg(s, rd);
8620 } else {
8621 TCGV_UNUSED(tmp);
8624 tmp2 = load_reg(s, rm);
8625 switch (op) {
8626 case 0x0: /* and */
8627 tcg_gen_and_i32(tmp, tmp, tmp2);
8628 if (!s->condexec_mask)
8629 gen_logic_CC(tmp);
8630 break;
8631 case 0x1: /* eor */
8632 tcg_gen_xor_i32(tmp, tmp, tmp2);
8633 if (!s->condexec_mask)
8634 gen_logic_CC(tmp);
8635 break;
8636 case 0x2: /* lsl */
8637 if (s->condexec_mask) {
8638 gen_helper_shl(tmp2, tmp2, tmp);
8639 } else {
8640 gen_helper_shl_cc(tmp2, tmp2, tmp);
8641 gen_logic_CC(tmp2);
8643 break;
8644 case 0x3: /* lsr */
8645 if (s->condexec_mask) {
8646 gen_helper_shr(tmp2, tmp2, tmp);
8647 } else {
8648 gen_helper_shr_cc(tmp2, tmp2, tmp);
8649 gen_logic_CC(tmp2);
8651 break;
8652 case 0x4: /* asr */
8653 if (s->condexec_mask) {
8654 gen_helper_sar(tmp2, tmp2, tmp);
8655 } else {
8656 gen_helper_sar_cc(tmp2, tmp2, tmp);
8657 gen_logic_CC(tmp2);
8659 break;
8660 case 0x5: /* adc */
8661 if (s->condexec_mask)
8662 gen_adc(tmp, tmp2);
8663 else
8664 gen_helper_adc_cc(tmp, tmp, tmp2);
8665 break;
8666 case 0x6: /* sbc */
8667 if (s->condexec_mask)
8668 gen_sub_carry(tmp, tmp, tmp2);
8669 else
8670 gen_helper_sbc_cc(tmp, tmp, tmp2);
8671 break;
8672 case 0x7: /* ror */
8673 if (s->condexec_mask) {
8674 tcg_gen_andi_i32(tmp, tmp, 0x1f);
8675 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
8676 } else {
8677 gen_helper_ror_cc(tmp2, tmp2, tmp);
8678 gen_logic_CC(tmp2);
8680 break;
8681 case 0x8: /* tst */
8682 tcg_gen_and_i32(tmp, tmp, tmp2);
8683 gen_logic_CC(tmp);
8684 rd = 16;
8685 break;
8686 case 0x9: /* neg */
8687 if (s->condexec_mask)
8688 tcg_gen_neg_i32(tmp, tmp2);
8689 else
8690 gen_helper_sub_cc(tmp, tmp, tmp2);
8691 break;
8692 case 0xa: /* cmp */
8693 gen_helper_sub_cc(tmp, tmp, tmp2);
8694 rd = 16;
8695 break;
8696 case 0xb: /* cmn */
8697 gen_helper_add_cc(tmp, tmp, tmp2);
8698 rd = 16;
8699 break;
8700 case 0xc: /* orr */
8701 tcg_gen_or_i32(tmp, tmp, tmp2);
8702 if (!s->condexec_mask)
8703 gen_logic_CC(tmp);
8704 break;
8705 case 0xd: /* mul */
8706 tcg_gen_mul_i32(tmp, tmp, tmp2);
8707 if (!s->condexec_mask)
8708 gen_logic_CC(tmp);
8709 break;
8710 case 0xe: /* bic */
8711 tcg_gen_andc_i32(tmp, tmp, tmp2);
8712 if (!s->condexec_mask)
8713 gen_logic_CC(tmp);
8714 break;
8715 case 0xf: /* mvn */
8716 tcg_gen_not_i32(tmp2, tmp2);
8717 if (!s->condexec_mask)
8718 gen_logic_CC(tmp2);
8719 val = 1;
8720 rm = rd;
8721 break;
8723 if (rd != 16) {
8724 if (val) {
8725 store_reg(s, rm, tmp2);
8726 if (op != 0xf)
8727 dead_tmp(tmp);
8728 } else {
8729 store_reg(s, rd, tmp);
8730 dead_tmp(tmp2);
8732 } else {
8733 dead_tmp(tmp);
8734 dead_tmp(tmp2);
8736 break;
8738 case 5:
8739 /* load/store register offset. */
8740 rd = insn & 7;
8741 rn = (insn >> 3) & 7;
8742 rm = (insn >> 6) & 7;
8743 op = (insn >> 9) & 7;
8744 addr = load_reg(s, rn);
8745 tmp = load_reg(s, rm);
8746 tcg_gen_add_i32(addr, addr, tmp);
8747 dead_tmp(tmp);
8749 if (op < 3) /* store */
8750 tmp = load_reg(s, rd);
8752 switch (op) {
8753 case 0: /* str */
8754 gen_st32(tmp, addr, IS_USER(s));
8755 break;
8756 case 1: /* strh */
8757 gen_st16(tmp, addr, IS_USER(s));
8758 break;
8759 case 2: /* strb */
8760 gen_st8(tmp, addr, IS_USER(s));
8761 break;
8762 case 3: /* ldrsb */
8763 tmp = gen_ld8s(addr, IS_USER(s));
8764 break;
8765 case 4: /* ldr */
8766 tmp = gen_ld32(addr, IS_USER(s));
8767 break;
8768 case 5: /* ldrh */
8769 tmp = gen_ld16u(addr, IS_USER(s));
8770 break;
8771 case 6: /* ldrb */
8772 tmp = gen_ld8u(addr, IS_USER(s));
8773 break;
8774 case 7: /* ldrsh */
8775 tmp = gen_ld16s(addr, IS_USER(s));
8776 break;
8778 if (op >= 3) /* load */
8779 store_reg(s, rd, tmp);
8780 dead_tmp(addr);
8781 break;
8783 case 6:
8784 /* load/store word immediate offset */
8785 rd = insn & 7;
8786 rn = (insn >> 3) & 7;
8787 addr = load_reg(s, rn);
8788 val = (insn >> 4) & 0x7c;
8789 tcg_gen_addi_i32(addr, addr, val);
8791 if (insn & (1 << 11)) {
8792 /* load */
8793 tmp = gen_ld32(addr, IS_USER(s));
8794 store_reg(s, rd, tmp);
8795 } else {
8796 /* store */
8797 tmp = load_reg(s, rd);
8798 gen_st32(tmp, addr, IS_USER(s));
8800 dead_tmp(addr);
8801 break;
8803 case 7:
8804 /* load/store byte immediate offset */
8805 rd = insn & 7;
8806 rn = (insn >> 3) & 7;
8807 addr = load_reg(s, rn);
8808 val = (insn >> 6) & 0x1f;
8809 tcg_gen_addi_i32(addr, addr, val);
8811 if (insn & (1 << 11)) {
8812 /* load */
8813 tmp = gen_ld8u(addr, IS_USER(s));
8814 store_reg(s, rd, tmp);
8815 } else {
8816 /* store */
8817 tmp = load_reg(s, rd);
8818 gen_st8(tmp, addr, IS_USER(s));
8820 dead_tmp(addr);
8821 break;
8823 case 8:
8824 /* load/store halfword immediate offset */
8825 rd = insn & 7;
8826 rn = (insn >> 3) & 7;
8827 addr = load_reg(s, rn);
8828 val = (insn >> 5) & 0x3e;
8829 tcg_gen_addi_i32(addr, addr, val);
8831 if (insn & (1 << 11)) {
8832 /* load */
8833 tmp = gen_ld16u(addr, IS_USER(s));
8834 store_reg(s, rd, tmp);
8835 } else {
8836 /* store */
8837 tmp = load_reg(s, rd);
8838 gen_st16(tmp, addr, IS_USER(s));
8840 dead_tmp(addr);
8841 break;
8843 case 9:
8844 /* load/store from stack */
8845 rd = (insn >> 8) & 7;
8846 addr = load_reg(s, 13);
8847 val = (insn & 0xff) * 4;
8848 tcg_gen_addi_i32(addr, addr, val);
8850 if (insn & (1 << 11)) {
8851 /* load */
8852 tmp = gen_ld32(addr, IS_USER(s));
8853 store_reg(s, rd, tmp);
8854 } else {
8855 /* store */
8856 tmp = load_reg(s, rd);
8857 gen_st32(tmp, addr, IS_USER(s));
8859 dead_tmp(addr);
8860 break;
8862 case 10:
8863 /* add to high reg */
8864 rd = (insn >> 8) & 7;
8865 if (insn & (1 << 11)) {
8866 /* SP */
8867 tmp = load_reg(s, 13);
8868 } else {
8869 /* PC. bit 1 is ignored. */
8870 tmp = new_tmp();
8871 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8873 val = (insn & 0xff) * 4;
8874 tcg_gen_addi_i32(tmp, tmp, val);
8875 store_reg(s, rd, tmp);
8876 break;
8878 case 11:
8879 /* misc */
8880 op = (insn >> 8) & 0xf;
8881 switch (op) {
8882 case 0:
8883 /* adjust stack pointer */
8884 tmp = load_reg(s, 13);
8885 val = (insn & 0x7f) * 4;
8886 if (insn & (1 << 7))
8887 val = -(int32_t)val;
8888 tcg_gen_addi_i32(tmp, tmp, val);
8889 store_reg(s, 13, tmp);
8890 break;
8892 case 2: /* sign/zero extend. */
8893 ARCH(6);
8894 rd = insn & 7;
8895 rm = (insn >> 3) & 7;
8896 tmp = load_reg(s, rm);
8897 switch ((insn >> 6) & 3) {
8898 case 0: gen_sxth(tmp); break;
8899 case 1: gen_sxtb(tmp); break;
8900 case 2: gen_uxth(tmp); break;
8901 case 3: gen_uxtb(tmp); break;
8903 store_reg(s, rd, tmp);
8904 break;
8905 case 4: case 5: case 0xc: case 0xd:
8906 /* push/pop */
8907 addr = load_reg(s, 13);
8908 if (insn & (1 << 8))
8909 offset = 4;
8910 else
8911 offset = 0;
8912 for (i = 0; i < 8; i++) {
8913 if (insn & (1 << i))
8914 offset += 4;
8916 if ((insn & (1 << 11)) == 0) {
8917 tcg_gen_addi_i32(addr, addr, -offset);
8919 for (i = 0; i < 8; i++) {
8920 if (insn & (1 << i)) {
8921 if (insn & (1 << 11)) {
8922 /* pop */
8923 tmp = gen_ld32(addr, IS_USER(s));
8924 store_reg(s, i, tmp);
8925 } else {
8926 /* push */
8927 tmp = load_reg(s, i);
8928 gen_st32(tmp, addr, IS_USER(s));
8930 /* advance to the next address. */
8931 tcg_gen_addi_i32(addr, addr, 4);
8934 TCGV_UNUSED(tmp);
8935 if (insn & (1 << 8)) {
8936 if (insn & (1 << 11)) {
8937 /* pop pc */
8938 tmp = gen_ld32(addr, IS_USER(s));
8939 /* don't set the pc until the rest of the instruction
8940 has completed */
8941 } else {
8942 /* push lr */
8943 tmp = load_reg(s, 14);
8944 gen_st32(tmp, addr, IS_USER(s));
8946 tcg_gen_addi_i32(addr, addr, 4);
8948 if ((insn & (1 << 11)) == 0) {
8949 tcg_gen_addi_i32(addr, addr, -offset);
8951 /* write back the new stack pointer */
8952 store_reg(s, 13, addr);
8953 /* set the new PC value */
8954 if ((insn & 0x0900) == 0x0900)
8955 gen_bx(s, tmp);
8956 break;
8958         case 1: case 3: case 9: case 11: /* cbz/cbnz */
8959 rm = insn & 7;
8960 tmp = load_reg(s, rm);
8961 s->condlabel = gen_new_label();
8962 s->condjmp = 1;
8963 if (insn & (1 << 11))
8964 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8965 else
8966 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8967 dead_tmp(tmp);
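            /* offset = insn[9]:insn[7:3]:'0' (branch offset bits [6:1]).  */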
8968 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8969 val = (uint32_t)s->pc + 2;
8970 val += offset;
8971 gen_jmp(s, val);
8972 break;
8974 case 15: /* IT, nop-hint. */
8975 if ((insn & 0xf) == 0) {
8976 gen_nop_hint(s, (insn >> 4) & 0xf);
8977 break;
8979 /* If Then. */
8980 s->condexec_cond = (insn >> 4) & 0xe;
8981 s->condexec_mask = insn & 0x1f;
8982             /* No actual code generated for this insn, just set up state.  */
8983 break;
8985 case 0xe: /* bkpt */
8986 gen_exception_insn(s, 2, EXCP_BKPT);
8987 break;
8989 case 0xa: /* rev */
8990 ARCH(6);
8991 rn = (insn >> 3) & 0x7;
8992 rd = insn & 0x7;
8993 tmp = load_reg(s, rn);
8994 switch ((insn >> 6) & 3) {
8995 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8996 case 1: gen_rev16(tmp); break;
8997 case 3: gen_revsh(tmp); break;
8998 default: goto illegal_op;
8999 }
9000 store_reg(s, rd, tmp);
9001 break;
9003 case 6: /* cps */
9004 ARCH(6);
9005 if (IS_USER(s))
9006 break;
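/* CPSIE/CPSID: on v7-M this is modelled by writing the PRIMASK (insn bit 0) and FAULTMASK (insn bit 1) special registers via the v7m_msr helper; other profiles set or clear the CPSR A/I/F bits directly. */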
9007 if (IS_M(env)) {
9008 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9009 /* PRIMASK */
9010 if (insn & 1) {
9011 addr = tcg_const_i32(16);
9012 gen_helper_v7m_msr(cpu_env, addr, tmp);
9013 tcg_temp_free_i32(addr);
9014 }
9015 /* FAULTMASK */
9016 if (insn & 2) {
9017 addr = tcg_const_i32(17);
9018 gen_helper_v7m_msr(cpu_env, addr, tmp);
9019 tcg_temp_free_i32(addr);
9020 }
9021 tcg_temp_free_i32(tmp);
9022 gen_lookup_tb(s);
9023 } else {
9024 if (insn & (1 << 4))
9025 shift = CPSR_A | CPSR_I | CPSR_F;
9026 else
9027 shift = 0;
9028 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9029 }
9030 break;
9032 default:
9033 goto undef;
9034 }
9035 break;
9037 case 12:
9038 /* load/store multiple */
9039 rn = (insn >> 8) & 0x7;
9040 addr = load_reg(s, rn);
9041 for (i = 0; i < 8; i++) {
9042 if (insn & (1 << i)) {
9043 if (insn & (1 << 11)) {
9044 /* load */
9045 tmp = gen_ld32(addr, IS_USER(s));
9046 store_reg(s, i, tmp);
9047 } else {
9048 /* store */
9049 tmp = load_reg(s, i);
9050 gen_st32(tmp, addr, IS_USER(s));
9051 }
9052 /* advance to the next address */
9053 tcg_gen_addi_i32(addr, addr, 4);
9054 }
9055 }
9056 /* Base register writeback. */
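/* (The code skips writeback when the base register is also in the transfer list; for a load its loaded value takes precedence.) */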
9057 if ((insn & (1 << rn)) == 0) {
9058 store_reg(s, rn, addr);
9059 } else {
9060 dead_tmp(addr);
9061 }
9062 break;
9064 case 13:
9065 /* conditional branch or swi */
9066 cond = (insn >> 8) & 0xf;
9067 if (cond == 0xe)
9068 goto undef;
9070 if (cond == 0xf) {
9071 /* swi */
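/* Commit the PC now and defer the actual exception via DISAS_SWI, so it is raised once the TB has finished updating state. */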
9072 gen_set_pc_im(s->pc);
9073 s->is_jmp = DISAS_SWI;
9074 break;
9075 }
9076 /* generate a conditional jump to next instruction */
9077 s->condlabel = gen_new_label();
9078 gen_test_cc(cond ^ 1, s->condlabel);
9079 s->condjmp = 1;
9081 /* jump to the offset */
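/* B<cond>: 8-bit signed immediate, shifted left one bit; the base is this insn's address + 4 (s->pc already points at the next insn). */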
9082 val = (uint32_t)s->pc + 2;
9083 offset = ((int32_t)insn << 24) >> 24;
9084 val += offset << 1;
9085 gen_jmp(s, val);
9086 break;
9088 case 14:
9089 if (insn & (1 << 11)) {
9090 if (disas_thumb2_insn(env, s, insn))
9091 goto undef32;
9092 break;
9093 }
9094 /* unconditional branch */
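/* B: 11-bit signed immediate, shifted left one bit, relative to this insn's address + 4. */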
9095 val = (uint32_t)s->pc;
9096 offset = ((int32_t)insn << 21) >> 21;
9097 val += (offset << 1) + 2;
9098 gen_jmp(s, val);
9099 break;
9101 case 15:
9102 if (disas_thumb2_insn(env, s, insn))
9103 goto undef32;
9104 break;
9105 }
9106 return;
9107 undef32:
9108 gen_exception_insn(s, 4, EXCP_UDEF);
9109 return;
9110 illegal_op:
9111 undef:
9112 gen_exception_insn(s, 2, EXCP_UDEF);
9113 }
9115 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9116 basic block 'tb'. If search_pc is TRUE, also generate PC
9117 information for each intermediate instruction. */
9118 static inline void gen_intermediate_code_internal(CPUState *env,
9119 TranslationBlock *tb,
9120 int search_pc)
9121 {
9122 DisasContext dc1, *dc = &dc1;
9123 CPUBreakpoint *bp;
9124 uint16_t *gen_opc_end;
9125 int j, lj;
9126 target_ulong pc_start;
9127 uint32_t next_page_start;
9128 int num_insns;
9129 int max_insns;
9131 /* generate intermediate code */
9132 num_temps = 0;
9134 pc_start = tb->pc;
9136 dc->tb = tb;
9138 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
9140 dc->is_jmp = DISAS_NEXT;
9141 dc->pc = pc_start;
9142 dc->singlestep_enabled = env->singlestep_enabled;
9143 dc->condjmp = 0;
9144 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
9145 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9146 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
9147 #if !defined(CONFIG_USER_ONLY)
9148 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
9149 #endif
9150 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
9151 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9152 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
9153 cpu_F0s = tcg_temp_new_i32();
9154 cpu_F1s = tcg_temp_new_i32();
9155 cpu_F0d = tcg_temp_new_i64();
9156 cpu_F1d = tcg_temp_new_i64();
9157 cpu_V0 = cpu_F0d;
9158 cpu_V1 = cpu_F1d;
9159 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
9160 cpu_M0 = tcg_temp_new_i64();
9161 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
9162 lj = -1;
9163 num_insns = 0;
9164 max_insns = tb->cflags & CF_COUNT_MASK;
9165 if (max_insns == 0)
9166 max_insns = CF_COUNT_MASK;
9168 gen_icount_start();
9170 /* A note on handling of the condexec (IT) bits:
9172 * We want to avoid the overhead of having to write the updated condexec
9173 * bits back to the CPUState for every instruction in an IT block. So:
9174 * (1) if the condexec bits are not already zero then we write
9175 * zero back into the CPUState now. This avoids complications trying
9176 * to do it at the end of the block. (For example if we don't do this
9177 * it's hard to identify whether we can safely skip writing condexec
9178 * at the end of the TB, which we definitely want to do for the case
9179 * where a TB doesn't do anything with the IT state at all.)
9180 * (2) if we are going to leave the TB then we call gen_set_condexec()
9181 * which will write the correct value into CPUState if zero is wrong.
9182 * This is done both for leaving the TB at the end, and for leaving
9183 * it because of an exception we know will happen, which is done in
9184 * gen_exception_insn(). The latter is necessary because we need to
9185 * leave the TB with the PC/IT state just prior to execution of the
9186 * instruction which caused the exception.
9187 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
9188 * then the CPUState will be wrong and we need to reset it.
9189 * This is handled in the same way as restoration of the
9190 * PC in these situations: we will be called again with search_pc=1
9191 * and generate a mapping of the condexec bits for each PC in
9192 * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore
9193 * the condexec bits.
9195 * Note that there are no instructions which can read the condexec
9196 * bits, and none which can write non-static values to them, so
9197 * we don't need to care about whether CPUState is correct in the
9198 * middle of a TB.
9199 */
9201 /* Reset the conditional execution bits immediately. This avoids
9202 complications trying to do it at the end of the block. */
9203 if (dc->condexec_mask || dc->condexec_cond)
9204 {
9205 TCGv tmp = new_tmp();
9206 tcg_gen_movi_i32(tmp, 0);
9207 store_cpu_field(tmp, condexec_bits);
9208 }
9209 do {
9210 #ifdef CONFIG_USER_ONLY
9211 /* Intercept jump to the magic kernel page. */
9212 if (dc->pc >= 0xffff0000) {
9213 /* We always get here via a jump, so we know we are not in a
9214 conditional execution block. */
9215 gen_exception(EXCP_KERNEL_TRAP);
9216 dc->is_jmp = DISAS_UPDATE;
9217 break;
9218 }
9219 #else
9220 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9221 /* We always get here via a jump, so we know we are not in a
9222 conditional execution block. */
9223 gen_exception(EXCP_EXCEPTION_EXIT);
9224 dc->is_jmp = DISAS_UPDATE;
9225 break;
9226 }
9227 #endif
9229 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9230 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
9231 if (bp->pc == dc->pc) {
9232 gen_exception_insn(dc, 0, EXCP_DEBUG);
9233 /* Advance PC so that clearing the breakpoint will
9234 invalidate this TB. */
9235 dc->pc += 2;
9236 goto done_generating;
9237 break;
9238 }
9239 }
9240 }
9241 if (search_pc) {
9242 j = gen_opc_ptr - gen_opc_buf;
9243 if (lj < j) {
9244 lj++;
9245 while (lj < j)
9246 gen_opc_instr_start[lj++] = 0;
9247 }
9248 gen_opc_pc[lj] = dc->pc;
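/* Record the IT bits for this PC in the same packed layout used in the TB flags, so gen_pc_load() can restore condexec_bits after a fault. */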
9249 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
9250 gen_opc_instr_start[lj] = 1;
9251 gen_opc_icount[lj] = num_insns;
9252 }
9254 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9255 gen_io_start();
9257 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
9258 tcg_gen_debug_insn_start(dc->pc);
9259 }
9261 if (dc->thumb) {
9262 disas_thumb_insn(env, dc);
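/* Advance the IT state: the next condition LSB comes from the top bit of the mask, then the mask shifts left; once it empties the IT block is over and the condition is cleared. */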
9263 if (dc->condexec_mask) {
9264 dc->condexec_cond = (dc->condexec_cond & 0xe)
9265 | ((dc->condexec_mask >> 4) & 1);
9266 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9267 if (dc->condexec_mask == 0) {
9268 dc->condexec_cond = 0;
9269 }
9270 }
9271 } else {
9272 disas_arm_insn(env, dc);
9273 }
9274 if (num_temps) {
9275 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
9276 num_temps = 0;
9277 }
9279 if (dc->condjmp && !dc->is_jmp) {
9280 gen_set_label(dc->condlabel);
9281 dc->condjmp = 0;
9282 }
9283 /* Translation stops when a conditional branch is encountered.
9284 * Otherwise the subsequent code could get translated several times.
9285 * Also stop translation when a page boundary is reached. This
9286 * ensures prefetch aborts occur at the right place. */
9287 num_insns ++;
9288 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
9289 !env->singlestep_enabled &&
9290 !singlestep &&
9291 dc->pc < next_page_start &&
9292 num_insns < max_insns);
9294 if (tb->cflags & CF_LAST_IO) {
9295 if (dc->condjmp) {
9296 /* FIXME: This can theoretically happen with self-modifying
9297 code. */
9298 cpu_abort(env, "IO on conditional branch instruction");
9299 }
9300 gen_io_end();
9301 }
9303 /* At this stage dc->condjmp will only be set when the skipped
9304 instruction was a conditional branch or trap, and the PC has
9305 already been written. */
9306 if (unlikely(env->singlestep_enabled)) {
9307 /* Make sure the pc is updated, and raise a debug exception. */
9308 if (dc->condjmp) {
9309 gen_set_condexec(dc);
9310 if (dc->is_jmp == DISAS_SWI) {
9311 gen_exception(EXCP_SWI);
9312 } else {
9313 gen_exception(EXCP_DEBUG);
9314 }
9315 gen_set_label(dc->condlabel);
9316 }
9317 if (dc->condjmp || !dc->is_jmp) {
9318 gen_set_pc_im(dc->pc);
9319 dc->condjmp = 0;
9320 }
9321 gen_set_condexec(dc);
9322 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
9323 gen_exception(EXCP_SWI);
9324 } else {
9325 /* FIXME: Single stepping a WFI insn will not halt
9326 the CPU. */
9327 gen_exception(EXCP_DEBUG);
9328 }
9329 } else {
9330 /* While branches must always occur at the end of an IT block,
9331 there are a few other things that can cause us to terminate
9332 the TB in the middle of an IT block:
9333 - Exception generating instructions (bkpt, swi, undefined).
9334 - Page boundaries.
9335 - Hardware watchpoints.
9336 Hardware breakpoints have already been handled and skip this code.
9337 */
9338 gen_set_condexec(dc);
9339 switch(dc->is_jmp) {
9340 case DISAS_NEXT:
9341 gen_goto_tb(dc, 1, dc->pc);
9342 break;
9343 default:
9344 case DISAS_JUMP:
9345 case DISAS_UPDATE:
9346 /* indicate that the hash table must be used to find the next TB */
9347 tcg_gen_exit_tb(0);
9348 break;
9349 case DISAS_TB_JUMP:
9350 /* nothing more to generate */
9351 break;
9352 case DISAS_WFI:
9353 gen_helper_wfi();
9354 break;
9355 case DISAS_SWI:
9356 gen_exception(EXCP_SWI);
9357 break;
9358 }
9359 if (dc->condjmp) {
9360 gen_set_label(dc->condlabel);
9361 gen_set_condexec(dc);
9362 gen_goto_tb(dc, 1, dc->pc);
9363 dc->condjmp = 0;
9364 }
9365 }
9367 done_generating:
9368 gen_icount_end(tb, num_insns);
9369 *gen_opc_ptr = INDEX_op_end;
9371 #ifdef DEBUG_DISAS
9372 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
9373 qemu_log("----------------\n");
9374 qemu_log("IN: %s\n", lookup_symbol(pc_start));
9375 log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
9376 qemu_log("\n");
9377 }
9378 #endif
9379 if (search_pc) {
9380 j = gen_opc_ptr - gen_opc_buf;
9381 lj++;
9382 while (lj <= j)
9383 gen_opc_instr_start[lj++] = 0;
9384 } else {
9385 tb->size = dc->pc - pc_start;
9386 tb->icount = num_insns;
9387 }
9388 }
9390 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
9391 {
9392 gen_intermediate_code_internal(env, tb, 0);
9393 }
9395 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
9396 {
9397 gen_intermediate_code_internal(env, tb, 1);
9398 }
9400 static const char *cpu_mode_names[16] = {
9401 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
9402 "???", "???", "???", "und", "???", "???", "???", "sys"
9405 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
9406 int flags)
9407 {
9408 int i;
9409 #if 0
9410 union {
9411 uint32_t i;
9412 float s;
9413 } s0, s1;
9414 CPU_DoubleU d;
9415 /* ??? This assumes float64 and double have the same layout.
9416 Oh well, it's only debug dumps. */
9417 union {
9418 float64 f64;
9419 double d;
9420 } d0;
9421 #endif
9422 uint32_t psr;
9424 for(i=0;i<16;i++) {
9425 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
9426 if ((i % 4) == 3)
9427 cpu_fprintf(f, "\n");
9428 else
9429 cpu_fprintf(f, " ");
9430 }
9431 psr = cpsr_read(env);
9432 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
9433 psr,
9434 psr & (1 << 31) ? 'N' : '-',
9435 psr & (1 << 30) ? 'Z' : '-',
9436 psr & (1 << 29) ? 'C' : '-',
9437 psr & (1 << 28) ? 'V' : '-',
9438 psr & CPSR_T ? 'T' : 'A',
9439 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
9441 #if 0
9442 for (i = 0; i < 16; i++) {
9443 d.d = env->vfp.regs[i];
9444 s0.i = d.l.lower;
9445 s1.i = d.l.upper;
9446 d0.f64 = d.d;
9447 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
9448 i * 2, (int)s0.i, s0.s,
9449 i * 2 + 1, (int)s1.i, s1.s,
9450 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
9451 d0.d);
9452 }
9453 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
9454 #endif
9455 }
9457 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9458 unsigned long searched_pc, int pc_pos, void *puc)
9459 {
9460 env->regs[15] = gen_opc_pc[pc_pos];
9461 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
9462 }