 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
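/* Illustrative note: ARCH(6T2) expands to
 *   if (!ENABLE_ARCH_6T2) goto illegal_op;
 * i.e. decoding a Thumb-2-only instruction bails out to the illegal_op
 * label whenever the CPU lacks ARM_FEATURE_THUMB2.  */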
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */

/* XXX: move that elsewhere */
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## add8_T0_T1, \
    NULL, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1 }

static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
/* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add8_T0_T1, \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    NULL }

static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
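/* Illustrative note: both tables hold the same gen_op_* helpers operating
 * on T0/T1; only the position of each entry differs, mirroring the
 * different ARM and Thumb-2 encodings of the parallel add/subtract ops.  */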
static GenOpFunc1 *gen_test_cc[14] = {

const uint8_t table_logic_cc[16] = {
static GenOpFunc1 *gen_shift_T1_im[4] = {

static GenOpFunc *gen_shift_T1_0[4] = {

static GenOpFunc1 *gen_shift_T2_im[4] = {

static GenOpFunc *gen_shift_T2_0[4] = {

static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {

static GenOpFunc *gen_shift_T1_T0[4] = {

static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {

static GenOpFunc *gen_op_movl_reg_TN[2][16] = {

static GenOpFunc1 *gen_op_movl_TN_im[3] = {

static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
    gen_op_shll_T0_im_thumb_cc,
    gen_op_shrl_T0_im_thumb_cc,
    gen_op_sarl_T0_im_thumb_cc,
};

static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
static inline void gen_bx(DisasContext *s)
{
    s->is_jmp = DISAS_UPDATE;
    gen_op_bx_T0();
}
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
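/* Illustrative note: in a CONFIG_USER_ONLY build, gen_ldst(ldl, s)
 * expands to gen_op_ldl_raw(); in a system build it dispatches on
 * IS_USER(s) and becomes gen_op_ldl_user() or gen_op_ldl_kernel().  */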
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
    int val;

    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            val = (long)s->pc + 2;
        else
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
    } else {
        gen_op_movl_TN_reg[t][reg]();
    }
}
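/* Illustrative note: reading r15 through this helper does not fetch a
 * stored register at all; it materialises the visible PC, i.e. s->pc + 4
 * in ARM state or s->pc + 2 in Thumb state, s->pc already pointing past
 * the current instruction.  */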
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 0);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 1);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 2);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    gen_op_movl_reg_TN[t][reg]();
    if (reg == 15) {
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
    int val, rm, shift, shiftop;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        gen_movl_T2_reg(s, rm);
        shiftop = (insn >> 5) & 3;
        if (shift != 0) {
            gen_shift_T2_im[shiftop](shift);
        } else if (shiftop != 0) {
            gen_shift_T2_0[shiftop]();
        }
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra)
{
    int val, rm;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* register */
        if (extra)
            gen_op_addl_T1_im(extra);
        rm = insn & 0xf;
        gen_movl_T2_reg(s, rm);
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
#define VFP_OP(name)                      \
static inline void gen_vfp_##name(int dp) \
{                                         \
    if (dp)                               \
        gen_op_vfp_##name##d();           \
    else                                  \
        gen_op_vfp_##name##s();           \
}

#define VFP_OP1(name)                               \
static inline void gen_vfp_##name(int dp, int arg)  \
{                                                   \
    if (dp)                                         \
        gen_op_vfp_##name##d(arg);                  \
    else                                            \
        gen_op_vfp_##name##s(arg);                  \
}
static inline void gen_vfp_fconst(int dp, uint32_t val)
{
    if (dp)
        gen_op_vfp_fconstd(val);
    else
        gen_op_vfp_fconsts(val);
}

static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_ldd, s);
    else
        gen_ldst(vfp_lds, s);
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_std, s);
    else
        gen_ldst(vfp_sts, s);
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
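/* Illustrative note: assuming sreg = reg * 2 + n as above,
 * NEON_GET_REG(T0, 5, 1) reads the most significant 32-bit half of d5,
 * which occupies the same CPUARMState slot as VFP single register s11.  */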
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)
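/* Illustrative note: bit 20 is the L bit of coprocessor transfers, so
 * (insn & ARM_CP_RW_BIT) != 0 distinguishes reads towards the core
 * (MRC/MRRC-style) from writes to the coprocessor (MCR/MCRR-style).  */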
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);
        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8))
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    else
        gen_op_iwmmxt_movl_T0_T1_wRn(rd);

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else {                                    /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                if (insn & (1 << 8))
                    if (insn & (1 << 22))               /* WLDRD */
                        gen_ldst(iwmmxt_ldq, s);
                    else                                /* WLDRW wRd */
                        gen_ldst(iwmmxt_ldl, s);
                else
                    if (insn & (1 << 22))               /* WLDRH */
                        gen_ldst(iwmmxt_ldw, s);
                    else                                /* WLDRB */
                        gen_ldst(iwmmxt_ldb, s);
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                if (insn & (1 << 8))
                    if (insn & (1 << 22))               /* WSTRD */
                        gen_ldst(iwmmxt_stq, s);
                    else                                /* WSTRW wRd */
                        gen_ldst(iwmmxt_stl, s);
                else
                    if (insn & (1 << 22))               /* WSTRH */
                        gen_ldst(iwmmxt_stw, s);
                    else                                /* WSTRB */
                        gen_ldst(iwmmxt_stb, s);
            }
        }
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_reg_T0(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_negq_M0();
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_iwmmxt_unpacklb_M0_wRn(rd1); break;
        case 1: gen_op_iwmmxt_unpacklw_M0_wRn(rd1); break;
        case 2: gen_op_iwmmxt_unpackll_M0_wRn(rd1); break;
        case 3: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_iwmmxt_unpackhb_M0_wRn(rd1); break;
        case 1: gen_op_iwmmxt_unpackhw_M0_wRn(rd1); break;
        case 2: gen_op_iwmmxt_unpackhl_M0_wRn(rd1); break;
        case 3: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
        else
            gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            if (insn & (1 << 21))
                gen_op_iwmmxt_addsq_M0_wRn(wrd);
            else
                gen_op_iwmmxt_adduq_M0_wRn(wrd);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_iwmmxt_cmpeqb_M0_wRn(rd1); break;
        case 1: gen_op_iwmmxt_cmpeqw_M0_wRn(rd1); break;
        case 2: gen_op_iwmmxt_cmpeql_M0_wRn(rd1); break;
        case 3: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
        else
            gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_movl_T1_im(0xff);
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_movl_T1_im(0xffff);
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
            }
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_movl_reg_TN[0][rd]();
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_shrl_T1_im(((insn & 7) << 2) + 0); break;
        case 1: gen_op_shrl_T1_im(((insn & 3) << 3) + 4); break;
        case 2: gen_op_shrl_T1_im(((insn & 1) << 4) + 12); break;
        case 3: return 1;
        }
        gen_op_shll_T1_im(28);
        gen_op_movl_T0_T1();
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0: gen_op_iwmmxt_bcstb_M0_T0(); break;
        case 1: gen_op_iwmmxt_bcstw_M0_T0(); break;
        case 2: gen_op_iwmmxt_bcstl_M0_T0(); break;
        case 3: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        gen_op_movl_T0_T1();
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_iwmmxt_addcb_M0(); break;
        case 1: gen_op_iwmmxt_addcw_M0(); break;
        case 2: gen_op_iwmmxt_addcl_M0(); break;
        case 3: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        gen_op_movl_T0_T1();
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_op_movl_T1_im(0xf0000000);
        gen_op_andl_T0_T1();
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0: gen_op_iwmmxt_msbb_T0_M0(); break;
        case 1: gen_op_iwmmxt_msbw_T0_M0(); break;
        case 2: gen_op_iwmmxt_msbl_T0_M0(); break;
        case 3: return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0: return 1;
        case 1: gen_op_iwmmxt_srlw_M0_T0(); break;
        case 2: gen_op_iwmmxt_srll_M0_T0(); break;
        case 3: gen_op_iwmmxt_srlq_M0_T0(); break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0: return 1;
        case 1: gen_op_iwmmxt_sraw_M0_T0(); break;
        case 2: gen_op_iwmmxt_sral_M0_T0(); break;
        case 3: gen_op_iwmmxt_sraq_M0_T0(); break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0: return 1;
        case 1: gen_op_iwmmxt_sllw_M0_T0(); break;
        case 2: gen_op_iwmmxt_slll_M0_T0(); break;
        case 3: gen_op_iwmmxt_sllq_M0_T0(); break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
            gen_op_iwmmxt_rorw_M0_T0();
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f))
                return 1;
            gen_op_iwmmxt_rorl_M0_T0();
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f))
                return 1;
            gen_op_iwmmxt_rorq_M0_T0();
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im((insn >> 20) & 3);
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0: gen_op_iwmmxt_subnb_M0_wRn(rd1); break;
        case 0x1: gen_op_iwmmxt_subub_M0_wRn(rd1); break;
        case 0x3: gen_op_iwmmxt_subsb_M0_wRn(rd1); break;
        case 0x4: gen_op_iwmmxt_subnw_M0_wRn(rd1); break;
        case 0x5: gen_op_iwmmxt_subuw_M0_wRn(rd1); break;
        case 0x7: gen_op_iwmmxt_subsw_M0_wRn(rd1); break;
        case 0x8: gen_op_iwmmxt_subnl_M0_wRn(rd1); break;
        case 0x9: gen_op_iwmmxt_subul_M0_wRn(rd1); break;
        case 0xb: gen_op_iwmmxt_subsl_M0_wRn(rd1); break;
        default: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_op_iwmmxt_shufh_M0_T0();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0: gen_op_iwmmxt_addnb_M0_wRn(rd1); break;
        case 0x1: gen_op_iwmmxt_addub_M0_wRn(rd1); break;
        case 0x3: gen_op_iwmmxt_addsb_M0_wRn(rd1); break;
        case 0x4: gen_op_iwmmxt_addnw_M0_wRn(rd1); break;
        case 0x5: gen_op_iwmmxt_adduw_M0_wRn(rd1); break;
        case 0x7: gen_op_iwmmxt_addsw_M0_wRn(rd1); break;
        case 0x8: gen_op_iwmmxt_addnl_M0_wRn(rd1); break;
        case 0x9: gen_op_iwmmxt_addul_M0_wRn(rd1); break;
        case 0xb: gen_op_iwmmxt_addsl_M0_wRn(rd1); break;
        default: return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (!(insn & (1 << 20)))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                       /* TMIAPH */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            gen_op_movl_TN_reg[1][rd0]();
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_op_movl_TN_reg[1][rd1]();
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                       /* MIAPH */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            gen_op_movl_TN_reg[1][rd0]();
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_op_movl_TN_reg[1][rd1]();
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
            gen_op_movl_reg_TN[0][rdlo]();
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_op_movl_reg_TN[0][rdhi]();
        } else {                                        /* MAR */
            gen_op_movl_TN_reg[0][rdlo]();
            gen_op_movl_TN_reg[1][rdhi]();
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if the
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_op_movl_T0_cp(insn);
        gen_movl_reg_T0(s, rd);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp_T0(insn);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
            || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if the
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then this sets the condition
           codes.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
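/* Illustrative note: with VFP3, VFP_DREG_D(reg, insn) combines insn bits
 * [15:12] with bit 22 as bit 4 of the register number, so bits [15:12] = 5
 * and bit 22 = 1 select d21 (0x10 | 0x5).  Pre-VFP3 cores reject a set
 * bit 22, since only d0-d15 exist there.  */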
static inline int
vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
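/* Illustrative note: bit 30 of FPEXC is the EN (enable) bit, so while it
 * is clear every VFP instruction except accesses to a few control
 * registers is treated as undefined by the code below.  */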
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    switch (size) {
                    case 0:
                        NEON_GET_REG(T1, rn, pass);
                        if (offset)
                            gen_op_shrl_T1_im(offset);
                        if (insn & (1 << 23))
                            gen_op_uxtb_T1();
                        else
                            gen_op_sxtb_T1();
                        break;
                    case 1:
                        NEON_GET_REG(T1, rn, pass);
                        if (insn & (1 << 23)) {
                            if (offset)
                                gen_op_shrl_T1_im(16);
                            else
                                gen_op_uxth_T1();
                        } else {
                            if (offset)
                                gen_op_sarl_T1_im(16);
                            else
                                gen_op_sxth_T1();
                        }
                        break;
                    case 2:
                        NEON_GET_REG(T1, rn, pass);
                        break;
                    }
                    gen_movl_reg_T1(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_op_neon_dup_u8(0);
                        } else if (size == 1) {
                            gen_op_neon_dup_low16();
                        }
                        NEON_SET_REG(T0, rn, 0);
                        NEON_SET_REG(T0, rn, 1);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xff);
                            gen_op_andl_T0_T1();
                            gen_op_neon_insert_elt(offset, ~(0xff << offset));
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 1:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xffff);
                            gen_op_andl_T0_T1();
                            bank_mask = offset ? 0xffff : 0xffff0000;
                            gen_op_neon_insert_elt(offset, bank_mask);
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 2:
                            NEON_SET_REG(T0, rn, pass);
                            break;
                        }
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access for FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;
            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    gen_vfp_fconst(dp, n);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Set up the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F1_vreg(dp, rm);
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (__builtin_expect(s->singlestep_enabled, 0)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_op_movl_T0_im(dest);
        gen_bx(s);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
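/* Illustrative note: a direct branch whose target lies on the same guest
 * page as this TB takes the chained gen_goto_tb() path above; a cross-page
 * target instead exits to the main loop so the page mapping can be
 * revalidated before execution continues.  */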
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
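/* Illustrative note: an MSR with field mask 0b1000 ("f", flags only)
 * produces mask = 0xff000000 here, so only the flag byte of the PSR can
 * be written; user mode further clamps the result to CPSR_USER.  */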
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;
        gen_op_movl_spsr_T0(mask);
    } else {
        gen_op_movl_cpsr_T0(mask);
    }
    gen_lookup_tb(s);
    return 0;
}

/* Generate an old-style exception return.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    s->is_jmp = DISAS_UPDATE;
}
/* Generate a v6 exception return.  */
static void gen_rfe(DisasContext *s)
{
    gen_op_movl_cpsr_T0(0xffffffff);
    gen_op_movl_T0_T2();
    gen_op_movl_reg_TN[0][15]();
    s->is_jmp = DISAS_UPDATE;
}

static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1));
    }
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* Neon shift by constant.  The actual ops are the same as used for variable
   shifts.  [OP][U][SIZE]  */
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
    { /* 0 */ /* VSHR */
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
      }, {
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
      }
    }, { /* 1 */ /* VSRA */
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
      }, {
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
      }
    }, { /* 2 */ /* VRSHR */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 3 */ /* VRSRA */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 4 */
      {
        NULL, NULL, NULL, NULL
      }, {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 5 */
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }, {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 6 */ /* VQSHL */
      {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_s8,
        gen_op_neon_qshl_s16,
        gen_op_neon_qshl_s32,
        gen_op_neon_qshl_s64
      }
    }, { /* 7 */ /* VQSHLU */
      {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }
    }
};
/* [R][U][size - 1] */
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
    {
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
      }, {
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
      }
    }, {
      {
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }
};

static inline void
gen_op_neon_narrow_u32 ()
{
    /* No-op.  */
}
static GenOpFunc *gen_neon_narrow[3] = {
    gen_op_neon_narrow_u8,
    gen_op_neon_narrow_u16,
    gen_op_neon_narrow_u32
};

static GenOpFunc *gen_neon_narrow_satu[3] = {
    gen_op_neon_narrow_sat_u8,
    gen_op_neon_narrow_sat_u16,
    gen_op_neon_narrow_sat_u32
};

static GenOpFunc *gen_neon_narrow_sats[3] = {
    gen_op_neon_narrow_sat_s8,
    gen_op_neon_narrow_sat_s16,
    gen_op_neon_narrow_sat_s32
};
static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_op_neon_add_u8(); break;
    case 1: gen_op_neon_add_u16(); break;
    case 2: gen_op_addl_T0_T1(); break;
    default: return 1;
    }
    return 0;
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_op_neon_pmax_s32  gen_op_neon_max_s32
#define gen_op_neon_pmax_u32  gen_op_neon_max_u32
#define gen_op_neon_pmin_s32  gen_op_neon_min_s32
#define gen_op_neon_pmin_u32  gen_op_neon_min_u32
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    } } while (0)
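/* Illustrative note: the index is (size << 1) | u, so with size = 1 and
 * u = 0 GEN_NEON_INTEGER_OP(hadd) hits case 2 and emits
 * gen_op_neon_hadd_s16(); the low bit selects unsigned, the upper bits
 * the element size.  */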
static inline void
gen_neon_movl_scratch_T0(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    gen_op_neon_setreg_T0(offset);
}

static inline void
gen_neon_movl_scratch_T1(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    gen_op_neon_setreg_T1(offset);
}

static inline void
gen_neon_movl_T0_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    gen_op_neon_getreg_T0(offset);
}

static inline void
gen_neon_movl_T1_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    gen_op_neon_getreg_T1(offset);
}
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}

static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_op_neon_dup_low16();
        else
            gen_op_neon_dup_high16();
    }
}
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;

    for (n = 0; n < q + 1; n += 2) {
        NEON_GET_REG(T0, reg, n);
        NEON_GET_REG(T1, reg, n + 1);
        switch (size) {
        case 0: gen_op_neon_unzip_u8(); break;
        case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        gen_neon_movl_scratch_T0(tmp + n);
        gen_neon_movl_scratch_T1(tmp + n + 1);
    }
}

static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, stride, size, reg, pass, load, shift, n;
    uint32_t mask;

    if (!vfp_enabled(env))
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load/store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    if (load) {
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T0, rd, pass);
                        gen_ldst(stl, s);
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    if (load) {
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_movl_T2_T0();
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_insert_elt(16, 0xffff);
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_movl_T0_T2();
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_extract_elt(16, 0xffff0000);
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    if (load) {
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            gen_ldst(ldub, s);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                gen_op_movl_T2_T0();
                            } else {
                                gen_op_neon_insert_elt(n * 8, ~mask);
                            }
                            mask <<= 8;
                        }
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            if (n == 0) {
                                gen_op_movl_T0_T2();
                            } else {
                                gen_op_neon_extract_elt(n * 8, mask);
                            }
                            gen_ldst(stb, s);
                            gen_op_addl_T1_im(stride);
                            mask <<= 8;
                        }
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    gen_ldst(ldub, s);
                    gen_op_neon_dup_u8(0);
                    break;
                case 1:
                    gen_ldst(lduw, s);
                    gen_op_neon_dup_low16();
                    break;
                case 2:
                    gen_ldst(ldl, s);
                    break;
                case 3:
                    return 1;
                }
                gen_op_addl_T1_im(1 << size);
                NEON_SET_REG(T0, rd, 0);
                NEON_SET_REG(T0, rd, 1);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                mask = 0xff << shift;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                mask = shift ? 0xffff0000 : 0xffff;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                mask = 0xffffffff;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    if (size != 2) {
                        NEON_GET_REG(T2, rd, pass);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(ldub, s);
                        break;
                    case 1:
                        gen_ldst(lduw, s);
                        break;
                    case 2:
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                        break;
                    }
                    if (size != 2) {
                        gen_op_neon_insert_elt(shift, ~mask);
                        NEON_SET_REG(T0, rd, pass);
                    }
                } else { /* Store */
                    if (size == 2) {
                        NEON_GET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_neon_extract_elt(shift, mask);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(stb, s);
                        break;
                    case 1:
                        gen_ldst(stw, s);
                        break;
                    case 2:
                        gen_ldst(stl, s);
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        gen_movl_T1_reg(s, rn);
        if (rm == 13) {
            gen_op_addl_T1_im(stride);
        } else {
            gen_movl_T2_reg(s, rm);
            gen_op_addl_T1_T2();
        }
        gen_movl_reg_T1(s, rn);
    }
    return 0;
}
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   In general we process vectors in 32-bit chunks.  This means we can reuse
   some of the scalar ops, and hopefully the code generated for 32-bit
   hosts won't be too awful.  The downside is that the few 64-bit operations
   (mainly shifts) get complicated.  */
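/* Illustrative note: under this scheme a Q-register (128-bit) operation
 * over 8-bit lanes becomes four passes of four packed bytes each, while a
 * D-register (64-bit) operation needs two passes; only the true 64-bit
 * element ops need special handling.  */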
static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int pass;
    int pairwise;
    int u;
    int n;

    if (!vfp_enabled(env))
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        if (size == 3 && (op == 1 || op == 5 || op == 16)) {
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                NEON_GET_REG(T0, rm, pass * 2);
                NEON_GET_REG(T1, rm, pass * 2 + 1);
                gen_neon_movl_scratch_T0(0);
                gen_neon_movl_scratch_T1(1);
                NEON_GET_REG(T0, rn, pass * 2);
                NEON_GET_REG(T1, rn, pass * 2 + 1);
                switch (op) {
                case 1: /* VQADD */
                    if (u)
                        gen_op_neon_addl_saturate_u64();
                    else
                        gen_op_neon_addl_saturate_s64();
                    break;
                case 5: /* VQSUB */
                    if (u)
                        gen_op_neon_subl_saturate_u64();
                    else
                        gen_op_neon_subl_saturate_s64();
                    break;
                case 16:
                    if (u)
                        gen_op_neon_subl_u64();
                    else
                        gen_op_neon_addl_u64();
                    break;
                default:
                    abort();
                }
                NEON_SET_REG(T0, rd, pass * 2);
                NEON_SET_REG(T1, rd, pass * 2 + 1);
            }
            return 0;
        }
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            /* Shift operations have Rn and Rm reversed.  */
            {
                int tmp;
                tmp = rn;
                rn = rm;
                rm = tmp;
                pairwise = 0;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            pairwise = 0;
            break;
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (pairwise) {
                /* Pairwise.  */
                if (q)
                    n = (pass & 1) * 2;
                else
                    n = 0;
                if (pass < q + 1) {
                    NEON_GET_REG(T0, rn, n);
                    NEON_GET_REG(T1, rn, n + 1);
                } else {
                    NEON_GET_REG(T0, rm, n);
                    NEON_GET_REG(T1, rm, n + 1);
                }
            } else {
                /* Elementwise.  */
                NEON_GET_REG(T0, rn, pass);
                NEON_GET_REG(T1, rm, pass);
            }
            switch (op) {
            case 0: /* VHADD */
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case 1: /* VQADD */
                switch (size << 1 | u) {
                case 0: gen_op_neon_qadd_s8(); break;
                case 1: gen_op_neon_qadd_u8(); break;
                case 2: gen_op_neon_qadd_s16(); break;
                case 3: gen_op_neon_qadd_u16(); break;
                case 4: gen_op_addl_T0_T1_saturate(); break;
                case 5: gen_op_addl_T0_T1_usaturate(); break;
                default: abort();
                }
                break;
            case 2: /* VRHADD */
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case 3: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    gen_op_andl_T0_T1();
                    break;
                case 1: /* VBIC */
                    gen_op_bicl_T0_T1();
                    break;
                case 2: /* VORR */
                    gen_op_orl_T0_T1();
                    break;
                case 3: /* VORN */
                    gen_op_notl_T1();
                    gen_op_orl_T0_T1();
                    break;
                case 4: /* VEOR */
                    gen_op_xorl_T0_T1();
                    break;
                case 5: /* VBSL */
                    NEON_GET_REG(T2, rd, pass);
                    gen_op_neon_bsl();
                    break;
                case 6: /* VBIT */
                    NEON_GET_REG(T2, rd, pass);
                    gen_op_neon_bit();
                    break;
                case 7: /* VBIF */
                    NEON_GET_REG(T2, rd, pass);
                    gen_op_neon_bif();
                    break;
                }
                break;
            case 4: /* VHSUB */
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case 5: /* VQSUB */
                switch ((size << 1) | u) {
                case 0: gen_op_neon_qsub_s8(); break;
                case 1: gen_op_neon_qsub_u8(); break;
                case 2: gen_op_neon_qsub_s16(); break;
                case 3: gen_op_neon_qsub_u16(); break;
                case 4: gen_op_subl_T0_T1_saturate(); break;
                case 5: gen_op_subl_T0_T1_usaturate(); break;
                default: abort();
                }
                break;
            case 6: /* VCGT */
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case 7: /* VCGE */
                GEN_NEON_INTEGER_OP(cge);
                break;
            case 8: /* VSHL */
                switch ((size << 1) | u) {
                case 0: gen_op_neon_shl_s8(); break;
                case 1: gen_op_neon_shl_u8(); break;
                case 2: gen_op_neon_shl_s16(); break;
                case 3: gen_op_neon_shl_u16(); break;
                case 4: gen_op_neon_shl_s32(); break;
                case 5: gen_op_neon_shl_u32(); break;
#if 0
                /* ??? Implementing these is tricky because the vector ops work
                   on 32-bit pieces.  */
                case 6: gen_op_neon_shl_s64(); break;
                case 7: gen_op_neon_shl_u64(); break;
#else
                case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
#endif
                }
                break;
            case 9: /* VQSHL */
                switch ((size << 1) | u) {
                case 0: gen_op_neon_qshl_s8(); break;
                case 1: gen_op_neon_qshl_u8(); break;
                case 2: gen_op_neon_qshl_s16(); break;
                case 3: gen_op_neon_qshl_u16(); break;
                case 4: gen_op_neon_qshl_s32(); break;
                case 5: gen_op_neon_qshl_u32(); break;
#if 0
                /* ??? Implementing these is tricky because the vector ops work
                   on 32-bit pieces.  */
                case 6: gen_op_neon_qshl_s64(); break;
                case 7: gen_op_neon_qshl_u64(); break;
#else
                case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
#endif
                }
                break;
3251 case 10: /* VRSHL */
3252 switch ((size
<< 1) | u
) {
3253 case 0: gen_op_neon_rshl_s8(); break;
3254 case 1: gen_op_neon_rshl_u8(); break;
3255 case 2: gen_op_neon_rshl_s16(); break;
3256 case 3: gen_op_neon_rshl_u16(); break;
3257 case 4: gen_op_neon_rshl_s32(); break;
3258 case 5: gen_op_neon_rshl_u32(); break;
3260 /* ??? Implementing these is tricky because the vector ops work
3261 on 32-bit pieces. */
3262 case 6: gen_op_neon_rshl_s64(); break;
3263 case 7: gen_op_neon_rshl_u64(); break;
3265 case 6: case 7: cpu_abort(env
, "VRSHL.64 not implemented");
3269 case 11: /* VQRSHL */
3270 switch ((size
<< 1) | u
) {
3271 case 0: gen_op_neon_qrshl_s8(); break;
3272 case 1: gen_op_neon_qrshl_u8(); break;
3273 case 2: gen_op_neon_qrshl_s16(); break;
3274 case 3: gen_op_neon_qrshl_u16(); break;
3275 case 4: gen_op_neon_qrshl_s32(); break;
3276 case 5: gen_op_neon_qrshl_u32(); break;
3278 /* ??? Implementing these is tricky because the vector ops work
3279 on 32-bit pieces. */
3280 case 6: gen_op_neon_qrshl_s64(); break;
3281 case 7: gen_op_neon_qrshl_u64(); break;
3283 case 6: case 7: cpu_abort(env
, "VQRSHL.64 not implemented");
            GEN_NEON_INTEGER_OP(max);
            GEN_NEON_INTEGER_OP(min);
            GEN_NEON_INTEGER_OP(abd);
            GEN_NEON_INTEGER_OP(abd);
            NEON_GET_REG(T1, rd, pass);
            if (!u) { /* VADD */
                if (gen_neon_add(size))
                case 0: gen_op_neon_sub_u8(); break;
                case 1: gen_op_neon_sub_u16(); break;
                case 2: gen_op_subl_T0_T1(); break;
            if (!u) { /* VTST */
                case 0: gen_op_neon_tst_u8(); break;
                case 1: gen_op_neon_tst_u16(); break;
                case 2: gen_op_neon_tst_u32(); break;
                case 0: gen_op_neon_ceq_u8(); break;
                case 1: gen_op_neon_ceq_u16(); break;
                case 2: gen_op_neon_ceq_u32(); break;
        case 18: /* Multiply.  */
                case 0: gen_op_neon_mul_u8(); break;
                case 1: gen_op_neon_mul_u16(); break;
                case 2: gen_op_mul_T0_T1(); break;
            NEON_GET_REG(T1, rd, pass);
                case 0: gen_op_neon_rsb_u8(); break;
                case 1: gen_op_neon_rsb_u16(); break;
                case 2: gen_op_rsbl_T0_T1(); break;
            if (u) { /* polynomial */
                gen_op_neon_mul_p8();
            } else { /* Integer */
                case 0: gen_op_neon_mul_u8(); break;
                case 1: gen_op_neon_mul_u16(); break;
                case 2: gen_op_mul_T0_T1(); break;
        case 20: /* VPMAX */
            GEN_NEON_INTEGER_OP(pmax);
        case 21: /* VPMIN */
            GEN_NEON_INTEGER_OP(pmin);
        case 22: /* Multiply high.  */
            if (!u) { /* VQDMULH */
                case 1: gen_op_neon_qdmulh_s16(); break;
                case 2: gen_op_neon_qdmulh_s32(); break;
            } else { /* VQRDMULH */
                case 1: gen_op_neon_qrdmulh_s16(); break;
                case 2: gen_op_neon_qrdmulh_s32(); break;
        case 23: /* VPADD */
            case 0: gen_op_neon_padd_u8(); break;
            case 1: gen_op_neon_padd_u16(); break;
            case 2: gen_op_addl_T0_T1(); break;
        case 26: /* Floating point arithmetic.  */
            switch ((u << 2) | size) {
                gen_op_neon_add_f32();
                gen_op_neon_sub_f32();
                gen_op_neon_add_f32();
                gen_op_neon_abd_f32();
        case 27: /* Float multiply.  */
            gen_op_neon_mul_f32();
            NEON_GET_REG(T1, rd, pass);
                gen_op_neon_add_f32();
                gen_op_neon_rsb_f32();
        case 28: /* Float compare.  */
                gen_op_neon_ceq_f32();
                gen_op_neon_cge_f32();
                gen_op_neon_cgt_f32();
        case 29: /* Float compare absolute.  */
                gen_op_neon_acge_f32();
                gen_op_neon_acgt_f32();
        case 30: /* Float min/max.  */
                gen_op_neon_max_f32();
                gen_op_neon_min_f32();
                gen_op_neon_recps_f32();
                gen_op_neon_rsqrts_f32();
        /* Save the result.  For elementwise operations we can put it
           straight into the destination register.  For pairwise operations
           we have to be careful to avoid clobbering the source operands.  */
        if (pairwise && rd == rm) {
            gen_neon_movl_scratch_T0(pass);
        } else {
            NEON_SET_REG(T0, rd, pass);
        }
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                gen_neon_movl_T0_scratch(pass);
                NEON_SET_REG(T0, rd, pass);
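/* Why the scratch buffering above is needed (added illustration): a pairwise
   op with rd == rm, e.g.
       vpadd.i8 d0, d1, d0
   reads every lane of both sources, so writing result chunks into d0 as they
   are produced would corrupt source lanes that later passes still have to
   read.  Buffering the result and copying it once all passes are done
   avoids that.  */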
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
            while ((insn & (1 << (size + 19))) == 0)
                size--;
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            /* Shift by immediate:
               VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
            /* Right shifts are encoded as N - shift, where N is the
               element size in bits.  */
            shift = shift - (1 << (size + 3));
            imm = (uint8_t) shift;
            imm = (uint16_t) shift;
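            /* Worked example of the encoding above (added): for 8-bit
               elements, size == 0 and N == 8, so VSHR.8 #2 carries the
               field value 8 - 2 = 6 in the immediate bits; the line
               "shift = shift - (1 << (size + 3))" turns 6 back into
               6 - 8 = -2, i.e. a right shift by two, which the variable
               shift helpers accept as a negative count.  */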
            for (pass = 0; pass < count; pass++) {
                /* Operands in T0 and T1.  */
                gen_op_movl_T1_im(imm);
                NEON_GET_REG(T0, rm, pass);
                /* Operands in {T0, T1} and env->vfp.scratch.  */
                gen_op_movl_T0_im(imm);
                gen_neon_movl_scratch_T0(0);
                gen_op_movl_T0_im((int32_t)imm >> 31);
                gen_neon_movl_scratch_T0(1);
                NEON_GET_REG(T0, rm, pass * 2);
                NEON_GET_REG(T1, rm, pass * 2 + 1);
                if (gen_neon_shift_im[op][u][size] == NULL)
                gen_neon_shift_im[op][u][size]();
                if (op == 1 || op == 3) {
                    gen_neon_movl_scratch_T0(0);
                    gen_neon_movl_scratch_T1(1);
                    NEON_GET_REG(T0, rd, pass * 2);
                    NEON_GET_REG(T1, rd, pass * 2 + 1);
                    gen_op_neon_addl_u64();
                    NEON_GET_REG(T1, rd, pass);
                } else if (op == 4 || (op == 5 && u)) {
                    cpu_abort(env, "VS[LR]I.64 not implemented");
                imm = 0xff >> -shift;
                imm = (uint8_t)(0xff << shift);
                imm = 0xffff >> -shift;
                imm = (uint16_t)(0xffff << shift);
                imm = 0xffffffffu >> -shift;
                imm = 0xffffffffu << shift;
                NEON_GET_REG(T1, rd, pass);
                gen_op_movl_T2_im(imm);
                NEON_SET_REG(T0, rd, pass * 2);
                NEON_SET_REG(T1, rd, pass * 2 + 1);
                NEON_SET_REG(T0, rd, pass);
        } else if (op < 10) {
            /* Shift by immediate and narrow:
               VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
            shift = shift - (1 << (size + 3));
            imm = (uint16_t) shift;
            /* Processing MSB first means we need to do less shuffling at
               the end.  */
            for (pass = count - 1; pass >= 0; pass--) {
                /* Avoid clobbering the second operand before it has been
                   written.  */
                /* Operands in T0 and T1.  */
                gen_op_movl_T1_im(imm);
                NEON_GET_REG(T0, rm, n);
                /* Operands in {T0, T1} and env->vfp.scratch.  */
                gen_op_movl_T0_im(imm);
                gen_neon_movl_scratch_T0(0);
                gen_op_movl_T0_im((int32_t)imm >> 31);
                gen_neon_movl_scratch_T0(1);
                NEON_GET_REG(T0, rm, n * 2);
                NEON_GET_REG(T1, rm, n * 2 + 1);
                gen_neon_shift_im_narrow[q][u][size - 1]();
                if (size < 3 && (pass & 1) == 0) {
                    gen_neon_movl_scratch_T0(0);
                gen_neon_movl_T1_scratch(0);
                if (op == 8 && !u) {
                    gen_neon_narrow[size - 1]();
                    gen_neon_narrow_sats[size - 2]();
                    gen_neon_narrow_satu[size - 1]();
                offset = neon_reg_offset(rd, n);
                offset = neon_reg_offset(rd, n >> 1);
                gen_op_neon_setreg_T0(offset);
        } else if (op == 10) {
            for (pass = 0; pass < 2; pass++) {
                /* Avoid clobbering the input operand.  */
                NEON_GET_REG(T0, rm, n);
                GEN_NEON_INTEGER_OP(widen);
                /* The shift is less than the width of the source
                   type, so in some cases we can just
                   shift the whole register.  */
                if (size == 1 || (size == 0 && u)) {
                    gen_op_shll_T0_im(shift);
                    gen_op_shll_T1_im(shift);
                    case 0: gen_op_neon_shll_u16(shift); break;
                    case 2: gen_op_neon_shll_u64(shift); break;
                NEON_SET_REG(T0, rd, n * 2);
                NEON_SET_REG(T1, rd, n * 2 + 1);
        } else if (op == 15 || op == 16) {
            /* VCVT fixed-point.  */
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
                gen_op_vfp_ultos(shift);
                gen_op_vfp_sltos(shift);
                gen_op_vfp_touls(shift);
                gen_op_vfp_tosls(shift);
                gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
    } else { /* (insn & 0x00380080) == 0 */
        op = (insn >> 8) & 0xf;
        /* One register and immediate.  */
        imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
        invert = (insn & (1 << 5)) != 0;
        imm = (imm << 8) | (imm << 24);
        imm = (imm << 8) | 0xff;
        imm = (imm << 16) | 0xffff;
        imm |= (imm << 8) | (imm << 16) | (imm << 24);
        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
              | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
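        /* Added notes on the immediate assembly above.  The 8-bit value
           abcdefgh is scattered across the instruction word: a is bit 28
           (the u bit), bcd are bits 18:16 and efgh are bits 3:0, which is
           exactly what (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf)
           collects.  The last case builds a VFP-style float constant,
           e.g. imm = 0x70 expands to (0x1f << 25) | (0x30 << 19)
           = 0x3f800000, the IEEE-754 single-precision encoding of 1.0f.  */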
        if (op != 14 || !invert)
            gen_op_movl_T1_im(imm);
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (op & 1 && op < 12) {
                NEON_GET_REG(T0, rd, pass);
                /* The immediate value has already been inverted, so
                   BIC becomes AND.  */
                gen_op_andl_T0_T1();
                NEON_SET_REG(T0, rd, pass);
            if (op == 14 && invert) {
                for (n = 0; n < 4; n++) {
                    if (imm & (1 << (n + (pass & 1) * 4)))
                        tmp |= 0xff << (n * 8);
                gen_op_movl_T1_im(tmp);
                NEON_SET_REG(T1, rd, pass);
    } else { /* (insn & 0x00800010 == 0x00800010) */
        op = (insn >> 8) & 0xf;
        if ((insn & (1 << 6)) == 0) {
            /* Three registers of different lengths.  */
            /* prewiden, src1_wide, src2_wide */
            static const int neon_3reg_wide[16][3] = {
                {1, 0, 0}, /* VADDL */
                {1, 1, 0}, /* VADDW */
                {1, 0, 0}, /* VSUBL */
                {1, 1, 0}, /* VSUBW */
                {0, 1, 1}, /* VADDHN */
                {0, 0, 0}, /* VABAL */
                {0, 1, 1}, /* VSUBHN */
                {0, 0, 0}, /* VABDL */
                {0, 0, 0}, /* VMLAL */
                {0, 0, 0}, /* VQDMLAL */
                {0, 0, 0}, /* VMLSL */
                {0, 0, 0}, /* VQDMLSL */
                {0, 0, 0}, /* Integer VMULL */
                {0, 0, 0}, /* VQDMULL */
                {0, 0, 0}  /* Polynomial VMULL */
            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            /* Avoid overlapping operands.  Wide source operands are
               always aligned so will never overlap with wide
               destinations in problematic ways.  */
                NEON_GET_REG(T2, rm, 1);
            } else if (rd == rn) {
                NEON_GET_REG(T2, rn, 1);
            for (pass = 0; pass < 2; pass++) {
                /* Load the second operand into env->vfp.scratch.
                   Also widen narrow operands.  */
                if (pass == 1 && rd == rm) {
                    gen_op_movl_T0_T2();
                    gen_op_movl_T1_T2();
                NEON_GET_REG(T0, rm, pass * 2);
                NEON_GET_REG(T1, rm, pass * 2 + 1);
                NEON_GET_REG(T0, rm, pass);
                NEON_GET_REG(T1, rm, pass);
                if (prewiden && !src2_wide) {
                    GEN_NEON_INTEGER_OP(widen);
                if (prewiden || src2_wide) {
                    gen_neon_movl_scratch_T0(0);
                    gen_neon_movl_scratch_T1(1);
                /* Load the first operand.  */
                if (pass == 1 && rd == rn) {
                    gen_op_movl_T0_T2();
                NEON_GET_REG(T0, rn, pass * 2);
                NEON_GET_REG(T1, rn, pass * 2 + 1);
                NEON_GET_REG(T0, rn, pass);
                if (prewiden && !src1_wide) {
                    GEN_NEON_INTEGER_OP(widen);
                case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                    case 0: gen_op_neon_addl_u16(); break;
                    case 1: gen_op_neon_addl_u32(); break;
                    case 2: gen_op_neon_addl_u64(); break;
                case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                    case 0: gen_op_neon_subl_u16(); break;
                    case 1: gen_op_neon_subl_u32(); break;
                    case 2: gen_op_neon_subl_u64(); break;
                case 5: case 7: /* VABAL, VABDL */
                    switch ((size << 1) | u) {
                    case 0: gen_op_neon_abdl_s16(); break;
                    case 1: gen_op_neon_abdl_u16(); break;
                    case 2: gen_op_neon_abdl_s32(); break;
                    case 3: gen_op_neon_abdl_u32(); break;
                    case 4: gen_op_neon_abdl_s64(); break;
                    case 5: gen_op_neon_abdl_u64(); break;
                case 8: case 9: case 10: case 11: case 12: case 13:
                    /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                    switch ((size << 1) | u) {
                    case 0: gen_op_neon_mull_s8(); break;
                    case 1: gen_op_neon_mull_u8(); break;
                    case 2: gen_op_neon_mull_s16(); break;
                    case 3: gen_op_neon_mull_u16(); break;
                    case 4: gen_op_imull_T0_T1(); break;
                    case 5: gen_op_mull_T0_T1(); break;
                case 14: /* Polynomial VMULL */
                    cpu_abort(env, "Polynomial VMULL not implemented");
                default: /* 15 is RESERVED.  */
                if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                    if (op == 10 || op == 11) {
                        case 0: gen_op_neon_negl_u16(); break;
                        case 1: gen_op_neon_negl_u32(); break;
                        case 2: gen_op_neon_negl_u64(); break;
                    gen_neon_movl_scratch_T0(0);
                    gen_neon_movl_scratch_T1(1);
                    NEON_GET_REG(T0, rd, pass * 2);
                    NEON_GET_REG(T1, rd, pass * 2 + 1);
                    case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
                        case 0: gen_op_neon_addl_u16(); break;
                        case 1: gen_op_neon_addl_u32(); break;
                        case 2: gen_op_neon_addl_u64(); break;
                    case 9: case 11: /* VQDMLAL, VQDMLSL */
                        case 1: gen_op_neon_addl_saturate_s32(); break;
                        case 2: gen_op_neon_addl_saturate_s64(); break;
                    case 13: /* VQDMULL */
                        case 1: gen_op_neon_addl_saturate_s32(); break;
                        case 2: gen_op_neon_addl_saturate_s64(); break;
                    NEON_SET_REG(T0, rd, pass * 2);
                    NEON_SET_REG(T1, rd, pass * 2 + 1);
                } else if (op == 4 || op == 6) {
                    /* Narrowing operation.  */
                    case 0: gen_op_neon_narrow_high_u8(); break;
                    case 1: gen_op_neon_narrow_high_u16(); break;
                    case 2: gen_op_movl_T0_T1(); break;
                    case 0: gen_op_neon_narrow_high_round_u8(); break;
                    case 1: gen_op_neon_narrow_high_round_u16(); break;
                    case 2: gen_op_neon_narrow_high_round_u32(); break;
                    NEON_SET_REG(T0, rd, pass);
                } else {
                    /* Write back the result.  */
                    NEON_SET_REG(T0, rd, pass * 2);
                    NEON_SET_REG(T1, rd, pass * 2 + 1);
            /* Two registers and a scalar.  */
            case 0: /* Integer VMLA scalar */
            case 1: /* Float VMLA scalar */
            case 4: /* Integer VMLS scalar */
            case 5: /* Floating point VMLS scalar */
            case 8: /* Integer VMUL scalar */
            case 9: /* Floating point VMUL scalar */
            case 12: /* VQDMULH scalar */
            case 13: /* VQRDMULH scalar */
                gen_neon_get_scalar(size, rm);
                gen_op_movl_T2_T0();
                for (pass = 0; pass < (u ? 4 : 2); pass++) {
                    gen_op_movl_T0_T2();
                    NEON_GET_REG(T1, rn, pass);
                        gen_op_neon_qdmulh_s16();
                        gen_op_neon_qdmulh_s32();
                    } else if (op == 13) {
                        gen_op_neon_qrdmulh_s16();
                        gen_op_neon_qrdmulh_s32();
                    } else if (op & 1) {
                        gen_op_neon_mul_f32();
                        case 0: gen_op_neon_mul_u8(); break;
                        case 1: gen_op_neon_mul_u16(); break;
                        case 2: gen_op_mul_T0_T1(); break;
                    NEON_GET_REG(T1, rd, pass);
                        gen_op_neon_add_f32();
                        case 0: gen_op_neon_rsb_u8(); break;
                        case 1: gen_op_neon_rsb_u16(); break;
                        case 2: gen_op_rsbl_T0_T1(); break;
                        gen_op_neon_rsb_f32();
                    NEON_SET_REG(T0, rd, pass);
            case 2: /* VMLAL scalar */
            case 3: /* VQDMLAL scalar */
            case 6: /* VMLSL scalar */
            case 7: /* VQDMLSL scalar */
            case 10: /* VMULL scalar */
            case 11: /* VQDMULL scalar */
                /* Save overlapping operands before they are clobbered.  */
                NEON_GET_REG(T0, rn, 1);
                gen_neon_movl_scratch_T0(2);
                gen_neon_get_scalar(size, rm);
                gen_op_movl_T2_T0();
                for (pass = 0; pass < 2; pass++) {
                    gen_op_movl_T0_T2();
                    if (pass != 0 && rd == rn) {
                        gen_neon_movl_T1_scratch(2);
                    NEON_GET_REG(T1, rn, pass);
                    switch ((size << 1) | u) {
                    case 0: gen_op_neon_mull_s8(); break;
                    case 1: gen_op_neon_mull_u8(); break;
                    case 2: gen_op_neon_mull_s16(); break;
                    case 3: gen_op_neon_mull_u16(); break;
                    case 4: gen_op_imull_T0_T1(); break;
                    case 5: gen_op_mull_T0_T1(); break;
                    if (op == 6 || op == 7) {
                        case 0: gen_op_neon_negl_u16(); break;
                        case 1: gen_op_neon_negl_u32(); break;
                        case 2: gen_op_neon_negl_u64(); break;
                    gen_neon_movl_scratch_T0(0);
                    gen_neon_movl_scratch_T1(1);
                    NEON_GET_REG(T0, rd, pass * 2);
                    NEON_GET_REG(T1, rd, pass * 2 + 1);
                    case 0: gen_op_neon_addl_u16(); break;
                    case 1: gen_op_neon_addl_u32(); break;
                    case 2: gen_op_neon_addl_u64(); break;
                    gen_op_neon_addl_saturate_s32();
                    gen_op_neon_addl_saturate_s32();
                    gen_op_neon_addl_saturate_s64();
                    gen_op_neon_addl_saturate_s64();
                    case 1: gen_op_neon_addl_saturate_s32(); break;
                    case 2: gen_op_neon_addl_saturate_s64(); break;
                    NEON_SET_REG(T0, rd, pass * 2);
                    NEON_SET_REG(T1, rd, pass * 2 + 1);
            default: /* 14 and 15 are RESERVED */
    } else { /* size == 3 */
        imm = (insn >> 8) & 0xf;
        NEON_GET_REG(T0, reg, n);
        for (pass = 0; pass < count; pass++) {
            NEON_GET_REG(T1, reg, n);
            gen_op_neon_extract((insn << 3) & 0x1f);
            /* ??? This is broken if rd and rm overlap */
            NEON_SET_REG(T0, rd, pass);
            gen_op_movl_T0_T1();
            NEON_GET_REG(T0, reg, n);
    } else if ((insn & (1 << 11)) == 0) {
        /* Two register misc.  */
        op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
        size = (insn >> 18) & 3;
        case 0: /* VREV64 */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                NEON_GET_REG(T0, rm, pass * 2);
                NEON_GET_REG(T1, rm, pass * 2 + 1);
                case 0: gen_op_rev_T0(); break;
                case 1: gen_op_revh_T0(); break;
                case 2: /* no-op */ break;
                NEON_SET_REG(T0, rd, pass * 2 + 1);
                NEON_SET_REG(T1, rd, pass * 2);
                gen_op_movl_T0_T1();
                case 0: gen_op_rev_T0(); break;
                case 1: gen_op_revh_T0(); break;
                NEON_SET_REG(T0, rd, pass * 2);
        case 4: case 5: /* VPADDL */
        case 12: case 13: /* VPADAL */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                NEON_GET_REG(T0, rm, pass * 2);
                NEON_GET_REG(T1, rm, pass * 2 + 1);
                gen_op_neon_paddl_u32();
                gen_op_neon_paddl_s32();
                gen_neon_movl_scratch_T0(0);
                gen_neon_movl_scratch_T1(1);
                NEON_GET_REG(T0, rd, pass * 2);
                NEON_GET_REG(T1, rd, pass * 2 + 1);
                gen_op_neon_addl_u64();
                NEON_SET_REG(T0, rd, pass * 2);
                NEON_SET_REG(T1, rd, pass * 2 + 1);
            for (n = 0; n < (q ? 4 : 2); n += 2) {
                NEON_GET_REG(T0, rm, n);
                NEON_GET_REG(T1, rd, n + 1);
                NEON_SET_REG(T1, rm, n);
                NEON_SET_REG(T0, rd, n + 1);
            /* Reg  Before       After
               Rd   A3 A2 A1 A0  B2 B0 A2 A0
               Rm   B3 B2 B1 B0  B3 B1 A3 A1  */
            gen_neon_unzip(rd, q, 0, size);
            gen_neon_unzip(rm, q, 4, size);
            if (q) {
                static int unzip_order_q[8] =
                    {0, 2, 4, 6, 1, 3, 5, 7};
                for (n = 0; n < 8; n++) {
                    int reg = (n < 4) ? rd : rm;
                    gen_neon_movl_T0_scratch(unzip_order_q[n]);
                    NEON_SET_REG(T0, reg, n % 4);
            } else {
                static int unzip_order[4] =
                    {0, 4, 1, 5};
                for (n = 0; n < 4; n++) {
                    int reg = (n < 2) ? rd : rm;
                    gen_neon_movl_T0_scratch(unzip_order[n]);
                    NEON_SET_REG(T0, reg, n % 2);
            /* Reg  Before       After
               Rd   A3 A2 A1 A0  B1 A1 B0 A0
               Rm   B3 B2 B1 B0  B3 A3 B2 A2  */
            count = (q ? 4 : 2);
            for (n = 0; n < count; n++) {
                NEON_GET_REG(T0, rd, n);
                NEON_GET_REG(T1, rm, n);
                case 0: gen_op_neon_zip_u8(); break;
                case 1: gen_op_neon_zip_u16(); break;
                case 2: /* no-op */; break;
                gen_neon_movl_scratch_T0(n * 2);
                gen_neon_movl_scratch_T1(n * 2 + 1);
            for (n = 0; n < count * 2; n++) {
                int reg = (n < count) ? rd : rm;
                gen_neon_movl_T0_scratch(n);
                NEON_SET_REG(T0, reg, n % count);
        case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
            for (pass = 0; pass < 2; pass++) {
                NEON_GET_REG(T0, rm, n * 2);
                NEON_GET_REG(T1, rm, n * 2 + 1);
                if (op == 36 && q == 0) {
                    case 0: gen_op_neon_narrow_u8(); break;
                    case 1: gen_op_neon_narrow_u16(); break;
                    case 2: /* no-op */ break;
                    case 0: gen_op_neon_narrow_sat_u8(); break;
                    case 1: gen_op_neon_narrow_sat_u16(); break;
                    case 2: gen_op_neon_narrow_sat_u32(); break;
                    case 0: gen_op_neon_narrow_sat_s8(); break;
                    case 1: gen_op_neon_narrow_sat_s16(); break;
                    case 2: gen_op_neon_narrow_sat_s32(); break;
                NEON_SET_REG(T0, rd, n);
        case 38: /* VSHLL */
            NEON_GET_REG(T2, rm, 1);
            for (pass = 0; pass < 2; pass++) {
                if (pass == 1 && rm == rd) {
                    gen_op_movl_T0_T2();
                NEON_GET_REG(T0, rm, pass);
                case 0: gen_op_neon_widen_high_u8(); break;
                case 1: gen_op_neon_widen_high_u16(); break;
                gen_op_movl_T1_T0();
                gen_op_movl_T0_im(0);
                NEON_SET_REG(T0, rd, pass * 2);
                NEON_SET_REG(T1, rd, pass * 2 + 1);
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op == 30 || op == 31 || op >= 58) {
                    gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
                NEON_GET_REG(T0, rm, pass);
                case 1: /* VREV32 */
                    case 0: gen_op_rev_T0(); break;
                    case 1: gen_op_revh_T0(); break;
                case 2: /* VREV16 */
                case 4: case 5: /* VPADDL */
                case 12: case 13: /* VPADAL */
                    switch ((size << 1) | (op & 1)) {
                    case 0: gen_op_neon_paddl_s8(); break;
                    case 1: gen_op_neon_paddl_u8(); break;
                    case 2: gen_op_neon_paddl_s16(); break;
                    case 3: gen_op_neon_paddl_u16(); break;
                    NEON_GET_REG(T1, rd, pass);
                    case 0: gen_op_neon_add_u16(); break;
                    case 1: gen_op_addl_T0_T1(); break;
                    case 0: gen_op_neon_cls_s8(); break;
                    case 1: gen_op_neon_cls_s16(); break;
                    case 2: gen_op_neon_cls_s32(); break;
                    case 0: gen_op_neon_clz_u8(); break;
                    case 1: gen_op_neon_clz_u16(); break;
                    case 2: gen_op_clz_T0(); break;
                    gen_op_neon_cnt_u8();
                case 14: /* VQABS */
                    case 0: gen_op_neon_qabs_s8(); break;
                    case 1: gen_op_neon_qabs_s16(); break;
                    case 2: gen_op_neon_qabs_s32(); break;
                case 15: /* VQNEG */
                    case 0: gen_op_neon_qneg_s8(); break;
                    case 1: gen_op_neon_qneg_s16(); break;
                    case 2: gen_op_neon_qneg_s32(); break;
                case 16: case 19: /* VCGT #0, VCLE #0 */
                    gen_op_movl_T1_im(0);
                    case 0: gen_op_neon_cgt_s8(); break;
                    case 1: gen_op_neon_cgt_s16(); break;
                    case 2: gen_op_neon_cgt_s32(); break;
                case 17: case 20: /* VCGE #0, VCLT #0 */
                    gen_op_movl_T1_im(0);
                    case 0: gen_op_neon_cge_s8(); break;
                    case 1: gen_op_neon_cge_s16(); break;
                    case 2: gen_op_neon_cge_s32(); break;
                case 18: /* VCEQ #0 */
                    gen_op_movl_T1_im(0);
                    case 0: gen_op_neon_ceq_u8(); break;
                    case 1: gen_op_neon_ceq_u16(); break;
                    case 2: gen_op_neon_ceq_u32(); break;
                    case 0: gen_op_neon_abs_s8(); break;
                    case 1: gen_op_neon_abs_s16(); break;
                    case 2: gen_op_neon_abs_s32(); break;
                    gen_op_movl_T1_im(0);
                    case 0: gen_op_neon_rsb_u8(); break;
                    case 1: gen_op_neon_rsb_u16(); break;
                    case 2: gen_op_rsbl_T0_T1(); break;
                case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                    gen_op_movl_T1_im(0);
                    gen_op_neon_cgt_f32();
                case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                    gen_op_movl_T1_im(0);
                    gen_op_neon_cge_f32();
                case 26: /* Float VCEQ #0 */
                    gen_op_movl_T1_im(0);
                    gen_op_neon_ceq_f32();
                case 30: /* Float VABS */
                case 31: /* Float VNEG */
                    NEON_GET_REG(T1, rd, pass);
                    NEON_SET_REG(T1, rm, pass);
                    NEON_GET_REG(T1, rd, pass);
                    case 0: gen_op_neon_trn_u8(); break;
                    case 1: gen_op_neon_trn_u16(); break;
                    NEON_SET_REG(T1, rm, pass);
                case 56: /* Integer VRECPE */
                    gen_op_neon_recpe_u32();
                case 57: /* Integer VRSQRTE */
                    gen_op_neon_rsqrte_u32();
                case 58: /* Float VRECPE */
                    gen_op_neon_recpe_f32();
                case 59: /* Float VRSQRTE */
                    gen_op_neon_rsqrte_f32();
                case 60: /* VCVT.F32.S32 */
                    gen_op_vfp_tosizs();
                case 61: /* VCVT.F32.U32 */
                    gen_op_vfp_touizs();
                case 62: /* VCVT.S32.F32 */
                case 63: /* VCVT.U32.F32 */
                    /* Reserved: 21, 29, 39-56 */
                if (op == 30 || op == 31 || op >= 58) {
                    gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
                NEON_SET_REG(T0, rd, pass);
    } else if ((insn & (1 << 10)) == 0) {
        /* VTBL, VTBX.  */
        n = (insn >> 5) & 0x18;
        NEON_GET_REG(T1, rm, 0);
        if (insn & (1 << 6)) {
            NEON_GET_REG(T0, rd, 0);
        } else {
            gen_op_movl_T0_im(0);
        }
        gen_op_neon_tbl(rn, n);
        gen_op_movl_T2_T0();
        NEON_GET_REG(T1, rm, 1);
        if (insn & (1 << 6)) {
            NEON_GET_REG(T0, rd, 1);
        } else {
            gen_op_movl_T0_im(0);
        }
        gen_op_neon_tbl(rn, n);
        NEON_SET_REG(T2, rd, 0);
        NEON_SET_REG(T0, rd, 1);
    } else if ((insn & 0x380) == 0) {
        /* VDUP (scalar)  */
        if (insn & (1 << 19)) {
            NEON_GET_REG(T0, rm, 1);
        } else {
            NEON_GET_REG(T0, rm, 0);
        }
        if (insn & (1 << 16)) {
            gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
        } else if (insn & (1 << 17)) {
            if ((insn >> 18) & 1)
                gen_op_neon_dup_high16();
            else
                gen_op_neon_dup_low16();
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            NEON_SET_REG(T0, rd, pass);
static int disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return disas_iwmmxt_insn(env, s, insn);
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        return disas_dsp_insn(env, s, insn);
    }
    return disas_vfp_insn (env, s, insn);
    return disas_cp15_insn (env, s, insn);
    /* Unknown coprocessor.  See if the board has hooked it.  */
    return disas_cp_insn (env, s, insn);
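/* Note (added): c15_cpar holds one access-enable bit per coprocessor 0-13,
   so 0x3fff is the mask of all of them.  XORing with 0x3fff inverts the
   enables, which makes the test above read "the bit for cpnum is clear",
   i.e. access to that coprocessor is disabled and the instruction must be
   treated as undefined.  */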
static void disas_arm_insn(CPUState *env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;

    insn = ldl_code(s->pc);

    /* M variants do not implement ARM mode.  */
    /* Unconditional instructions.  */
    if (((insn >> 25) & 7) == 1) {
        /* NEON Data processing.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
            goto illegal_op;
        if (disas_neon_data_insn(env, s, insn))
            goto illegal_op;
    if ((insn & 0x0f100000) == 0x04000000) {
        /* NEON load/store.  */
        if (!arm_feature(env, ARM_FEATURE_NEON))
            goto illegal_op;
        if (disas_neon_ls_insn(env, s, insn))
            goto illegal_op;
    if ((insn & 0x0d70f000) == 0x0550f000)
        return; /* PLD */
    else if ((insn & 0x0ffffdff) == 0x01010000) {
        /* setend */
        if (insn & (1 << 9)) {
            /* BE8 mode not implemented.  */
    } else if ((insn & 0x0fffff00) == 0x057ff000) {
        switch ((insn >> 4) & 0xf) {
        /* We don't emulate caches so these are a no-op.  */
    } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
        /* srs */
        op1 = (insn & 0x1f);
        if (op1 == (env->uncached_cpsr & CPSR_M)) {
            gen_movl_T1_reg(s, 13);
        } else {
            gen_op_movl_T1_r13_banked(op1);
        }
        i = (insn >> 23) & 3;
        switch (i) {
        case 0: offset = -4; break; /* DA */
        case 1: offset = -8; break; /* DB */
        case 2: offset = 0; break; /* IA */
        case 3: offset = 4; break; /* IB */
        }
        gen_op_addl_T1_im(offset);
        gen_movl_T0_reg(s, 14);
        gen_op_movl_T0_cpsr();
        gen_op_addl_T1_im(4);
        if (insn & (1 << 21)) {
            /* Base writeback.  */
            switch (i) {
            case 0: offset = -8; break;
            case 1: offset = -4; break;
            case 2: offset = 4; break;
            case 3: offset = 0; break;
            }
            gen_op_addl_T1_im(offset);
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
                gen_movl_reg_T1(s, 13);
            } else {
                gen_op_movl_r13_T1_banked(op1);
            }
    } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
        /* rfe */
        rn = (insn >> 16) & 0xf;
        gen_movl_T1_reg(s, rn);
        i = (insn >> 23) & 3;
        switch (i) {
        case 0: offset = 0; break; /* DA */
        case 1: offset = -4; break; /* DB */
        case 2: offset = 4; break; /* IA */
        case 3: offset = 8; break; /* IB */
        }
        gen_op_addl_T1_im(offset);
        /* Load CPSR into T2 and PC into T0.  */
        gen_op_movl_T2_T0();
        gen_op_addl_T1_im(-4);
        if (insn & (1 << 21)) {
            /* Base writeback.  */
            switch (i) {
            case 0: offset = -4; break;
            case 1: offset = 0; break;
            case 2: offset = 8; break;
            case 3: offset = 4; break;
            }
            gen_op_addl_T1_im(offset);
            gen_movl_reg_T1(s, rn);
    } else if ((insn & 0x0e000000) == 0x0a000000) {
        /* branch link and change to thumb (blx <offset>) */
        val = (uint32_t)s->pc;
        gen_op_movl_T0_im(val);
        gen_movl_reg_T0(s, 14);
        /* Sign-extend the 24-bit offset */
        offset = (((int32_t)insn) << 8) >> 8;
        /* offset * 4 + bit24 * 2 + (thumb bit) */
        val += (offset << 2) | ((insn >> 23) & 2) | 1;
        /* pipeline offset */
        val += 4;
        gen_op_movl_T0_im(val);
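        /* Worked example (added): with imm24 = 0 and the H bit (bit 24)
           set, the target is this instruction's address + 8 (pipeline)
           + 2 (H bit), with bit 0 set to enter Thumb state -- s->pc here
           already points 4 past the instruction, and the extra
           "val += 4" supplies the remaining pipeline offset.  */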
    } else if ((insn & 0x0e000f00) == 0x0c000100) {
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            /* iWMMXt register transfer.  */
            if (env->cp15.c15_cpar & (1 << 1))
                if (!disas_iwmmxt_insn(env, s, insn))
                    return;
    } else if ((insn & 0x0fe00000) == 0x0c400000) {
        /* Coprocessor double register transfer.  */
    } else if ((insn & 0x0f000010) == 0x0e000010) {
        /* Additional coprocessor register transfer.  */
    } else if ((insn & 0x0ff10010) == 0x01000000) {
        /* cps (privileged) */
        if (insn & (1 << 19)) {
            if (insn & (1 << 8))
                mask |= CPSR_A;
            if (insn & (1 << 7))
                mask |= CPSR_I;
            if (insn & (1 << 6))
                mask |= CPSR_F;
            if (insn & (1 << 18))
                val |= mask;
        }
        if (insn & (1 << 14)) {
            mask |= CPSR_M;
            val |= (insn & 0x1f);
        }
        gen_op_movl_T0_im(val);
        gen_set_psr_T0(s, mask, 0);
    /* if not always execute, we generate a conditional jump to
       the next instruction */
    s->condlabel = gen_new_label();
    gen_test_cc[cond ^ 1](s->condlabel);
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                gen_op_movl_T0_im(val);
            } else {
                /* MOVT */
                gen_movl_T0_reg(s, rd);
                gen_op_movl_T1_im(0xffff);
                gen_op_andl_T0_T1();
                gen_op_movl_T1_im(val << 16);
            }
            gen_movl_reg_T0(s, rd);
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                gen_op_movl_T0_im(val);
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        switch (sh) {
        case 0x0: /* move program status register */
            gen_movl_T0_reg(s, rm);
            i = ((op1 & 2) != 0);
            if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
                goto illegal_op;
            rd = (insn >> 12) & 0xf;
            gen_op_movl_T0_spsr();
            gen_op_movl_T0_cpsr();
            gen_movl_reg_T0(s, rd);
            /* branch/exchange thumb (bx).  */
            gen_movl_T0_reg(s, rm);
            } else if (op1 == 3) {
            rd = (insn >> 12) & 0xf;
            gen_movl_T0_reg(s, rm);
            gen_movl_reg_T0(s, rd);
            /* Trivial implementation equivalent to bx.  */
            gen_movl_T0_reg(s, rm);
            /* branch link/exchange thumb (blx) */
            val = (uint32_t)s->pc;
            gen_op_movl_T1_im(val);
            gen_movl_T0_reg(s, rm);
            gen_movl_reg_T1(s, 14);
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            gen_movl_T0_reg(s, rm);
            gen_movl_T1_reg(s, rn);
            gen_op_double_T1_saturate();
            gen_op_subl_T0_T1_saturate();
            gen_op_addl_T0_T1_saturate();
            gen_movl_reg_T0(s, rd);
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc - 4);
            gen_op_movl_reg_TN[0][15]();
            s->is_jmp = DISAS_JUMP;
        case 0x8: /* signed multiply */
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            /* (32 * 16) >> 16 */
            gen_movl_T0_reg(s, rm);
            gen_movl_T1_reg(s, rs);
            gen_op_sarl_T1_im(16);
            gen_op_imulw_T0_T1();
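            /* Added arithmetic note: gen_op_imulw_T0_T1 keeps bits [47:16]
               of the 48-bit product, so e.g. T0 = 0x10000 (65536) with a
               T1 of 2 (the top half, after the arithmetic shift right by
               16) gives 0x20000 >> 16 = 2, matching the SMULW definition
               of (Rm * Rs[31:16]) >> 16.  */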
            if ((sh & 2) == 0) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T0_T1_setq();
            }
            gen_movl_reg_T0(s, rd);
            gen_movl_T0_reg(s, rm);
            gen_movl_T1_reg(s, rs);
            gen_mulxy(sh & 2, sh & 4);
            gen_op_signbit_T1_T0();
            gen_op_addq_T0_T1(rn, rd);
            gen_movl_reg_T0(s, rn);
            gen_movl_reg_T1(s, rd);
            gen_movl_T1_reg(s, rn);
            gen_op_addl_T0_T1_setq();
            gen_movl_reg_T0(s, rd);
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift)
                val = (val >> shift) | (val << (32 - shift));
            gen_op_movl_T1_im(val);
            if (logic_cc && shift)
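            /* Added example of the rotate-immediate expansion above: a
               12-bit field of 0x2ff means val = 0xff rotated right by
               2 * 2 = 4, i.e. (0xff >> 4) | (0xff << 28) = 0xf000000f.  */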
            gen_movl_T1_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_shift_T1_im_cc[shiftop](shift);
                gen_shift_T1_im[shiftop](shift);
            } else if (shiftop != 0) {
                gen_shift_T1_0_cc[shiftop]();
                gen_shift_T1_0[shiftop]();
            rs = (insn >> 8) & 0xf;
            gen_movl_T0_reg(s, rs);
            gen_shift_T1_T0_cc[shiftop]();
            gen_shift_T1_T0[shiftop]();
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            gen_movl_T0_reg(s, rn);
        rd = (insn >> 12) & 0xf;
        gen_op_andl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_logic_T0_cc();
        gen_op_xorl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_logic_T0_cc();
        if (set_cc && rd == 15) {
            /* SUBS r15, ... is used for exception return.  */
            gen_op_subl_T0_T1_cc();
            gen_exception_return(s);
            gen_op_subl_T0_T1_cc();
            gen_op_subl_T0_T1();
            gen_movl_reg_T0(s, rd);
        gen_op_rsbl_T0_T1_cc();
        gen_op_rsbl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_addl_T0_T1_cc();
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_adcl_T0_T1_cc();
        gen_op_adcl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_sbcl_T0_T1_cc();
        gen_op_sbcl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_rscl_T0_T1_cc();
        gen_op_rscl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_andl_T0_T1();
        gen_op_logic_T0_cc();
        gen_op_xorl_T0_T1();
        gen_op_logic_T0_cc();
        gen_op_subl_T0_T1_cc();
        gen_op_addl_T0_T1_cc();
        gen_movl_reg_T0(s, rd);
        gen_op_logic_T0_cc();
        if (logic_cc && rd == 15) {
            /* MOVS r15, ... is used for exception return.  */
            gen_op_movl_T0_T1();
            gen_exception_return(s);
            gen_movl_reg_T1(s, rd);
            gen_op_logic_T1_cc();
        gen_op_bicl_T0_T1();
        gen_movl_reg_T0(s, rd);
        gen_op_logic_T0_cc();
        gen_movl_reg_T1(s, rd);
        gen_op_logic_T1_cc();
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        /* multiplies, extra load/stores */
        sh = (insn >> 5) & 3;
        rd = (insn >> 16) & 0xf;
        rn = (insn >> 12) & 0xf;
        rs = (insn >> 8) & 0xf;
        op1 = (insn >> 20) & 0xf;
        switch (op1) {
        case 0: case 1: case 2: case 3: case 6:
            /* 32 bit mul */
            gen_movl_T0_reg(s, rs);
            gen_movl_T1_reg(s, rm);
            if (insn & (1 << 22)) {
                /* Subtract (mls) */
                gen_movl_T1_reg(s, rn);
                gen_op_rsbl_T0_T1();
            } else if (insn & (1 << 21)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T0_T1();
            }
            if (insn & (1 << 20))
                gen_op_logic_T0_cc();
            gen_movl_reg_T0(s, rd);
            /* 64 bit mul */
            gen_movl_T0_reg(s, rs);
            gen_movl_T1_reg(s, rm);
            if (insn & (1 << 22))
                gen_op_imull_T0_T1();
            else
                gen_op_mull_T0_T1();
            if (insn & (1 << 21)) /* mult accumulate */
                gen_op_addq_T0_T1(rn, rd);
            if (!(insn & (1 << 23))) { /* double accumulate */
                gen_op_addq_lo_T0_T1(rn);
                gen_op_addq_lo_T0_T1(rd);
            }
            if (insn & (1 << 20))
                gen_op_logic_T0_cc();
            gen_movl_reg_T0(s, rn);
            gen_movl_reg_T1(s, rd);
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (insn & (1 << 23)) {
                /* load/store exclusive */
                gen_movl_T1_reg(s, rn);
                if (insn & (1 << 20)) {
                gen_movl_T0_reg(s, rm);
                gen_movl_reg_T0(s, rd);
            } else {
                /* SWP instruction */
                gen_movl_T0_reg(s, rm);
                gen_movl_T1_reg(s, rn);
                if (insn & (1 << 22)) {
                gen_movl_reg_T0(s, rd);
            /* Misc load/store */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            gen_movl_T1_reg(s, rn);
            if (insn & (1 << 24))
                gen_add_datah_offset(s, insn, 0);
            if (insn & (1 << 20)) {
            } else if (sh & 2) {
                /* doubleword */
                gen_movl_T0_reg(s, rd);
                gen_op_addl_T1_im(4);
                gen_movl_T0_reg(s, rd + 1);
                gen_movl_reg_T0(s, rd);
                gen_op_addl_T1_im(4);
                address_offset = -4;
                gen_movl_T0_reg(s, rd);
            /* Perform base writeback before the loaded value to
               ensure correct behavior with overlapping index registers.
               ldrd with base writeback is undefined if the
               destination and index registers overlap.  */
            if (!(insn & (1 << 24))) {
                gen_add_datah_offset(s, insn, address_offset);
                gen_movl_reg_T1(s, rn);
            } else if (insn & (1 << 21)) {
                gen_op_addl_T1_im(address_offset);
                gen_movl_reg_T1(s, rn);
            }
            /* Complete the load.  */
            gen_movl_reg_T0(s, rd);
        if (insn & (1 << 4)) {
            /* ARMv6 Media instructions.  */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            rs = (insn >> 8) & 0xf;
            switch ((insn >> 23) & 3) {
            case 0: /* Parallel add/subtract.  */
                op1 = (insn >> 20) & 7;
                gen_movl_T0_reg(s, rn);
                gen_movl_T1_reg(s, rm);
                sh = (insn >> 5) & 7;
                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                    goto illegal_op;
                gen_arm_parallel_addsub[op1][sh]();
                gen_movl_reg_T0(s, rd);
                if ((insn & 0x00700020) == 0) {
                    /* Halfword pack.  */
                    gen_movl_T0_reg(s, rn);
                    gen_movl_T1_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    gen_op_shll_T1_im(shift);
                    if (insn & (1 << 6))
                        gen_op_pkhtb_T0_T1();
                    else
                        gen_op_pkhbt_T0_T1();
                    gen_movl_reg_T0(s, rd);
                } else if ((insn & 0x00200020) == 0x00200000) {
                    /* [us]sat */
                    gen_movl_T1_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    if (insn & (1 << 6)) {
                        gen_op_sarl_T1_im(shift);
                        gen_op_shll_T1_im(shift);
                    sh = (insn >> 16) & 0x1f;
                    if (insn & (1 << 22))
                    gen_movl_T1_reg(s, rd);
                } else if ((insn & 0x00300fe0) == 0x00200f20) {
                    /* [us]sat16 */
                    gen_movl_T1_reg(s, rm);
                    sh = (insn >> 16) & 0x1f;
                    if (insn & (1 << 22))
                        gen_op_usat16_T1(sh);
                    else
                        gen_op_ssat16_T1(sh);
                    gen_movl_T1_reg(s, rd);
                } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                    /* Select bytes.  */
                    gen_movl_T0_reg(s, rn);
                    gen_movl_T1_reg(s, rm);
                    gen_movl_reg_T0(s, rd);
                } else if ((insn & 0x000003e0) == 0x00000060) {
                    gen_movl_T1_reg(s, rm);
                    shift = (insn >> 10) & 3;
                    /* ??? In many cases it's not necessary to do a
                       rotate, a shift is sufficient.  */
                    gen_op_rorl_T1_im(shift * 8);
                    op1 = (insn >> 20) & 7;
                    switch (op1) {
                    case 0: gen_op_sxtb16_T1(); break;
                    case 2: gen_op_sxtb_T1(); break;
                    case 3: gen_op_sxth_T1(); break;
                    case 4: gen_op_uxtb16_T1(); break;
                    case 6: gen_op_uxtb_T1(); break;
                    case 7: gen_op_uxth_T1(); break;
                    default: goto illegal_op;
                    }
                    gen_movl_T2_reg(s, rn);
                    if ((op1 & 3) == 0) {
                        gen_op_add16_T1_T2();
                        gen_op_addl_T1_T2();
                    gen_movl_reg_T1(s, rd);
                } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                    /* rev */
                    gen_movl_T0_reg(s, rm);
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 7)) {
                    if (insn & (1 << 7))
                    gen_movl_reg_T0(s, rd);
            case 2: /* Multiplies (Type 3).  */
                gen_movl_T0_reg(s, rm);
                gen_movl_T1_reg(s, rs);
                if (insn & (1 << 20)) {
                    /* Signed multiply most significant [accumulate].  */
                    gen_op_imull_T0_T1();
                    if (insn & (1 << 5))
                        gen_op_roundqd_T0_T1();
                    else
                        gen_op_movl_T0_T1();
                    gen_movl_T1_reg(s, rn);
                    if (insn & (1 << 6)) {
                        gen_op_addl_T0_T1();
                        gen_op_rsbl_T0_T1();
                    gen_movl_reg_T0(s, rd);
                    if (insn & (1 << 5))
                        gen_op_swap_half_T1();
                    gen_op_mul_dual_T0_T1();
                    if (insn & (1 << 22)) {
                        if (insn & (1 << 6)) {
                            gen_op_addq_T0_T1_dual(rn, rd);
                            gen_op_subq_T0_T1_dual(rn, rd);
                        /* This addition cannot overflow.  */
                        if (insn & (1 << 6)) {
                            gen_op_subl_T0_T1();
                            gen_op_addl_T0_T1();
                        gen_movl_T1_reg(s, rn);
                        gen_op_addl_T0_T1_setq();
                        gen_movl_reg_T0(s, rd);
                op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                switch (op1) {
                case 0: /* Unsigned sum of absolute differences.  */
                    gen_movl_T0_reg(s, rm);
                    gen_movl_T1_reg(s, rs);
                    gen_op_usad8_T0_T1();
                    gen_movl_T1_reg(s, rn);
                    gen_op_addl_T0_T1();
                    gen_movl_reg_T0(s, rd);
                case 0x20: case 0x24: case 0x28: case 0x2c:
                    /* Bitfield insert/clear.  */
                    shift = (insn >> 7) & 0x1f;
                    i = (insn >> 16) & 0x1f;
                    gen_op_movl_T1_im(0);
                    gen_movl_T1_reg(s, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
                    gen_movl_reg_T1(s, rd);
                case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                    gen_movl_T1_reg(s, rm);
                    shift = (insn >> 7) & 0x1f;
                    i = ((insn >> 16) & 0x1f) + 1;
                    gen_op_ubfx_T1(shift, (1u << i) - 1);
                    gen_op_sbfx_T1(shift, i);
                    gen_movl_reg_T1(s, rd);
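                /* Added mask arithmetic note: ((1u << i) - 1) << shift
                   builds an i-bit wide mask at bit position "shift",
                   e.g. i = 4, shift = 8 gives 0xf << 8 = 0x00000f00,
                   the field a BFI with lsb 8 and width 4 replaces.  */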
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
                goto illegal_op;
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            gen_movl_T1_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn);
            if (insn & (1 << 20)) {
                /* load */
#if defined(CONFIG_USER_ONLY)
                if (insn & (1 << 22))
#else
                if (insn & (1 << 22)) {
                    gen_op_ldub_kernel();
                    gen_op_ldl_kernel();
#endif
            } else {
                /* store */
                gen_movl_T0_reg(s, rd);
#if defined(CONFIG_USER_ONLY)
                if (insn & (1 << 22))
#else
                if (insn & (1 << 22)) {
                    gen_op_stb_kernel();
                    gen_op_stl_kernel();
#endif
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn);
                gen_movl_reg_T1(s, rn);
            } else if (insn & (1 << 21))
                gen_movl_reg_T1(s, rn);
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                gen_movl_reg_T0(s, rd);
        {
            int j, n, user, loaded_base;
            /* load/store multiple words */
            /* XXX: store correct base if write back */
            if (insn & (1 << 22)) {
                if (IS_USER(s))
                    goto illegal_op; /* only usable in supervisor mode */
                if ((insn & (1 << 15)) == 0)
                    user = 1;
            }
            rn = (insn >> 16) & 0xf;
            gen_movl_T1_reg(s, rn);

            /* compute total size */
            for (i = 0; i < 16; i++) {
                if (insn & (1 << i))
                    n++;
            }
            /* XXX: test invalid n == 0 case ? */
            if (insn & (1 << 23)) {
                if (insn & (1 << 24)) {
                    gen_op_addl_T1_im(4);
                } else {
                    /* post increment */
                }
            } else {
                if (insn & (1 << 24)) {
                    gen_op_addl_T1_im(-(n * 4));
                } else {
                    /* post decrement */
                    gen_op_addl_T1_im(-((n - 1) * 4));
                }
            }
            for (i = 0; i < 16; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 20)) {
                        /* load */
                        if (user)
                            gen_op_movl_user_T0(i);
                        else if (i == rn) {
                            gen_op_movl_T2_T0();
                        } else {
                            gen_movl_reg_T0(s, i);
                        }
                    } else {
                        /* store */
                        if (i == 15) {
                            /* special case: r15 = PC + 8 */
                            val = (long)s->pc + 4;
                            gen_op_movl_TN_im[0](val);
                        } else if (user) {
                            gen_op_movl_T0_user(i);
                        } else {
                            gen_movl_T0_reg(s, i);
                        }
                    }
                    /* no need to add after the last transfer */
                    gen_op_addl_T1_im(4);
                }
            }
            if (insn & (1 << 21)) {
                /* write back */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                    } else {
                        /* post increment */
                        gen_op_addl_T1_im(4);
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        gen_op_addl_T1_im(-((n - 1) * 4));
                    } else {
                        /* post decrement */
                        gen_op_addl_T1_im(-(n * 4));
                    }
                }
                gen_movl_reg_T1(s, rn);
            }
            if (loaded_base) {
                gen_op_movl_T0_T2();
                gen_movl_reg_T0(s, rn);
            }
            if ((insn & (1 << 22)) && !user) {
                /* Restore CPSR from SPSR.  */
                gen_op_movl_T0_spsr();
                gen_op_movl_cpsr_T0(0xffffffff);
                s->is_jmp = DISAS_UPDATE;
            }
            /* branch (and link) */
            val = (int32_t)s->pc;
            if (insn & (1 << 24)) {
                gen_op_movl_T0_im(val);
                gen_op_movl_reg_TN[0][14]();
            }
            offset = (((int32_t)insn << 8) >> 8);
            val += (offset << 2) + 4;
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            /* swi */
            gen_op_movl_T0_im((long)s->pc);
            gen_op_movl_reg_TN[0][15]();
            s->is_jmp = DISAS_SWI;
        illegal_op:
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc - 4);
            gen_op_movl_reg_TN[0][15]();
            gen_op_undef_insn();
            s->is_jmp = DISAS_JUMP;
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
        gen_op_andl_T0_T1();
        gen_op_bicl_T0_T1();
        gen_op_xorl_T0_T1();
        gen_op_addl_T0_T1_cc();
        gen_op_addl_T0_T1();
        gen_op_adcl_T0_T1_cc();
        gen_op_adcl_T0_T1();
        gen_op_sbcl_T0_T1_cc();
        gen_op_sbcl_T0_T1();
        gen_op_subl_T0_T1_cc();
        gen_op_subl_T0_T1();
        gen_op_rsbl_T0_T1_cc();
        gen_op_rsbl_T0_T1();
    default: /* 5, 6, 7, 9, 12, 15. */
        gen_op_logic_T0_cc();

/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is invalid.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset, addr;
    uint32_t rd, rn, rm, rs;
    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            gen_movl_T0_reg(s, 14);
            gen_op_movl_T1_im(offset);
            gen_op_addl_T0_T1();
            gen_op_movl_T1_im(0xfffffffc);
            gen_op_andl_T0_T1();
            addr = (uint32_t)s->pc;
            gen_op_movl_T1_im(addr | 1);
            gen_movl_reg_T1(s, 14);
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            gen_movl_T0_reg(s, 14);
            gen_op_movl_T1_im(offset);
            gen_op_addl_T0_T1();
            addr = (uint32_t)s->pc;
            gen_op_movl_T1_im(addr | 1);
            gen_movl_reg_T1(s, 14);
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               exception.  */
            offset = ((int32_t)insn << 21) >> 9;
            addr = s->pc + 2 + offset;
            gen_op_movl_T0_im(addr);
            gen_movl_reg_T0(s, 14);
        /* Fall through to 32-bit decode.  */
    }

    insn = lduw_code(s->pc);
    insn |= (uint32_t)insn_hw1 << 16;
    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    gen_op_movl_T1_im(s->pc & ~3);
                } else {
                    gen_movl_T1_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    gen_movl_reg_T0(s, rs);
                    gen_op_addl_T1_im(4);
                    gen_movl_reg_T0(s, rd);
                } else {
                    /* strd */
                    gen_movl_T0_reg(s, rs);
                    gen_op_addl_T1_im(4);
                    gen_movl_T0_reg(s, rd);
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    gen_op_addl_T1_im(offset - 4);
                    gen_movl_reg_T1(s, rn);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rn);
                if (insn & (1 << 20)) {
                gen_movl_reg_T0(s, rd);
            } else if ((insn & (1 << 6)) == 0) {
                /* Table branch (tbb, tbh).  */
                if (rn == 15) {
                    gen_op_movl_T1_im(s->pc);
                } else {
                    gen_movl_T1_reg(s, rn);
                }
                gen_movl_T2_reg(s, rm);
                gen_op_addl_T1_T2();
                if (insn & (1 << 4)) {
                    /* tbh */
                    gen_op_addl_T1_T2();
                }
                gen_op_jmp_T0_im(s->pc);
                s->is_jmp = DISAS_JUMP;
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                op = (insn >> 4) & 0x3;
                gen_movl_T1_reg(s, rn);
                if (insn & (1 << 20)) {
                    gen_movl_reg_T1(s, rd);
                    gen_movl_reg_T0(s, rs);
                    gen_movl_T0_reg(s, rs);
                    gen_movl_T2_reg(s, rd);
                    gen_movl_reg_T0(s, rm);
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    gen_movl_T1_reg(s, rn);
                    if (insn & (1 << 24)) {
                        gen_op_addl_T1_im(4);
                    } else {
                        gen_op_addl_T1_im(-4);
                    }
                    /* Load CPSR into T2 and PC into T0.  */
                    gen_op_movl_T2_T0();
                    gen_op_addl_T1_im(-4);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24))
                            gen_op_addl_T1_im(8);
                        gen_movl_reg_T1(s, rn);
                    }
                } else {
                    /* srs */
                    if (op == (env->uncached_cpsr & CPSR_M)) {
                        gen_movl_T1_reg(s, 13);
                    } else {
                        gen_op_movl_T1_r13_banked(op);
                    }
                    if ((insn & (1 << 24)) == 0) {
                        gen_op_addl_T1_im(-8);
                    }
                    gen_movl_T0_reg(s, 14);
                    gen_op_movl_T0_cpsr();
                    gen_op_addl_T1_im(4);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            gen_op_addl_T1_im(-4);
                        } else {
                            gen_op_addl_T1_im(4);
                        }
                        if (op == (env->uncached_cpsr & CPSR_M)) {
                            gen_movl_reg_T1(s, 13);
                        } else {
                            gen_op_movl_r13_T1_banked(op);
                        }
                /* Load/store multiple.  */
                gen_movl_T1_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    gen_op_addl_T1_im(-offset);
                }
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        gen_movl_reg_T0(s, i);
                    } else {
                        /* Store.  */
                        gen_movl_T0_reg(s, i);
                    }
                    gen_op_addl_T1_im(4);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        gen_op_addl_T1_im(-offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    gen_movl_reg_T1(s, rn);
    case 5: /* Data processing register constant shift.  */
        if (rn == 15) {
            gen_op_movl_T0_im(0);
        } else {
            gen_movl_T0_reg(s, rn);
        }
        gen_movl_T1_reg(s, rm);
        op = (insn >> 21) & 0xf;
        shiftop = (insn >> 4) & 3;
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
        conds = (insn & (1 << 20)) != 0;
        logic_cc = (conds && thumb2_logic_op(op));
        if (shift != 0) {
            if (logic_cc) {
                gen_shift_T1_im_cc[shiftop](shift);
            } else {
                gen_shift_T1_im[shiftop](shift);
            }
        } else if (shiftop != 0) {
            if (logic_cc) {
                gen_shift_T1_0_cc[shiftop]();
            } else {
                gen_shift_T1_0[shiftop]();
            }
        }
        if (gen_thumb2_data_op(s, op, conds, 0))
            goto illegal_op;
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            gen_movl_T0_reg(s, rm);
            gen_movl_T1_reg(s, rn);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            if (insn & (1 << 20)) {
                gen_shift_T1_T0_cc[op]();
                gen_op_logic_T1_cc();
            } else {
                gen_shift_T1_T0[op]();
            }
            gen_movl_reg_T1(s, rd);
            break;
        case 1: /* Sign/zero extend.  */
            gen_movl_T1_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            gen_op_rorl_T1_im(shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_op_sxth_T1(); break;
            case 1: gen_op_uxth_T1(); break;
            case 2: gen_op_sxtb16_T1(); break;
            case 3: gen_op_uxtb16_T1(); break;
            case 4: gen_op_sxtb_T1(); break;
            case 5: gen_op_uxtb_T1(); break;
            default: goto illegal_op;
            }
            gen_movl_T2_reg(s, rn);
            if ((op >> 1) == 1) {
                gen_op_add16_T1_T2();
            } else {
                gen_op_addl_T1_T2();
            }
            gen_movl_reg_T1(s, rd);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            gen_movl_T0_reg(s, rn);
            gen_movl_T1_reg(s, rm);
            gen_thumb2_parallel_addsub[op][shift]();
            gen_movl_reg_T0(s, rd);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            /* Saturating add/subtract.  */
            gen_movl_T0_reg(s, rm);
            gen_movl_T1_reg(s, rn);
            gen_op_double_T1_saturate();
            gen_op_subl_T0_T1_saturate();
            gen_op_addl_T0_T1_saturate();
            gen_movl_T0_reg(s, rn);
            case 0x0a: /* rbit */
            case 0x08: /* rev */
            case 0x09: /* rev16 */
            case 0x0b: /* revsh */
            case 0x10: /* sel */
                gen_movl_T1_reg(s, rm);
            case 0x18: /* clz */
            gen_movl_reg_T0(s, rd);
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            gen_movl_T0_reg(s, rn);
            gen_movl_T1_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                gen_movl_T1_reg(s, rs);
                gen_op_rsbl_T0_T1();
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(op & 2, op & 1);
                gen_movl_T1_reg(s, rs);
                gen_op_addl_T0_T1_setq();
                gen_movl_reg_T0(s, rd);
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                gen_op_swap_half_T1();
                gen_op_mul_dual_T0_T1();
                /* This addition cannot overflow.  */
                if (insn & (1 << 22)) {
                    gen_op_subl_T0_T1();
                } else {
                    gen_op_addl_T0_T1();
                }
                gen_movl_T1_reg(s, rs);
                gen_op_addl_T0_T1_setq();
                gen_movl_reg_T0(s, rd);
            case 3: /* 32 * 16 -> 32msb */
                gen_op_sarl_T1_im(16);
                gen_op_imulw_T0_T1();
                gen_movl_T1_reg(s, rs);
                gen_op_addl_T0_T1_setq();
                gen_movl_reg_T0(s, rd);
            case 5: case 6: /* 32 * 32 -> 32msb */
                gen_op_imull_T0_T1();
                if (insn & (1 << 5))
                    gen_op_roundqd_T0_T1();
                else
                    gen_op_movl_T0_T1();
                gen_movl_T1_reg(s, rs);
                if (insn & (1 << 21)) {
                    gen_op_addl_T0_T1();
                } else {
                    gen_op_rsbl_T0_T1();
                }
                gen_movl_reg_T0(s, rd);
            case 7: /* Unsigned sum of absolute differences.  */
                gen_op_usad8_T0_T1();
                gen_movl_T1_reg(s, rs);
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            gen_movl_T0_reg(s, rn);
            gen_movl_T1_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                gen_op_udivl_T0_T1();
                gen_op_sdivl_T0_T1();
                gen_movl_reg_T0(s, rd);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                gen_op_swap_half_T1();
                gen_op_mul_dual_T0_T1();
                gen_op_subl_T0_T1();
                gen_op_addl_T0_T1();
                gen_op_signbit_T1_T0();
                gen_op_addq_T0_T1(rs, rd);
                gen_movl_reg_T0(s, rs);
                gen_movl_reg_T1(s, rd);
                /* Unsigned 64-bit multiply  */
                gen_op_mull_T0_T1();
                gen_mulxy(op & 2, op & 1);
                gen_op_signbit_T1_T0();
                /* Signed 64-bit multiply  */
                gen_op_imull_T0_T1();
                gen_op_addq_lo_T0_T1(rs);
                gen_op_addq_lo_T0_T1(rd);
            } else if (op & 0x40) {
                /* 64-bit accumulate.  */
                gen_op_addq_T0_T1(rs, rd);
                gen_movl_reg_T0(s, rs);
                gen_movl_reg_T1(s, rd);
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
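                /* Worked example (added): hw1 = 0xf000 (S = 0, imm10 = 0)
                   with hw2 = 0xb880 (J1 = J2 = 1, imm11 = 0x80) yields
                   offset = 0x100: the sign-extend step contributes 0,
                   imm11 gives 0x80 << 1 = 0x100, and since J1 and J2 are
                   both set neither XOR above flips bits 23/22.  */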
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    gen_op_movl_T1_im(addr | 1);
                    gen_movl_reg_T1(s, 14);
                }
                if (insn & (1 << 12)) {
                    /* blx */
                    addr &= ~(uint32_t)2;
                    gen_op_movl_T0_im(addr);
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control.  */
                if (insn & (1 << 13))
                    goto illegal_op;
                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                }
                op = (insn >> 20) & 7;
                switch (op) {
                case 0: /* msr cpsr.  */
                    gen_op_v7m_msr_T0(insn & 0xff);
                    gen_movl_reg_T0(s, rn);
                case 1: /* msr spsr.  */
                    gen_movl_T0_reg(s, rn);
                    if (gen_set_psr_T0(s,
                                       msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                                       op == 1))
                        goto illegal_op;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                            break;
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_op_movl_T0_im(imm);
                            gen_set_psr_T0(s, offset, 0);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        op = (insn >> 4) & 0xf;
                        /* These execute as NOPs.  */
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        gen_movl_T0_reg(s, rn);
                        gen_bx(s);
                        break;
                    case 5: /* Exception return.  */
                        /* Unpredictable in user mode.  */
                        goto illegal_op;
                    case 6: /* mrs cpsr.  */
                        if (IS_M(env)) {
                            gen_op_v7m_mrs_T0(insn & 0xff);
                        } else {
                            gen_op_movl_T0_cpsr();
                        }
                        gen_movl_reg_T0(s, rd);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        gen_op_movl_T0_spsr();
                        gen_movl_reg_T0(s, rd);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc[op ^ 1](s->condlabel);
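                /* ARM condition codes come in complementary pairs (EQ/NE,
                   CS/CC, ...), so op ^ 1 is the inverse condition: the jump
                   to condlabel is taken when the branch condition fails.  */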
                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                addr = s->pc + offset;
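                /* The reassembled value is a sign-extended, halfword-aligned
                   21-bit offset, i.e. the architectural +/-1MB range for
                   Thumb-2 conditional branches.  */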
                gen_jmp(s, addr);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15)
                        gen_op_movl_T1_im(0);
                    else
                        gen_movl_T1_reg(s, rn);
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        gen_op_sbfx_T1(shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        gen_op_ubfx_T1(shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            gen_movl_T0_reg(s, rd);
                            gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
                        }
                        break;
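                        /* Mask example: shift = 8 and a 4-bit field give
                           ((1u << 4) - 1) << 8 = 0x00000f00, inserting T0's
                           low bits into T1[11:8].  */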
                    default: /* Saturate.  */
                        gen_movl_T1_reg(s, rn);
                        if (shift) {
                            if (op & 1)
                                gen_op_sarl_T1_im(shift);
                            else
                                gen_op_shll_T1_im(shift);
                        }
                        if (op & 4) {
                            /* Unsigned saturate.  */
                            if ((op & 1) && shift == 0)
                                gen_op_usat16_T1(imm);
                            else
                                gen_op_usat_T1(imm);
                        } else {
                            /* Signed saturate.  */
                            if ((op & 1) && shift == 0)
                                gen_op_ssat16_T1(imm);
                            else
                                gen_op_ssat_T1(imm);
                        }
                        break;
                    }
                    gen_movl_reg_T1(s, rd);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
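                    /* i:imm3:imm8 -> a 12-bit value: insn[26] -> imm[11],
                       insn[14:12] -> imm[10:8], insn[7:0] -> imm[7:0].  */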
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            gen_movl_T0_reg(s, rd);
                            gen_op_movtop_T0_im(imm << 16);
                        } else {
                            /* movw */
                            gen_op_movl_T0_im(imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            addr = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                addr -= imm;
                            else
                                addr += imm;
                            gen_op_movl_T0_im(addr);
                        } else {
                            gen_movl_T0_reg(s, rn);
                            gen_op_movl_T1_im(imm);
                            if (insn & (1 << 23))
                                gen_op_subl_T0_T1();
                            else
                                gen_op_addl_T0_T1();
                        }
                    }
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm <<= 8;
                    imm |= imm << 16;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
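                /* Examples with imm8 = 0xab: type 1 -> 0x00ab00ab,
                   type 2 -> 0xab00ab00, type 3 -> 0xabababab.  The rotated
                   form is (imm8 | 0x80) rotated right by the 5-bit count,
                   imm8's top bit supplying the low count bit.  */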
                gen_op_movl_T1_im(imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15)
                    gen_op_movl_T0_im(0);
                else
                    gen_movl_T0_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out))
                    goto illegal_op;
                rd = (insn >> 8) & 0xf;
                if (rd != 15)
                    gen_movl_reg_T0(s, rd);
            }
        }
        break;
    case 12: /* Load/store single data item.  */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        if (rn == 15) {
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            gen_op_movl_T1_im(imm);
        } else {
            gen_movl_T1_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                gen_op_addl_T1_im(imm);
            } else {
                imm = insn & 0xff;
                op = (insn >> 8) & 7;
                switch (op) {
                case 0: case 8: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3)
                        goto illegal_op;
                    gen_movl_T2_reg(s, rm);
                    if (shift)
                        gen_op_shll_T2_im(shift);
                    gen_op_addl_T1_T2();
                    break;
                case 4: /* Negative offset.  */
                    gen_op_addl_T1_im(-imm);
                    break;
                case 6: /* User privilege.  */
                    gen_op_addl_T1_im(imm);
                    break;
                case 1: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 3: /* Post-increment.  */
                    gen_op_movl_T2_im(imm);
                    break;
                case 5: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 7: /* Pre-increment.  */
                    gen_op_addl_T1_im(imm);
                    break;
                default:
                    goto illegal_op;
                }
            }
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (insn & (1 << 20)) {
            /* Load.  */
            if (rs == 15 && op != 2) {
                if (op & 2)
                    goto illegal_op;
                /* Memory hint.  Implemented as NOP.  */
            } else {
                switch (op) {
                case 0: gen_ldst(ldub, s); break;
                case 4: gen_ldst(ldsb, s); break;
                case 1: gen_ldst(lduw, s); break;
                case 5: gen_ldst(ldsw, s); break;
                case 2: gen_ldst(ldl, s); break;
                default: goto illegal_op;
                }
                if (rs == 15)
                    gen_bx(s);
                else
                    gen_movl_reg_T0(s, rs);
            }
        } else {
            /* Store.  */
            gen_movl_T0_reg(s, rs);
            switch (op) {
            case 0: gen_ldst(stb, s); break;
            case 1: gen_ldst(stw, s); break;
            case 2: gen_ldst(stl, s); break;
            default: goto illegal_op;
            }
        }
        /* Write the updated base address back to rn.  */
        gen_op_addl_T1_im(imm);
        gen_movl_reg_T1(s, rn);
        break;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        s->condlabel = gen_new_label();
        gen_test_cc[cond ^ 1](s->condlabel);
        s->condjmp = 1;
    }
    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            gen_movl_T0_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                gen_op_movl_T1_im((insn >> 6) & 7);
            } else {
                /* register */
                rm = (insn >> 6) & 7;
                gen_movl_T1_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    gen_op_subl_T0_T1();
                else
                    gen_op_subl_T0_T1_cc();
            } else {
                if (s->condexec_mask)
                    gen_op_addl_T0_T1();
                else
                    gen_op_addl_T0_T1_cc();
            }
            gen_movl_reg_T0(s, rd);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            gen_movl_T0_reg(s, rm);
            if (s->condexec_mask)
                gen_shift_T0_im_thumb[op](shift);
            else
                gen_shift_T0_im_thumb_cc[op](shift);
            gen_movl_reg_T0(s, rd);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) {
            /* mov */
            gen_op_movl_T0_im(insn & 0xff);
        } else {
            gen_movl_T0_reg(s, rd);
            gen_op_movl_T1_im(insn & 0xff);
        }
        switch (op) {
        case 0: /* mov */
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 1: /* cmp */
            gen_op_subl_T0_T1_cc();
            break;
        case 2: /* add */
            if (s->condexec_mask)
                gen_op_addl_T0_T1();
            else
                gen_op_addl_T0_T1_cc();
            break;
        case 3: /* sub */
            if (s->condexec_mask)
                gen_op_subl_T0_T1();
            else
                gen_op_subl_T0_T1_cc();
            break;
        }
        if (op != 1)
            gen_movl_reg_T0(s, rd);
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
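            /* s->pc was already advanced past this insn, so s->pc + 2 is
               the architectural PC (insn address + 4).  */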
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            gen_op_movl_T1_im(val);
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
                break;
            case 1: /* cmp */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_subl_T0_T1_cc();
                break;
            case 2: /* mov/cpy */
                gen_movl_T0_reg(s, rm);
                gen_movl_reg_T0(s, rd);
                break;
            case 3:/* branch [and link] exchange thumb register */
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    gen_op_movl_T1_im(val);
                    gen_movl_reg_T1(s, 14);
                }
                gen_movl_T0_reg(s, rm);
                gen_bx(s);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }
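        /* After the swap T1 holds the value to shift and T0 the shift
           amount, matching the gen_shift_T1_T0 helpers; results then come
           back in T1 and are written to rm below.  */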
        if (op == 9) /* neg */
            gen_op_movl_T0_im(0);
        else if (op != 0xf) /* mvn doesn't read its first operand */
            gen_movl_T0_reg(s, rd);

        gen_movl_T1_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            gen_op_andl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x1: /* eor */
            gen_op_xorl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_op_shll_T1_T0();
            } else {
                gen_op_shll_T1_T0_cc();
                gen_op_logic_T1_cc();
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_op_shrl_T1_T0();
            } else {
                gen_op_shrl_T1_T0_cc();
                gen_op_logic_T1_cc();
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_op_sarl_T1_T0();
            } else {
                gen_op_sarl_T1_T0_cc();
                gen_op_logic_T1_cc();
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_op_adcl_T0_T1();
            else
                gen_op_adcl_T0_T1_cc();
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_op_sbcl_T0_T1();
            else
                gen_op_sbcl_T0_T1_cc();
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                gen_op_rorl_T1_T0();
            } else {
                gen_op_rorl_T1_T0_cc();
                gen_op_logic_T1_cc();
            }
            break;
        case 0x8: /* tst */
            gen_op_andl_T0_T1();
            gen_op_logic_T0_cc();
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                gen_op_subl_T0_T1();
            else
                gen_op_subl_T0_T1_cc();
            break;
        case 0xa: /* cmp */
            gen_op_subl_T0_T1_cc();
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_op_addl_T0_T1_cc();
            rd = 16;
            break;
        case 0xc: /* orr */
            gen_op_orl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xd: /* mul */
            gen_op_mull_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xe: /* bic */
            gen_op_bicl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xf: /* mvn */
            gen_op_notl_T1();
            if (!s->condexec_mask)
                gen_op_logic_T1_cc();
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val)
                gen_movl_reg_T1(s, rm);
            else
                gen_movl_reg_T0(s, rd);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        gen_movl_T1_reg(s, rn);
        gen_movl_T2_reg(s, rm);
        gen_op_addl_T1_T2();

        if (op < 3) /* store */
            gen_movl_T0_reg(s, rd);

        switch (op) {
        case 0: gen_ldst(stl, s); break;  /* str */
        case 1: gen_ldst(stw, s); break;  /* strh */
        case 2: gen_ldst(stb, s); break;  /* strb */
        case 3: gen_ldst(ldsb, s); break; /* ldrsb */
        case 4: gen_ldst(ldl, s); break;  /* ldr */
        case 5: gen_ldst(lduw, s); break; /* ldrh */
        case 6: gen_ldst(ldub, s); break; /* ldrb */
        case 7: gen_ldst(ldsw, s); break; /* ldrsh */
        }
        if (op >= 3) /* load */
            gen_movl_reg_T0(s, rd);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();
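        /* (insn >> 4) & 0x7c extracts imm5 (insn[10:6]) already scaled by
           4, the word-offset granularity.  */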
        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;
    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldub, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stb, s);
        }
        break;
    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(lduw, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stw, s);
        }
        break;
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        gen_movl_T1_reg(s, 13);
        val = (insn & 0xff) * 4;
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;
    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            gen_movl_T0_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        gen_op_movl_T1_im(val);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
        break;
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            gen_movl_T1_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            gen_op_movl_T2_im(val);
            gen_op_addl_T1_T2();
            gen_movl_reg_T1(s, 13);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            gen_movl_T1_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_op_sxth_T1(); break;
            case 1: gen_op_sxtb_T1(); break;
            case 2: gen_op_uxth_T1(); break;
            case 3: gen_op_uxtb_T1(); break;
            }
            gen_movl_reg_T1(s, rd);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            gen_movl_T1_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                gen_op_movl_T2_im(-offset);
                gen_op_addl_T1_T2();
            }
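            /* push (bit 11 clear) pre-decrements SP by the total transfer
               size computed above; pop walks upwards from the current SP.  */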
            gen_op_movl_T2_im(4);
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        gen_ldst(ldl, s);
                        gen_movl_reg_T0(s, i);
                    } else {
                        /* push */
                        gen_movl_T0_reg(s, i);
                        gen_ldst(stl, s);
                    }
                    /* advance to the next address.  */
                    gen_op_addl_T1_T2();
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    gen_ldst(ldl, s);
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    gen_movl_T0_reg(s, 14);
                    gen_ldst(stl, s);
                }
                gen_op_addl_T1_T2();
            }
            if ((insn & (1 << 11)) == 0) {
                gen_op_movl_T2_im(-offset);
                gen_op_addl_T1_T2();
            }
            /* write back the new stack pointer */
            gen_movl_reg_T1(s, 13);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s);
            break;
        case 1: case 3: case 9: case 11: /* czb */
            ARCH(6T2);
            rm = insn & 7;
            gen_movl_T0_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                gen_op_testn_T0(s->condlabel);
            else
                gen_op_test_T0(s->condlabel);

            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
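            /* offset = insn[9]:insn[7:3] scaled to bytes: imm5 lands in
               bits [5:1], the i bit in bit 6, so cbz/cbnz can only skip
               forward by up to 126 bytes.  */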
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
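            /* Only the top three condition bits are kept here; the low bit
               is folded into condexec_mask and re-injected per instruction
               by the IT-state advance in gen_intermediate_code_internal.  */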
            /* No actual code generated for this insn, just set up state.  */
            break;
        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc - 2);
            gen_op_movl_reg_TN[0][15]();
            gen_op_bkpt();
            s->is_jmp = DISAS_JUMP;
            break;
        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            gen_movl_T0_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: gen_op_rev_T0(); break;
            case 1: gen_op_rev16_T0(); break;
            case 3: gen_op_revsh_T0(); break;
            default: goto illegal_op;
            }
            gen_movl_reg_T0(s, rd);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                val = (insn & (1 << 4)) != 0;
                gen_op_movl_T0_im(val);
                if (insn & 1)
                    gen_op_v7m_msr_T0(16);
                if (insn & 2)
                    gen_op_v7m_msr_T0(17);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;

                val = ((insn & 7) << 6) & shift;
                gen_op_movl_T0_im(val);
                gen_set_psr_T0(s, shift, 0);
            }
            break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        gen_movl_T1_reg(s, rn);
        gen_op_movl_T2_im(4);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    gen_ldst(ldl, s);
                    gen_movl_reg_T0(s, i);
                } else {
                    /* store */
                    gen_movl_T0_reg(s, i);
                    gen_ldst(stl, s);
                }
                /* advance to the next address */
                gen_op_addl_T1_T2();
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0)
            gen_movl_reg_T1(s, rn);
        break;
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_op_movl_T0_im((long)s->pc | 1);
            /* Don't set r15.  */
            gen_op_movl_reg_TN[0][15]();
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc[cond ^ 1](s->condlabel);
        s->condjmp = 1;
        gen_movl_T1_reg(s, 15);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;
    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_op_movl_T0_im((long)s->pc - 4);
    gen_op_movl_reg_TN[0][15]();
    gen_op_undef_insn();
    s->is_jmp = DISAS_JUMP;
    return;
undef:
    gen_set_condexec(s);
    gen_op_movl_T0_im((long)s->pc - 2);
    gen_op_movl_reg_TN[0][15]();
    gen_op_undef_insn();
    s->is_jmp = DISAS_JUMP;
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(CPUState *env,
                                                 TranslationBlock *tb,
                                                 int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_ptr = gen_opc_buf;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    gen_opparam_ptr = gen_opparam_buf;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
    dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits)
        gen_op_set_condexec(0);
    do {
#ifndef CONFIG_USER_ONLY
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block.  */
            gen_op_exception_exit();
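            /* (Addresses 0xfffffff0..0xffffffff are M-profile EXC_RETURN
               magic values, so this jump is really an exception return.)  */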
        }
#endif

        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_set_condexec(dc);
                    gen_op_movl_T0_im((long)dc->pc);
                    gen_op_movl_reg_TN[0][15]();
                    gen_op_debug();
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
        }
        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
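            /* IT state advance: each insn consumes one mask bit, which
               becomes the LSB of the condition.  E.g. ITE EQ runs its first
               insn as EQ and its second as NE; an empty mask ends the IT
               block.  */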
        } else {
            disas_arm_insn(env, dc);
        }
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Terminate the TB on memory ops if watchpoints are present.  */
        /* FIXME: This should be replaced by the deterministic execution
         * IRQ raising bits.  */
        if (dc->is_mem && env->nb_watchpoints)
            break;

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start);
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (__builtin_expect(env->singlestep_enabled, 0)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_op_swi();
            } else {
                gen_op_debug();
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_op_movl_T0_im((long)dc->pc);
            gen_op_movl_reg_TN[0][15]();
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_op_swi();
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_op_debug();
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this
           code.  */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            gen_op_movl_T0_0();
            gen_op_exit_tb();
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_op_wfi();
            break;
        case DISAS_SWI:
            gen_op_swi();
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    *gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
    }
    return 0;
}
int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 0);
}

int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    return gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    uint32_t psr;

    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */

    for(i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}