4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 #define ENABLE_ARCH_5J 0
33 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
34 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
35 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
36 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
38 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
40 /* internal defines */
41 typedef struct DisasContext
{
44 /* Nonzero if this instruction has been conditionally skipped. */
46 /* The label that will be jumped to when the instruction is skipped. */
48 /* Thumb-2 condtional execution bits. */
51 struct TranslationBlock
*tb
;
52 int singlestep_enabled
;
55 #if !defined(CONFIG_USER_ONLY)
60 #if defined(CONFIG_USER_ONLY)
63 #define IS_USER(s) (s->user)
66 /* These instructions trap after executing, so defer them until after the
67 conditional executions state has been updated. */
71 #ifdef USE_DIRECT_JUMP
74 #define TBPARAM(x) (long)(x)
77 /* XXX: move that elsewhere */
78 static uint16_t *gen_opc_ptr
;
79 static uint32_t *gen_opparam_ptr
;
84 #define DEF(s, n, copy_size) INDEX_op_ ## s,
92 #define PAS_OP(pfx) { \
93 gen_op_ ## pfx ## add16_T0_T1, \
94 gen_op_ ## pfx ## addsubx_T0_T1, \
95 gen_op_ ## pfx ## subaddx_T0_T1, \
96 gen_op_ ## pfx ## sub16_T0_T1, \
97 gen_op_ ## pfx ## add8_T0_T1, \
100 gen_op_ ## pfx ## sub8_T0_T1 }
102 static GenOpFunc
*gen_arm_parallel_addsub
[8][8] = {
114 /* For unknown reasons Arm and Thumb-2 use arbitrarily diffenet encodings. */
115 #define PAS_OP(pfx) { \
116 gen_op_ ## pfx ## add8_T0_T1, \
117 gen_op_ ## pfx ## add16_T0_T1, \
118 gen_op_ ## pfx ## addsubx_T0_T1, \
120 gen_op_ ## pfx ## sub8_T0_T1, \
121 gen_op_ ## pfx ## sub16_T0_T1, \
122 gen_op_ ## pfx ## subaddx_T0_T1, \
125 static GenOpFunc
*gen_thumb2_parallel_addsub
[8][8] = {
137 static GenOpFunc1
*gen_test_cc
[14] = {
154 const uint8_t table_logic_cc
[16] = {
173 static GenOpFunc1
*gen_shift_T1_im
[4] = {
180 static GenOpFunc
*gen_shift_T1_0
[4] = {
187 static GenOpFunc1
*gen_shift_T2_im
[4] = {
194 static GenOpFunc
*gen_shift_T2_0
[4] = {
201 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
202 gen_op_shll_T1_im_cc
,
203 gen_op_shrl_T1_im_cc
,
204 gen_op_sarl_T1_im_cc
,
205 gen_op_rorl_T1_im_cc
,
208 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
215 static GenOpFunc
*gen_shift_T1_T0
[4] = {
222 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
223 gen_op_shll_T1_T0_cc
,
224 gen_op_shrl_T1_T0_cc
,
225 gen_op_sarl_T1_T0_cc
,
226 gen_op_rorl_T1_T0_cc
,
229 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
286 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
325 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
331 static GenOpFunc1
*gen_shift_T0_im_thumb_cc
[3] = {
332 gen_op_shll_T0_im_thumb_cc
,
333 gen_op_shrl_T0_im_thumb_cc
,
334 gen_op_sarl_T0_im_thumb_cc
,
337 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
338 gen_op_shll_T0_im_thumb
,
339 gen_op_shrl_T0_im_thumb
,
340 gen_op_sarl_T0_im_thumb
,
343 static inline void gen_bx(DisasContext
*s
)
345 s
->is_jmp
= DISAS_UPDATE
;
350 #if defined(CONFIG_USER_ONLY)
351 #define gen_ldst(name, s) gen_op_##name##_raw()
353 #define gen_ldst(name, s) do { \
356 gen_op_##name##_user(); \
358 gen_op_##name##_kernel(); \
362 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
367 /* normaly, since we updated PC, we need only to add one insn */
369 val
= (long)s
->pc
+ 2;
371 val
= (long)s
->pc
+ 4;
372 gen_op_movl_TN_im
[t
](val
);
374 gen_op_movl_TN_reg
[t
][reg
]();
378 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
380 gen_movl_TN_reg(s
, reg
, 0);
383 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
385 gen_movl_TN_reg(s
, reg
, 1);
388 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
390 gen_movl_TN_reg(s
, reg
, 2);
393 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
395 gen_op_movl_reg_TN
[t
][reg
]();
397 s
->is_jmp
= DISAS_JUMP
;
401 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
403 gen_movl_reg_TN(s
, reg
, 0);
406 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
408 gen_movl_reg_TN(s
, reg
, 1);
411 /* Force a TB lookup after an instruction that changes the CPU state. */
412 static inline void gen_lookup_tb(DisasContext
*s
)
414 gen_op_movl_T0_im(s
->pc
);
415 gen_movl_reg_T0(s
, 15);
416 s
->is_jmp
= DISAS_UPDATE
;
419 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
421 int val
, rm
, shift
, shiftop
;
423 if (!(insn
& (1 << 25))) {
426 if (!(insn
& (1 << 23)))
429 gen_op_addl_T1_im(val
);
433 shift
= (insn
>> 7) & 0x1f;
434 gen_movl_T2_reg(s
, rm
);
435 shiftop
= (insn
>> 5) & 3;
437 gen_shift_T2_im
[shiftop
](shift
);
438 } else if (shiftop
!= 0) {
439 gen_shift_T2_0
[shiftop
]();
441 if (!(insn
& (1 << 23)))
448 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
453 if (insn
& (1 << 22)) {
455 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
456 if (!(insn
& (1 << 23)))
460 gen_op_addl_T1_im(val
);
464 gen_op_addl_T1_im(extra
);
466 gen_movl_T2_reg(s
, rm
);
467 if (!(insn
& (1 << 23)))
474 #define VFP_OP(name) \
475 static inline void gen_vfp_##name(int dp) \
478 gen_op_vfp_##name##d(); \
480 gen_op_vfp_##name##s(); \
483 #define VFP_OP1(name) \
484 static inline void gen_vfp_##name(int dp, int arg) \
487 gen_op_vfp_##name##d(arg); \
489 gen_op_vfp_##name##s(arg); \
519 static inline void gen_vfp_fconst(int dp
, uint32_t val
)
522 gen_op_vfp_fconstd(val
);
524 gen_op_vfp_fconsts(val
);
527 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
530 gen_ldst(vfp_ldd
, s
);
532 gen_ldst(vfp_lds
, s
);
535 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
538 gen_ldst(vfp_std
, s
);
540 gen_ldst(vfp_sts
, s
);
544 vfp_reg_offset (int dp
, int reg
)
547 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
549 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
550 + offsetof(CPU_DoubleU
, l
.upper
);
552 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
553 + offsetof(CPU_DoubleU
, l
.lower
);
557 /* Return the offset of a 32-bit piece of a NEON register.
558 zero is the least significant end of the register. */
560 neon_reg_offset (int reg
, int n
)
564 return vfp_reg_offset(0, sreg
);
567 #define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
568 #define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
570 static inline void gen_mov_F0_vreg(int dp
, int reg
)
573 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp
, reg
));
575 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp
, reg
));
578 static inline void gen_mov_F1_vreg(int dp
, int reg
)
581 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp
, reg
));
583 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp
, reg
));
586 static inline void gen_mov_vreg_F0(int dp
, int reg
)
589 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp
, reg
));
591 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp
, reg
));
594 #define ARM_CP_RW_BIT (1 << 20)
596 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
601 rd
= (insn
>> 16) & 0xf;
602 gen_movl_T1_reg(s
, rd
);
604 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
605 if (insn
& (1 << 24)) {
607 if (insn
& (1 << 23))
608 gen_op_addl_T1_im(offset
);
610 gen_op_addl_T1_im(-offset
);
612 if (insn
& (1 << 21))
613 gen_movl_reg_T1(s
, rd
);
614 } else if (insn
& (1 << 21)) {
616 if (insn
& (1 << 23))
617 gen_op_movl_T0_im(offset
);
619 gen_op_movl_T0_im(- offset
);
621 gen_movl_reg_T0(s
, rd
);
622 } else if (!(insn
& (1 << 23)))
627 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
629 int rd
= (insn
>> 0) & 0xf;
632 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
635 gen_op_iwmmxt_movl_T0_wCx(rd
);
637 gen_op_iwmmxt_movl_T0_T1_wRn(rd
);
639 gen_op_movl_T1_im(mask
);
644 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
645 (ie. an undefined instruction). */
646 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
649 int rdhi
, rdlo
, rd0
, rd1
, i
;
651 if ((insn
& 0x0e000e00) == 0x0c000000) {
652 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
654 rdlo
= (insn
>> 12) & 0xf;
655 rdhi
= (insn
>> 16) & 0xf;
656 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
657 gen_op_iwmmxt_movl_T0_T1_wRn(wrd
);
658 gen_movl_reg_T0(s
, rdlo
);
659 gen_movl_reg_T1(s
, rdhi
);
661 gen_movl_T0_reg(s
, rdlo
);
662 gen_movl_T1_reg(s
, rdhi
);
663 gen_op_iwmmxt_movl_wRn_T0_T1(wrd
);
664 gen_op_iwmmxt_set_mup();
669 wrd
= (insn
>> 12) & 0xf;
670 if (gen_iwmmxt_address(s
, insn
))
672 if (insn
& ARM_CP_RW_BIT
) {
673 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
675 gen_op_iwmmxt_movl_wCx_T0(wrd
);
678 if (insn
& (1 << 22)) /* WLDRD */
679 gen_ldst(iwmmxt_ldq
, s
);
681 gen_ldst(iwmmxt_ldl
, s
);
683 if (insn
& (1 << 22)) /* WLDRH */
684 gen_ldst(iwmmxt_ldw
, s
);
686 gen_ldst(iwmmxt_ldb
, s
);
687 gen_op_iwmmxt_movq_wRn_M0(wrd
);
690 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
691 gen_op_iwmmxt_movl_T0_wCx(wrd
);
694 gen_op_iwmmxt_movq_M0_wRn(wrd
);
696 if (insn
& (1 << 22)) /* WSTRD */
697 gen_ldst(iwmmxt_stq
, s
);
699 gen_ldst(iwmmxt_stl
, s
);
701 if (insn
& (1 << 22)) /* WSTRH */
702 gen_ldst(iwmmxt_ldw
, s
);
704 gen_ldst(iwmmxt_stb
, s
);
710 if ((insn
& 0x0f000000) != 0x0e000000)
713 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
714 case 0x000: /* WOR */
715 wrd
= (insn
>> 12) & 0xf;
716 rd0
= (insn
>> 0) & 0xf;
717 rd1
= (insn
>> 16) & 0xf;
718 gen_op_iwmmxt_movq_M0_wRn(rd0
);
719 gen_op_iwmmxt_orq_M0_wRn(rd1
);
720 gen_op_iwmmxt_setpsr_nz();
721 gen_op_iwmmxt_movq_wRn_M0(wrd
);
722 gen_op_iwmmxt_set_mup();
723 gen_op_iwmmxt_set_cup();
725 case 0x011: /* TMCR */
728 rd
= (insn
>> 12) & 0xf;
729 wrd
= (insn
>> 16) & 0xf;
731 case ARM_IWMMXT_wCID
:
732 case ARM_IWMMXT_wCASF
:
734 case ARM_IWMMXT_wCon
:
735 gen_op_iwmmxt_set_cup();
737 case ARM_IWMMXT_wCSSF
:
738 gen_op_iwmmxt_movl_T0_wCx(wrd
);
739 gen_movl_T1_reg(s
, rd
);
741 gen_op_iwmmxt_movl_wCx_T0(wrd
);
743 case ARM_IWMMXT_wCGR0
:
744 case ARM_IWMMXT_wCGR1
:
745 case ARM_IWMMXT_wCGR2
:
746 case ARM_IWMMXT_wCGR3
:
747 gen_op_iwmmxt_set_cup();
748 gen_movl_reg_T0(s
, rd
);
749 gen_op_iwmmxt_movl_wCx_T0(wrd
);
755 case 0x100: /* WXOR */
756 wrd
= (insn
>> 12) & 0xf;
757 rd0
= (insn
>> 0) & 0xf;
758 rd1
= (insn
>> 16) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0
);
760 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
761 gen_op_iwmmxt_setpsr_nz();
762 gen_op_iwmmxt_movq_wRn_M0(wrd
);
763 gen_op_iwmmxt_set_mup();
764 gen_op_iwmmxt_set_cup();
766 case 0x111: /* TMRC */
769 rd
= (insn
>> 12) & 0xf;
770 wrd
= (insn
>> 16) & 0xf;
771 gen_op_iwmmxt_movl_T0_wCx(wrd
);
772 gen_movl_reg_T0(s
, rd
);
774 case 0x300: /* WANDN */
775 wrd
= (insn
>> 12) & 0xf;
776 rd0
= (insn
>> 0) & 0xf;
777 rd1
= (insn
>> 16) & 0xf;
778 gen_op_iwmmxt_movq_M0_wRn(rd0
);
779 gen_op_iwmmxt_negq_M0();
780 gen_op_iwmmxt_andq_M0_wRn(rd1
);
781 gen_op_iwmmxt_setpsr_nz();
782 gen_op_iwmmxt_movq_wRn_M0(wrd
);
783 gen_op_iwmmxt_set_mup();
784 gen_op_iwmmxt_set_cup();
786 case 0x200: /* WAND */
787 wrd
= (insn
>> 12) & 0xf;
788 rd0
= (insn
>> 0) & 0xf;
789 rd1
= (insn
>> 16) & 0xf;
790 gen_op_iwmmxt_movq_M0_wRn(rd0
);
791 gen_op_iwmmxt_andq_M0_wRn(rd1
);
792 gen_op_iwmmxt_setpsr_nz();
793 gen_op_iwmmxt_movq_wRn_M0(wrd
);
794 gen_op_iwmmxt_set_mup();
795 gen_op_iwmmxt_set_cup();
797 case 0x810: case 0xa10: /* WMADD */
798 wrd
= (insn
>> 12) & 0xf;
799 rd0
= (insn
>> 0) & 0xf;
800 rd1
= (insn
>> 16) & 0xf;
801 gen_op_iwmmxt_movq_M0_wRn(rd0
);
802 if (insn
& (1 << 21))
803 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
805 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
806 gen_op_iwmmxt_movq_wRn_M0(wrd
);
807 gen_op_iwmmxt_set_mup();
809 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
810 wrd
= (insn
>> 12) & 0xf;
811 rd0
= (insn
>> 16) & 0xf;
812 rd1
= (insn
>> 0) & 0xf;
813 gen_op_iwmmxt_movq_M0_wRn(rd0
);
814 switch ((insn
>> 22) & 3) {
816 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
819 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
822 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
827 gen_op_iwmmxt_movq_wRn_M0(wrd
);
828 gen_op_iwmmxt_set_mup();
829 gen_op_iwmmxt_set_cup();
831 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
832 wrd
= (insn
>> 12) & 0xf;
833 rd0
= (insn
>> 16) & 0xf;
834 rd1
= (insn
>> 0) & 0xf;
835 gen_op_iwmmxt_movq_M0_wRn(rd0
);
836 switch ((insn
>> 22) & 3) {
838 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
841 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
844 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
849 gen_op_iwmmxt_movq_wRn_M0(wrd
);
850 gen_op_iwmmxt_set_mup();
851 gen_op_iwmmxt_set_cup();
853 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
854 wrd
= (insn
>> 12) & 0xf;
855 rd0
= (insn
>> 16) & 0xf;
856 rd1
= (insn
>> 0) & 0xf;
857 gen_op_iwmmxt_movq_M0_wRn(rd0
);
858 if (insn
& (1 << 22))
859 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
861 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
862 if (!(insn
& (1 << 20)))
863 gen_op_iwmmxt_addl_M0_wRn(wrd
);
864 gen_op_iwmmxt_movq_wRn_M0(wrd
);
865 gen_op_iwmmxt_set_mup();
867 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
868 wrd
= (insn
>> 12) & 0xf;
869 rd0
= (insn
>> 16) & 0xf;
870 rd1
= (insn
>> 0) & 0xf;
871 gen_op_iwmmxt_movq_M0_wRn(rd0
);
872 if (insn
& (1 << 21))
873 gen_op_iwmmxt_mulsw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
875 gen_op_iwmmxt_muluw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
876 gen_op_iwmmxt_movq_wRn_M0(wrd
);
877 gen_op_iwmmxt_set_mup();
879 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
880 wrd
= (insn
>> 12) & 0xf;
881 rd0
= (insn
>> 16) & 0xf;
882 rd1
= (insn
>> 0) & 0xf;
883 gen_op_iwmmxt_movq_M0_wRn(rd0
);
884 if (insn
& (1 << 21))
885 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
887 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
888 if (!(insn
& (1 << 20))) {
889 if (insn
& (1 << 21))
890 gen_op_iwmmxt_addsq_M0_wRn(wrd
);
892 gen_op_iwmmxt_adduq_M0_wRn(wrd
);
894 gen_op_iwmmxt_movq_wRn_M0(wrd
);
895 gen_op_iwmmxt_set_mup();
897 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
898 wrd
= (insn
>> 12) & 0xf;
899 rd0
= (insn
>> 16) & 0xf;
900 rd1
= (insn
>> 0) & 0xf;
901 gen_op_iwmmxt_movq_M0_wRn(rd0
);
902 switch ((insn
>> 22) & 3) {
904 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
907 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
910 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
915 gen_op_iwmmxt_movq_wRn_M0(wrd
);
916 gen_op_iwmmxt_set_mup();
917 gen_op_iwmmxt_set_cup();
919 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
920 wrd
= (insn
>> 12) & 0xf;
921 rd0
= (insn
>> 16) & 0xf;
922 rd1
= (insn
>> 0) & 0xf;
923 gen_op_iwmmxt_movq_M0_wRn(rd0
);
924 if (insn
& (1 << 22))
925 gen_op_iwmmxt_avgw_M0_wRn(rd1
, (insn
>> 20) & 1);
927 gen_op_iwmmxt_avgb_M0_wRn(rd1
, (insn
>> 20) & 1);
928 gen_op_iwmmxt_movq_wRn_M0(wrd
);
929 gen_op_iwmmxt_set_mup();
930 gen_op_iwmmxt_set_cup();
932 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
933 wrd
= (insn
>> 12) & 0xf;
934 rd0
= (insn
>> 16) & 0xf;
935 rd1
= (insn
>> 0) & 0xf;
936 gen_op_iwmmxt_movq_M0_wRn(rd0
);
937 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
938 gen_op_movl_T1_im(7);
940 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
941 gen_op_iwmmxt_movq_wRn_M0(wrd
);
942 gen_op_iwmmxt_set_mup();
944 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
945 rd
= (insn
>> 12) & 0xf;
946 wrd
= (insn
>> 16) & 0xf;
947 gen_movl_T0_reg(s
, rd
);
948 gen_op_iwmmxt_movq_M0_wRn(wrd
);
949 switch ((insn
>> 6) & 3) {
951 gen_op_movl_T1_im(0xff);
952 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
955 gen_op_movl_T1_im(0xffff);
956 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
959 gen_op_movl_T1_im(0xffffffff);
960 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
965 gen_op_iwmmxt_movq_wRn_M0(wrd
);
966 gen_op_iwmmxt_set_mup();
968 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
969 rd
= (insn
>> 12) & 0xf;
970 wrd
= (insn
>> 16) & 0xf;
973 gen_op_iwmmxt_movq_M0_wRn(wrd
);
974 switch ((insn
>> 22) & 3) {
977 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
979 gen_op_movl_T1_im(0xff);
980 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 7) << 3);
985 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
987 gen_op_movl_T1_im(0xffff);
988 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 3) << 4);
992 gen_op_movl_T1_im(0xffffffff);
993 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 1) << 5);
998 gen_op_movl_reg_TN
[0][rd
]();
1000 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1001 if ((insn
& 0x000ff008) != 0x0003f000)
1003 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1004 switch ((insn
>> 22) & 3) {
1006 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
1009 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
1012 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
1017 gen_op_shll_T1_im(28);
1018 gen_op_movl_T0_T1();
1019 gen_op_movl_cpsr_T0(0xf0000000);
1021 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1022 rd
= (insn
>> 12) & 0xf;
1023 wrd
= (insn
>> 16) & 0xf;
1024 gen_movl_T0_reg(s
, rd
);
1025 switch ((insn
>> 6) & 3) {
1027 gen_op_iwmmxt_bcstb_M0_T0();
1030 gen_op_iwmmxt_bcstw_M0_T0();
1033 gen_op_iwmmxt_bcstl_M0_T0();
1038 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1039 gen_op_iwmmxt_set_mup();
1041 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1042 if ((insn
& 0x000ff00f) != 0x0003f000)
1044 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1045 switch ((insn
>> 22) & 3) {
1047 for (i
= 0; i
< 7; i
++) {
1048 gen_op_shll_T1_im(4);
1049 gen_op_andl_T0_T1();
1053 for (i
= 0; i
< 3; i
++) {
1054 gen_op_shll_T1_im(8);
1055 gen_op_andl_T0_T1();
1059 gen_op_shll_T1_im(16);
1060 gen_op_andl_T0_T1();
1065 gen_op_movl_cpsr_T0(0xf0000000);
1067 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1068 wrd
= (insn
>> 12) & 0xf;
1069 rd0
= (insn
>> 16) & 0xf;
1070 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1071 switch ((insn
>> 22) & 3) {
1073 gen_op_iwmmxt_addcb_M0();
1076 gen_op_iwmmxt_addcw_M0();
1079 gen_op_iwmmxt_addcl_M0();
1084 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1085 gen_op_iwmmxt_set_mup();
1087 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1088 if ((insn
& 0x000ff00f) != 0x0003f000)
1090 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1091 switch ((insn
>> 22) & 3) {
1093 for (i
= 0; i
< 7; i
++) {
1094 gen_op_shll_T1_im(4);
1099 for (i
= 0; i
< 3; i
++) {
1100 gen_op_shll_T1_im(8);
1105 gen_op_shll_T1_im(16);
1111 gen_op_movl_T1_im(0xf0000000);
1112 gen_op_andl_T0_T1();
1113 gen_op_movl_cpsr_T0(0xf0000000);
1115 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1116 rd
= (insn
>> 12) & 0xf;
1117 rd0
= (insn
>> 16) & 0xf;
1118 if ((insn
& 0xf) != 0)
1120 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1121 switch ((insn
>> 22) & 3) {
1123 gen_op_iwmmxt_msbb_T0_M0();
1126 gen_op_iwmmxt_msbw_T0_M0();
1129 gen_op_iwmmxt_msbl_T0_M0();
1134 gen_movl_reg_T0(s
, rd
);
1136 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1137 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1138 wrd
= (insn
>> 12) & 0xf;
1139 rd0
= (insn
>> 16) & 0xf;
1140 rd1
= (insn
>> 0) & 0xf;
1141 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1142 switch ((insn
>> 22) & 3) {
1144 if (insn
& (1 << 21))
1145 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1147 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1150 if (insn
& (1 << 21))
1151 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1153 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1156 if (insn
& (1 << 21))
1157 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1159 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1164 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1165 gen_op_iwmmxt_set_mup();
1166 gen_op_iwmmxt_set_cup();
1168 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1169 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1170 wrd
= (insn
>> 12) & 0xf;
1171 rd0
= (insn
>> 16) & 0xf;
1172 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1173 switch ((insn
>> 22) & 3) {
1175 if (insn
& (1 << 21))
1176 gen_op_iwmmxt_unpacklsb_M0();
1178 gen_op_iwmmxt_unpacklub_M0();
1181 if (insn
& (1 << 21))
1182 gen_op_iwmmxt_unpacklsw_M0();
1184 gen_op_iwmmxt_unpackluw_M0();
1187 if (insn
& (1 << 21))
1188 gen_op_iwmmxt_unpacklsl_M0();
1190 gen_op_iwmmxt_unpacklul_M0();
1195 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1196 gen_op_iwmmxt_set_mup();
1197 gen_op_iwmmxt_set_cup();
1199 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1200 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1201 wrd
= (insn
>> 12) & 0xf;
1202 rd0
= (insn
>> 16) & 0xf;
1203 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1204 switch ((insn
>> 22) & 3) {
1206 if (insn
& (1 << 21))
1207 gen_op_iwmmxt_unpackhsb_M0();
1209 gen_op_iwmmxt_unpackhub_M0();
1212 if (insn
& (1 << 21))
1213 gen_op_iwmmxt_unpackhsw_M0();
1215 gen_op_iwmmxt_unpackhuw_M0();
1218 if (insn
& (1 << 21))
1219 gen_op_iwmmxt_unpackhsl_M0();
1221 gen_op_iwmmxt_unpackhul_M0();
1226 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1227 gen_op_iwmmxt_set_mup();
1228 gen_op_iwmmxt_set_cup();
1230 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1231 case 0x214: case 0x614: case 0xa14: case 0xe14:
1232 wrd
= (insn
>> 12) & 0xf;
1233 rd0
= (insn
>> 16) & 0xf;
1234 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1235 if (gen_iwmmxt_shift(insn
, 0xff))
1237 switch ((insn
>> 22) & 3) {
1241 gen_op_iwmmxt_srlw_M0_T0();
1244 gen_op_iwmmxt_srll_M0_T0();
1247 gen_op_iwmmxt_srlq_M0_T0();
1250 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1251 gen_op_iwmmxt_set_mup();
1252 gen_op_iwmmxt_set_cup();
1254 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1255 case 0x014: case 0x414: case 0x814: case 0xc14:
1256 wrd
= (insn
>> 12) & 0xf;
1257 rd0
= (insn
>> 16) & 0xf;
1258 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1259 if (gen_iwmmxt_shift(insn
, 0xff))
1261 switch ((insn
>> 22) & 3) {
1265 gen_op_iwmmxt_sraw_M0_T0();
1268 gen_op_iwmmxt_sral_M0_T0();
1271 gen_op_iwmmxt_sraq_M0_T0();
1274 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1275 gen_op_iwmmxt_set_mup();
1276 gen_op_iwmmxt_set_cup();
1278 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1279 case 0x114: case 0x514: case 0x914: case 0xd14:
1280 wrd
= (insn
>> 12) & 0xf;
1281 rd0
= (insn
>> 16) & 0xf;
1282 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1283 if (gen_iwmmxt_shift(insn
, 0xff))
1285 switch ((insn
>> 22) & 3) {
1289 gen_op_iwmmxt_sllw_M0_T0();
1292 gen_op_iwmmxt_slll_M0_T0();
1295 gen_op_iwmmxt_sllq_M0_T0();
1298 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1299 gen_op_iwmmxt_set_mup();
1300 gen_op_iwmmxt_set_cup();
1302 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1303 case 0x314: case 0x714: case 0xb14: case 0xf14:
1304 wrd
= (insn
>> 12) & 0xf;
1305 rd0
= (insn
>> 16) & 0xf;
1306 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1307 switch ((insn
>> 22) & 3) {
1311 if (gen_iwmmxt_shift(insn
, 0xf))
1313 gen_op_iwmmxt_rorw_M0_T0();
1316 if (gen_iwmmxt_shift(insn
, 0x1f))
1318 gen_op_iwmmxt_rorl_M0_T0();
1321 if (gen_iwmmxt_shift(insn
, 0x3f))
1323 gen_op_iwmmxt_rorq_M0_T0();
1326 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1327 gen_op_iwmmxt_set_mup();
1328 gen_op_iwmmxt_set_cup();
1330 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1331 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1332 wrd
= (insn
>> 12) & 0xf;
1333 rd0
= (insn
>> 16) & 0xf;
1334 rd1
= (insn
>> 0) & 0xf;
1335 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1336 switch ((insn
>> 22) & 3) {
1338 if (insn
& (1 << 21))
1339 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
1341 gen_op_iwmmxt_minub_M0_wRn(rd1
);
1344 if (insn
& (1 << 21))
1345 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
1347 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
1350 if (insn
& (1 << 21))
1351 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
1353 gen_op_iwmmxt_minul_M0_wRn(rd1
);
1358 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1359 gen_op_iwmmxt_set_mup();
1361 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1362 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1363 wrd
= (insn
>> 12) & 0xf;
1364 rd0
= (insn
>> 16) & 0xf;
1365 rd1
= (insn
>> 0) & 0xf;
1366 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1367 switch ((insn
>> 22) & 3) {
1369 if (insn
& (1 << 21))
1370 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
1372 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
1375 if (insn
& (1 << 21))
1376 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
1378 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
1381 if (insn
& (1 << 21))
1382 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
1384 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
1389 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1390 gen_op_iwmmxt_set_mup();
1392 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1393 case 0x402: case 0x502: case 0x602: case 0x702:
1394 wrd
= (insn
>> 12) & 0xf;
1395 rd0
= (insn
>> 16) & 0xf;
1396 rd1
= (insn
>> 0) & 0xf;
1397 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1398 gen_op_movl_T0_im((insn
>> 20) & 3);
1399 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1400 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1401 gen_op_iwmmxt_set_mup();
1403 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1404 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1405 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1406 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1407 wrd
= (insn
>> 12) & 0xf;
1408 rd0
= (insn
>> 16) & 0xf;
1409 rd1
= (insn
>> 0) & 0xf;
1410 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1411 switch ((insn
>> 20) & 0xf) {
1413 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
1416 gen_op_iwmmxt_subub_M0_wRn(rd1
);
1419 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
1422 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
1425 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
1428 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
1431 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
1434 gen_op_iwmmxt_subul_M0_wRn(rd1
);
1437 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
1442 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1443 gen_op_iwmmxt_set_mup();
1444 gen_op_iwmmxt_set_cup();
1446 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1447 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1448 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1449 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1450 wrd
= (insn
>> 12) & 0xf;
1451 rd0
= (insn
>> 16) & 0xf;
1452 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1453 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
1454 gen_op_iwmmxt_shufh_M0_T0();
1455 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1456 gen_op_iwmmxt_set_mup();
1457 gen_op_iwmmxt_set_cup();
1459 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1460 case 0x418: case 0x518: case 0x618: case 0x718:
1461 case 0x818: case 0x918: case 0xa18: case 0xb18:
1462 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1463 wrd
= (insn
>> 12) & 0xf;
1464 rd0
= (insn
>> 16) & 0xf;
1465 rd1
= (insn
>> 0) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1467 switch ((insn
>> 20) & 0xf) {
1469 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
1472 gen_op_iwmmxt_addub_M0_wRn(rd1
);
1475 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
1478 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
1481 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
1484 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
1487 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
1490 gen_op_iwmmxt_addul_M0_wRn(rd1
);
1493 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
1498 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1499 gen_op_iwmmxt_set_mup();
1500 gen_op_iwmmxt_set_cup();
1502 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1503 case 0x408: case 0x508: case 0x608: case 0x708:
1504 case 0x808: case 0x908: case 0xa08: case 0xb08:
1505 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1506 wrd
= (insn
>> 12) & 0xf;
1507 rd0
= (insn
>> 16) & 0xf;
1508 rd1
= (insn
>> 0) & 0xf;
1509 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1510 if (!(insn
& (1 << 20)))
1512 switch ((insn
>> 22) & 3) {
1516 if (insn
& (1 << 21))
1517 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
1519 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
1522 if (insn
& (1 << 21))
1523 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
1525 gen_op_iwmmxt_packul_M0_wRn(rd1
);
1528 if (insn
& (1 << 21))
1529 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
1531 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
1534 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1535 gen_op_iwmmxt_set_mup();
1536 gen_op_iwmmxt_set_cup();
1538 case 0x201: case 0x203: case 0x205: case 0x207:
1539 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1540 case 0x211: case 0x213: case 0x215: case 0x217:
1541 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1542 wrd
= (insn
>> 5) & 0xf;
1543 rd0
= (insn
>> 12) & 0xf;
1544 rd1
= (insn
>> 0) & 0xf;
1545 if (rd0
== 0xf || rd1
== 0xf)
1547 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1548 switch ((insn
>> 16) & 0xf) {
1549 case 0x0: /* TMIA */
1550 gen_op_movl_TN_reg
[0][rd0
]();
1551 gen_op_movl_TN_reg
[1][rd1
]();
1552 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1554 case 0x8: /* TMIAPH */
1555 gen_op_movl_TN_reg
[0][rd0
]();
1556 gen_op_movl_TN_reg
[1][rd1
]();
1557 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1559 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1560 gen_op_movl_TN_reg
[1][rd0
]();
1561 if (insn
& (1 << 16))
1562 gen_op_shrl_T1_im(16);
1563 gen_op_movl_T0_T1();
1564 gen_op_movl_TN_reg
[1][rd1
]();
1565 if (insn
& (1 << 17))
1566 gen_op_shrl_T1_im(16);
1567 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1572 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1573 gen_op_iwmmxt_set_mup();
1582 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
1583 (ie. an undefined instruction). */
1584 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1586 int acc
, rd0
, rd1
, rdhi
, rdlo
;
1588 if ((insn
& 0x0ff00f10) == 0x0e200010) {
1589 /* Multiply with Internal Accumulate Format */
1590 rd0
= (insn
>> 12) & 0xf;
1592 acc
= (insn
>> 5) & 7;
1597 switch ((insn
>> 16) & 0xf) {
1599 gen_op_movl_TN_reg
[0][rd0
]();
1600 gen_op_movl_TN_reg
[1][rd1
]();
1601 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1603 case 0x8: /* MIAPH */
1604 gen_op_movl_TN_reg
[0][rd0
]();
1605 gen_op_movl_TN_reg
[1][rd1
]();
1606 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1608 case 0xc: /* MIABB */
1609 case 0xd: /* MIABT */
1610 case 0xe: /* MIATB */
1611 case 0xf: /* MIATT */
1612 gen_op_movl_TN_reg
[1][rd0
]();
1613 if (insn
& (1 << 16))
1614 gen_op_shrl_T1_im(16);
1615 gen_op_movl_T0_T1();
1616 gen_op_movl_TN_reg
[1][rd1
]();
1617 if (insn
& (1 << 17))
1618 gen_op_shrl_T1_im(16);
1619 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1625 gen_op_iwmmxt_movq_wRn_M0(acc
);
1629 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
1630 /* Internal Accumulator Access Format */
1631 rdhi
= (insn
>> 16) & 0xf;
1632 rdlo
= (insn
>> 12) & 0xf;
1638 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
1639 gen_op_iwmmxt_movl_T0_T1_wRn(acc
);
1640 gen_op_movl_reg_TN
[0][rdlo
]();
1641 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1642 gen_op_andl_T0_T1();
1643 gen_op_movl_reg_TN
[0][rdhi
]();
1645 gen_op_movl_TN_reg
[0][rdlo
]();
1646 gen_op_movl_TN_reg
[1][rdhi
]();
1647 gen_op_iwmmxt_movl_wRn_T0_T1(acc
);
1655 /* Disassemble system coprocessor instruction. Return nonzero if
1656 instruction is not defined. */
1657 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1659 uint32_t rd
= (insn
>> 12) & 0xf;
1660 uint32_t cp
= (insn
>> 8) & 0xf;
1665 if (insn
& ARM_CP_RW_BIT
) {
1666 if (!env
->cp
[cp
].cp_read
)
1668 gen_op_movl_T0_im((uint32_t) s
->pc
);
1669 gen_op_movl_reg_TN
[0][15]();
1670 gen_op_movl_T0_cp(insn
);
1671 gen_movl_reg_T0(s
, rd
);
1673 if (!env
->cp
[cp
].cp_write
)
1675 gen_op_movl_T0_im((uint32_t) s
->pc
);
1676 gen_op_movl_reg_TN
[0][15]();
1677 gen_movl_T0_reg(s
, rd
);
1678 gen_op_movl_cp_T0(insn
);
1683 static int cp15_user_ok(uint32_t insn
)
1685 int cpn
= (insn
>> 16) & 0xf;
1686 int cpm
= insn
& 0xf;
1687 int op
= ((insn
>> 5) & 7) | ((insn
>> 18) & 0x38);
1689 if (cpn
== 13 && cpm
== 0) {
1691 if (op
== 2 || (op
== 3 && (insn
& ARM_CP_RW_BIT
)))
1695 /* ISB, DSB, DMB. */
1696 if ((cpm
== 5 && op
== 4)
1697 || (cpm
== 10 && (op
== 4 || op
== 5)))
1703 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
1704 instruction is not defined. */
1705 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1709 /* M profile cores use memory mapped registers instead of cp15. */
1710 if (arm_feature(env
, ARM_FEATURE_M
))
1713 if ((insn
& (1 << 25)) == 0) {
1714 if (insn
& (1 << 20)) {
1718 /* mcrr. Used for block cache operations, so implement as no-op. */
1721 if ((insn
& (1 << 4)) == 0) {
1725 if (IS_USER(s
) && !cp15_user_ok(insn
)) {
1728 if ((insn
& 0x0fff0fff) == 0x0e070f90
1729 || (insn
& 0x0fff0fff) == 0x0e070f58) {
1730 /* Wait for interrupt. */
1731 gen_op_movl_T0_im((long)s
->pc
);
1732 gen_op_movl_reg_TN
[0][15]();
1733 s
->is_jmp
= DISAS_WFI
;
1736 rd
= (insn
>> 12) & 0xf;
1737 if (insn
& ARM_CP_RW_BIT
) {
1738 gen_op_movl_T0_cp15(insn
);
1739 /* If the destination register is r15 then sets condition codes. */
1741 gen_movl_reg_T0(s
, rd
);
1743 gen_movl_T0_reg(s
, rd
);
1744 gen_op_movl_cp15_T0(insn
);
1745 /* Normally we would always end the TB here, but Linux
1746 * arch/arm/mach-pxa/sleep.S expects two instructions following
1747 * an MMU enable to execute from cache. Imitate this behaviour. */
1748 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
1749 (insn
& 0x0fff0fff) != 0x0e010f10)
1755 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
1756 #define VFP_SREG(insn, bigbit, smallbit) \
1757 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
1758 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
1759 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
1760 reg = (((insn) >> (bigbit)) & 0x0f) \
1761 | (((insn) >> ((smallbit) - 4)) & 0x10); \
1763 if (insn & (1 << (smallbit))) \
1765 reg = ((insn) >> (bigbit)) & 0x0f; \
1768 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
1769 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
1770 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
1771 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
1772 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
1773 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
1776 vfp_enabled(CPUState
* env
)
1778 return ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) != 0);
1781 /* Disassemble a VFP instruction. Returns nonzero if an error occured
1782 (ie. an undefined instruction). */
1783 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
1785 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
1788 if (!arm_feature(env
, ARM_FEATURE_VFP
))
1791 if (!vfp_enabled(env
)) {
1792 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
1793 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
1795 rn
= (insn
>> 16) & 0xf;
1796 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
1797 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
)
1800 dp
= ((insn
& 0xf00) == 0xb00);
1801 switch ((insn
>> 24) & 0xf) {
1803 if (insn
& (1 << 4)) {
1804 /* single register transfer */
1805 rd
= (insn
>> 12) & 0xf;
1810 VFP_DREG_N(rn
, insn
);
1813 if (insn
& 0x00c00060
1814 && !arm_feature(env
, ARM_FEATURE_NEON
))
1817 pass
= (insn
>> 21) & 1;
1818 if (insn
& (1 << 22)) {
1820 offset
= ((insn
>> 5) & 3) * 8;
1821 } else if (insn
& (1 << 5)) {
1823 offset
= (insn
& (1 << 6)) ? 16 : 0;
1828 if (insn
& ARM_CP_RW_BIT
) {
1832 NEON_GET_REG(T1
, rn
, pass
);
1834 gen_op_shrl_T1_im(offset
);
1835 if (insn
& (1 << 23))
1841 NEON_GET_REG(T1
, rn
, pass
);
1842 if (insn
& (1 << 23)) {
1844 gen_op_shrl_T1_im(16);
1850 gen_op_sarl_T1_im(16);
1857 NEON_GET_REG(T1
, rn
, pass
);
1860 gen_movl_reg_T1(s
, rd
);
1863 gen_movl_T0_reg(s
, rd
);
1864 if (insn
& (1 << 23)) {
1867 gen_op_neon_dup_u8(0);
1868 } else if (size
== 1) {
1869 gen_op_neon_dup_low16();
1871 NEON_SET_REG(T0
, rn
, 0);
1872 NEON_SET_REG(T0
, rn
, 1);
1877 NEON_GET_REG(T2
, rn
, pass
);
1878 gen_op_movl_T1_im(0xff);
1879 gen_op_andl_T0_T1();
1880 gen_op_neon_insert_elt(offset
, ~(0xff << offset
));
1881 NEON_SET_REG(T2
, rn
, pass
);
1884 NEON_GET_REG(T2
, rn
, pass
);
1885 gen_op_movl_T1_im(0xffff);
1886 gen_op_andl_T0_T1();
1887 bank_mask
= offset
? 0xffff : 0xffff0000;
1888 gen_op_neon_insert_elt(offset
, bank_mask
);
1889 NEON_SET_REG(T2
, rn
, pass
);
1892 NEON_SET_REG(T0
, rn
, pass
);
1898 if ((insn
& 0x6f) != 0x00)
1900 rn
= VFP_SREG_N(insn
);
1901 if (insn
& ARM_CP_RW_BIT
) {
1903 if (insn
& (1 << 21)) {
1904 /* system register */
1909 /* VFP2 allows access for FSID from userspace.
1910 VFP3 restricts all id registers to privileged
1913 && arm_feature(env
, ARM_FEATURE_VFP3
))
1915 gen_op_vfp_movl_T0_xreg(rn
);
1920 gen_op_vfp_movl_T0_xreg(rn
);
1922 case ARM_VFP_FPINST
:
1923 case ARM_VFP_FPINST2
:
1924 /* Not present in VFP3. */
1926 || arm_feature(env
, ARM_FEATURE_VFP3
))
1928 gen_op_vfp_movl_T0_xreg(rn
);
1932 gen_op_vfp_movl_T0_fpscr_flags();
1934 gen_op_vfp_movl_T0_fpscr();
1939 || !arm_feature(env
, ARM_FEATURE_VFP3
))
1941 gen_op_vfp_movl_T0_xreg(rn
);
1947 gen_mov_F0_vreg(0, rn
);
1951 /* Set the 4 flag bits in the CPSR. */
1952 gen_op_movl_cpsr_T0(0xf0000000);
1954 gen_movl_reg_T0(s
, rd
);
1957 gen_movl_T0_reg(s
, rd
);
1958 if (insn
& (1 << 21)) {
1960 /* system register */
1965 /* Writes are ignored. */
1968 gen_op_vfp_movl_fpscr_T0();
1974 gen_op_vfp_movl_xreg_T0(rn
);
1977 case ARM_VFP_FPINST
:
1978 case ARM_VFP_FPINST2
:
1979 gen_op_vfp_movl_xreg_T0(rn
);
1986 gen_mov_vreg_F0(0, rn
);
1991 /* data processing */
1992 /* The opcode is in bits 23, 21, 20 and 6. */
1993 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
1997 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1999 /* rn is register number */
2000 VFP_DREG_N(rn
, insn
);
2003 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2004 /* Integer or single precision destination. */
2005 rd
= VFP_SREG_D(insn
);
2007 VFP_DREG_D(rd
, insn
);
2010 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2011 /* Integer source. */
2012 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2014 VFP_DREG_M(rm
, insn
);
2017 rn
= VFP_SREG_N(insn
);
2018 if (op
== 15 && rn
== 15) {
2019 /* Double precision destination. */
2020 VFP_DREG_D(rd
, insn
);
2022 rd
= VFP_SREG_D(insn
);
2024 rm
= VFP_SREG_M(insn
);
2027 veclen
= env
->vfp
.vec_len
;
2028 if (op
== 15 && rn
> 3)
2031 /* Shut up compiler warnings. */
2042 /* Figure out what type of vector operation this is. */
2043 if ((rd
& bank_mask
) == 0) {
2048 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2050 delta_d
= env
->vfp
.vec_stride
+ 1;
2052 if ((rm
& bank_mask
) == 0) {
2053 /* mixed scalar/vector */
2062 /* Load the initial operands. */
2067 /* Integer source */
2068 gen_mov_F0_vreg(0, rm
);
2073 gen_mov_F0_vreg(dp
, rd
);
2074 gen_mov_F1_vreg(dp
, rm
);
2078 /* Compare with zero */
2079 gen_mov_F0_vreg(dp
, rd
);
2086 /* Source and destination the same. */
2087 gen_mov_F0_vreg(dp
, rd
);
2090 /* One source operand. */
2091 gen_mov_F0_vreg(dp
, rm
);
2095 /* Two source operands. */
2096 gen_mov_F0_vreg(dp
, rn
);
2097 gen_mov_F1_vreg(dp
, rm
);
2101 /* Perform the calculation. */
2103 case 0: /* mac: fd + (fn * fm) */
2105 gen_mov_F1_vreg(dp
, rd
);
2108 case 1: /* nmac: fd - (fn * fm) */
2111 gen_mov_F1_vreg(dp
, rd
);
2114 case 2: /* msc: -fd + (fn * fm) */
2116 gen_mov_F1_vreg(dp
, rd
);
2119 case 3: /* nmsc: -fd - (fn * fm) */
2121 gen_mov_F1_vreg(dp
, rd
);
2125 case 4: /* mul: fn * fm */
2128 case 5: /* nmul: -(fn * fm) */
2132 case 6: /* add: fn + fm */
2135 case 7: /* sub: fn - fm */
2138 case 8: /* div: fn / fm */
2141 case 14: /* fconst */
2142 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2145 n
= (insn
<< 12) & 0x80000000;
2146 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
2160 gen_vfp_fconst(dp
, n
);
2162 case 15: /* extension space */
2185 case 11: /* cmpez */
2189 case 15: /* single<->double conversion */
2191 gen_op_vfp_fcvtsd();
2193 gen_op_vfp_fcvtds();
2195 case 16: /* fuito */
2198 case 17: /* fsito */
2201 case 20: /* fshto */
2202 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2204 gen_vfp_shto(dp
, rm
);
2206 case 21: /* fslto */
2207 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2209 gen_vfp_slto(dp
, rm
);
2211 case 22: /* fuhto */
2212 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2214 gen_vfp_uhto(dp
, rm
);
2216 case 23: /* fulto */
2217 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2219 gen_vfp_ulto(dp
, rm
);
2221 case 24: /* ftoui */
2224 case 25: /* ftouiz */
2227 case 26: /* ftosi */
2230 case 27: /* ftosiz */
2233 case 28: /* ftosh */
2234 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2236 gen_vfp_tosh(dp
, rm
);
2238 case 29: /* ftosl */
2239 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2241 gen_vfp_tosl(dp
, rm
);
2243 case 30: /* ftouh */
2244 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2246 gen_vfp_touh(dp
, rm
);
2248 case 31: /* ftoul */
2249 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
2251 gen_vfp_toul(dp
, rm
);
2253 default: /* undefined */
2254 printf ("rn:%d\n", rn
);
2258 default: /* undefined */
2259 printf ("op:%d\n", op
);
2263 /* Write back the result. */
2264 if (op
== 15 && (rn
>= 8 && rn
<= 11))
2265 ; /* Comparison, do nothing. */
2266 else if (op
== 15 && rn
> 17)
2267 /* Integer result. */
2268 gen_mov_vreg_F0(0, rd
);
2269 else if (op
== 15 && rn
== 15)
2271 gen_mov_vreg_F0(!dp
, rd
);
2273 gen_mov_vreg_F0(dp
, rd
);
2275 /* break out of the loop if we have finished */
2279 if (op
== 15 && delta_m
== 0) {
2280 /* single source one-many */
2282 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
2284 gen_mov_vreg_F0(dp
, rd
);
2288 /* Setup the next operands. */
2290 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
2294 /* One source operand. */
2295 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
2297 gen_mov_F0_vreg(dp
, rm
);
2299 /* Two source operands. */
2300 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
2302 gen_mov_F0_vreg(dp
, rn
);
2304 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
2306 gen_mov_F1_vreg(dp
, rm
);
2314 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
2315 /* two-register transfer */
2316 rn
= (insn
>> 16) & 0xf;
2317 rd
= (insn
>> 12) & 0xf;
2319 VFP_DREG_M(rm
, insn
);
2321 rm
= VFP_SREG_M(insn
);
2324 if (insn
& ARM_CP_RW_BIT
) {
2327 gen_mov_F0_vreg(1, rm
);
2329 gen_movl_reg_T0(s
, rd
);
2330 gen_movl_reg_T1(s
, rn
);
2332 gen_mov_F0_vreg(0, rm
);
2334 gen_movl_reg_T0(s
, rn
);
2335 gen_mov_F0_vreg(0, rm
+ 1);
2337 gen_movl_reg_T0(s
, rd
);
2342 gen_movl_T0_reg(s
, rd
);
2343 gen_movl_T1_reg(s
, rn
);
2345 gen_mov_vreg_F0(1, rm
);
2347 gen_movl_T0_reg(s
, rn
);
2349 gen_mov_vreg_F0(0, rm
);
2350 gen_movl_T0_reg(s
, rd
);
2352 gen_mov_vreg_F0(0, rm
+ 1);
2357 rn
= (insn
>> 16) & 0xf;
2359 VFP_DREG_D(rd
, insn
);
2361 rd
= VFP_SREG_D(insn
);
2362 if (s
->thumb
&& rn
== 15) {
2363 gen_op_movl_T1_im(s
->pc
& ~2);
2365 gen_movl_T1_reg(s
, rn
);
2367 if ((insn
& 0x01200000) == 0x01000000) {
2368 /* Single load/store */
2369 offset
= (insn
& 0xff) << 2;
2370 if ((insn
& (1 << 23)) == 0)
2372 gen_op_addl_T1_im(offset
);
2373 if (insn
& (1 << 20)) {
2375 gen_mov_vreg_F0(dp
, rd
);
2377 gen_mov_F0_vreg(dp
, rd
);
2381 /* load/store multiple */
2383 n
= (insn
>> 1) & 0x7f;
2387 if (insn
& (1 << 24)) /* pre-decrement */
2388 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
2394 for (i
= 0; i
< n
; i
++) {
2395 if (insn
& ARM_CP_RW_BIT
) {
2398 gen_mov_vreg_F0(dp
, rd
+ i
);
2401 gen_mov_F0_vreg(dp
, rd
+ i
);
2404 gen_op_addl_T1_im(offset
);
2406 if (insn
& (1 << 21)) {
2408 if (insn
& (1 << 24))
2409 offset
= -offset
* n
;
2410 else if (dp
&& (insn
& 1))
2416 gen_op_addl_T1_im(offset
);
2417 gen_movl_reg_T1(s
, rn
);
2423 /* Should never happen. */
2429 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
2431 TranslationBlock
*tb
;
2434 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
2436 gen_op_goto_tb0(TBPARAM(tb
));
2438 gen_op_goto_tb1(TBPARAM(tb
));
2439 gen_op_movl_T0_im(dest
);
2440 gen_op_movl_r15_T0();
2441 gen_op_movl_T0_im((long)tb
+ n
);
2444 gen_op_movl_T0_im(dest
);
2445 gen_op_movl_r15_T0();
2451 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
2453 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
2454 /* An indirect jump so that we still trigger the debug exception. */
2457 gen_op_movl_T0_im(dest
);
2460 gen_goto_tb(s
, 0, dest
);
2461 s
->is_jmp
= DISAS_TB_JUMP
;
2465 static inline void gen_mulxy(int x
, int y
)
2468 gen_op_sarl_T0_im(16);
2472 gen_op_sarl_T1_im(16);
2478 /* Return the mask of PSR bits set by a MSR instruction. */
2479 static uint32_t msr_mask(CPUState
*env
, DisasContext
*s
, int flags
, int spsr
) {
2483 if (flags
& (1 << 0))
2485 if (flags
& (1 << 1))
2487 if (flags
& (1 << 2))
2489 if (flags
& (1 << 3))
2492 /* Mask out undefined bits. */
2493 mask
&= ~CPSR_RESERVED
;
2494 if (!arm_feature(env
, ARM_FEATURE_V6
))
2495 mask
&= ~(CPSR_E
| CPSR_GE
);
2496 if (!arm_feature(env
, ARM_FEATURE_THUMB2
))
2498 /* Mask out execution state bits. */
2501 /* Mask out privileged bits. */
2507 /* Returns nonzero if access to the PSR is not permitted. */
2508 static int gen_set_psr_T0(DisasContext
*s
, uint32_t mask
, int spsr
)
2511 /* ??? This is also undefined in system mode. */
2514 gen_op_movl_spsr_T0(mask
);
2516 gen_op_movl_cpsr_T0(mask
);
2522 /* Generate an old-style exception return. */
2523 static void gen_exception_return(DisasContext
*s
)
2525 gen_op_movl_reg_TN
[0][15]();
2526 gen_op_movl_T0_spsr();
2527 gen_op_movl_cpsr_T0(0xffffffff);
2528 s
->is_jmp
= DISAS_UPDATE
;
2531 /* Generate a v6 exception return. */
2532 static void gen_rfe(DisasContext
*s
)
2534 gen_op_movl_cpsr_T0(0xffffffff);
2535 gen_op_movl_T0_T2();
2536 gen_op_movl_reg_TN
[0][15]();
2537 s
->is_jmp
= DISAS_UPDATE
;
2541 gen_set_condexec (DisasContext
*s
)
2543 if (s
->condexec_mask
) {
2544 gen_op_set_condexec((s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1));
2548 static void gen_nop_hint(DisasContext
*s
, int val
)
2552 gen_op_movl_T0_im((long)s
->pc
);
2553 gen_op_movl_reg_TN
[0][15]();
2554 s
->is_jmp
= DISAS_WFI
;
2558 /* TODO: Implement SEV and WFE. May help SMP performance. */
2564 /* Neon shift by constant. The actual ops are the same as used for variable
2565 shifts. [OP][U][SIZE] */
2566 static GenOpFunc
*gen_neon_shift_im
[8][2][4] = {
2567 { /* 0 */ /* VSHR */
2570 gen_op_neon_shl_u16
,
2571 gen_op_neon_shl_u32
,
2575 gen_op_neon_shl_s16
,
2576 gen_op_neon_shl_s32
,
2579 }, { /* 1 */ /* VSRA */
2582 gen_op_neon_shl_u16
,
2583 gen_op_neon_shl_u32
,
2587 gen_op_neon_shl_s16
,
2588 gen_op_neon_shl_s32
,
2591 }, { /* 2 */ /* VRSHR */
2593 gen_op_neon_rshl_u8
,
2594 gen_op_neon_rshl_u16
,
2595 gen_op_neon_rshl_u32
,
2596 gen_op_neon_rshl_u64
2598 gen_op_neon_rshl_s8
,
2599 gen_op_neon_rshl_s16
,
2600 gen_op_neon_rshl_s32
,
2601 gen_op_neon_rshl_s64
2603 }, { /* 3 */ /* VRSRA */
2605 gen_op_neon_rshl_u8
,
2606 gen_op_neon_rshl_u16
,
2607 gen_op_neon_rshl_u32
,
2608 gen_op_neon_rshl_u64
2610 gen_op_neon_rshl_s8
,
2611 gen_op_neon_rshl_s16
,
2612 gen_op_neon_rshl_s32
,
2613 gen_op_neon_rshl_s64
2617 NULL
, NULL
, NULL
, NULL
2620 gen_op_neon_shl_u16
,
2621 gen_op_neon_shl_u32
,
2622 gen_op_neon_shl_u64
,
2627 gen_op_neon_shl_u16
,
2628 gen_op_neon_shl_u32
,
2629 gen_op_neon_shl_u64
,
2632 gen_op_neon_shl_u16
,
2633 gen_op_neon_shl_u32
,
2634 gen_op_neon_shl_u64
,
2636 }, { /* 6 */ /* VQSHL */
2638 gen_op_neon_qshl_u8
,
2639 gen_op_neon_qshl_u16
,
2640 gen_op_neon_qshl_u32
,
2641 gen_op_neon_qshl_u64
2643 gen_op_neon_qshl_s8
,
2644 gen_op_neon_qshl_s16
,
2645 gen_op_neon_qshl_s32
,
2646 gen_op_neon_qshl_s64
2648 }, { /* 7 */ /* VQSHLU */
2650 gen_op_neon_qshl_u8
,
2651 gen_op_neon_qshl_u16
,
2652 gen_op_neon_qshl_u32
,
2653 gen_op_neon_qshl_u64
2655 gen_op_neon_qshl_u8
,
2656 gen_op_neon_qshl_u16
,
2657 gen_op_neon_qshl_u32
,
2658 gen_op_neon_qshl_u64
2663 /* [R][U][size - 1] */
2664 static GenOpFunc
*gen_neon_shift_im_narrow
[2][2][3] = {
2667 gen_op_neon_shl_u16
,
2668 gen_op_neon_shl_u32
,
2671 gen_op_neon_shl_s16
,
2672 gen_op_neon_shl_s32
,
2677 gen_op_neon_rshl_u16
,
2678 gen_op_neon_rshl_u32
,
2679 gen_op_neon_rshl_u64
2681 gen_op_neon_rshl_s16
,
2682 gen_op_neon_rshl_s32
,
2683 gen_op_neon_rshl_s64
2689 gen_op_neon_narrow_u32 ()
2694 static GenOpFunc
*gen_neon_narrow
[3] = {
2695 gen_op_neon_narrow_u8
,
2696 gen_op_neon_narrow_u16
,
2697 gen_op_neon_narrow_u32
2700 static GenOpFunc
*gen_neon_narrow_satu
[3] = {
2701 gen_op_neon_narrow_sat_u8
,
2702 gen_op_neon_narrow_sat_u16
,
2703 gen_op_neon_narrow_sat_u32
2706 static GenOpFunc
*gen_neon_narrow_sats
[3] = {
2707 gen_op_neon_narrow_sat_s8
,
2708 gen_op_neon_narrow_sat_s16
,
2709 gen_op_neon_narrow_sat_s32
2712 static inline int gen_neon_add(int size
)
2715 case 0: gen_op_neon_add_u8(); break;
2716 case 1: gen_op_neon_add_u16(); break;
2717 case 2: gen_op_addl_T0_T1(); break;
2723 /* 32-bit pairwise ops end up the same as the elementsise versions. */
2724 #define gen_op_neon_pmax_s32 gen_op_neon_max_s32
2725 #define gen_op_neon_pmax_u32 gen_op_neon_max_u32
2726 #define gen_op_neon_pmin_s32 gen_op_neon_min_s32
2727 #define gen_op_neon_pmin_u32 gen_op_neon_min_u32
2729 #define GEN_NEON_INTEGER_OP(name) do { \
2730 switch ((size << 1) | u) { \
2731 case 0: gen_op_neon_##name##_s8(); break; \
2732 case 1: gen_op_neon_##name##_u8(); break; \
2733 case 2: gen_op_neon_##name##_s16(); break; \
2734 case 3: gen_op_neon_##name##_u16(); break; \
2735 case 4: gen_op_neon_##name##_s32(); break; \
2736 case 5: gen_op_neon_##name##_u32(); break; \
2737 default: return 1; \
2741 gen_neon_movl_scratch_T0(int scratch
)
2745 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
2746 gen_op_neon_setreg_T0(offset
);
2750 gen_neon_movl_scratch_T1(int scratch
)
2754 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
2755 gen_op_neon_setreg_T1(offset
);
2759 gen_neon_movl_T0_scratch(int scratch
)
2763 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
2764 gen_op_neon_getreg_T0(offset
);
2768 gen_neon_movl_T1_scratch(int scratch
)
2772 offset
= offsetof(CPUARMState
, vfp
.scratch
[scratch
]);
2773 gen_op_neon_getreg_T1(offset
);
2776 static inline void gen_op_neon_widen_u32(void)
2778 gen_op_movl_T1_im(0);
2781 static inline void gen_neon_get_scalar(int size
, int reg
)
2784 NEON_GET_REG(T0
, reg
>> 1, reg
& 1);
2786 NEON_GET_REG(T0
, reg
>> 2, (reg
>> 1) & 1);
2788 gen_op_neon_dup_low16();
2790 gen_op_neon_dup_high16();
2794 static void gen_neon_unzip(int reg
, int q
, int tmp
, int size
)
2798 for (n
= 0; n
< q
+ 1; n
+= 2) {
2799 NEON_GET_REG(T0
, reg
, n
);
2800 NEON_GET_REG(T0
, reg
, n
+ n
);
2802 case 0: gen_op_neon_unzip_u8(); break;
2803 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
2804 case 2: /* no-op */; break;
2807 gen_neon_movl_scratch_T0(tmp
+ n
);
2808 gen_neon_movl_scratch_T1(tmp
+ n
+ 1);
2816 } neon_ls_element_type
[11] = {
2830 /* Translate a NEON load/store element instruction. Return nonzero if the
2831 instruction is invalid. */
2832 static int disas_neon_ls_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
2847 if (!vfp_enabled(env
))
2849 VFP_DREG_D(rd
, insn
);
2850 rn
= (insn
>> 16) & 0xf;
2852 load
= (insn
& (1 << 21)) != 0;
2853 if ((insn
& (1 << 23)) == 0) {
2854 /* Load store all elements. */
2855 op
= (insn
>> 8) & 0xf;
2856 size
= (insn
>> 6) & 3;
2857 if (op
> 10 || size
== 3)
2859 nregs
= neon_ls_element_type
[op
].nregs
;
2860 interleave
= neon_ls_element_type
[op
].interleave
;
2861 gen_movl_T1_reg(s
, rn
);
2862 stride
= (1 << size
) * interleave
;
2863 for (reg
= 0; reg
< nregs
; reg
++) {
2864 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
2865 gen_movl_T1_reg(s
, rn
);
2866 gen_op_addl_T1_im((1 << size
) * reg
);
2867 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
2868 gen_movl_T1_reg(s
, rn
);
2869 gen_op_addl_T1_im(1 << size
);
2871 for (pass
= 0; pass
< 2; pass
++) {
2875 NEON_SET_REG(T0
, rd
, pass
);
2877 NEON_GET_REG(T0
, rd
, pass
);
2880 gen_op_addl_T1_im(stride
);
2881 } else if (size
== 1) {
2884 gen_op_addl_T1_im(stride
);
2885 gen_op_movl_T2_T0();
2887 gen_op_addl_T1_im(stride
);
2888 gen_op_neon_insert_elt(16, 0xffff);
2889 NEON_SET_REG(T2
, rd
, pass
);
2891 NEON_GET_REG(T2
, rd
, pass
);
2892 gen_op_movl_T0_T2();
2894 gen_op_addl_T1_im(stride
);
2895 gen_op_neon_extract_elt(16, 0xffff0000);
2897 gen_op_addl_T1_im(stride
);
2899 } else /* size == 0 */ {
2902 for (n
= 0; n
< 4; n
++) {
2904 gen_op_addl_T1_im(stride
);
2906 gen_op_movl_T2_T0();
2908 gen_op_neon_insert_elt(n
* 8, ~mask
);
2912 NEON_SET_REG(T2
, rd
, pass
);
2914 NEON_GET_REG(T2
, rd
, pass
);
2916 for (n
= 0; n
< 4; n
++) {
2918 gen_op_movl_T0_T2();
2920 gen_op_neon_extract_elt(n
* 8, mask
);
2923 gen_op_addl_T1_im(stride
);
2929 rd
+= neon_ls_element_type
[op
].spacing
;
2933 size
= (insn
>> 10) & 3;
2935 /* Load single element to all lanes. */
2938 size
= (insn
>> 6) & 3;
2939 nregs
= ((insn
>> 8) & 3) + 1;
2940 stride
= (insn
& (1 << 5)) ? 2 : 1;
2941 gen_movl_T1_reg(s
, rn
);
2942 for (reg
= 0; reg
< nregs
; reg
++) {
2946 gen_op_neon_dup_u8(0);
2950 gen_op_neon_dup_low16();
2958 gen_op_addl_T1_im(1 << size
);
2959 NEON_SET_REG(T0
, rd
, 0);
2960 NEON_SET_REG(T0
, rd
, 1);
2963 stride
= (1 << size
) * nregs
;
2965 /* Single element. */
2966 pass
= (insn
>> 7) & 1;
2969 shift
= ((insn
>> 5) & 3) * 8;
2970 mask
= 0xff << shift
;
2974 shift
= ((insn
>> 6) & 1) * 16;
2975 mask
= shift
? 0xffff0000 : 0xffff;
2976 stride
= (insn
& (1 << 5)) ? 2 : 1;
2981 stride
= (insn
& (1 << 6)) ? 2 : 1;
2986 nregs
= ((insn
>> 8) & 3) + 1;
2987 gen_movl_T1_reg(s
, rn
);
2988 for (reg
= 0; reg
< nregs
; reg
++) {
2991 NEON_GET_REG(T2
, rd
, pass
);
3002 NEON_SET_REG(T0
, rd
, pass
);
3006 gen_op_neon_insert_elt(shift
, ~mask
);
3007 NEON_SET_REG(T0
, rd
, pass
);
3009 } else { /* Store */
3011 NEON_GET_REG(T0
, rd
, pass
);
3013 NEON_GET_REG(T2
, rd
, pass
);
3014 gen_op_neon_extract_elt(shift
, mask
);
3029 gen_op_addl_T1_im(1 << size
);
3031 stride
= nregs
* (1 << size
);
3035 gen_movl_T1_reg(s
, rn
);
3037 gen_op_addl_T1_im(stride
);
3039 gen_movl_T2_reg(s
, rm
);
3040 gen_op_addl_T1_T2();
3042 gen_movl_reg_T1(s
, rn
);
3047 /* Translate a NEON data processing instruction. Return nonzero if the
3048 instruction is invalid.
3049 In general we process vectors in 32-bit chunks. This means we can reuse
3050 some of the scalar ops, and hopefully the code generated for 32-bit
3051 hosts won't be too awful. The downside is that the few 64-bit operations
3052 (mainly shifts) get complicated. */
3054 static int disas_neon_data_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
3068 if (!vfp_enabled(env
))
3070 q
= (insn
& (1 << 6)) != 0;
3071 u
= (insn
>> 24) & 1;
3072 VFP_DREG_D(rd
, insn
);
3073 VFP_DREG_N(rn
, insn
);
3074 VFP_DREG_M(rm
, insn
);
3075 size
= (insn
>> 20) & 3;
3076 if ((insn
& (1 << 23)) == 0) {
3077 /* Three register same length. */
3078 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
3079 if (size
== 3 && (op
== 1 || op
== 5 || op
== 16)) {
3080 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
3081 NEON_GET_REG(T0
, rm
, pass
* 2);
3082 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
3083 gen_neon_movl_scratch_T0(0);
3084 gen_neon_movl_scratch_T1(1);
3085 NEON_GET_REG(T0
, rn
, pass
* 2);
3086 NEON_GET_REG(T1
, rn
, pass
* 2 + 1);
3090 gen_op_neon_addl_saturate_u64();
3092 gen_op_neon_addl_saturate_s64();
3097 gen_op_neon_subl_saturate_u64();
3099 gen_op_neon_subl_saturate_s64();
3104 gen_op_neon_subl_u64();
3106 gen_op_neon_addl_u64();
3112 NEON_SET_REG(T0
, rd
, pass
* 2);
3113 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
3120 case 10: /* VRSHL */
3121 case 11: /* VQSHL */
3122 /* Shift operations have Rn and Rm reversed. */
3131 case 20: /* VPMAX */
3132 case 21: /* VPMIN */
3133 case 23: /* VPADD */
3136 case 26: /* VPADD (float) */
3137 pairwise
= (u
&& size
< 2);
3139 case 30: /* VPMIN/VPMAX (float) */
3146 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
3155 NEON_GET_REG(T0
, rn
, n
);
3156 NEON_GET_REG(T1
, rn
, n
+ 1);
3158 NEON_GET_REG(T0
, rm
, n
);
3159 NEON_GET_REG(T1
, rm
, n
+ 1);
3163 NEON_GET_REG(T0
, rn
, pass
);
3164 NEON_GET_REG(T1
, rm
, pass
);
3168 GEN_NEON_INTEGER_OP(hadd
);
3171 switch (size
<< 1| u
) {
3172 case 0: gen_op_neon_qadd_s8(); break;
3173 case 1: gen_op_neon_qadd_u8(); break;
3174 case 2: gen_op_neon_qadd_s16(); break;
3175 case 3: gen_op_neon_qadd_u16(); break;
3176 case 4: gen_op_addl_T0_T1_saturate(); break;
3177 case 5: gen_op_addl_T0_T1_usaturate(); break;
3181 case 2: /* VRHADD */
3182 GEN_NEON_INTEGER_OP(rhadd
);
3184 case 3: /* Logic ops. */
3185 switch ((u
<< 2) | size
) {
3187 gen_op_andl_T0_T1();
3190 gen_op_bicl_T0_T1();
3200 gen_op_xorl_T0_T1();
3203 NEON_GET_REG(T2
, rd
, pass
);
3207 NEON_GET_REG(T2
, rd
, pass
);
3211 NEON_GET_REG(T2
, rd
, pass
);
3217 GEN_NEON_INTEGER_OP(hsub
);
3220 switch ((size
<< 1) | u
) {
3221 case 0: gen_op_neon_qsub_s8(); break;
3222 case 1: gen_op_neon_qsub_u8(); break;
3223 case 2: gen_op_neon_qsub_s16(); break;
3224 case 3: gen_op_neon_qsub_u16(); break;
3225 case 4: gen_op_subl_T0_T1_saturate(); break;
3226 case 5: gen_op_subl_T0_T1_usaturate(); break;
3231 GEN_NEON_INTEGER_OP(cgt
);
3234 GEN_NEON_INTEGER_OP(cge
);
3237 switch ((size
<< 1) | u
) {
3238 case 0: gen_op_neon_shl_s8(); break;
3239 case 1: gen_op_neon_shl_u8(); break;
3240 case 2: gen_op_neon_shl_s16(); break;
3241 case 3: gen_op_neon_shl_u16(); break;
3242 case 4: gen_op_neon_shl_s32(); break;
3243 case 5: gen_op_neon_shl_u32(); break;
3245 /* ??? Implementing these is tricky because the vector ops work
3246 on 32-bit pieces. */
3247 case 6: gen_op_neon_shl_s64(); break;
3248 case 7: gen_op_neon_shl_u64(); break;
3250 case 6: case 7: cpu_abort(env
, "VSHL.64 not implemented");
3255 switch ((size
<< 1) | u
) {
3256 case 0: gen_op_neon_qshl_s8(); break;
3257 case 1: gen_op_neon_qshl_u8(); break;
3258 case 2: gen_op_neon_qshl_s16(); break;
3259 case 3: gen_op_neon_qshl_u16(); break;
3260 case 4: gen_op_neon_qshl_s32(); break;
3261 case 5: gen_op_neon_qshl_u32(); break;
3263 /* ??? Implementing these is tricky because the vector ops work
3264 on 32-bit pieces. */
3265 case 6: gen_op_neon_qshl_s64(); break;
3266 case 7: gen_op_neon_qshl_u64(); break;
3268 case 6: case 7: cpu_abort(env
, "VQSHL.64 not implemented");
3272 case 10: /* VRSHL */
3273 switch ((size
<< 1) | u
) {
3274 case 0: gen_op_neon_rshl_s8(); break;
3275 case 1: gen_op_neon_rshl_u8(); break;
3276 case 2: gen_op_neon_rshl_s16(); break;
3277 case 3: gen_op_neon_rshl_u16(); break;
3278 case 4: gen_op_neon_rshl_s32(); break;
3279 case 5: gen_op_neon_rshl_u32(); break;
3281 /* ??? Implementing these is tricky because the vector ops work
3282 on 32-bit pieces. */
3283 case 6: gen_op_neon_rshl_s64(); break;
3284 case 7: gen_op_neon_rshl_u64(); break;
3286 case 6: case 7: cpu_abort(env
, "VRSHL.64 not implemented");
3290 case 11: /* VQRSHL */
3291 switch ((size
<< 1) | u
) {
3292 case 0: gen_op_neon_qrshl_s8(); break;
3293 case 1: gen_op_neon_qrshl_u8(); break;
3294 case 2: gen_op_neon_qrshl_s16(); break;
3295 case 3: gen_op_neon_qrshl_u16(); break;
3296 case 4: gen_op_neon_qrshl_s32(); break;
3297 case 5: gen_op_neon_qrshl_u32(); break;
3299 /* ??? Implementing these is tricky because the vector ops work
3300 on 32-bit pieces. */
3301 case 6: gen_op_neon_qrshl_s64(); break;
3302 case 7: gen_op_neon_qrshl_u64(); break;
3304 case 6: case 7: cpu_abort(env
, "VQRSHL.64 not implemented");
3309 GEN_NEON_INTEGER_OP(max
);
3312 GEN_NEON_INTEGER_OP(min
);
3315 GEN_NEON_INTEGER_OP(abd
);
3318 GEN_NEON_INTEGER_OP(abd
);
3319 NEON_GET_REG(T1
, rd
, pass
);
3323 if (!u
) { /* VADD */
3324 if (gen_neon_add(size
))
3328 case 0: gen_op_neon_sub_u8(); break;
3329 case 1: gen_op_neon_sub_u16(); break;
3330 case 2: gen_op_subl_T0_T1(); break;
3336 if (!u
) { /* VTST */
3338 case 0: gen_op_neon_tst_u8(); break;
3339 case 1: gen_op_neon_tst_u16(); break;
3340 case 2: gen_op_neon_tst_u32(); break;
3345 case 0: gen_op_neon_ceq_u8(); break;
3346 case 1: gen_op_neon_ceq_u16(); break;
3347 case 2: gen_op_neon_ceq_u32(); break;
3352 case 18: /* Multiply. */
3354 case 0: gen_op_neon_mul_u8(); break;
3355 case 1: gen_op_neon_mul_u16(); break;
3356 case 2: gen_op_mul_T0_T1(); break;
3359 NEON_GET_REG(T1
, rd
, pass
);
3362 case 0: gen_op_neon_rsb_u8(); break;
3363 case 1: gen_op_neon_rsb_u16(); break;
3364 case 2: gen_op_rsbl_T0_T1(); break;
3372 if (u
) { /* polynomial */
3373 gen_op_neon_mul_p8();
3374 } else { /* Integer */
3376 case 0: gen_op_neon_mul_u8(); break;
3377 case 1: gen_op_neon_mul_u16(); break;
3378 case 2: gen_op_mul_T0_T1(); break;
3383 case 20: /* VPMAX */
3384 GEN_NEON_INTEGER_OP(pmax
);
3386 case 21: /* VPMIN */
3387 GEN_NEON_INTEGER_OP(pmin
);
3389 case 22: /* Hultiply high. */
3390 if (!u
) { /* VQDMULH */
3392 case 1: gen_op_neon_qdmulh_s16(); break;
3393 case 2: gen_op_neon_qdmulh_s32(); break;
3396 } else { /* VQRDHMUL */
3398 case 1: gen_op_neon_qrdmulh_s16(); break;
3399 case 2: gen_op_neon_qrdmulh_s32(); break;
3404 case 23: /* VPADD */
3408 case 0: gen_op_neon_padd_u8(); break;
3409 case 1: gen_op_neon_padd_u16(); break;
3410 case 2: gen_op_addl_T0_T1(); break;
3414 case 26: /* Floating point arithnetic. */
3415 switch ((u
<< 2) | size
) {
3417 gen_op_neon_add_f32();
3420 gen_op_neon_sub_f32();
3423 gen_op_neon_add_f32();
3426 gen_op_neon_abd_f32();
3432 case 27: /* Float multiply. */
3433 gen_op_neon_mul_f32();
3435 NEON_GET_REG(T1
, rd
, pass
);
3437 gen_op_neon_add_f32();
3439 gen_op_neon_rsb_f32();
3443 case 28: /* Float compare. */
3445 gen_op_neon_ceq_f32();
3448 gen_op_neon_cge_f32();
3450 gen_op_neon_cgt_f32();
3453 case 29: /* Float compare absolute. */
3457 gen_op_neon_acge_f32();
3459 gen_op_neon_acgt_f32();
3461 case 30: /* Float min/max. */
3463 gen_op_neon_max_f32();
3465 gen_op_neon_min_f32();
3469 gen_op_neon_recps_f32();
3471 gen_op_neon_rsqrts_f32();
3476 /* Save the result. For elementwise operations we can put it
3477 straight into the destination register. For pairwise operations
3478 we have to be careful to avoid clobbering the source operands. */
3479 if (pairwise
&& rd
== rm
) {
3480 gen_neon_movl_scratch_T0(pass
);
3482 NEON_SET_REG(T0
, rd
, pass
);
3486 if (pairwise
&& rd
== rm
) {
3487 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
3488 gen_neon_movl_T0_scratch(pass
);
3489 NEON_SET_REG(T0
, rd
, pass
);
3492 } else if (insn
& (1 << 4)) {
3493 if ((insn
& 0x00380080) != 0) {
3494 /* Two registers and shift. */
3495 op
= (insn
>> 8) & 0xf;
3496 if (insn
& (1 << 7)) {
3501 while ((insn
& (1 << (size
+ 19))) == 0)
3504 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
3505 /* To avoid excessive dumplication of ops we implement shift
3506 by immediate using the variable shift operations. */
3508 /* Shift by immediate:
3509 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
3510 /* Right shifts are encoded as N - shift, where N is the
3511 element size in bits. */
3513 shift
= shift
- (1 << (size
+ 3));
3523 imm
= (uint8_t) shift
;
3528 imm
= (uint16_t) shift
;
3539 for (pass
= 0; pass
< count
; pass
++) {
3541 /* Operands in T0 and T1. */
3542 gen_op_movl_T1_im(imm
);
3543 NEON_GET_REG(T0
, rm
, pass
);
3545 /* Operands in {T0, T1} and env->vfp.scratch. */
3546 gen_op_movl_T0_im(imm
);
3547 gen_neon_movl_scratch_T0(0);
3548 gen_op_movl_T0_im((int32_t)imm
>> 31);
3549 gen_neon_movl_scratch_T0(1);
3550 NEON_GET_REG(T0
, rm
, pass
* 2);
3551 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
3554 if (gen_neon_shift_im
[op
][u
][size
] == NULL
)
3556 gen_neon_shift_im
[op
][u
][size
]();
3558 if (op
== 1 || op
== 3) {
3561 gen_neon_movl_scratch_T0(0);
3562 gen_neon_movl_scratch_T1(1);
3563 NEON_GET_REG(T0
, rd
, pass
* 2);
3564 NEON_GET_REG(T1
, rd
, pass
* 2 + 1);
3565 gen_op_neon_addl_u64();
3567 NEON_GET_REG(T1
, rd
, pass
);
3570 } else if (op
== 4 || (op
== 5 && u
)) {
3573 cpu_abort(env
, "VS[LR]I.64 not implemented");
3578 imm
= 0xff >> -shift
;
3580 imm
= (uint8_t)(0xff << shift
);
3586 imm
= 0xffff >> -shift
;
3588 imm
= (uint16_t)(0xffff << shift
);
3593 imm
= 0xffffffffu
>> -shift
;
3595 imm
= 0xffffffffu
<< shift
;
3600 NEON_GET_REG(T1
, rd
, pass
);
3601 gen_op_movl_T2_im(imm
);
3605 NEON_SET_REG(T0
, rd
, pass
* 2);
3606 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
3608 NEON_SET_REG(T0
, rd
, pass
);
3611 } else if (op
< 10) {
3612 /* Shift by immedaiate and narrow:
3613 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
3614 shift
= shift
- (1 << (size
+ 3));
3623 imm
= (uint16_t) shift
;
3634 /* Processing MSB first means we need to do less shuffling at
3636 for (pass
= count
- 1; pass
>= 0; pass
--) {
3637 /* Avoid clobbering the second operand before it has been
3646 /* Operands in T0 and T1. */
3647 gen_op_movl_T1_im(imm
);
3648 NEON_GET_REG(T0
, rm
, n
);
3650 /* Operands in {T0, T1} and env->vfp.scratch. */
3651 gen_op_movl_T0_im(imm
);
3652 gen_neon_movl_scratch_T0(0);
3653 gen_op_movl_T0_im((int32_t)imm
>> 31);
3654 gen_neon_movl_scratch_T0(1);
3655 NEON_GET_REG(T0
, rm
, n
* 2);
3656 NEON_GET_REG(T0
, rm
, n
* 2 + 1);
3659 gen_neon_shift_im_narrow
[q
][u
][size
- 1]();
3661 if (size
< 3 && (pass
& 1) == 0) {
3662 gen_neon_movl_scratch_T0(0);
3667 gen_neon_movl_T1_scratch(0);
3669 if (op
== 8 && !u
) {
3670 gen_neon_narrow
[size
- 1]();
3673 gen_neon_narrow_sats
[size
- 2]();
3675 gen_neon_narrow_satu
[size
- 1]();
3678 offset
= neon_reg_offset(rd
, n
);
3680 offset
= neon_reg_offset(rd
, n
>> 1);
3681 gen_op_neon_setreg_T0(offset
);
3684 } else if (op
== 10) {
3688 for (pass
= 0; pass
< 2; pass
++) {
3689 /* Avoid clobbering the input operand. */
3695 NEON_GET_REG(T0
, rm
, n
);
3696 GEN_NEON_INTEGER_OP(widen
);
3698 /* The shift is less than the width of the source
3699 type, so in some cases we can just
3700 shift the whole register. */
3701 if (size
== 1 || (size
== 0 && u
)) {
3702 gen_op_shll_T0_im(shift
);
3703 gen_op_shll_T1_im(shift
);
3706 case 0: gen_op_neon_shll_u16(shift
); break;
3707 case 2: gen_op_neon_shll_u64(shift
); break;
3712 NEON_SET_REG(T0
, rd
, n
* 2);
3713 NEON_SET_REG(T1
, rd
, n
* 2 + 1);
3715 } else if (op
== 15 || op
== 16) {
3716 /* VCVT fixed-point. */
3717 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
3718 gen_op_vfp_getreg_F0s(neon_reg_offset(rm
, pass
));
3721 gen_op_vfp_ultos(shift
);
3723 gen_op_vfp_sltos(shift
);
3726 gen_op_vfp_touls(shift
);
3728 gen_op_vfp_tosls(shift
);
3730 gen_op_vfp_setreg_F0s(neon_reg_offset(rd
, pass
));
3735 } else { /* (insn & 0x00380080) == 0 */
3738 op
= (insn
>> 8) & 0xf;
3739 /* One register and immediate. */
3740 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
3741 invert
= (insn
& (1 << 5)) != 0;
3759 imm
= (imm
<< 8) | (imm
<< 24);
3762 imm
= (imm
< 8) | 0xff;
3765 imm
= (imm
<< 16) | 0xffff;
3768 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
3773 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
3774 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
3780 if (op
!= 14 || !invert
)
3781 gen_op_movl_T1_im(imm
);
3783 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
3784 if (op
& 1 && op
< 12) {
3785 NEON_GET_REG(T0
, rd
, pass
);
3787 /* The immediate value has already been inverted, so
3789 gen_op_andl_T0_T1();
3793 NEON_SET_REG(T0
, rd
, pass
);
3795 if (op
== 14 && invert
) {
3798 for (n
= 0; n
< 4; n
++) {
3799 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
3800 tmp
|= 0xff << (n
* 8);
3802 gen_op_movl_T1_im(tmp
);
3805 NEON_SET_REG(T1
, rd
, pass
);
3809 } else { /* (insn & 0x00800010 == 0x00800010) */
3811 op
= (insn
>> 8) & 0xf;
3812 if ((insn
& (1 << 6)) == 0) {
3813 /* Three registers of different lengths. */
3817 /* prewiden, src1_wide, src2_wide */
3818 static const int neon_3reg_wide
[16][3] = {
3819 {1, 0, 0}, /* VADDL */
3820 {1, 1, 0}, /* VADDW */
3821 {1, 0, 0}, /* VSUBL */
3822 {1, 1, 0}, /* VSUBW */
3823 {0, 1, 1}, /* VADDHN */
3824 {0, 0, 0}, /* VABAL */
3825 {0, 1, 1}, /* VSUBHN */
3826 {0, 0, 0}, /* VABDL */
3827 {0, 0, 0}, /* VMLAL */
3828 {0, 0, 0}, /* VQDMLAL */
3829 {0, 0, 0}, /* VMLSL */
3830 {0, 0, 0}, /* VQDMLSL */
3831 {0, 0, 0}, /* Integer VMULL */
3832 {0, 0, 0}, /* VQDMULL */
3833 {0, 0, 0} /* Polynomial VMULL */
3836 prewiden
= neon_3reg_wide
[op
][0];
3837 src1_wide
= neon_3reg_wide
[op
][1];
3838 src2_wide
= neon_3reg_wide
[op
][2];
3840 /* Avoid overlapping operands. Wide source operands are
3841 always aligned so will never overlap with wide
3842 destinations in problematic ways. */
3844 NEON_GET_REG(T2
, rm
, 1);
3845 } else if (rd
== rn
) {
3846 NEON_GET_REG(T2
, rn
, 1);
3848 for (pass
= 0; pass
< 2; pass
++) {
3849 /* Load the second operand into env->vfp.scratch.
3850 Also widen narrow operands. */
3851 if (pass
== 1 && rd
== rm
) {
3853 gen_op_movl_T0_T2();
3855 gen_op_movl_T1_T2();
3859 NEON_GET_REG(T0
, rm
, pass
* 2);
3860 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
3863 NEON_GET_REG(T0
, rm
, pass
);
3865 NEON_GET_REG(T1
, rm
, pass
);
3869 if (prewiden
&& !src2_wide
) {
3870 GEN_NEON_INTEGER_OP(widen
);
3872 if (prewiden
|| src2_wide
) {
3873 gen_neon_movl_scratch_T0(0);
3874 gen_neon_movl_scratch_T1(1);
3877 /* Load the first operand. */
3878 if (pass
== 1 && rd
== rn
) {
3879 gen_op_movl_T0_T2();
3882 NEON_GET_REG(T0
, rn
, pass
* 2);
3883 NEON_GET_REG(T1
, rn
, pass
* 2 + 1);
3885 NEON_GET_REG(T0
, rn
, pass
);
3888 if (prewiden
&& !src1_wide
) {
3889 GEN_NEON_INTEGER_OP(widen
);
3892 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
3894 case 0: gen_op_neon_addl_u16(); break;
3895 case 1: gen_op_neon_addl_u32(); break;
3896 case 2: gen_op_neon_addl_u64(); break;
3900 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
3902 case 0: gen_op_neon_subl_u16(); break;
3903 case 1: gen_op_neon_subl_u32(); break;
3904 case 2: gen_op_neon_subl_u64(); break;
3908 case 5: case 7: /* VABAL, VABDL */
3909 switch ((size
<< 1) | u
) {
3910 case 0: gen_op_neon_abdl_s16(); break;
3911 case 1: gen_op_neon_abdl_u16(); break;
3912 case 2: gen_op_neon_abdl_s32(); break;
3913 case 3: gen_op_neon_abdl_u32(); break;
3914 case 4: gen_op_neon_abdl_s64(); break;
3915 case 5: gen_op_neon_abdl_u64(); break;
3919 case 8: case 9: case 10: case 11: case 12: case 13:
3920 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
3921 switch ((size
<< 1) | u
) {
3922 case 0: gen_op_neon_mull_s8(); break;
3923 case 1: gen_op_neon_mull_u8(); break;
3924 case 2: gen_op_neon_mull_s16(); break;
3925 case 3: gen_op_neon_mull_u16(); break;
3926 case 4: gen_op_imull_T0_T1(); break;
3927 case 5: gen_op_mull_T0_T1(); break;
3931 case 14: /* Polynomial VMULL */
3932 cpu_abort(env
, "Polynomial VMULL not implemented");
3934 default: /* 15 is RESERVED. */
3937 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
3939 if (op
== 10 || op
== 11) {
3941 case 0: gen_op_neon_negl_u16(); break;
3942 case 1: gen_op_neon_negl_u32(); break;
3943 case 2: gen_op_neon_negl_u64(); break;
3948 gen_neon_movl_scratch_T0(0);
3949 gen_neon_movl_scratch_T1(1);
3952 NEON_GET_REG(T0
, rd
, pass
* 2);
3953 NEON_GET_REG(T1
, rd
, pass
* 2 + 1);
3957 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
3959 case 0: gen_op_neon_addl_u16(); break;
3960 case 1: gen_op_neon_addl_u32(); break;
3961 case 2: gen_op_neon_addl_u64(); break;
3965 case 9: case 11: /* VQDMLAL, VQDMLSL */
3967 case 1: gen_op_neon_addl_saturate_s32(); break;
3968 case 2: gen_op_neon_addl_saturate_s64(); break;
3972 case 13: /* VQDMULL */
3974 case 1: gen_op_neon_addl_saturate_s32(); break;
3975 case 2: gen_op_neon_addl_saturate_s64(); break;
3982 NEON_SET_REG(T0
, rd
, pass
* 2);
3983 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
3984 } else if (op
== 4 || op
== 6) {
3985 /* Narrowing operation. */
3988 case 0: gen_op_neon_narrow_high_u8(); break;
3989 case 1: gen_op_neon_narrow_high_u16(); break;
3990 case 2: gen_op_movl_T0_T1(); break;
3995 case 0: gen_op_neon_narrow_high_round_u8(); break;
3996 case 1: gen_op_neon_narrow_high_round_u16(); break;
3997 case 2: gen_op_neon_narrow_high_round_u32(); break;
4001 NEON_SET_REG(T0
, rd
, pass
);
4003 /* Write back the result. */
4004 NEON_SET_REG(T0
, rd
, pass
* 2);
4005 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
4009 /* Two registers and a scalar. */
4011 case 0: /* Integer VMLA scalar */
4012 case 1: /* Float VMLA scalar */
4013 case 4: /* Integer VMLS scalar */
4014 case 5: /* Floating point VMLS scalar */
4015 case 8: /* Integer VMUL scalar */
4016 case 9: /* Floating point VMUL scalar */
4017 case 12: /* VQDMULH scalar */
4018 case 13: /* VQRDMULH scalar */
4019 gen_neon_get_scalar(size
, rm
);
4020 gen_op_movl_T2_T0();
4021 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
4023 gen_op_movl_T0_T2();
4024 NEON_GET_REG(T1
, rn
, pass
);
4027 gen_op_neon_qdmulh_s16();
4029 gen_op_neon_qdmulh_s32();
4031 } else if (op
== 13) {
4033 gen_op_neon_qrdmulh_s16();
4035 gen_op_neon_qrdmulh_s32();
4037 } else if (op
& 1) {
4038 gen_op_neon_mul_f32();
4041 case 0: gen_op_neon_mul_u8(); break;
4042 case 1: gen_op_neon_mul_u16(); break;
4043 case 2: gen_op_mul_T0_T1(); break;
4049 NEON_GET_REG(T1
, rd
, pass
);
4055 gen_op_neon_add_f32();
4059 case 0: gen_op_neon_rsb_u8(); break;
4060 case 1: gen_op_neon_rsb_u16(); break;
4061 case 2: gen_op_rsbl_T0_T1(); break;
4066 gen_op_neon_rsb_f32();
4072 NEON_SET_REG(T0
, rd
, pass
);
4075 case 2: /* VMLAL sclar */
4076 case 3: /* VQDMLAL scalar */
4077 case 6: /* VMLSL scalar */
4078 case 7: /* VQDMLSL scalar */
4079 case 10: /* VMULL scalar */
4080 case 11: /* VQDMULL scalar */
4082 /* Save overlapping operands before they are
4084 NEON_GET_REG(T0
, rn
, 1);
4085 gen_neon_movl_scratch_T0(2);
4087 gen_neon_get_scalar(size
, rm
);
4088 gen_op_movl_T2_T0();
4089 for (pass
= 0; pass
< 2; pass
++) {
4091 gen_op_movl_T0_T2();
4093 if (pass
!= 0 && rd
== rn
) {
4094 gen_neon_movl_T1_scratch(2);
4096 NEON_GET_REG(T1
, rn
, pass
);
4098 switch ((size
<< 1) | u
) {
4099 case 0: gen_op_neon_mull_s8(); break;
4100 case 1: gen_op_neon_mull_u8(); break;
4101 case 2: gen_op_neon_mull_s16(); break;
4102 case 3: gen_op_neon_mull_u16(); break;
4103 case 4: gen_op_imull_T0_T1(); break;
4104 case 5: gen_op_mull_T0_T1(); break;
4107 if (op
== 6 || op
== 7) {
4109 case 0: gen_op_neon_negl_u16(); break;
4110 case 1: gen_op_neon_negl_u32(); break;
4111 case 2: gen_op_neon_negl_u64(); break;
4115 gen_neon_movl_scratch_T0(0);
4116 gen_neon_movl_scratch_T1(1);
4117 NEON_GET_REG(T0
, rd
, pass
* 2);
4118 NEON_GET_REG(T1
, rd
, pass
* 2 + 1);
4122 case 0: gen_op_neon_addl_u16(); break;
4123 case 1: gen_op_neon_addl_u32(); break;
4124 case 2: gen_op_neon_addl_u64(); break;
4131 gen_op_neon_addl_saturate_s32();
4132 gen_op_neon_addl_saturate_s32();
4135 gen_op_neon_addl_saturate_s64();
4136 gen_op_neon_addl_saturate_s64();
4146 case 1: gen_op_neon_addl_saturate_s32(); break;
4147 case 2: gen_op_neon_addl_saturate_s64(); break;
4154 NEON_SET_REG(T0
, rd
, pass
* 2);
4155 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
4158 default: /* 14 and 15 are RESERVED */
4162 } else { /* size == 3 */
4166 imm
= (insn
>> 8) & 0xf;
4170 NEON_GET_REG(T0
, reg
, n
);
4171 for (pass
= 0; pass
< count
; pass
++) {
4178 NEON_GET_REG(T1
, reg
, n
);
4179 gen_op_neon_extract((insn
<< 3) & 0x1f);
4181 /* ??? This is broken if rd and rm overlap */
4182 NEON_SET_REG(T0
, rd
, pass
);
4184 gen_op_movl_T0_T1();
4186 NEON_GET_REG(T0
, reg
, n
);
4189 } else if ((insn
& (1 << 11)) == 0) {
4190 /* Two register misc. */
4191 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
4192 size
= (insn
>> 18) & 3;
4194 case 0: /* VREV64 */
4197 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4198 NEON_GET_REG(T0
, rm
, pass
* 2);
4199 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
4201 case 0: gen_op_rev_T0(); break;
4202 case 1: gen_op_revh_T0(); break;
4203 case 2: /* no-op */ break;
4206 NEON_SET_REG(T0
, rd
, pass
* 2 + 1);
4208 NEON_SET_REG(T1
, rd
, pass
* 2);
4210 gen_op_movl_T0_T1();
4212 case 0: gen_op_rev_T0(); break;
4213 case 1: gen_op_revh_T0(); break;
4216 NEON_SET_REG(T0
, rd
, pass
* 2);
4220 case 4: case 5: /* VPADDL */
4221 case 12: case 13: /* VPADAL */
4226 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4227 NEON_GET_REG(T0
, rm
, pass
* 2);
4228 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
4230 gen_op_neon_paddl_u32();
4232 gen_op_neon_paddl_s32();
4235 gen_neon_movl_scratch_T0(0);
4236 gen_neon_movl_scratch_T1(1);
4238 NEON_GET_REG(T0
, rd
, pass
* 2);
4239 NEON_GET_REG(T1
, rd
, pass
* 2 + 1);
4240 gen_op_neon_addl_u64();
4242 NEON_SET_REG(T0
, rd
, pass
* 2);
4243 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
4248 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
4249 NEON_GET_REG(T0
, rm
, n
);
4250 NEON_GET_REG(T1
, rd
, n
+ 1);
4251 NEON_SET_REG(T1
, rm
, n
);
4252 NEON_SET_REG(T0
, rd
, n
+ 1);
4260 Rd A3 A2 A1 A0 B2 B0 A2 A0
4261 Rm B3 B2 B1 B0 B3 B1 A3 A1
4265 gen_neon_unzip(rd
, q
, 0, size
);
4266 gen_neon_unzip(rm
, q
, 4, size
);
4268 static int unzip_order_q
[8] =
4269 {0, 2, 4, 6, 1, 3, 5, 7};
4270 for (n
= 0; n
< 8; n
++) {
4271 int reg
= (n
< 4) ? rd
: rm
;
4272 gen_neon_movl_T0_scratch(unzip_order_q
[n
]);
4273 NEON_SET_REG(T0
, reg
, n
% 4);
4276 static int unzip_order
[4] =
4278 for (n
= 0; n
< 4; n
++) {
4279 int reg
= (n
< 2) ? rd
: rm
;
4280 gen_neon_movl_T0_scratch(unzip_order
[n
]);
4281 NEON_SET_REG(T0
, reg
, n
% 2);
4287 Rd A3 A2 A1 A0 B1 A1 B0 A0
4288 Rm B3 B2 B1 B0 B3 A3 B2 A2
4292 count
= (q
? 4 : 2);
4293 for (n
= 0; n
< count
; n
++) {
4294 NEON_GET_REG(T0
, rd
, n
);
4295 NEON_GET_REG(T1
, rd
, n
);
4297 case 0: gen_op_neon_zip_u8(); break;
4298 case 1: gen_op_neon_zip_u16(); break;
4299 case 2: /* no-op */; break;
4302 gen_neon_movl_scratch_T0(n
* 2);
4303 gen_neon_movl_scratch_T1(n
* 2 + 1);
4305 for (n
= 0; n
< count
* 2; n
++) {
4306 int reg
= (n
< count
) ? rd
: rm
;
4307 gen_neon_movl_T0_scratch(n
);
4308 NEON_SET_REG(T0
, reg
, n
% count
);
4311 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4312 for (pass
= 0; pass
< 2; pass
++) {
4318 NEON_GET_REG(T0
, rm
, n
* 2);
4319 NEON_GET_REG(T1
, rm
, n
* 2 + 1);
4320 if (op
== 36 && q
== 0) {
4322 case 0: gen_op_neon_narrow_u8(); break;
4323 case 1: gen_op_neon_narrow_u16(); break;
4324 case 2: /* no-op */ break;
4329 case 0: gen_op_neon_narrow_sat_u8(); break;
4330 case 1: gen_op_neon_narrow_sat_u16(); break;
4331 case 2: gen_op_neon_narrow_sat_u32(); break;
4336 case 0: gen_op_neon_narrow_sat_s8(); break;
4337 case 1: gen_op_neon_narrow_sat_s16(); break;
4338 case 2: gen_op_neon_narrow_sat_s32(); break;
4342 NEON_SET_REG(T0
, rd
, n
);
4345 case 38: /* VSHLL */
4349 NEON_GET_REG(T2
, rm
, 1);
4351 for (pass
= 0; pass
< 2; pass
++) {
4352 if (pass
== 1 && rm
== rd
) {
4353 gen_op_movl_T0_T2();
4355 NEON_GET_REG(T0
, rm
, pass
);
4358 case 0: gen_op_neon_widen_high_u8(); break;
4359 case 1: gen_op_neon_widen_high_u16(); break;
4361 gen_op_movl_T1_T0();
4362 gen_op_movl_T0_im(0);
4366 NEON_SET_REG(T0
, rd
, pass
* 2);
4367 NEON_SET_REG(T1
, rd
, pass
* 2 + 1);
4372 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4373 if (op
== 30 || op
== 31 || op
>= 58) {
4374 gen_op_vfp_getreg_F0s(neon_reg_offset(rm
, pass
));
4376 NEON_GET_REG(T0
, rm
, pass
);
4379 case 1: /* VREV32 */
4381 case 0: gen_op_rev_T0(); break;
4382 case 1: gen_op_revh_T0(); break;
4386 case 2: /* VREV16 */
4391 case 4: case 5: /* VPADDL */
4392 case 12: case 13: /* VPADAL */
4393 switch ((size
<< 1) | (op
& 1)) {
4394 case 0: gen_op_neon_paddl_s8(); break;
4395 case 1: gen_op_neon_paddl_u8(); break;
4396 case 2: gen_op_neon_paddl_s16(); break;
4397 case 3: gen_op_neon_paddl_u16(); break;
4402 NEON_GET_REG(T1
, rd
, pass
);
4404 case 0: gen_op_neon_add_u16(); break;
4405 case 1: gen_op_addl_T0_T1(); break;
4412 case 0: gen_op_neon_cls_s8(); break;
4413 case 1: gen_op_neon_cls_s16(); break;
4414 case 2: gen_op_neon_cls_s32(); break;
4420 case 0: gen_op_neon_clz_u8(); break;
4421 case 1: gen_op_neon_clz_u16(); break;
4422 case 2: gen_op_clz_T0(); break;
4429 gen_op_neon_cnt_u8();
4436 case 14: /* VQABS */
4438 case 0: gen_op_neon_qabs_s8(); break;
4439 case 1: gen_op_neon_qabs_s16(); break;
4440 case 2: gen_op_neon_qabs_s32(); break;
4444 case 15: /* VQNEG */
4446 case 0: gen_op_neon_qneg_s8(); break;
4447 case 1: gen_op_neon_qneg_s16(); break;
4448 case 2: gen_op_neon_qneg_s32(); break;
4452 case 16: case 19: /* VCGT #0, VCLE #0 */
4453 gen_op_movl_T1_im(0);
4455 case 0: gen_op_neon_cgt_s8(); break;
4456 case 1: gen_op_neon_cgt_s16(); break;
4457 case 2: gen_op_neon_cgt_s32(); break;
4463 case 17: case 20: /* VCGE #0, VCLT #0 */
4464 gen_op_movl_T1_im(0);
4466 case 0: gen_op_neon_cge_s8(); break;
4467 case 1: gen_op_neon_cge_s16(); break;
4468 case 2: gen_op_neon_cge_s32(); break;
4474 case 18: /* VCEQ #0 */
4475 gen_op_movl_T1_im(0);
4477 case 0: gen_op_neon_ceq_u8(); break;
4478 case 1: gen_op_neon_ceq_u16(); break;
4479 case 2: gen_op_neon_ceq_u32(); break;
4485 case 0: gen_op_neon_abs_s8(); break;
4486 case 1: gen_op_neon_abs_s16(); break;
4487 case 2: gen_op_neon_abs_s32(); break;
4492 gen_op_movl_T1_im(0);
4494 case 0: gen_op_neon_rsb_u8(); break;
4495 case 1: gen_op_neon_rsb_u16(); break;
4496 case 2: gen_op_rsbl_T0_T1(); break;
4500 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4501 gen_op_movl_T1_im(0);
4502 gen_op_neon_cgt_f32();
4506 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4507 gen_op_movl_T1_im(0);
4508 gen_op_neon_cge_f32();
4512 case 26: /* Float VCEQ #0 */
4513 gen_op_movl_T1_im(0);
4514 gen_op_neon_ceq_f32();
4516 case 30: /* Float VABS */
4519 case 31: /* Float VNEG */
4523 NEON_GET_REG(T1
, rd
, pass
);
4524 NEON_SET_REG(T1
, rm
, pass
);
4527 NEON_GET_REG(T1
, rd
, pass
);
4529 case 0: gen_op_neon_trn_u8(); break;
4530 case 1: gen_op_neon_trn_u16(); break;
4534 NEON_SET_REG(T1
, rm
, pass
);
4536 case 56: /* Integer VRECPE */
4537 gen_op_neon_recpe_u32();
4539 case 57: /* Integer VRSQRTE */
4540 gen_op_neon_rsqrte_u32();
4542 case 58: /* Float VRECPE */
4543 gen_op_neon_recpe_f32();
4545 case 59: /* Float VRSQRTE */
4546 gen_op_neon_rsqrte_f32();
4548 case 60: /* VCVT.F32.S32 */
4549 gen_op_vfp_tosizs();
4551 case 61: /* VCVT.F32.U32 */
4552 gen_op_vfp_touizs();
4554 case 62: /* VCVT.S32.F32 */
4557 case 63: /* VCVT.U32.F32 */
4561 /* Reserved: 21, 29, 39-56 */
4564 if (op
== 30 || op
== 31 || op
>= 58) {
4565 gen_op_vfp_setreg_F0s(neon_reg_offset(rm
, pass
));
4567 NEON_SET_REG(T0
, rd
, pass
);
4572 } else if ((insn
& (1 << 10)) == 0) {
4574 n
= (insn
>> 5) & 0x18;
4575 NEON_GET_REG(T1
, rm
, 0);
4576 if (insn
& (1 << 6)) {
4577 NEON_GET_REG(T0
, rd
, 0);
4579 gen_op_movl_T0_im(0);
4581 gen_op_neon_tbl(rn
, n
);
4582 gen_op_movl_T2_T0();
4583 NEON_GET_REG(T1
, rm
, 1);
4584 if (insn
& (1 << 6)) {
4585 NEON_GET_REG(T0
, rd
, 0);
4587 gen_op_movl_T0_im(0);
4589 gen_op_neon_tbl(rn
, n
);
4590 NEON_SET_REG(T2
, rd
, 0);
4591 NEON_SET_REG(T0
, rd
, 1);
4592 } else if ((insn
& 0x380) == 0) {
4594 if (insn
& (1 << 19)) {
4595 NEON_SET_REG(T0
, rm
, 1);
4597 NEON_SET_REG(T0
, rm
, 0);
4599 if (insn
& (1 << 16)) {
4600 gen_op_neon_dup_u8(((insn
>> 17) & 3) * 8);
4601 } else if (insn
& (1 << 17)) {
4602 if ((insn
>> 18) & 1)
4603 gen_op_neon_dup_high16();
4605 gen_op_neon_dup_low16();
4607 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4608 NEON_SET_REG(T0
, rd
, pass
);
4618 static int disas_coproc_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
4622 cpnum
= (insn
>> 8) & 0xf;
4623 if (arm_feature(env
, ARM_FEATURE_XSCALE
)
4624 && ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << cpnum
)))
4630 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
4631 return disas_iwmmxt_insn(env
, s
, insn
);
4632 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
4633 return disas_dsp_insn(env
, s
, insn
);
4638 return disas_vfp_insn (env
, s
, insn
);
4640 return disas_cp15_insn (env
, s
, insn
);
4642 /* Unknown coprocessor. See if the board has hooked it. */
4643 return disas_cp_insn (env
, s
, insn
);
4647 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
4649 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
4651 insn
= ldl_code(s
->pc
);
4654 /* M variants do not implement ARM mode. */
4659 /* Unconditional instructions. */
4660 if (((insn
>> 25) & 7) == 1) {
4661 /* NEON Data processing. */
4662 if (!arm_feature(env
, ARM_FEATURE_NEON
))
4665 if (disas_neon_data_insn(env
, s
, insn
))
4669 if ((insn
& 0x0f100000) == 0x04000000) {
4670 /* NEON load/store. */
4671 if (!arm_feature(env
, ARM_FEATURE_NEON
))
4674 if (disas_neon_ls_insn(env
, s
, insn
))
4678 if ((insn
& 0x0d70f000) == 0x0550f000)
4680 else if ((insn
& 0x0ffffdff) == 0x01010000) {
4683 if (insn
& (1 << 9)) {
4684 /* BE8 mode not implemented. */
4688 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
4689 switch ((insn
>> 4) & 0xf) {
4698 /* We don't emulate caches so these are a no-op. */
4703 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
4709 op1
= (insn
& 0x1f);
4710 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
4711 gen_movl_T1_reg(s
, 13);
4713 gen_op_movl_T1_r13_banked(op1
);
4715 i
= (insn
>> 23) & 3;
4717 case 0: offset
= -4; break; /* DA */
4718 case 1: offset
= -8; break; /* DB */
4719 case 2: offset
= 0; break; /* IA */
4720 case 3: offset
= 4; break; /* IB */
4724 gen_op_addl_T1_im(offset
);
4725 gen_movl_T0_reg(s
, 14);
4727 gen_op_movl_T0_cpsr();
4728 gen_op_addl_T1_im(4);
4730 if (insn
& (1 << 21)) {
4731 /* Base writeback. */
4733 case 0: offset
= -8; break;
4734 case 1: offset
= -4; break;
4735 case 2: offset
= 4; break;
4736 case 3: offset
= 0; break;
4740 gen_op_addl_T1_im(offset
);
4741 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
4742 gen_movl_reg_T1(s
, 13);
4744 gen_op_movl_r13_T1_banked(op1
);
4747 } else if ((insn
& 0x0e5fffe0) == 0x081d0a00) {
4753 rn
= (insn
>> 16) & 0xf;
4754 gen_movl_T1_reg(s
, rn
);
4755 i
= (insn
>> 23) & 3;
4757 case 0: offset
= 0; break; /* DA */
4758 case 1: offset
= -4; break; /* DB */
4759 case 2: offset
= 4; break; /* IA */
4760 case 3: offset
= 8; break; /* IB */
4764 gen_op_addl_T1_im(offset
);
4765 /* Load CPSR into T2 and PC into T0. */
4767 gen_op_movl_T2_T0();
4768 gen_op_addl_T1_im(-4);
4770 if (insn
& (1 << 21)) {
4771 /* Base writeback. */
4773 case 0: offset
= -4; break;
4774 case 1: offset
= 0; break;
4775 case 2: offset
= 8; break;
4776 case 3: offset
= 4; break;
4780 gen_op_addl_T1_im(offset
);
4781 gen_movl_reg_T1(s
, rn
);
4784 } else if ((insn
& 0x0e000000) == 0x0a000000) {
4785 /* branch link and change to thumb (blx <offset>) */
4788 val
= (uint32_t)s
->pc
;
4789 gen_op_movl_T0_im(val
);
4790 gen_movl_reg_T0(s
, 14);
4791 /* Sign-extend the 24-bit offset */
4792 offset
= (((int32_t)insn
) << 8) >> 8;
4793 /* offset * 4 + bit24 * 2 + (thumb bit) */
4794 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
4795 /* pipeline offset */
4797 gen_op_movl_T0_im(val
);
4800 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
4801 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
4802 /* iWMMXt register transfer. */
4803 if (env
->cp15
.c15_cpar
& (1 << 1))
4804 if (!disas_iwmmxt_insn(env
, s
, insn
))
4807 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
4808 /* Coprocessor double register transfer. */
4809 } else if ((insn
& 0x0f000010) == 0x0e000010) {
4810 /* Additional coprocessor register transfer. */
4811 } else if ((insn
& 0x0ff10010) == 0x01000000) {
4814 /* cps (privileged) */
4818 if (insn
& (1 << 19)) {
4819 if (insn
& (1 << 8))
4821 if (insn
& (1 << 7))
4823 if (insn
& (1 << 6))
4825 if (insn
& (1 << 18))
4828 if (insn
& (1 << 14)) {
4830 val
|= (insn
& 0x1f);
4833 gen_op_movl_T0_im(val
);
4834 gen_set_psr_T0(s
, mask
, 0);
4841 /* if not always execute, we generate a conditional jump to
4843 s
->condlabel
= gen_new_label();
4844 gen_test_cc
[cond
^ 1](s
->condlabel
);
4847 if ((insn
& 0x0f900000) == 0x03000000) {
4848 if ((insn
& (1 << 21)) == 0) {
4850 rd
= (insn
>> 12) & 0xf;
4851 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
4852 if ((insn
& (1 << 22)) == 0) {
4854 gen_op_movl_T0_im(val
);
4857 gen_movl_T0_reg(s
, rd
);
4858 gen_op_movl_T1_im(0xffff);
4859 gen_op_andl_T0_T1();
4860 gen_op_movl_T1_im(val
<< 16);
4863 gen_movl_reg_T0(s
, rd
);
4865 if (((insn
>> 12) & 0xf) != 0xf)
4867 if (((insn
>> 16) & 0xf) == 0) {
4868 gen_nop_hint(s
, insn
& 0xff);
4870 /* CPSR = immediate */
4872 shift
= ((insn
>> 8) & 0xf) * 2;
4874 val
= (val
>> shift
) | (val
<< (32 - shift
));
4875 gen_op_movl_T0_im(val
);
4876 i
= ((insn
& (1 << 22)) != 0);
4877 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
4881 } else if ((insn
& 0x0f900000) == 0x01000000
4882 && (insn
& 0x00000090) != 0x00000090) {
4883 /* miscellaneous instructions */
4884 op1
= (insn
>> 21) & 3;
4885 sh
= (insn
>> 4) & 0xf;
4888 case 0x0: /* move program status register */
4891 gen_movl_T0_reg(s
, rm
);
4892 i
= ((op1
& 2) != 0);
4893 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
4897 rd
= (insn
>> 12) & 0xf;
4901 gen_op_movl_T0_spsr();
4903 gen_op_movl_T0_cpsr();
4905 gen_movl_reg_T0(s
, rd
);
4910 /* branch/exchange thumb (bx). */
4911 gen_movl_T0_reg(s
, rm
);
4913 } else if (op1
== 3) {
4915 rd
= (insn
>> 12) & 0xf;
4916 gen_movl_T0_reg(s
, rm
);
4918 gen_movl_reg_T0(s
, rd
);
4926 /* Trivial implementation equivalent to bx. */
4927 gen_movl_T0_reg(s
, rm
);
4937 /* branch link/exchange thumb (blx) */
4938 val
= (uint32_t)s
->pc
;
4939 gen_op_movl_T1_im(val
);
4940 gen_movl_T0_reg(s
, rm
);
4941 gen_movl_reg_T1(s
, 14);
4944 case 0x5: /* saturating add/subtract */
4945 rd
= (insn
>> 12) & 0xf;
4946 rn
= (insn
>> 16) & 0xf;
4947 gen_movl_T0_reg(s
, rm
);
4948 gen_movl_T1_reg(s
, rn
);
4950 gen_op_double_T1_saturate();
4952 gen_op_subl_T0_T1_saturate();
4954 gen_op_addl_T0_T1_saturate();
4955 gen_movl_reg_T0(s
, rd
);
4958 gen_set_condexec(s
);
4959 gen_op_movl_T0_im((long)s
->pc
- 4);
4960 gen_op_movl_reg_TN
[0][15]();
4962 s
->is_jmp
= DISAS_JUMP
;
4964 case 0x8: /* signed multiply */
4968 rs
= (insn
>> 8) & 0xf;
4969 rn
= (insn
>> 12) & 0xf;
4970 rd
= (insn
>> 16) & 0xf;
4972 /* (32 * 16) >> 16 */
4973 gen_movl_T0_reg(s
, rm
);
4974 gen_movl_T1_reg(s
, rs
);
4976 gen_op_sarl_T1_im(16);
4979 gen_op_imulw_T0_T1();
4980 if ((sh
& 2) == 0) {
4981 gen_movl_T1_reg(s
, rn
);
4982 gen_op_addl_T0_T1_setq();
4984 gen_movl_reg_T0(s
, rd
);
4987 gen_movl_T0_reg(s
, rm
);
4988 gen_movl_T1_reg(s
, rs
);
4989 gen_mulxy(sh
& 2, sh
& 4);
4991 gen_op_signbit_T1_T0();
4992 gen_op_addq_T0_T1(rn
, rd
);
4993 gen_movl_reg_T0(s
, rn
);
4994 gen_movl_reg_T1(s
, rd
);
4997 gen_movl_T1_reg(s
, rn
);
4998 gen_op_addl_T0_T1_setq();
5000 gen_movl_reg_T0(s
, rd
);
5007 } else if (((insn
& 0x0e000000) == 0 &&
5008 (insn
& 0x00000090) != 0x90) ||
5009 ((insn
& 0x0e000000) == (1 << 25))) {
5010 int set_cc
, logic_cc
, shiftop
;
5012 op1
= (insn
>> 21) & 0xf;
5013 set_cc
= (insn
>> 20) & 1;
5014 logic_cc
= table_logic_cc
[op1
] & set_cc
;
5016 /* data processing instruction */
5017 if (insn
& (1 << 25)) {
5018 /* immediate operand */
5020 shift
= ((insn
>> 8) & 0xf) * 2;
5022 val
= (val
>> shift
) | (val
<< (32 - shift
));
5023 gen_op_movl_T1_im(val
);
5024 if (logic_cc
&& shift
)
5029 gen_movl_T1_reg(s
, rm
);
5030 shiftop
= (insn
>> 5) & 3;
5031 if (!(insn
& (1 << 4))) {
5032 shift
= (insn
>> 7) & 0x1f;
5035 gen_shift_T1_im_cc
[shiftop
](shift
);
5037 gen_shift_T1_im
[shiftop
](shift
);
5039 } else if (shiftop
!= 0) {
5041 gen_shift_T1_0_cc
[shiftop
]();
5043 gen_shift_T1_0
[shiftop
]();
5047 rs
= (insn
>> 8) & 0xf;
5048 gen_movl_T0_reg(s
, rs
);
5050 gen_shift_T1_T0_cc
[shiftop
]();
5052 gen_shift_T1_T0
[shiftop
]();
5056 if (op1
!= 0x0f && op1
!= 0x0d) {
5057 rn
= (insn
>> 16) & 0xf;
5058 gen_movl_T0_reg(s
, rn
);
5060 rd
= (insn
>> 12) & 0xf;
5063 gen_op_andl_T0_T1();
5064 gen_movl_reg_T0(s
, rd
);
5066 gen_op_logic_T0_cc();
5069 gen_op_xorl_T0_T1();
5070 gen_movl_reg_T0(s
, rd
);
5072 gen_op_logic_T0_cc();
5075 if (set_cc
&& rd
== 15) {
5076 /* SUBS r15, ... is used for exception return. */
5079 gen_op_subl_T0_T1_cc();
5080 gen_exception_return(s
);
5083 gen_op_subl_T0_T1_cc();
5085 gen_op_subl_T0_T1();
5086 gen_movl_reg_T0(s
, rd
);
5091 gen_op_rsbl_T0_T1_cc();
5093 gen_op_rsbl_T0_T1();
5094 gen_movl_reg_T0(s
, rd
);
5098 gen_op_addl_T0_T1_cc();
5100 gen_op_addl_T0_T1();
5101 gen_movl_reg_T0(s
, rd
);
5105 gen_op_adcl_T0_T1_cc();
5107 gen_op_adcl_T0_T1();
5108 gen_movl_reg_T0(s
, rd
);
5112 gen_op_sbcl_T0_T1_cc();
5114 gen_op_sbcl_T0_T1();
5115 gen_movl_reg_T0(s
, rd
);
5119 gen_op_rscl_T0_T1_cc();
5121 gen_op_rscl_T0_T1();
5122 gen_movl_reg_T0(s
, rd
);
5126 gen_op_andl_T0_T1();
5127 gen_op_logic_T0_cc();
5132 gen_op_xorl_T0_T1();
5133 gen_op_logic_T0_cc();
5138 gen_op_subl_T0_T1_cc();
5143 gen_op_addl_T0_T1_cc();
5148 gen_movl_reg_T0(s
, rd
);
5150 gen_op_logic_T0_cc();
5153 if (logic_cc
&& rd
== 15) {
5154 /* MOVS r15, ... is used for exception return. */
5157 gen_op_movl_T0_T1();
5158 gen_exception_return(s
);
5160 gen_movl_reg_T1(s
, rd
);
5162 gen_op_logic_T1_cc();
5166 gen_op_bicl_T0_T1();
5167 gen_movl_reg_T0(s
, rd
);
5169 gen_op_logic_T0_cc();
5174 gen_movl_reg_T1(s
, rd
);
5176 gen_op_logic_T1_cc();
5180 /* other instructions */
5181 op1
= (insn
>> 24) & 0xf;
5185 /* multiplies, extra load/stores */
5186 sh
= (insn
>> 5) & 3;
5189 rd
= (insn
>> 16) & 0xf;
5190 rn
= (insn
>> 12) & 0xf;
5191 rs
= (insn
>> 8) & 0xf;
5193 op1
= (insn
>> 20) & 0xf;
5195 case 0: case 1: case 2: case 3: case 6:
5197 gen_movl_T0_reg(s
, rs
);
5198 gen_movl_T1_reg(s
, rm
);
5200 if (insn
& (1 << 22)) {
5201 /* Subtract (mls) */
5203 gen_movl_T1_reg(s
, rn
);
5204 gen_op_rsbl_T0_T1();
5205 } else if (insn
& (1 << 21)) {
5207 gen_movl_T1_reg(s
, rn
);
5208 gen_op_addl_T0_T1();
5210 if (insn
& (1 << 20))
5211 gen_op_logic_T0_cc();
5212 gen_movl_reg_T0(s
, rd
);
5216 gen_movl_T0_reg(s
, rs
);
5217 gen_movl_T1_reg(s
, rm
);
5218 if (insn
& (1 << 22))
5219 gen_op_imull_T0_T1();
5221 gen_op_mull_T0_T1();
5222 if (insn
& (1 << 21)) /* mult accumulate */
5223 gen_op_addq_T0_T1(rn
, rd
);
5224 if (!(insn
& (1 << 23))) { /* double accumulate */
5226 gen_op_addq_lo_T0_T1(rn
);
5227 gen_op_addq_lo_T0_T1(rd
);
5229 if (insn
& (1 << 20))
5231 gen_movl_reg_T0(s
, rn
);
5232 gen_movl_reg_T1(s
, rd
);
5236 rn
= (insn
>> 16) & 0xf;
5237 rd
= (insn
>> 12) & 0xf;
5238 if (insn
& (1 << 23)) {
5239 /* load/store exclusive */
5240 gen_movl_T1_reg(s
, rn
);
5241 if (insn
& (1 << 20)) {
5245 gen_movl_T0_reg(s
, rm
);
5248 gen_movl_reg_T0(s
, rd
);
5250 /* SWP instruction */
5253 gen_movl_T0_reg(s
, rm
);
5254 gen_movl_T1_reg(s
, rn
);
5255 if (insn
& (1 << 22)) {
5260 gen_movl_reg_T0(s
, rd
);
5266 /* Misc load/store */
5267 rn
= (insn
>> 16) & 0xf;
5268 rd
= (insn
>> 12) & 0xf;
5269 gen_movl_T1_reg(s
, rn
);
5270 if (insn
& (1 << 24))
5271 gen_add_datah_offset(s
, insn
, 0);
5273 if (insn
& (1 << 20)) {
5288 } else if (sh
& 2) {
5292 gen_movl_T0_reg(s
, rd
);
5294 gen_op_addl_T1_im(4);
5295 gen_movl_T0_reg(s
, rd
+ 1);
5301 gen_movl_reg_T0(s
, rd
);
5302 gen_op_addl_T1_im(4);
5307 address_offset
= -4;
5310 gen_movl_T0_reg(s
, rd
);
5314 /* Perform base writeback before the loaded value to
5315 ensure correct behavior with overlapping index registers.
5316 ldrd with base writeback is is undefined if the
5317 destination and index registers overlap. */
5318 if (!(insn
& (1 << 24))) {
5319 gen_add_datah_offset(s
, insn
, address_offset
);
5320 gen_movl_reg_T1(s
, rn
);
5321 } else if (insn
& (1 << 21)) {
5323 gen_op_addl_T1_im(address_offset
);
5324 gen_movl_reg_T1(s
, rn
);
5327 /* Complete the load. */
5328 gen_movl_reg_T0(s
, rd
);
5337 if (insn
& (1 << 4)) {
5339 /* Armv6 Media instructions. */
5341 rn
= (insn
>> 16) & 0xf;
5342 rd
= (insn
>> 12) & 0xf;
5343 rs
= (insn
>> 8) & 0xf;
5344 switch ((insn
>> 23) & 3) {
5345 case 0: /* Parallel add/subtract. */
5346 op1
= (insn
>> 20) & 7;
5347 gen_movl_T0_reg(s
, rn
);
5348 gen_movl_T1_reg(s
, rm
);
5349 sh
= (insn
>> 5) & 7;
5350 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
5352 gen_arm_parallel_addsub
[op1
][sh
]();
5353 gen_movl_reg_T0(s
, rd
);
5356 if ((insn
& 0x00700020) == 0) {
5358 gen_movl_T0_reg(s
, rn
);
5359 gen_movl_T1_reg(s
, rm
);
5360 shift
= (insn
>> 7) & 0x1f;
5362 gen_op_shll_T1_im(shift
);
5363 if (insn
& (1 << 6))
5364 gen_op_pkhtb_T0_T1();
5366 gen_op_pkhbt_T0_T1();
5367 gen_movl_reg_T0(s
, rd
);
5368 } else if ((insn
& 0x00200020) == 0x00200000) {
5370 gen_movl_T1_reg(s
, rm
);
5371 shift
= (insn
>> 7) & 0x1f;
5372 if (insn
& (1 << 6)) {
5375 gen_op_sarl_T1_im(shift
);
5377 gen_op_shll_T1_im(shift
);
5379 sh
= (insn
>> 16) & 0x1f;
5381 if (insn
& (1 << 22))
5386 gen_movl_T1_reg(s
, rd
);
5387 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
5389 gen_movl_T1_reg(s
, rm
);
5390 sh
= (insn
>> 16) & 0x1f;
5392 if (insn
& (1 << 22))
5393 gen_op_usat16_T1(sh
);
5395 gen_op_ssat16_T1(sh
);
5397 gen_movl_T1_reg(s
, rd
);
5398 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
5400 gen_movl_T0_reg(s
, rn
);
5401 gen_movl_T1_reg(s
, rm
);
5403 gen_movl_reg_T0(s
, rd
);
5404 } else if ((insn
& 0x000003e0) == 0x00000060) {
5405 gen_movl_T1_reg(s
, rm
);
5406 shift
= (insn
>> 10) & 3;
5407 /* ??? In many cases it's not necessary to do a
5408 rotate, a shift is sufficient. */
5410 gen_op_rorl_T1_im(shift
* 8);
5411 op1
= (insn
>> 20) & 7;
5413 case 0: gen_op_sxtb16_T1(); break;
5414 case 2: gen_op_sxtb_T1(); break;
5415 case 3: gen_op_sxth_T1(); break;
5416 case 4: gen_op_uxtb16_T1(); break;
5417 case 6: gen_op_uxtb_T1(); break;
5418 case 7: gen_op_uxth_T1(); break;
5419 default: goto illegal_op
;
5422 gen_movl_T2_reg(s
, rn
);
5423 if ((op1
& 3) == 0) {
5424 gen_op_add16_T1_T2();
5426 gen_op_addl_T1_T2();
5429 gen_movl_reg_T1(s
, rd
);
5430 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
5432 gen_movl_T0_reg(s
, rm
);
5433 if (insn
& (1 << 22)) {
5434 if (insn
& (1 << 7)) {
5441 if (insn
& (1 << 7))
5446 gen_movl_reg_T0(s
, rd
);
5451 case 2: /* Multiplies (Type 3). */
5452 gen_movl_T0_reg(s
, rm
);
5453 gen_movl_T1_reg(s
, rs
);
5454 if (insn
& (1 << 20)) {
5455 /* Signed multiply most significant [accumulate]. */
5456 gen_op_imull_T0_T1();
5457 if (insn
& (1 << 5))
5458 gen_op_roundqd_T0_T1();
5460 gen_op_movl_T0_T1();
5462 gen_movl_T1_reg(s
, rn
);
5463 if (insn
& (1 << 6)) {
5464 gen_op_addl_T0_T1();
5466 gen_op_rsbl_T0_T1();
5469 gen_movl_reg_T0(s
, rd
);
5471 if (insn
& (1 << 5))
5472 gen_op_swap_half_T1();
5473 gen_op_mul_dual_T0_T1();
5474 if (insn
& (1 << 22)) {
5475 if (insn
& (1 << 6)) {
5477 gen_op_addq_T0_T1_dual(rn
, rd
);
5480 gen_op_subq_T0_T1_dual(rn
, rd
);
5483 /* This addition cannot overflow. */
5484 if (insn
& (1 << 6)) {
5486 gen_op_subl_T0_T1();
5489 gen_op_addl_T0_T1();
5493 gen_movl_T1_reg(s
, rn
);
5494 gen_op_addl_T0_T1_setq();
5496 gen_movl_reg_T0(s
, rd
);
5501 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
5503 case 0: /* Unsigned sum of absolute differences. */
5505 gen_movl_T0_reg(s
, rm
);
5506 gen_movl_T1_reg(s
, rs
);
5507 gen_op_usad8_T0_T1();
5509 gen_movl_T1_reg(s
, rn
);
5510 gen_op_addl_T0_T1();
5512 gen_movl_reg_T0(s
, rd
);
5514 case 0x20: case 0x24: case 0x28: case 0x2c:
5515 /* Bitfield insert/clear. */
5517 shift
= (insn
>> 7) & 0x1f;
5518 i
= (insn
>> 16) & 0x1f;
5521 gen_op_movl_T1_im(0);
5523 gen_movl_T1_reg(s
, rm
);
5526 gen_movl_T0_reg(s
, rd
);
5527 gen_op_bfi_T1_T0(shift
, ((1u << i
) - 1) << shift
);
5529 gen_movl_reg_T1(s
, rd
);
5531 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5532 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5533 gen_movl_T1_reg(s
, rm
);
5534 shift
= (insn
>> 7) & 0x1f;
5535 i
= ((insn
>> 16) & 0x1f) + 1;
5540 gen_op_ubfx_T1(shift
, (1u << i
) - 1);
5542 gen_op_sbfx_T1(shift
, i
);
5545 gen_movl_reg_T1(s
, rd
);
5555 /* Check for undefined extension instructions
5556 * per the ARM Bible IE:
5557 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
5559 sh
= (0xf << 20) | (0xf << 4);
5560 if (op1
== 0x7 && ((insn
& sh
) == sh
))
5564 /* load/store byte/word */
5565 rn
= (insn
>> 16) & 0xf;
5566 rd
= (insn
>> 12) & 0xf;
5567 gen_movl_T1_reg(s
, rn
);
5568 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
5569 if (insn
& (1 << 24))
5570 gen_add_data_offset(s
, insn
);
5571 if (insn
& (1 << 20)) {
5574 #if defined(CONFIG_USER_ONLY)
5575 if (insn
& (1 << 22))
5580 if (insn
& (1 << 22)) {
5584 gen_op_ldub_kernel();
5589 gen_op_ldl_kernel();
5594 gen_movl_T0_reg(s
, rd
);
5595 #if defined(CONFIG_USER_ONLY)
5596 if (insn
& (1 << 22))
5601 if (insn
& (1 << 22)) {
5605 gen_op_stb_kernel();
5610 gen_op_stl_kernel();
5614 if (!(insn
& (1 << 24))) {
5615 gen_add_data_offset(s
, insn
);
5616 gen_movl_reg_T1(s
, rn
);
5617 } else if (insn
& (1 << 21))
5618 gen_movl_reg_T1(s
, rn
); {
5620 if (insn
& (1 << 20)) {
5621 /* Complete the load. */
5625 gen_movl_reg_T0(s
, rd
);
5631 int j
, n
, user
, loaded_base
;
5632 /* load/store multiple words */
5633 /* XXX: store correct base if write back */
5635 if (insn
& (1 << 22)) {
5637 goto illegal_op
; /* only usable in supervisor mode */
5639 if ((insn
& (1 << 15)) == 0)
5642 rn
= (insn
>> 16) & 0xf;
5643 gen_movl_T1_reg(s
, rn
);
5645 /* compute total size */
5649 if (insn
& (1 << i
))
5652 /* XXX: test invalid n == 0 case ? */
5653 if (insn
& (1 << 23)) {
5654 if (insn
& (1 << 24)) {
5656 gen_op_addl_T1_im(4);
5658 /* post increment */
5661 if (insn
& (1 << 24)) {
5663 gen_op_addl_T1_im(-(n
* 4));
5665 /* post decrement */
5667 gen_op_addl_T1_im(-((n
- 1) * 4));
5672 if (insn
& (1 << i
)) {
5673 if (insn
& (1 << 20)) {
5679 gen_op_movl_user_T0(i
);
5680 } else if (i
== rn
) {
5681 gen_op_movl_T2_T0();
5684 gen_movl_reg_T0(s
, i
);
5689 /* special case: r15 = PC + 8 */
5690 val
= (long)s
->pc
+ 4;
5691 gen_op_movl_TN_im
[0](val
);
5693 gen_op_movl_T0_user(i
);
5695 gen_movl_T0_reg(s
, i
);
5700 /* no need to add after the last transfer */
5702 gen_op_addl_T1_im(4);
5705 if (insn
& (1 << 21)) {
5707 if (insn
& (1 << 23)) {
5708 if (insn
& (1 << 24)) {
5711 /* post increment */
5712 gen_op_addl_T1_im(4);
5715 if (insn
& (1 << 24)) {
5718 gen_op_addl_T1_im(-((n
- 1) * 4));
5720 /* post decrement */
5721 gen_op_addl_T1_im(-(n
* 4));
5724 gen_movl_reg_T1(s
, rn
);
5727 gen_op_movl_T0_T2();
5728 gen_movl_reg_T0(s
, rn
);
5730 if ((insn
& (1 << 22)) && !user
) {
5731 /* Restore CPSR from SPSR. */
5732 gen_op_movl_T0_spsr();
5733 gen_op_movl_cpsr_T0(0xffffffff);
5734 s
->is_jmp
= DISAS_UPDATE
;
5743 /* branch (and link) */
5744 val
= (int32_t)s
->pc
;
5745 if (insn
& (1 << 24)) {
5746 gen_op_movl_T0_im(val
);
5747 gen_op_movl_reg_TN
[0][14]();
5749 offset
= (((int32_t)insn
<< 8) >> 8);
5750 val
+= (offset
<< 2) + 4;
5758 if (disas_coproc_insn(env
, s
, insn
))
5763 gen_op_movl_T0_im((long)s
->pc
);
5764 gen_op_movl_reg_TN
[0][15]();
5765 s
->is_jmp
= DISAS_SWI
;
5769 gen_set_condexec(s
);
5770 gen_op_movl_T0_im((long)s
->pc
- 4);
5771 gen_op_movl_reg_TN
[0][15]();
5772 gen_op_undef_insn();
5773 s
->is_jmp
= DISAS_JUMP
;
5779 /* Return true if this is a Thumb-2 logical op. */
5781 thumb2_logic_op(int op
)
5786 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
5787 then set condition code flags based on the result of the operation.
5788 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
5789 to the high bit of T1.
5790 Returns zero if the opcode is valid. */
5793 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
)
5800 gen_op_andl_T0_T1();
5804 gen_op_bicl_T0_T1();
5817 gen_op_xorl_T0_T1();
5822 gen_op_addl_T0_T1_cc();
5824 gen_op_addl_T0_T1();
5828 gen_op_adcl_T0_T1_cc();
5830 gen_op_adcl_T0_T1();
5834 gen_op_sbcl_T0_T1_cc();
5836 gen_op_sbcl_T0_T1();
5840 gen_op_subl_T0_T1_cc();
5842 gen_op_subl_T0_T1();
5846 gen_op_rsbl_T0_T1_cc();
5848 gen_op_rsbl_T0_T1();
5850 default: /* 5, 6, 7, 9, 12, 15. */
5854 gen_op_logic_T0_cc();
5861 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
5863 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
5865 uint32_t insn
, imm
, shift
, offset
, addr
;
5866 uint32_t rd
, rn
, rm
, rs
;
5872 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
5873 || arm_feature (env
, ARM_FEATURE_M
))) {
5874 /* Thumb-1 cores may need to treat bl and blx as a pair of
5875 16-bit instructions to get correct prefetch abort behavior. */
5877 if ((insn
& (1 << 12)) == 0) {
5878 /* Second half of blx. */
5879 offset
= ((insn
& 0x7ff) << 1);
5880 gen_movl_T0_reg(s
, 14);
5881 gen_op_movl_T1_im(offset
);
5882 gen_op_addl_T0_T1();
5883 gen_op_movl_T1_im(0xfffffffc);
5884 gen_op_andl_T0_T1();
5886 addr
= (uint32_t)s
->pc
;
5887 gen_op_movl_T1_im(addr
| 1);
5888 gen_movl_reg_T1(s
, 14);
5892 if (insn
& (1 << 11)) {
5893 /* Second half of bl. */
5894 offset
= ((insn
& 0x7ff) << 1) | 1;
5895 gen_movl_T0_reg(s
, 14);
5896 gen_op_movl_T1_im(offset
);
5897 gen_op_addl_T0_T1();
5899 addr
= (uint32_t)s
->pc
;
5900 gen_op_movl_T1_im(addr
| 1);
5901 gen_movl_reg_T1(s
, 14);
5905 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
5906 /* Instruction spans a page boundary. Implement it as two
5907 16-bit instructions in case the second half causes an
5909 offset
= ((int32_t)insn
<< 21) >> 9;
5910 addr
= s
->pc
+ 2 + offset
;
5911 gen_op_movl_T0_im(addr
);
5912 gen_movl_reg_T0(s
, 14);
5915 /* Fall through to 32-bit decode. */
5918 insn
= lduw_code(s
->pc
);
5920 insn
|= (uint32_t)insn_hw1
<< 16;
5922 if ((insn
& 0xf800e800) != 0xf000e800) {
5926 rn
= (insn
>> 16) & 0xf;
5927 rs
= (insn
>> 12) & 0xf;
5928 rd
= (insn
>> 8) & 0xf;
5930 switch ((insn
>> 25) & 0xf) {
5931 case 0: case 1: case 2: case 3:
5932 /* 16-bit instructions. Should never happen. */
5935 if (insn
& (1 << 22)) {
5936 /* Other load/store, table branch. */
5937 if (insn
& 0x01200000) {
5938 /* Load/store doubleword. */
5940 gen_op_movl_T1_im(s
->pc
& ~3);
5942 gen_movl_T1_reg(s
, rn
);
5944 offset
= (insn
& 0xff) * 4;
5945 if ((insn
& (1 << 23)) == 0)
5947 if (insn
& (1 << 24)) {
5948 gen_op_addl_T1_im(offset
);
5951 if (insn
& (1 << 20)) {
5954 gen_movl_reg_T0(s
, rs
);
5955 gen_op_addl_T1_im(4);
5957 gen_movl_reg_T0(s
, rd
);
5960 gen_movl_T0_reg(s
, rs
);
5962 gen_op_addl_T1_im(4);
5963 gen_movl_T0_reg(s
, rd
);
5966 if (insn
& (1 << 21)) {
5967 /* Base writeback. */
5970 gen_op_addl_T1_im(offset
- 4);
5971 gen_movl_reg_T1(s
, rn
);
5973 } else if ((insn
& (1 << 23)) == 0) {
5974 /* Load/store exclusive word. */
5975 gen_movl_T0_reg(s
, rd
);
5976 gen_movl_T1_reg(s
, rn
);
5977 if (insn
& (1 << 20)) {
5982 gen_movl_reg_T0(s
, rd
);
5983 } else if ((insn
& (1 << 6)) == 0) {
5986 gen_op_movl_T1_im(s
->pc
);
5988 gen_movl_T1_reg(s
, rn
);
5990 gen_movl_T2_reg(s
, rm
);
5991 gen_op_addl_T1_T2();
5992 if (insn
& (1 << 4)) {
5994 gen_op_addl_T1_T2();
5999 gen_op_jmp_T0_im(s
->pc
);
6000 s
->is_jmp
= DISAS_JUMP
;
6002 /* Load/store exclusive byte/halfword/doubleword. */
6003 op
= (insn
>> 4) & 0x3;
6004 gen_movl_T1_reg(s
, rn
);
6005 if (insn
& (1 << 20)) {
6015 gen_movl_reg_T1(s
, rd
);
6020 gen_movl_reg_T0(s
, rs
);
6022 gen_movl_T0_reg(s
, rs
);
6031 gen_movl_T2_reg(s
, rd
);
6037 gen_movl_reg_T0(s
, rm
);
6041 /* Load/store multiple, RFE, SRS. */
6042 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
6043 /* Not available in user mode. */
6046 if (insn
& (1 << 20)) {
6048 gen_movl_T1_reg(s
, rn
);
6049 if (insn
& (1 << 24)) {
6050 gen_op_addl_T1_im(4);
6052 gen_op_addl_T1_im(-4);
6054 /* Load CPSR into T2 and PC into T0. */
6056 gen_op_movl_T2_T0();
6057 gen_op_addl_T1_im(-4);
6059 if (insn
& (1 << 21)) {
6060 /* Base writeback. */
6061 if (insn
& (1 << 24))
6062 gen_op_addl_T1_im(8);
6063 gen_movl_reg_T1(s
, rn
);
6069 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
6070 gen_movl_T1_reg(s
, 13);
6072 gen_op_movl_T1_r13_banked(op
);
6074 if ((insn
& (1 << 24)) == 0) {
6075 gen_op_addl_T1_im(-8);
6077 gen_movl_T0_reg(s
, 14);
6079 gen_op_movl_T0_cpsr();
6080 gen_op_addl_T1_im(4);
6082 if (insn
& (1 << 21)) {
6083 if ((insn
& (1 << 24)) == 0) {
6084 gen_op_addl_T1_im(-4);
6086 gen_op_addl_T1_im(4);
6088 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
6089 gen_movl_reg_T1(s
, 13);
6091 gen_op_movl_r13_T1_banked(op
);
6097 /* Load/store multiple. */
6098 gen_movl_T1_reg(s
, rn
);
6100 for (i
= 0; i
< 16; i
++) {
6101 if (insn
& (1 << i
))
6104 if (insn
& (1 << 24)) {
6105 gen_op_addl_T1_im(-offset
);
6108 for (i
= 0; i
< 16; i
++) {
6109 if ((insn
& (1 << i
)) == 0)
6111 if (insn
& (1 << 20)) {
6117 gen_movl_reg_T0(s
, i
);
6121 gen_movl_T0_reg(s
, i
);
6124 gen_op_addl_T1_im(4);
6126 if (insn
& (1 << 21)) {
6127 /* Base register writeback. */
6128 if (insn
& (1 << 24)) {
6129 gen_op_addl_T1_im(-offset
);
6131 /* Fault if writeback register is in register list. */
6132 if (insn
& (1 << rn
))
6134 gen_movl_reg_T1(s
, rn
);
6139 case 5: /* Data processing register constant shift. */
6141 gen_op_movl_T0_im(0);
6143 gen_movl_T0_reg(s
, rn
);
6144 gen_movl_T1_reg(s
, rm
);
6145 op
= (insn
>> 21) & 0xf;
6146 shiftop
= (insn
>> 4) & 3;
6147 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
6148 conds
= (insn
& (1 << 20)) != 0;
6149 logic_cc
= (conds
&& thumb2_logic_op(op
));
6152 gen_shift_T1_im_cc
[shiftop
](shift
);
6154 gen_shift_T1_im
[shiftop
](shift
);
6156 } else if (shiftop
!= 0) {
6158 gen_shift_T1_0_cc
[shiftop
]();
6160 gen_shift_T1_0
[shiftop
]();
6163 if (gen_thumb2_data_op(s
, op
, conds
, 0))
6166 gen_movl_reg_T0(s
, rd
);
6168 case 13: /* Misc data processing. */
6169 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
6170 if (op
< 4 && (insn
& 0xf000) != 0xf000)
6173 case 0: /* Register controlled shift. */
6174 gen_movl_T0_reg(s
, rm
);
6175 gen_movl_T1_reg(s
, rn
);
6176 if ((insn
& 0x70) != 0)
6178 op
= (insn
>> 21) & 3;
6179 if (insn
& (1 << 20)) {
6180 gen_shift_T1_T0_cc
[op
]();
6181 gen_op_logic_T1_cc();
6183 gen_shift_T1_T0
[op
]();
6185 gen_movl_reg_T1(s
, rd
);
6187 case 1: /* Sign/zero extend. */
6188 gen_movl_T1_reg(s
, rm
);
6189 shift
= (insn
>> 4) & 3;
6190 /* ??? In many cases it's not necessary to do a
6191 rotate, a shift is sufficient. */
6193 gen_op_rorl_T1_im(shift
* 8);
6194 op
= (insn
>> 20) & 7;
6196 case 0: gen_op_sxth_T1(); break;
6197 case 1: gen_op_uxth_T1(); break;
6198 case 2: gen_op_sxtb16_T1(); break;
6199 case 3: gen_op_uxtb16_T1(); break;
6200 case 4: gen_op_sxtb_T1(); break;
6201 case 5: gen_op_uxtb_T1(); break;
6202 default: goto illegal_op
;
6205 gen_movl_T2_reg(s
, rn
);
6206 if ((op
>> 1) == 1) {
6207 gen_op_add16_T1_T2();
6209 gen_op_addl_T1_T2();
6212 gen_movl_reg_T1(s
, rd
);
6214 case 2: /* SIMD add/subtract. */
6215 op
= (insn
>> 20) & 7;
6216 shift
= (insn
>> 4) & 7;
6217 if ((op
& 3) == 3 || (shift
& 3) == 3)
6219 gen_movl_T0_reg(s
, rn
);
6220 gen_movl_T1_reg(s
, rm
);
6221 gen_thumb2_parallel_addsub
[op
][shift
]();
6222 gen_movl_reg_T0(s
, rd
);
6224 case 3: /* Other data processing. */
6225 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
6227 /* Saturating add/subtract. */
6228 gen_movl_T0_reg(s
, rm
);
6229 gen_movl_T1_reg(s
, rn
);
6231 gen_op_double_T1_saturate();
6233 gen_op_subl_T0_T1_saturate();
6235 gen_op_addl_T0_T1_saturate();
6237 gen_movl_T0_reg(s
, rn
);
6239 case 0x0a: /* rbit */
6242 case 0x08: /* rev */
6245 case 0x09: /* rev16 */
6248 case 0x0b: /* revsh */
6251 case 0x10: /* sel */
6252 gen_movl_T1_reg(s
, rm
);
6255 case 0x18: /* clz */
6262 gen_movl_reg_T0(s
, rd
);
6264 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6265 op
= (insn
>> 4) & 0xf;
6266 gen_movl_T0_reg(s
, rn
);
6267 gen_movl_T1_reg(s
, rm
);
6268 switch ((insn
>> 20) & 7) {
6269 case 0: /* 32 x 32 -> 32 */
6272 gen_movl_T1_reg(s
, rs
);
6274 gen_op_rsbl_T0_T1();
6276 gen_op_addl_T0_T1();
6278 gen_movl_reg_T0(s
, rd
);
6280 case 1: /* 16 x 16 -> 32 */
6281 gen_mulxy(op
& 2, op
& 1);
6283 gen_movl_T1_reg(s
, rs
);
6284 gen_op_addl_T0_T1_setq();
6286 gen_movl_reg_T0(s
, rd
);
6288 case 2: /* Dual multiply add. */
6289 case 4: /* Dual multiply subtract. */
6291 gen_op_swap_half_T1();
6292 gen_op_mul_dual_T0_T1();
6293 /* This addition cannot overflow. */
6294 if (insn
& (1 << 22)) {
6295 gen_op_subl_T0_T1();
6297 gen_op_addl_T0_T1();
6301 gen_movl_T1_reg(s
, rs
);
6302 gen_op_addl_T0_T1_setq();
6304 gen_movl_reg_T0(s
, rd
);
6306 case 3: /* 32 * 16 -> 32msb */
6308 gen_op_sarl_T1_im(16);
6311 gen_op_imulw_T0_T1();
6314 gen_movl_T1_reg(s
, rs
);
6315 gen_op_addl_T0_T1_setq();
6317 gen_movl_reg_T0(s
, rd
);
6319 case 5: case 6: /* 32 * 32 -> 32msb */
6320 gen_op_imull_T0_T1();
6321 if (insn
& (1 << 5))
6322 gen_op_roundqd_T0_T1();
6324 gen_op_movl_T0_T1();
6326 gen_movl_T1_reg(s
, rs
);
6327 if (insn
& (1 << 21)) {
6328 gen_op_addl_T0_T1();
6330 gen_op_rsbl_T0_T1();
6333 gen_movl_reg_T0(s
, rd
);
6335 case 7: /* Unsigned sum of absolute differences. */
6336 gen_op_usad8_T0_T1();
6338 gen_movl_T1_reg(s
, rs
);
6339 gen_op_addl_T0_T1();
6341 gen_movl_reg_T0(s
, rd
);
6345 case 6: case 7: /* 64-bit multiply, Divide. */
6346 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
6347 gen_movl_T0_reg(s
, rn
);
6348 gen_movl_T1_reg(s
, rm
);
6349 if ((op
& 0x50) == 0x10) {
6351 if (!arm_feature(env
, ARM_FEATURE_DIV
))
6354 gen_op_udivl_T0_T1();
6356 gen_op_sdivl_T0_T1();
6357 gen_movl_reg_T0(s
, rd
);
6358 } else if ((op
& 0xe) == 0xc) {
6359 /* Dual multiply accumulate long. */
6361 gen_op_swap_half_T1();
6362 gen_op_mul_dual_T0_T1();
6364 gen_op_subl_T0_T1();
6366 gen_op_addl_T0_T1();
6368 gen_op_signbit_T1_T0();
6369 gen_op_addq_T0_T1(rs
, rd
);
6370 gen_movl_reg_T0(s
, rs
);
6371 gen_movl_reg_T1(s
, rd
);
6374 /* Unsigned 64-bit multiply */
6375 gen_op_mull_T0_T1();
6379 gen_mulxy(op
& 2, op
& 1);
6380 gen_op_signbit_T1_T0();
6382 /* Signed 64-bit multiply */
6383 gen_op_imull_T0_T1();
6388 gen_op_addq_lo_T0_T1(rs
);
6389 gen_op_addq_lo_T0_T1(rd
);
6390 } else if (op
& 0x40) {
6391 /* 64-bit accumulate. */
6392 gen_op_addq_T0_T1(rs
, rd
);
6394 gen_movl_reg_T0(s
, rs
);
6395 gen_movl_reg_T1(s
, rd
);
6400 case 6: case 7: case 14: case 15:
6402 if (((insn
>> 24) & 3) == 3) {
6403 /* Translate into the equivalent ARM encoding. */
6404 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
6405 if (disas_neon_data_insn(env
, s
, insn
))
6408 if (insn
& (1 << 28))
6410 if (disas_coproc_insn (env
, s
, insn
))
6414 case 8: case 9: case 10: case 11:
6415 if (insn
& (1 << 15)) {
6416 /* Branches, misc control. */
6417 if (insn
& 0x5000) {
6418 /* Unconditional branch. */
6419 /* signextend(hw1[10:0]) -> offset[:12]. */
6420 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
6421 /* hw1[10:0] -> offset[11:1]. */
6422 offset
|= (insn
& 0x7ff) << 1;
6423 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6424 offset[24:22] already have the same value because of the
6425 sign extension above. */
6426 offset
^= ((~insn
) & (1 << 13)) << 10;
6427 offset
^= ((~insn
) & (1 << 11)) << 11;
6430 if (insn
& (1 << 14)) {
6431 /* Branch and link. */
6432 gen_op_movl_T1_im(addr
| 1);
6433 gen_movl_reg_T1(s
, 14);
6437 if (insn
& (1 << 12)) {
6442 addr
&= ~(uint32_t)2;
6443 gen_op_movl_T0_im(addr
);
6446 } else if (((insn
>> 23) & 7) == 7) {
6448 if (insn
& (1 << 13))
6451 if (insn
& (1 << 26)) {
6452 /* Secure monitor call (v6Z) */
6453 goto illegal_op
; /* not implemented. */
6455 op
= (insn
>> 20) & 7;
6457 case 0: /* msr cpsr. */
6459 gen_op_v7m_msr_T0(insn
& 0xff);
6460 gen_movl_reg_T0(s
, rn
);
6465 case 1: /* msr spsr. */
6468 gen_movl_T0_reg(s
, rn
);
6469 if (gen_set_psr_T0(s
,
6470 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
6474 case 2: /* cps, nop-hint. */
6475 if (((insn
>> 8) & 7) == 0) {
6476 gen_nop_hint(s
, insn
& 0xff);
6478 /* Implemented as NOP in user mode. */
6483 if (insn
& (1 << 10)) {
6484 if (insn
& (1 << 7))
6486 if (insn
& (1 << 6))
6488 if (insn
& (1 << 5))
6490 if (insn
& (1 << 9))
6491 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
6493 if (insn
& (1 << 8)) {
6495 imm
|= (insn
& 0x1f);
6498 gen_op_movl_T0_im(imm
);
6499 gen_set_psr_T0(s
, offset
, 0);
6502 case 3: /* Special control operations. */
6503 op
= (insn
>> 4) & 0xf;
6511 /* These execute as NOPs. */
6519 /* Trivial implementation equivalent to bx. */
6520 gen_movl_T0_reg(s
, rn
);
6523 case 5: /* Exception return. */
6524 /* Unpredictable in user mode. */
6526 case 6: /* mrs cpsr. */
6528 gen_op_v7m_mrs_T0(insn
& 0xff);
6530 gen_op_movl_T0_cpsr();
6532 gen_movl_reg_T0(s
, rd
);
6534 case 7: /* mrs spsr. */
6535 /* Not accessible in user mode. */
6536 if (IS_USER(s
) || IS_M(env
))
6538 gen_op_movl_T0_spsr();
6539 gen_movl_reg_T0(s
, rd
);
6544 /* Conditional branch. */
6545 op
= (insn
>> 22) & 0xf;
6546 /* Generate a conditional jump to next instruction. */
6547 s
->condlabel
= gen_new_label();
6548 gen_test_cc
[op
^ 1](s
->condlabel
);
6551 /* offset[11:1] = insn[10:0] */
6552 offset
= (insn
& 0x7ff) << 1;
6553 /* offset[17:12] = insn[21:16]. */
6554 offset
|= (insn
& 0x003f0000) >> 4;
6555 /* offset[31:20] = insn[26]. */
6556 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
6557 /* offset[18] = insn[13]. */
6558 offset
|= (insn
& (1 << 13)) << 5;
6559 /* offset[19] = insn[11]. */
6560 offset
|= (insn
& (1 << 11)) << 8;
6562 /* jump to the offset */
6563 addr
= s
->pc
+ offset
;
6567 /* Data processing immediate. */
6568 if (insn
& (1 << 25)) {
6569 if (insn
& (1 << 24)) {
6570 if (insn
& (1 << 20))
6572 /* Bitfield/Saturate. */
6573 op
= (insn
>> 21) & 7;
6575 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
6577 gen_op_movl_T1_im(0);
6579 gen_movl_T1_reg(s
, rn
);
6581 case 2: /* Signed bitfield extract. */
6583 if (shift
+ imm
> 32)
6586 gen_op_sbfx_T1(shift
, imm
);
6588 case 6: /* Unsigned bitfield extract. */
6590 if (shift
+ imm
> 32)
6593 gen_op_ubfx_T1(shift
, (1u << imm
) - 1);
6595 case 3: /* Bitfield insert/clear. */
6598 imm
= imm
+ 1 - shift
;
6600 gen_movl_T0_reg(s
, rd
);
6601 gen_op_bfi_T1_T0(shift
, ((1u << imm
) - 1) << shift
);
6606 default: /* Saturate. */
6607 gen_movl_T1_reg(s
, rn
);
6610 gen_op_sarl_T1_im(shift
);
6612 gen_op_shll_T1_im(shift
);
6616 gen_op_ssat_T1(imm
);
6617 if ((op
& 1) && shift
== 0)
6618 gen_op_usat16_T1(imm
);
6620 gen_op_usat_T1(imm
);
6623 gen_op_ssat_T1(imm
);
6624 if ((op
& 1) && shift
== 0)
6625 gen_op_ssat16_T1(imm
);
6627 gen_op_ssat_T1(imm
);
6631 gen_movl_reg_T1(s
, rd
);
6633 imm
= ((insn
& 0x04000000) >> 15)
6634 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
6635 if (insn
& (1 << 22)) {
6636 /* 16-bit immediate. */
6637 imm
|= (insn
>> 4) & 0xf000;
6638 if (insn
& (1 << 23)) {
6640 gen_movl_T0_reg(s
, rd
);
6641 gen_op_movtop_T0_im(imm
<< 16);
6644 gen_op_movl_T0_im(imm
);
6647 /* Add/sub 12-bit immediate. */
6649 addr
= s
->pc
& ~(uint32_t)3;
6650 if (insn
& (1 << 23))
6654 gen_op_movl_T0_im(addr
);
6656 gen_movl_T0_reg(s
, rn
);
6657 gen_op_movl_T1_im(imm
);
6658 if (insn
& (1 << 23))
6659 gen_op_subl_T0_T1();
6661 gen_op_addl_T0_T1();
6664 gen_movl_reg_T0(s
, rd
);
6667 int shifter_out
= 0;
6668 /* modified 12-bit immediate. */
6669 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
6670 imm
= (insn
& 0xff);
6673 /* Nothing to do. */
6675 case 1: /* 00XY00XY */
6678 case 2: /* XY00XY00 */
6682 case 3: /* XYXYXYXY */
6686 default: /* Rotated constant. */
6687 shift
= (shift
<< 1) | (imm
>> 7);
6689 imm
= imm
<< (32 - shift
);
6693 gen_op_movl_T1_im(imm
);
6694 rn
= (insn
>> 16) & 0xf;
6696 gen_op_movl_T0_im(0);
6698 gen_movl_T0_reg(s
, rn
);
6699 op
= (insn
>> 21) & 0xf;
6700 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
6703 rd
= (insn
>> 8) & 0xf;
6705 gen_movl_reg_T0(s
, rd
);
6710 case 12: /* Load/store single data item. */
6714 if ((insn
& 0x01100000) == 0x01000000) {
6715 if (disas_neon_ls_insn(env
, s
, insn
))
6721 /* s->pc has already been incremented by 4. */
6722 imm
= s
->pc
& 0xfffffffc;
6723 if (insn
& (1 << 23))
6724 imm
+= insn
& 0xfff;
6726 imm
-= insn
& 0xfff;
6727 gen_op_movl_T1_im(imm
);
6729 gen_movl_T1_reg(s
, rn
);
6730 if (insn
& (1 << 23)) {
6731 /* Positive offset. */
6733 gen_op_addl_T1_im(imm
);
6735 op
= (insn
>> 8) & 7;
6738 case 0: case 8: /* Shifted Register. */
6739 shift
= (insn
>> 4) & 0xf;
6742 gen_movl_T2_reg(s
, rm
);
6744 gen_op_shll_T2_im(shift
);
6745 gen_op_addl_T1_T2();
6747 case 4: /* Negative offset. */
6748 gen_op_addl_T1_im(-imm
);
6750 case 6: /* User privilege. */
6751 gen_op_addl_T1_im(imm
);
6753 case 1: /* Post-decrement. */
6756 case 3: /* Post-increment. */
6757 gen_op_movl_T2_im(imm
);
6761 case 5: /* Pre-decrement. */
6764 case 7: /* Pre-increment. */
6765 gen_op_addl_T1_im(imm
);
6773 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
6774 if (insn
& (1 << 20)) {
6776 if (rs
== 15 && op
!= 2) {
6779 /* Memory hint. Implemented as NOP. */
6782 case 0: gen_ldst(ldub
, s
); break;
6783 case 4: gen_ldst(ldsb
, s
); break;
6784 case 1: gen_ldst(lduw
, s
); break;
6785 case 5: gen_ldst(ldsw
, s
); break;
6786 case 2: gen_ldst(ldl
, s
); break;
6787 default: goto illegal_op
;
6792 gen_movl_reg_T0(s
, rs
);
6799 gen_movl_T0_reg(s
, rs
);
6801 case 0: gen_ldst(stb
, s
); break;
6802 case 1: gen_ldst(stw
, s
); break;
6803 case 2: gen_ldst(stl
, s
); break;
6804 default: goto illegal_op
;
6808 gen_op_addl_T1_im(imm
);
6810 gen_movl_reg_T1(s
, rn
);
6821 static void disas_thumb_insn(CPUState
*env
, DisasContext
*s
)
6823 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
6827 if (s
->condexec_mask
) {
6828 cond
= s
->condexec_cond
;
6829 s
->condlabel
= gen_new_label();
6830 gen_test_cc
[cond
^ 1](s
->condlabel
);
6834 insn
= lduw_code(s
->pc
);
6837 switch (insn
>> 12) {
6840 op
= (insn
>> 11) & 3;
6843 rn
= (insn
>> 3) & 7;
6844 gen_movl_T0_reg(s
, rn
);
6845 if (insn
& (1 << 10)) {
6847 gen_op_movl_T1_im((insn
>> 6) & 7);
6850 rm
= (insn
>> 6) & 7;
6851 gen_movl_T1_reg(s
, rm
);
6853 if (insn
& (1 << 9)) {
6854 if (s
->condexec_mask
)
6855 gen_op_subl_T0_T1();
6857 gen_op_subl_T0_T1_cc();
6859 if (s
->condexec_mask
)
6860 gen_op_addl_T0_T1();
6862 gen_op_addl_T0_T1_cc();
6864 gen_movl_reg_T0(s
, rd
);
6866 /* shift immediate */
6867 rm
= (insn
>> 3) & 7;
6868 shift
= (insn
>> 6) & 0x1f;
6869 gen_movl_T0_reg(s
, rm
);
6870 if (s
->condexec_mask
)
6871 gen_shift_T0_im_thumb
[op
](shift
);
6873 gen_shift_T0_im_thumb_cc
[op
](shift
);
6874 gen_movl_reg_T0(s
, rd
);
6878 /* arithmetic large immediate */
6879 op
= (insn
>> 11) & 3;
6880 rd
= (insn
>> 8) & 0x7;
6882 gen_op_movl_T0_im(insn
& 0xff);
6884 gen_movl_T0_reg(s
, rd
);
6885 gen_op_movl_T1_im(insn
& 0xff);
6889 if (!s
->condexec_mask
)
6890 gen_op_logic_T0_cc();
6893 gen_op_subl_T0_T1_cc();
6896 if (s
->condexec_mask
)
6897 gen_op_addl_T0_T1();
6899 gen_op_addl_T0_T1_cc();
6902 if (s
->condexec_mask
)
6903 gen_op_subl_T0_T1();
6905 gen_op_subl_T0_T1_cc();
6909 gen_movl_reg_T0(s
, rd
);
6912 if (insn
& (1 << 11)) {
6913 rd
= (insn
>> 8) & 7;
6914 /* load pc-relative. Bit 1 of PC is ignored. */
6915 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
6916 val
&= ~(uint32_t)2;
6917 gen_op_movl_T1_im(val
);
6919 gen_movl_reg_T0(s
, rd
);
6922 if (insn
& (1 << 10)) {
6923 /* data processing extended or blx */
6924 rd
= (insn
& 7) | ((insn
>> 4) & 8);
6925 rm
= (insn
>> 3) & 0xf;
6926 op
= (insn
>> 8) & 3;
6929 gen_movl_T0_reg(s
, rd
);
6930 gen_movl_T1_reg(s
, rm
);
6931 gen_op_addl_T0_T1();
6932 gen_movl_reg_T0(s
, rd
);
6935 gen_movl_T0_reg(s
, rd
);
6936 gen_movl_T1_reg(s
, rm
);
6937 gen_op_subl_T0_T1_cc();
6939 case 2: /* mov/cpy */
6940 gen_movl_T0_reg(s
, rm
);
6941 gen_movl_reg_T0(s
, rd
);
6943 case 3:/* branch [and link] exchange thumb register */
6944 if (insn
& (1 << 7)) {
6945 val
= (uint32_t)s
->pc
| 1;
6946 gen_op_movl_T1_im(val
);
6947 gen_movl_reg_T1(s
, 14);
6949 gen_movl_T0_reg(s
, rm
);
6956 /* data processing register */
6958 rm
= (insn
>> 3) & 7;
6959 op
= (insn
>> 6) & 0xf;
6960 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
6961 /* the shift/rotate ops want the operands backwards */
6970 if (op
== 9) /* neg */
6971 gen_op_movl_T0_im(0);
6972 else if (op
!= 0xf) /* mvn doesn't read its first operand */
6973 gen_movl_T0_reg(s
, rd
);
6975 gen_movl_T1_reg(s
, rm
);
6978 gen_op_andl_T0_T1();
6979 if (!s
->condexec_mask
)
6980 gen_op_logic_T0_cc();
6983 gen_op_xorl_T0_T1();
6984 if (!s
->condexec_mask
)
6985 gen_op_logic_T0_cc();
6988 if (s
->condexec_mask
) {
6989 gen_op_shll_T1_T0();
6991 gen_op_shll_T1_T0_cc();
6992 gen_op_logic_T1_cc();
6996 if (s
->condexec_mask
) {
6997 gen_op_shrl_T1_T0();
6999 gen_op_shrl_T1_T0_cc();
7000 gen_op_logic_T1_cc();
7004 if (s
->condexec_mask
) {
7005 gen_op_sarl_T1_T0();
7007 gen_op_sarl_T1_T0_cc();
7008 gen_op_logic_T1_cc();
7012 if (s
->condexec_mask
)
7013 gen_op_adcl_T0_T1();
7015 gen_op_adcl_T0_T1_cc();
7018 if (s
->condexec_mask
)
7019 gen_op_sbcl_T0_T1();
7021 gen_op_sbcl_T0_T1_cc();
7024 if (s
->condexec_mask
) {
7025 gen_op_rorl_T1_T0();
7027 gen_op_rorl_T1_T0_cc();
7028 gen_op_logic_T1_cc();
7032 gen_op_andl_T0_T1();
7033 gen_op_logic_T0_cc();
7037 if (s
->condexec_mask
)
7038 gen_op_subl_T0_T1();
7040 gen_op_subl_T0_T1_cc();
7043 gen_op_subl_T0_T1_cc();
7047 gen_op_addl_T0_T1_cc();
7052 if (!s
->condexec_mask
)
7053 gen_op_logic_T0_cc();
7056 gen_op_mull_T0_T1();
7057 if (!s
->condexec_mask
)
7058 gen_op_logic_T0_cc();
7061 gen_op_bicl_T0_T1();
7062 if (!s
->condexec_mask
)
7063 gen_op_logic_T0_cc();
7067 if (!s
->condexec_mask
)
7068 gen_op_logic_T1_cc();
7075 gen_movl_reg_T1(s
, rm
);
7077 gen_movl_reg_T0(s
, rd
);
7082 /* load/store register offset. */
7084 rn
= (insn
>> 3) & 7;
7085 rm
= (insn
>> 6) & 7;
7086 op
= (insn
>> 9) & 7;
7087 gen_movl_T1_reg(s
, rn
);
7088 gen_movl_T2_reg(s
, rm
);
7089 gen_op_addl_T1_T2();
7091 if (op
< 3) /* store */
7092 gen_movl_T0_reg(s
, rd
);
7120 if (op
>= 3) /* load */
7121 gen_movl_reg_T0(s
, rd
);
7125 /* load/store word immediate offset */
7127 rn
= (insn
>> 3) & 7;
7128 gen_movl_T1_reg(s
, rn
);
7129 val
= (insn
>> 4) & 0x7c;
7130 gen_op_movl_T2_im(val
);
7131 gen_op_addl_T1_T2();
7133 if (insn
& (1 << 11)) {
7136 gen_movl_reg_T0(s
, rd
);
7139 gen_movl_T0_reg(s
, rd
);
7145 /* load/store byte immediate offset */
7147 rn
= (insn
>> 3) & 7;
7148 gen_movl_T1_reg(s
, rn
);
7149 val
= (insn
>> 6) & 0x1f;
7150 gen_op_movl_T2_im(val
);
7151 gen_op_addl_T1_T2();
7153 if (insn
& (1 << 11)) {
7156 gen_movl_reg_T0(s
, rd
);
7159 gen_movl_T0_reg(s
, rd
);
7165 /* load/store halfword immediate offset */
7167 rn
= (insn
>> 3) & 7;
7168 gen_movl_T1_reg(s
, rn
);
7169 val
= (insn
>> 5) & 0x3e;
7170 gen_op_movl_T2_im(val
);
7171 gen_op_addl_T1_T2();
7173 if (insn
& (1 << 11)) {
7176 gen_movl_reg_T0(s
, rd
);
7179 gen_movl_T0_reg(s
, rd
);
7185 /* load/store from stack */
7186 rd
= (insn
>> 8) & 7;
7187 gen_movl_T1_reg(s
, 13);
7188 val
= (insn
& 0xff) * 4;
7189 gen_op_movl_T2_im(val
);
7190 gen_op_addl_T1_T2();
7192 if (insn
& (1 << 11)) {
7195 gen_movl_reg_T0(s
, rd
);
7198 gen_movl_T0_reg(s
, rd
);
7204 /* add to high reg */
7205 rd
= (insn
>> 8) & 7;
7206 if (insn
& (1 << 11)) {
7208 gen_movl_T0_reg(s
, 13);
7210 /* PC. bit 1 is ignored. */
7211 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
7213 val
= (insn
& 0xff) * 4;
7214 gen_op_movl_T1_im(val
);
7215 gen_op_addl_T0_T1();
7216 gen_movl_reg_T0(s
, rd
);
7221 op
= (insn
>> 8) & 0xf;
7224 /* adjust stack pointer */
7225 gen_movl_T1_reg(s
, 13);
7226 val
= (insn
& 0x7f) * 4;
7227 if (insn
& (1 << 7))
7228 val
= -(int32_t)val
;
7229 gen_op_movl_T2_im(val
);
7230 gen_op_addl_T1_T2();
7231 gen_movl_reg_T1(s
, 13);
7234 case 2: /* sign/zero extend. */
7237 rm
= (insn
>> 3) & 7;
7238 gen_movl_T1_reg(s
, rm
);
7239 switch ((insn
>> 6) & 3) {
7240 case 0: gen_op_sxth_T1(); break;
7241 case 1: gen_op_sxtb_T1(); break;
7242 case 2: gen_op_uxth_T1(); break;
7243 case 3: gen_op_uxtb_T1(); break;
7245 gen_movl_reg_T1(s
, rd
);
7247 case 4: case 5: case 0xc: case 0xd:
7249 gen_movl_T1_reg(s
, 13);
7250 if (insn
& (1 << 8))
7254 for (i
= 0; i
< 8; i
++) {
7255 if (insn
& (1 << i
))
7258 if ((insn
& (1 << 11)) == 0) {
7259 gen_op_movl_T2_im(-offset
);
7260 gen_op_addl_T1_T2();
7262 gen_op_movl_T2_im(4);
7263 for (i
= 0; i
< 8; i
++) {
7264 if (insn
& (1 << i
)) {
7265 if (insn
& (1 << 11)) {
7268 gen_movl_reg_T0(s
, i
);
7271 gen_movl_T0_reg(s
, i
);
7274 /* advance to the next address. */
7275 gen_op_addl_T1_T2();
7278 if (insn
& (1 << 8)) {
7279 if (insn
& (1 << 11)) {
7282 /* don't set the pc until the rest of the instruction
7286 gen_movl_T0_reg(s
, 14);
7289 gen_op_addl_T1_T2();
7291 if ((insn
& (1 << 11)) == 0) {
7292 gen_op_movl_T2_im(-offset
);
7293 gen_op_addl_T1_T2();
7295 /* write back the new stack pointer */
7296 gen_movl_reg_T1(s
, 13);
7297 /* set the new PC value */
7298 if ((insn
& 0x0900) == 0x0900)
7302 case 1: case 3: case 9: case 11: /* czb */
7304 gen_movl_T0_reg(s
, rm
);
7305 s
->condlabel
= gen_new_label();
7307 if (insn
& (1 << 11))
7308 gen_op_testn_T0(s
->condlabel
);
7310 gen_op_test_T0(s
->condlabel
);
7312 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
7313 val
= (uint32_t)s
->pc
+ 2;
7318 case 15: /* IT, nop-hint. */
7319 if ((insn
& 0xf) == 0) {
7320 gen_nop_hint(s
, (insn
>> 4) & 0xf);
7324 s
->condexec_cond
= (insn
>> 4) & 0xe;
7325 s
->condexec_mask
= insn
& 0x1f;
7326 /* No actual code generated for this insn, just setup state. */
7329 case 0xe: /* bkpt */
7330 gen_set_condexec(s
);
7331 gen_op_movl_T0_im((long)s
->pc
- 2);
7332 gen_op_movl_reg_TN
[0][15]();
7334 s
->is_jmp
= DISAS_JUMP
;
7339 rn
= (insn
>> 3) & 0x7;
7341 gen_movl_T0_reg(s
, rn
);
7342 switch ((insn
>> 6) & 3) {
7343 case 0: gen_op_rev_T0(); break;
7344 case 1: gen_op_rev16_T0(); break;
7345 case 3: gen_op_revsh_T0(); break;
7346 default: goto illegal_op
;
7348 gen_movl_reg_T0(s
, rd
);
7356 val
= (insn
& (1 << 4)) != 0;
7357 gen_op_movl_T0_im(val
);
7360 gen_op_v7m_msr_T0(16);
7363 gen_op_v7m_msr_T0(17);
7367 if (insn
& (1 << 4))
7368 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
7372 val
= ((insn
& 7) << 6) & shift
;
7373 gen_op_movl_T0_im(val
);
7374 gen_set_psr_T0(s
, shift
, 0);
7384 /* load/store multiple */
7385 rn
= (insn
>> 8) & 0x7;
7386 gen_movl_T1_reg(s
, rn
);
7387 gen_op_movl_T2_im(4);
7388 for (i
= 0; i
< 8; i
++) {
7389 if (insn
& (1 << i
)) {
7390 if (insn
& (1 << 11)) {
7393 gen_movl_reg_T0(s
, i
);
7396 gen_movl_T0_reg(s
, i
);
7399 /* advance to the next address */
7400 gen_op_addl_T1_T2();
7403 /* Base register writeback. */
7404 if ((insn
& (1 << rn
)) == 0)
7405 gen_movl_reg_T1(s
, rn
);
7409 /* conditional branch or swi */
7410 cond
= (insn
>> 8) & 0xf;
7416 gen_set_condexec(s
);
7417 gen_op_movl_T0_im((long)s
->pc
| 1);
7418 /* Don't set r15. */
7419 gen_op_movl_reg_TN
[0][15]();
7420 s
->is_jmp
= DISAS_SWI
;
7423 /* generate a conditional jump to next instruction */
7424 s
->condlabel
= gen_new_label();
7425 gen_test_cc
[cond
^ 1](s
->condlabel
);
7427 gen_movl_T1_reg(s
, 15);
7429 /* jump to the offset */
7430 val
= (uint32_t)s
->pc
+ 2;
7431 offset
= ((int32_t)insn
<< 24) >> 24;
7437 if (insn
& (1 << 11)) {
7438 if (disas_thumb2_insn(env
, s
, insn
))
7442 /* unconditional branch */
7443 val
= (uint32_t)s
->pc
;
7444 offset
= ((int32_t)insn
<< 21) >> 21;
7445 val
+= (offset
<< 1) + 2;
7450 if (disas_thumb2_insn(env
, s
, insn
))
7456 gen_set_condexec(s
);
7457 gen_op_movl_T0_im((long)s
->pc
- 4);
7458 gen_op_movl_reg_TN
[0][15]();
7459 gen_op_undef_insn();
7460 s
->is_jmp
= DISAS_JUMP
;
7464 gen_set_condexec(s
);
7465 gen_op_movl_T0_im((long)s
->pc
- 2);
7466 gen_op_movl_reg_TN
[0][15]();
7467 gen_op_undef_insn();
7468 s
->is_jmp
= DISAS_JUMP
;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
7474 static inline int gen_intermediate_code_internal(CPUState
*env
,
7475 TranslationBlock
*tb
,
7478 DisasContext dc1
, *dc
= &dc1
;
7479 uint16_t *gen_opc_end
;
7481 target_ulong pc_start
;
7482 uint32_t next_page_start
;
7484 /* generate intermediate code */
7489 gen_opc_ptr
= gen_opc_buf
;
7490 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
7491 gen_opparam_ptr
= gen_opparam_buf
;
7493 dc
->is_jmp
= DISAS_NEXT
;
7495 dc
->singlestep_enabled
= env
->singlestep_enabled
;
7497 dc
->thumb
= env
->thumb
;
7498 dc
->condexec_mask
= (env
->condexec_bits
& 0xf) << 1;
7499 dc
->condexec_cond
= env
->condexec_bits
>> 4;
7501 #if !defined(CONFIG_USER_ONLY)
7503 dc
->user
= ((env
->v7m
.exception
== 0) && (env
->v7m
.control
& 1));
7505 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
7508 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
7511 /* Reset the conditional execution bits immediately. This avoids
7512 complications trying to do it at the end of the block. */
7513 if (env
->condexec_bits
)
7514 gen_op_set_condexec(0);
7516 #ifndef CONFIG_USER_ONLY
7517 if (dc
->pc
>= 0xfffffff0 && IS_M(env
)) {
7518 /* We always get here via a jump, so know we are not in a
7519 conditional execution block. */
7520 gen_op_exception_exit();
7524 if (env
->nb_breakpoints
> 0) {
7525 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
7526 if (env
->breakpoints
[j
] == dc
->pc
) {
7527 gen_set_condexec(dc
);
7528 gen_op_movl_T0_im((long)dc
->pc
);
7529 gen_op_movl_reg_TN
[0][15]();
7531 dc
->is_jmp
= DISAS_JUMP
;
7532 /* Advance PC so that clearing the breakpoint will
7533 invalidate this TB. */
7535 goto done_generating
;
7541 j
= gen_opc_ptr
- gen_opc_buf
;
7545 gen_opc_instr_start
[lj
++] = 0;
7547 gen_opc_pc
[lj
] = dc
->pc
;
7548 gen_opc_instr_start
[lj
] = 1;
7552 disas_thumb_insn(env
, dc
);
7553 if (dc
->condexec_mask
) {
7554 dc
->condexec_cond
= (dc
->condexec_cond
& 0xe)
7555 | ((dc
->condexec_mask
>> 4) & 1);
7556 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
7557 if (dc
->condexec_mask
== 0) {
7558 dc
->condexec_cond
= 0;
7562 disas_arm_insn(env
, dc
);
7565 if (dc
->condjmp
&& !dc
->is_jmp
) {
7566 gen_set_label(dc
->condlabel
);
7569 /* Terminate the TB on memory ops if watchpoints are present. */
7570 /* FIXME: This should be replacd by the deterministic execution
7571 * IRQ raising bits. */
7572 if (dc
->is_mem
&& env
->nb_watchpoints
)
7575 /* Translation stops when a conditional branch is enoutered.
7576 * Otherwise the subsequent code could get translated several times.
7577 * Also stop translation when a page boundary is reached. This
7578 * ensures prefech aborts occur at the right place. */
7579 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
7580 !env
->singlestep_enabled
&&
7581 dc
->pc
< next_page_start
);
7583 /* At this stage dc->condjmp will only be set when the skipped
7584 instruction was a conditional branch or trap, and the PC has
7585 already been written. */
7586 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
7587 /* Make sure the pc is updated, and raise a debug exception. */
7589 gen_set_condexec(dc
);
7590 if (dc
->is_jmp
== DISAS_SWI
) {
7595 gen_set_label(dc
->condlabel
);
7597 if (dc
->condjmp
|| !dc
->is_jmp
) {
7598 gen_op_movl_T0_im((long)dc
->pc
);
7599 gen_op_movl_reg_TN
[0][15]();
7602 gen_set_condexec(dc
);
7603 if (dc
->is_jmp
== DISAS_SWI
&& !dc
->condjmp
) {
7606 /* FIXME: Single stepping a WFI insn will not halt
7611 /* While branches must always occur at the end of an IT block,
7612 there are a few other things that can cause us to terminate
7613 the TB in the middel of an IT block:
7614 - Exception generating instructions (bkpt, swi, undefined).
7616 - Hardware watchpoints.
7617 Hardware breakpoints have already been handled and skip this code.
7619 gen_set_condexec(dc
);
7620 switch(dc
->is_jmp
) {
7622 gen_goto_tb(dc
, 1, dc
->pc
);
7627 /* indicate that the hash table must be used to find the next TB */
7632 /* nothing more to generate */
7642 gen_set_label(dc
->condlabel
);
7643 gen_set_condexec(dc
);
7644 gen_goto_tb(dc
, 1, dc
->pc
);
7649 *gen_opc_ptr
= INDEX_op_end
;
7652 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
7653 fprintf(logfile
, "----------------\n");
7654 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
7655 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
7656 fprintf(logfile
, "\n");
7657 if (loglevel
& (CPU_LOG_TB_OP
)) {
7658 fprintf(logfile
, "OP:\n");
7659 dump_ops(gen_opc_buf
, gen_opparam_buf
);
7660 fprintf(logfile
, "\n");
7665 j
= gen_opc_ptr
- gen_opc_buf
;
7668 gen_opc_instr_start
[lj
++] = 0;
7670 tb
->size
= dc
->pc
- pc_start
;
7675 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
7677 return gen_intermediate_code_internal(env
, tb
, 0);
7680 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
7682 return gen_intermediate_code_internal(env
, tb
, 1);
/* Printable names of the ARM CPU modes, indexed by the low 4 bits of the
   PSR mode field (used by cpu_dump_state).  Encodings with no defined
   mode print as "???".  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
7690 void cpu_dump_state(CPUState
*env
, FILE *f
,
7691 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
7700 /* ??? This assumes float64 and double have the same layout.
7701 Oh well, it's only debug dumps. */
7709 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
7711 cpu_fprintf(f
, "\n");
7713 cpu_fprintf(f
, " ");
7715 psr
= cpsr_read(env
);
7716 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
7718 psr
& (1 << 31) ? 'N' : '-',
7719 psr
& (1 << 30) ? 'Z' : '-',
7720 psr
& (1 << 29) ? 'C' : '-',
7721 psr
& (1 << 28) ? 'V' : '-',
7722 psr
& CPSR_T
? 'T' : 'A',
7723 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
7725 for (i
= 0; i
< 16; i
++) {
7726 d
.d
= env
->vfp
.regs
[i
];
7730 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
7731 i
* 2, (int)s0
.i
, s0
.s
,
7732 i
* 2 + 1, (int)s1
.i
, s1
.s
,
7733 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
7736 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);