4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 #define ENABLE_ARCH_5J 0
33 #define ENABLE_ARCH_6 1
34 #define ENABLE_ARCH_6T2 1
36 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
38 /* internal defines */
39 typedef struct DisasContext
{
42 /* Nonzero if this instruction has been conditionally skipped. */
44 /* The label that will be jumped to when the instruction is skipped. */
46 struct TranslationBlock
*tb
;
47 int singlestep_enabled
;
50 #if !defined(CONFIG_USER_ONLY)
55 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) (s->user)
61 #define DISAS_JUMP_NEXT 4
63 #ifdef USE_DIRECT_JUMP
66 #define TBPARAM(x) (long)(x)
69 /* XXX: move that elsewhere */
70 static uint16_t *gen_opc_ptr
;
71 static uint32_t *gen_opparam_ptr
;
76 #define DEF(s, n, copy_size) INDEX_op_ ## s,
84 static GenOpFunc1
*gen_test_cc
[14] = {
101 const uint8_t table_logic_cc
[16] = {
120 static GenOpFunc1
*gen_shift_T1_im
[4] = {
127 static GenOpFunc
*gen_shift_T1_0
[4] = {
134 static GenOpFunc1
*gen_shift_T2_im
[4] = {
141 static GenOpFunc
*gen_shift_T2_0
[4] = {
148 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
149 gen_op_shll_T1_im_cc
,
150 gen_op_shrl_T1_im_cc
,
151 gen_op_sarl_T1_im_cc
,
152 gen_op_rorl_T1_im_cc
,
155 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
162 static GenOpFunc
*gen_shift_T1_T0
[4] = {
169 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
170 gen_op_shll_T1_T0_cc
,
171 gen_op_shrl_T1_T0_cc
,
172 gen_op_sarl_T1_T0_cc
,
173 gen_op_rorl_T1_T0_cc
,
176 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
233 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
272 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
278 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
279 gen_op_shll_T0_im_thumb
,
280 gen_op_shrl_T0_im_thumb
,
281 gen_op_sarl_T0_im_thumb
,
284 static inline void gen_bx(DisasContext
*s
)
286 s
->is_jmp
= DISAS_UPDATE
;
291 #if defined(CONFIG_USER_ONLY)
292 #define gen_ldst(name, s) gen_op_##name##_raw()
294 #define gen_ldst(name, s) do { \
297 gen_op_##name##_user(); \
299 gen_op_##name##_kernel(); \
303 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
308 /* normaly, since we updated PC, we need only to add one insn */
310 val
= (long)s
->pc
+ 2;
312 val
= (long)s
->pc
+ 4;
313 gen_op_movl_TN_im
[t
](val
);
315 gen_op_movl_TN_reg
[t
][reg
]();
319 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
321 gen_movl_TN_reg(s
, reg
, 0);
324 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
326 gen_movl_TN_reg(s
, reg
, 1);
329 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
331 gen_movl_TN_reg(s
, reg
, 2);
334 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
336 gen_op_movl_reg_TN
[t
][reg
]();
338 s
->is_jmp
= DISAS_JUMP
;
342 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
344 gen_movl_reg_TN(s
, reg
, 0);
347 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
349 gen_movl_reg_TN(s
, reg
, 1);
352 /* Force a TB lookup after an instruction that changes the CPU state. */
353 static inline void gen_lookup_tb(DisasContext
*s
)
355 gen_op_movl_T0_im(s
->pc
);
356 gen_movl_reg_T0(s
, 15);
357 s
->is_jmp
= DISAS_UPDATE
;
360 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
362 int val
, rm
, shift
, shiftop
;
364 if (!(insn
& (1 << 25))) {
367 if (!(insn
& (1 << 23)))
370 gen_op_addl_T1_im(val
);
374 shift
= (insn
>> 7) & 0x1f;
375 gen_movl_T2_reg(s
, rm
);
376 shiftop
= (insn
>> 5) & 3;
378 gen_shift_T2_im
[shiftop
](shift
);
379 } else if (shiftop
!= 0) {
380 gen_shift_T2_0
[shiftop
]();
382 if (!(insn
& (1 << 23)))
389 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
394 if (insn
& (1 << 22)) {
396 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
397 if (!(insn
& (1 << 23)))
401 gen_op_addl_T1_im(val
);
405 gen_op_addl_T1_im(extra
);
407 gen_movl_T2_reg(s
, rm
);
408 if (!(insn
& (1 << 23)))
415 #define VFP_OP(name) \
416 static inline void gen_vfp_##name(int dp) \
419 gen_op_vfp_##name##d(); \
421 gen_op_vfp_##name##s(); \
443 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
446 gen_ldst(vfp_ldd
, s
);
448 gen_ldst(vfp_lds
, s
);
451 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
454 gen_ldst(vfp_std
, s
);
456 gen_ldst(vfp_sts
, s
);
460 vfp_reg_offset (int dp
, int reg
)
463 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
465 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
466 + offsetof(CPU_DoubleU
, l
.upper
);
468 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
469 + offsetof(CPU_DoubleU
, l
.lower
);
/* Copy VFP register 'reg' into the F0 working register.
 * 'dp' selects double (F0d) vs single (F0s) precision access.
 * Fix: the extracted text called both getreg ops unconditionally with
 * 'dp' unused; restore the precision dispatch. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}
/* Copy VFP register 'reg' into the F1 working register.
 * 'dp' selects double (F1d) vs single (F1s) precision access.
 * Fix: the extracted text called both getreg ops unconditionally with
 * 'dp' unused; restore the precision dispatch. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}
/* Copy the F0 working register into VFP register 'reg'.
 * 'dp' selects double (F0d) vs single (F0s) precision access.
 * Fix: the extracted text called both setreg ops unconditionally with
 * 'dp' unused; restore the precision dispatch. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
496 #define ARM_CP_RW_BIT (1 << 20)
498 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
503 rd
= (insn
>> 16) & 0xf;
504 gen_movl_T1_reg(s
, rd
);
506 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
507 if (insn
& (1 << 24)) {
509 if (insn
& (1 << 23))
510 gen_op_addl_T1_im(offset
);
512 gen_op_addl_T1_im(-offset
);
514 if (insn
& (1 << 21))
515 gen_movl_reg_T1(s
, rd
);
516 } else if (insn
& (1 << 21)) {
518 if (insn
& (1 << 23))
519 gen_op_movl_T0_im(offset
);
521 gen_op_movl_T0_im(- offset
);
523 gen_movl_reg_T0(s
, rd
);
524 } else if (!(insn
& (1 << 23)))
529 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
531 int rd
= (insn
>> 0) & 0xf;
534 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
537 gen_op_iwmmxt_movl_T0_wCx(rd
);
539 gen_op_iwmmxt_movl_T0_T1_wRn(rd
);
541 gen_op_movl_T1_im(mask
);
546 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
547 (ie. an undefined instruction). */
548 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
551 int rdhi
, rdlo
, rd0
, rd1
, i
;
553 if ((insn
& 0x0e000e00) == 0x0c000000) {
554 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
556 rdlo
= (insn
>> 12) & 0xf;
557 rdhi
= (insn
>> 16) & 0xf;
558 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
559 gen_op_iwmmxt_movl_T0_T1_wRn(wrd
);
560 gen_movl_reg_T0(s
, rdlo
);
561 gen_movl_reg_T1(s
, rdhi
);
563 gen_movl_T0_reg(s
, rdlo
);
564 gen_movl_T1_reg(s
, rdhi
);
565 gen_op_iwmmxt_movl_wRn_T0_T1(wrd
);
566 gen_op_iwmmxt_set_mup();
571 wrd
= (insn
>> 12) & 0xf;
572 if (gen_iwmmxt_address(s
, insn
))
574 if (insn
& ARM_CP_RW_BIT
) {
575 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
577 gen_op_iwmmxt_movl_wCx_T0(wrd
);
580 if (insn
& (1 << 22)) /* WLDRD */
581 gen_ldst(iwmmxt_ldq
, s
);
583 gen_ldst(iwmmxt_ldl
, s
);
585 if (insn
& (1 << 22)) /* WLDRH */
586 gen_ldst(iwmmxt_ldw
, s
);
588 gen_ldst(iwmmxt_ldb
, s
);
589 gen_op_iwmmxt_movq_wRn_M0(wrd
);
592 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
593 gen_op_iwmmxt_movl_T0_wCx(wrd
);
596 gen_op_iwmmxt_movq_M0_wRn(wrd
);
598 if (insn
& (1 << 22)) /* WSTRD */
599 gen_ldst(iwmmxt_stq
, s
);
601 gen_ldst(iwmmxt_stl
, s
);
603 if (insn
& (1 << 22)) /* WSTRH */
604 gen_ldst(iwmmxt_ldw
, s
);
606 gen_ldst(iwmmxt_stb
, s
);
612 if ((insn
& 0x0f000000) != 0x0e000000)
615 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
616 case 0x000: /* WOR */
617 wrd
= (insn
>> 12) & 0xf;
618 rd0
= (insn
>> 0) & 0xf;
619 rd1
= (insn
>> 16) & 0xf;
620 gen_op_iwmmxt_movq_M0_wRn(rd0
);
621 gen_op_iwmmxt_orq_M0_wRn(rd1
);
622 gen_op_iwmmxt_setpsr_nz();
623 gen_op_iwmmxt_movq_wRn_M0(wrd
);
624 gen_op_iwmmxt_set_mup();
625 gen_op_iwmmxt_set_cup();
627 case 0x011: /* TMCR */
630 rd
= (insn
>> 12) & 0xf;
631 wrd
= (insn
>> 16) & 0xf;
633 case ARM_IWMMXT_wCID
:
634 case ARM_IWMMXT_wCASF
:
636 case ARM_IWMMXT_wCon
:
637 gen_op_iwmmxt_set_cup();
639 case ARM_IWMMXT_wCSSF
:
640 gen_op_iwmmxt_movl_T0_wCx(wrd
);
641 gen_movl_T1_reg(s
, rd
);
643 gen_op_iwmmxt_movl_wCx_T0(wrd
);
645 case ARM_IWMMXT_wCGR0
:
646 case ARM_IWMMXT_wCGR1
:
647 case ARM_IWMMXT_wCGR2
:
648 case ARM_IWMMXT_wCGR3
:
649 gen_op_iwmmxt_set_cup();
650 gen_movl_reg_T0(s
, rd
);
651 gen_op_iwmmxt_movl_wCx_T0(wrd
);
657 case 0x100: /* WXOR */
658 wrd
= (insn
>> 12) & 0xf;
659 rd0
= (insn
>> 0) & 0xf;
660 rd1
= (insn
>> 16) & 0xf;
661 gen_op_iwmmxt_movq_M0_wRn(rd0
);
662 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
663 gen_op_iwmmxt_setpsr_nz();
664 gen_op_iwmmxt_movq_wRn_M0(wrd
);
665 gen_op_iwmmxt_set_mup();
666 gen_op_iwmmxt_set_cup();
668 case 0x111: /* TMRC */
671 rd
= (insn
>> 12) & 0xf;
672 wrd
= (insn
>> 16) & 0xf;
673 gen_op_iwmmxt_movl_T0_wCx(wrd
);
674 gen_movl_reg_T0(s
, rd
);
676 case 0x300: /* WANDN */
677 wrd
= (insn
>> 12) & 0xf;
678 rd0
= (insn
>> 0) & 0xf;
679 rd1
= (insn
>> 16) & 0xf;
680 gen_op_iwmmxt_movq_M0_wRn(rd0
);
681 gen_op_iwmmxt_negq_M0();
682 gen_op_iwmmxt_andq_M0_wRn(rd1
);
683 gen_op_iwmmxt_setpsr_nz();
684 gen_op_iwmmxt_movq_wRn_M0(wrd
);
685 gen_op_iwmmxt_set_mup();
686 gen_op_iwmmxt_set_cup();
688 case 0x200: /* WAND */
689 wrd
= (insn
>> 12) & 0xf;
690 rd0
= (insn
>> 0) & 0xf;
691 rd1
= (insn
>> 16) & 0xf;
692 gen_op_iwmmxt_movq_M0_wRn(rd0
);
693 gen_op_iwmmxt_andq_M0_wRn(rd1
);
694 gen_op_iwmmxt_setpsr_nz();
695 gen_op_iwmmxt_movq_wRn_M0(wrd
);
696 gen_op_iwmmxt_set_mup();
697 gen_op_iwmmxt_set_cup();
699 case 0x810: case 0xa10: /* WMADD */
700 wrd
= (insn
>> 12) & 0xf;
701 rd0
= (insn
>> 0) & 0xf;
702 rd1
= (insn
>> 16) & 0xf;
703 gen_op_iwmmxt_movq_M0_wRn(rd0
);
704 if (insn
& (1 << 21))
705 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
707 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
709 gen_op_iwmmxt_set_mup();
711 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
712 wrd
= (insn
>> 12) & 0xf;
713 rd0
= (insn
>> 16) & 0xf;
714 rd1
= (insn
>> 0) & 0xf;
715 gen_op_iwmmxt_movq_M0_wRn(rd0
);
716 switch ((insn
>> 22) & 3) {
718 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
721 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
724 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
729 gen_op_iwmmxt_movq_wRn_M0(wrd
);
730 gen_op_iwmmxt_set_mup();
731 gen_op_iwmmxt_set_cup();
733 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
734 wrd
= (insn
>> 12) & 0xf;
735 rd0
= (insn
>> 16) & 0xf;
736 rd1
= (insn
>> 0) & 0xf;
737 gen_op_iwmmxt_movq_M0_wRn(rd0
);
738 switch ((insn
>> 22) & 3) {
740 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
743 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
746 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
751 gen_op_iwmmxt_movq_wRn_M0(wrd
);
752 gen_op_iwmmxt_set_mup();
753 gen_op_iwmmxt_set_cup();
755 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
756 wrd
= (insn
>> 12) & 0xf;
757 rd0
= (insn
>> 16) & 0xf;
758 rd1
= (insn
>> 0) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0
);
760 if (insn
& (1 << 22))
761 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
763 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
764 if (!(insn
& (1 << 20)))
765 gen_op_iwmmxt_addl_M0_wRn(wrd
);
766 gen_op_iwmmxt_movq_wRn_M0(wrd
);
767 gen_op_iwmmxt_set_mup();
769 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
770 wrd
= (insn
>> 12) & 0xf;
771 rd0
= (insn
>> 16) & 0xf;
772 rd1
= (insn
>> 0) & 0xf;
773 gen_op_iwmmxt_movq_M0_wRn(rd0
);
774 if (insn
& (1 << 21))
775 gen_op_iwmmxt_mulsw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
777 gen_op_iwmmxt_muluw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
778 gen_op_iwmmxt_movq_wRn_M0(wrd
);
779 gen_op_iwmmxt_set_mup();
781 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
782 wrd
= (insn
>> 12) & 0xf;
783 rd0
= (insn
>> 16) & 0xf;
784 rd1
= (insn
>> 0) & 0xf;
785 gen_op_iwmmxt_movq_M0_wRn(rd0
);
786 if (insn
& (1 << 21))
787 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
789 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
790 if (!(insn
& (1 << 20))) {
791 if (insn
& (1 << 21))
792 gen_op_iwmmxt_addsq_M0_wRn(wrd
);
794 gen_op_iwmmxt_adduq_M0_wRn(wrd
);
796 gen_op_iwmmxt_movq_wRn_M0(wrd
);
797 gen_op_iwmmxt_set_mup();
799 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
800 wrd
= (insn
>> 12) & 0xf;
801 rd0
= (insn
>> 16) & 0xf;
802 rd1
= (insn
>> 0) & 0xf;
803 gen_op_iwmmxt_movq_M0_wRn(rd0
);
804 switch ((insn
>> 22) & 3) {
806 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
809 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
812 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
817 gen_op_iwmmxt_movq_wRn_M0(wrd
);
818 gen_op_iwmmxt_set_mup();
819 gen_op_iwmmxt_set_cup();
821 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
822 wrd
= (insn
>> 12) & 0xf;
823 rd0
= (insn
>> 16) & 0xf;
824 rd1
= (insn
>> 0) & 0xf;
825 gen_op_iwmmxt_movq_M0_wRn(rd0
);
826 if (insn
& (1 << 22))
827 gen_op_iwmmxt_avgw_M0_wRn(rd1
, (insn
>> 20) & 1);
829 gen_op_iwmmxt_avgb_M0_wRn(rd1
, (insn
>> 20) & 1);
830 gen_op_iwmmxt_movq_wRn_M0(wrd
);
831 gen_op_iwmmxt_set_mup();
832 gen_op_iwmmxt_set_cup();
834 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
835 wrd
= (insn
>> 12) & 0xf;
836 rd0
= (insn
>> 16) & 0xf;
837 rd1
= (insn
>> 0) & 0xf;
838 gen_op_iwmmxt_movq_M0_wRn(rd0
);
839 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
840 gen_op_movl_T1_im(7);
842 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
843 gen_op_iwmmxt_movq_wRn_M0(wrd
);
844 gen_op_iwmmxt_set_mup();
846 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
847 rd
= (insn
>> 12) & 0xf;
848 wrd
= (insn
>> 16) & 0xf;
849 gen_movl_T0_reg(s
, rd
);
850 gen_op_iwmmxt_movq_M0_wRn(wrd
);
851 switch ((insn
>> 6) & 3) {
853 gen_op_movl_T1_im(0xff);
854 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
857 gen_op_movl_T1_im(0xffff);
858 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
861 gen_op_movl_T1_im(0xffffffff);
862 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
867 gen_op_iwmmxt_movq_wRn_M0(wrd
);
868 gen_op_iwmmxt_set_mup();
870 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
871 rd
= (insn
>> 12) & 0xf;
872 wrd
= (insn
>> 16) & 0xf;
875 gen_op_iwmmxt_movq_M0_wRn(wrd
);
876 switch ((insn
>> 22) & 3) {
879 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
881 gen_op_movl_T1_im(0xff);
882 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 7) << 3);
887 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
889 gen_op_movl_T1_im(0xffff);
890 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 3) << 4);
894 gen_op_movl_T1_im(0xffffffff);
895 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 1) << 5);
900 gen_op_movl_reg_TN
[0][rd
]();
902 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
903 if ((insn
& 0x000ff008) != 0x0003f000)
905 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
906 switch ((insn
>> 22) & 3) {
908 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
911 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
914 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
919 gen_op_shll_T1_im(28);
921 gen_op_movl_cpsr_T0(0xf0000000);
923 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
924 rd
= (insn
>> 12) & 0xf;
925 wrd
= (insn
>> 16) & 0xf;
926 gen_movl_T0_reg(s
, rd
);
927 switch ((insn
>> 6) & 3) {
929 gen_op_iwmmxt_bcstb_M0_T0();
932 gen_op_iwmmxt_bcstw_M0_T0();
935 gen_op_iwmmxt_bcstl_M0_T0();
940 gen_op_iwmmxt_movq_wRn_M0(wrd
);
941 gen_op_iwmmxt_set_mup();
943 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
944 if ((insn
& 0x000ff00f) != 0x0003f000)
946 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
947 switch ((insn
>> 22) & 3) {
949 for (i
= 0; i
< 7; i
++) {
950 gen_op_shll_T1_im(4);
955 for (i
= 0; i
< 3; i
++) {
956 gen_op_shll_T1_im(8);
961 gen_op_shll_T1_im(16);
967 gen_op_movl_cpsr_T0(0xf0000000);
969 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
970 wrd
= (insn
>> 12) & 0xf;
971 rd0
= (insn
>> 16) & 0xf;
972 gen_op_iwmmxt_movq_M0_wRn(rd0
);
973 switch ((insn
>> 22) & 3) {
975 gen_op_iwmmxt_addcb_M0();
978 gen_op_iwmmxt_addcw_M0();
981 gen_op_iwmmxt_addcl_M0();
986 gen_op_iwmmxt_movq_wRn_M0(wrd
);
987 gen_op_iwmmxt_set_mup();
989 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
990 if ((insn
& 0x000ff00f) != 0x0003f000)
992 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
993 switch ((insn
>> 22) & 3) {
995 for (i
= 0; i
< 7; i
++) {
996 gen_op_shll_T1_im(4);
1001 for (i
= 0; i
< 3; i
++) {
1002 gen_op_shll_T1_im(8);
1007 gen_op_shll_T1_im(16);
1013 gen_op_movl_T1_im(0xf0000000);
1014 gen_op_andl_T0_T1();
1015 gen_op_movl_cpsr_T0(0xf0000000);
1017 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1018 rd
= (insn
>> 12) & 0xf;
1019 rd0
= (insn
>> 16) & 0xf;
1020 if ((insn
& 0xf) != 0)
1022 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1023 switch ((insn
>> 22) & 3) {
1025 gen_op_iwmmxt_msbb_T0_M0();
1028 gen_op_iwmmxt_msbw_T0_M0();
1031 gen_op_iwmmxt_msbl_T0_M0();
1036 gen_movl_reg_T0(s
, rd
);
1038 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1039 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1040 wrd
= (insn
>> 12) & 0xf;
1041 rd0
= (insn
>> 16) & 0xf;
1042 rd1
= (insn
>> 0) & 0xf;
1043 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1044 switch ((insn
>> 22) & 3) {
1046 if (insn
& (1 << 21))
1047 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1049 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1052 if (insn
& (1 << 21))
1053 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1055 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1058 if (insn
& (1 << 21))
1059 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1061 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1066 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1067 gen_op_iwmmxt_set_mup();
1068 gen_op_iwmmxt_set_cup();
1070 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1071 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1072 wrd
= (insn
>> 12) & 0xf;
1073 rd0
= (insn
>> 16) & 0xf;
1074 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1075 switch ((insn
>> 22) & 3) {
1077 if (insn
& (1 << 21))
1078 gen_op_iwmmxt_unpacklsb_M0();
1080 gen_op_iwmmxt_unpacklub_M0();
1083 if (insn
& (1 << 21))
1084 gen_op_iwmmxt_unpacklsw_M0();
1086 gen_op_iwmmxt_unpackluw_M0();
1089 if (insn
& (1 << 21))
1090 gen_op_iwmmxt_unpacklsl_M0();
1092 gen_op_iwmmxt_unpacklul_M0();
1097 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1098 gen_op_iwmmxt_set_mup();
1099 gen_op_iwmmxt_set_cup();
1101 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1102 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1103 wrd
= (insn
>> 12) & 0xf;
1104 rd0
= (insn
>> 16) & 0xf;
1105 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1106 switch ((insn
>> 22) & 3) {
1108 if (insn
& (1 << 21))
1109 gen_op_iwmmxt_unpackhsb_M0();
1111 gen_op_iwmmxt_unpackhub_M0();
1114 if (insn
& (1 << 21))
1115 gen_op_iwmmxt_unpackhsw_M0();
1117 gen_op_iwmmxt_unpackhuw_M0();
1120 if (insn
& (1 << 21))
1121 gen_op_iwmmxt_unpackhsl_M0();
1123 gen_op_iwmmxt_unpackhul_M0();
1128 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1129 gen_op_iwmmxt_set_mup();
1130 gen_op_iwmmxt_set_cup();
1132 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1133 case 0x214: case 0x614: case 0xa14: case 0xe14:
1134 wrd
= (insn
>> 12) & 0xf;
1135 rd0
= (insn
>> 16) & 0xf;
1136 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1137 if (gen_iwmmxt_shift(insn
, 0xff))
1139 switch ((insn
>> 22) & 3) {
1143 gen_op_iwmmxt_srlw_M0_T0();
1146 gen_op_iwmmxt_srll_M0_T0();
1149 gen_op_iwmmxt_srlq_M0_T0();
1152 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1153 gen_op_iwmmxt_set_mup();
1154 gen_op_iwmmxt_set_cup();
1156 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1157 case 0x014: case 0x414: case 0x814: case 0xc14:
1158 wrd
= (insn
>> 12) & 0xf;
1159 rd0
= (insn
>> 16) & 0xf;
1160 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1161 if (gen_iwmmxt_shift(insn
, 0xff))
1163 switch ((insn
>> 22) & 3) {
1167 gen_op_iwmmxt_sraw_M0_T0();
1170 gen_op_iwmmxt_sral_M0_T0();
1173 gen_op_iwmmxt_sraq_M0_T0();
1176 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1177 gen_op_iwmmxt_set_mup();
1178 gen_op_iwmmxt_set_cup();
1180 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1181 case 0x114: case 0x514: case 0x914: case 0xd14:
1182 wrd
= (insn
>> 12) & 0xf;
1183 rd0
= (insn
>> 16) & 0xf;
1184 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1185 if (gen_iwmmxt_shift(insn
, 0xff))
1187 switch ((insn
>> 22) & 3) {
1191 gen_op_iwmmxt_sllw_M0_T0();
1194 gen_op_iwmmxt_slll_M0_T0();
1197 gen_op_iwmmxt_sllq_M0_T0();
1200 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1201 gen_op_iwmmxt_set_mup();
1202 gen_op_iwmmxt_set_cup();
1204 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1205 case 0x314: case 0x714: case 0xb14: case 0xf14:
1206 wrd
= (insn
>> 12) & 0xf;
1207 rd0
= (insn
>> 16) & 0xf;
1208 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1209 switch ((insn
>> 22) & 3) {
1213 if (gen_iwmmxt_shift(insn
, 0xf))
1215 gen_op_iwmmxt_rorw_M0_T0();
1218 if (gen_iwmmxt_shift(insn
, 0x1f))
1220 gen_op_iwmmxt_rorl_M0_T0();
1223 if (gen_iwmmxt_shift(insn
, 0x3f))
1225 gen_op_iwmmxt_rorq_M0_T0();
1228 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1229 gen_op_iwmmxt_set_mup();
1230 gen_op_iwmmxt_set_cup();
1232 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1233 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1234 wrd
= (insn
>> 12) & 0xf;
1235 rd0
= (insn
>> 16) & 0xf;
1236 rd1
= (insn
>> 0) & 0xf;
1237 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1238 switch ((insn
>> 22) & 3) {
1240 if (insn
& (1 << 21))
1241 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
1243 gen_op_iwmmxt_minub_M0_wRn(rd1
);
1246 if (insn
& (1 << 21))
1247 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
1249 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
1252 if (insn
& (1 << 21))
1253 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
1255 gen_op_iwmmxt_minul_M0_wRn(rd1
);
1260 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1261 gen_op_iwmmxt_set_mup();
1263 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1264 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1265 wrd
= (insn
>> 12) & 0xf;
1266 rd0
= (insn
>> 16) & 0xf;
1267 rd1
= (insn
>> 0) & 0xf;
1268 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1269 switch ((insn
>> 22) & 3) {
1271 if (insn
& (1 << 21))
1272 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
1274 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
1277 if (insn
& (1 << 21))
1278 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
1280 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
1283 if (insn
& (1 << 21))
1284 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
1286 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
1291 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1292 gen_op_iwmmxt_set_mup();
1294 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1295 case 0x402: case 0x502: case 0x602: case 0x702:
1296 wrd
= (insn
>> 12) & 0xf;
1297 rd0
= (insn
>> 16) & 0xf;
1298 rd1
= (insn
>> 0) & 0xf;
1299 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1300 gen_op_movl_T0_im((insn
>> 20) & 3);
1301 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1302 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1303 gen_op_iwmmxt_set_mup();
1305 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1306 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1307 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1308 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1309 wrd
= (insn
>> 12) & 0xf;
1310 rd0
= (insn
>> 16) & 0xf;
1311 rd1
= (insn
>> 0) & 0xf;
1312 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1313 switch ((insn
>> 20) & 0xf) {
1315 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
1318 gen_op_iwmmxt_subub_M0_wRn(rd1
);
1321 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
1324 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
1327 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
1330 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
1333 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
1336 gen_op_iwmmxt_subul_M0_wRn(rd1
);
1339 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
1344 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1345 gen_op_iwmmxt_set_mup();
1346 gen_op_iwmmxt_set_cup();
1348 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1349 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1350 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1351 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1352 wrd
= (insn
>> 12) & 0xf;
1353 rd0
= (insn
>> 16) & 0xf;
1354 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1355 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
1356 gen_op_iwmmxt_shufh_M0_T0();
1357 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1358 gen_op_iwmmxt_set_mup();
1359 gen_op_iwmmxt_set_cup();
1361 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1362 case 0x418: case 0x518: case 0x618: case 0x718:
1363 case 0x818: case 0x918: case 0xa18: case 0xb18:
1364 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1365 wrd
= (insn
>> 12) & 0xf;
1366 rd0
= (insn
>> 16) & 0xf;
1367 rd1
= (insn
>> 0) & 0xf;
1368 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1369 switch ((insn
>> 20) & 0xf) {
1371 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
1374 gen_op_iwmmxt_addub_M0_wRn(rd1
);
1377 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
1380 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
1383 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
1386 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
1389 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
1392 gen_op_iwmmxt_addul_M0_wRn(rd1
);
1395 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
1400 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1401 gen_op_iwmmxt_set_mup();
1402 gen_op_iwmmxt_set_cup();
1404 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1405 case 0x408: case 0x508: case 0x608: case 0x708:
1406 case 0x808: case 0x908: case 0xa08: case 0xb08:
1407 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1408 wrd
= (insn
>> 12) & 0xf;
1409 rd0
= (insn
>> 16) & 0xf;
1410 rd1
= (insn
>> 0) & 0xf;
1411 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1412 if (!(insn
& (1 << 20)))
1414 switch ((insn
>> 22) & 3) {
1418 if (insn
& (1 << 21))
1419 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
1421 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
1424 if (insn
& (1 << 21))
1425 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
1427 gen_op_iwmmxt_packul_M0_wRn(rd1
);
1430 if (insn
& (1 << 21))
1431 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
1433 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
1436 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1437 gen_op_iwmmxt_set_mup();
1438 gen_op_iwmmxt_set_cup();
1440 case 0x201: case 0x203: case 0x205: case 0x207:
1441 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1442 case 0x211: case 0x213: case 0x215: case 0x217:
1443 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1444 wrd
= (insn
>> 5) & 0xf;
1445 rd0
= (insn
>> 12) & 0xf;
1446 rd1
= (insn
>> 0) & 0xf;
1447 if (rd0
== 0xf || rd1
== 0xf)
1449 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1450 switch ((insn
>> 16) & 0xf) {
1451 case 0x0: /* TMIA */
1452 gen_op_movl_TN_reg
[0][rd0
]();
1453 gen_op_movl_TN_reg
[1][rd1
]();
1454 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1456 case 0x8: /* TMIAPH */
1457 gen_op_movl_TN_reg
[0][rd0
]();
1458 gen_op_movl_TN_reg
[1][rd1
]();
1459 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1462 gen_op_movl_TN_reg
[1][rd0
]();
1463 if (insn
& (1 << 16))
1464 gen_op_shrl_T1_im(16);
1465 gen_op_movl_T0_T1();
1466 gen_op_movl_TN_reg
[1][rd1
]();
1467 if (insn
& (1 << 17))
1468 gen_op_shrl_T1_im(16);
1469 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1474 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1475 gen_op_iwmmxt_set_mup();
1484 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
1485 (ie. an undefined instruction). */
1486 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1488 int acc
, rd0
, rd1
, rdhi
, rdlo
;
1490 if ((insn
& 0x0ff00f10) == 0x0e200010) {
1491 /* Multiply with Internal Accumulate Format */
1492 rd0
= (insn
>> 12) & 0xf;
1494 acc
= (insn
>> 5) & 7;
1499 switch ((insn
>> 16) & 0xf) {
1501 gen_op_movl_TN_reg
[0][rd0
]();
1502 gen_op_movl_TN_reg
[1][rd1
]();
1503 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1505 case 0x8: /* MIAPH */
1506 gen_op_movl_TN_reg
[0][rd0
]();
1507 gen_op_movl_TN_reg
[1][rd1
]();
1508 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1510 case 0xc: /* MIABB */
1511 case 0xd: /* MIABT */
1512 case 0xe: /* MIATB */
1513 case 0xf: /* MIATT */
1514 gen_op_movl_TN_reg
[1][rd0
]();
1515 if (insn
& (1 << 16))
1516 gen_op_shrl_T1_im(16);
1517 gen_op_movl_T0_T1();
1518 gen_op_movl_TN_reg
[1][rd1
]();
1519 if (insn
& (1 << 17))
1520 gen_op_shrl_T1_im(16);
1521 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1527 gen_op_iwmmxt_movq_wRn_M0(acc
);
1531 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
1532 /* Internal Accumulator Access Format */
1533 rdhi
= (insn
>> 16) & 0xf;
1534 rdlo
= (insn
>> 12) & 0xf;
1540 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
1541 gen_op_iwmmxt_movl_T0_T1_wRn(acc
);
1542 gen_op_movl_reg_TN
[0][rdlo
]();
1543 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1544 gen_op_andl_T0_T1();
1545 gen_op_movl_reg_TN
[0][rdhi
]();
1547 gen_op_movl_TN_reg
[0][rdlo
]();
1548 gen_op_movl_TN_reg
[1][rdhi
]();
1549 gen_op_iwmmxt_movl_wRn_T0_T1(acc
);
1557 /* Disassemble system coprocessor instruction. Return nonzero if
1558 instruction is not defined. */
1559 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1561 uint32_t rd
= (insn
>> 12) & 0xf;
1562 uint32_t cp
= (insn
>> 8) & 0xf;
1567 if (insn
& ARM_CP_RW_BIT
) {
1568 if (!env
->cp
[cp
].cp_read
)
1570 gen_op_movl_T0_im((uint32_t) s
->pc
);
1571 gen_op_movl_reg_TN
[0][15]();
1572 gen_op_movl_T0_cp(insn
);
1573 gen_movl_reg_T0(s
, rd
);
1575 if (!env
->cp
[cp
].cp_write
)
1577 gen_op_movl_T0_im((uint32_t) s
->pc
);
1578 gen_op_movl_reg_TN
[0][15]();
1579 gen_movl_T0_reg(s
, rd
);
1580 gen_op_movl_cp_T0(insn
);
1585 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
1586 instruction is not defined. */
1587 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1591 /* ??? Some cp15 registers are accessible from userspace. */
1595 if ((insn
& 0x0fff0fff) == 0x0e070f90
1596 || (insn
& 0x0fff0fff) == 0x0e070f58) {
1597 /* Wait for interrupt. */
1598 gen_op_movl_T0_im((long)s
->pc
);
1599 gen_op_movl_reg_TN
[0][15]();
1601 s
->is_jmp
= DISAS_JUMP
;
1604 rd
= (insn
>> 12) & 0xf;
1605 if (insn
& ARM_CP_RW_BIT
) {
1606 gen_op_movl_T0_cp15(insn
);
1607 /* If the destination register is r15 then sets condition codes. */
1609 gen_movl_reg_T0(s
, rd
);
1611 gen_movl_T0_reg(s
, rd
);
1612 gen_op_movl_cp15_T0(insn
);
1613 /* Normally we would always end the TB here, but Linux
1614 * arch/arm/mach-pxa/sleep.S expects two instructions following
1615 * an MMU enable to execute from cache. Imitate this behaviour. */
1616 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
1617 (insn
& 0x0fff0fff) != 0x0e010f10)
1623 /* Disassemble a VFP instruction. Returns nonzero if an error occured
1624 (ie. an undefined instruction). */
1625 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
1627 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
1630 if (!arm_feature(env
, ARM_FEATURE_VFP
))
1633 if ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) == 0) {
1634 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
1635 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
1637 rn
= (insn
>> 16) & 0xf;
1638 if (rn
!= 0 && rn
!= 8)
1641 dp
= ((insn
& 0xf00) == 0xb00);
1642 switch ((insn
>> 24) & 0xf) {
1644 if (insn
& (1 << 4)) {
1645 /* single register transfer */
1646 if ((insn
& 0x6f) != 0x00)
1648 rd
= (insn
>> 12) & 0xf;
1652 rn
= (insn
>> 16) & 0xf;
1653 /* Get the existing value even for arm->vfp moves because
1654 we only set half the register. */
1655 gen_mov_F0_vreg(1, rn
);
1657 if (insn
& ARM_CP_RW_BIT
) {
1659 if (insn
& (1 << 21))
1660 gen_movl_reg_T1(s
, rd
);
1662 gen_movl_reg_T0(s
, rd
);
1665 if (insn
& (1 << 21))
1666 gen_movl_T1_reg(s
, rd
);
1668 gen_movl_T0_reg(s
, rd
);
1670 gen_mov_vreg_F0(dp
, rn
);
1673 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1674 if (insn
& ARM_CP_RW_BIT
) {
1676 if (insn
& (1 << 21)) {
1677 /* system register */
1682 case ARM_VFP_FPINST
:
1683 case ARM_VFP_FPINST2
:
1684 gen_op_vfp_movl_T0_xreg(rn
);
1688 gen_op_vfp_movl_T0_fpscr_flags();
1690 gen_op_vfp_movl_T0_fpscr();
1696 gen_mov_F0_vreg(0, rn
);
1700 /* Set the 4 flag bits in the CPSR. */
1701 gen_op_movl_cpsr_T0(0xf0000000);
1703 gen_movl_reg_T0(s
, rd
);
1706 gen_movl_T0_reg(s
, rd
);
1707 if (insn
& (1 << 21)) {
1709 /* system register */
1712 /* Writes are ignored. */
1715 gen_op_vfp_movl_fpscr_T0();
1719 gen_op_vfp_movl_xreg_T0(rn
);
1722 case ARM_VFP_FPINST
:
1723 case ARM_VFP_FPINST2
:
1724 gen_op_vfp_movl_xreg_T0(rn
);
1731 gen_mov_vreg_F0(0, rn
);
1736 /* data processing */
1737 /* The opcode is in bits 23, 21, 20 and 6. */
1738 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
1742 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1744 /* rn is register number */
1745 if (insn
& (1 << 7))
1747 rn
= (insn
>> 16) & 0xf;
1750 if (op
== 15 && (rn
== 15 || rn
> 17)) {
1751 /* Integer or single precision destination. */
1752 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
1754 if (insn
& (1 << 22))
1756 rd
= (insn
>> 12) & 0xf;
1759 if (op
== 15 && (rn
== 16 || rn
== 17)) {
1760 /* Integer source. */
1761 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
1763 if (insn
& (1 << 5))
1768 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1769 if (op
== 15 && rn
== 15) {
1770 /* Double precision destination. */
1771 if (insn
& (1 << 22))
1773 rd
= (insn
>> 12) & 0xf;
1775 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
1776 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
1779 veclen
= env
->vfp
.vec_len
;
1780 if (op
== 15 && rn
> 3)
1783 /* Shut up compiler warnings. */
1794 /* Figure out what type of vector operation this is. */
1795 if ((rd
& bank_mask
) == 0) {
1800 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
1802 delta_d
= env
->vfp
.vec_stride
+ 1;
1804 if ((rm
& bank_mask
) == 0) {
1805 /* mixed scalar/vector */
1814 /* Load the initial operands. */
1819 /* Integer source */
1820 gen_mov_F0_vreg(0, rm
);
1825 gen_mov_F0_vreg(dp
, rd
);
1826 gen_mov_F1_vreg(dp
, rm
);
1830 /* Compare with zero */
1831 gen_mov_F0_vreg(dp
, rd
);
1835 /* One source operand. */
1836 gen_mov_F0_vreg(dp
, rm
);
1839 /* Two source operands. */
1840 gen_mov_F0_vreg(dp
, rn
);
1841 gen_mov_F1_vreg(dp
, rm
);
1845 /* Perform the calculation. */
1847 case 0: /* mac: fd + (fn * fm) */
1849 gen_mov_F1_vreg(dp
, rd
);
1852 case 1: /* nmac: fd - (fn * fm) */
1855 gen_mov_F1_vreg(dp
, rd
);
1858 case 2: /* msc: -fd + (fn * fm) */
1860 gen_mov_F1_vreg(dp
, rd
);
1863 case 3: /* nmsc: -fd - (fn * fm) */
1865 gen_mov_F1_vreg(dp
, rd
);
1869 case 4: /* mul: fn * fm */
1872 case 5: /* nmul: -(fn * fm) */
1876 case 6: /* add: fn + fm */
1879 case 7: /* sub: fn - fm */
1882 case 8: /* div: fn / fm */
1885 case 15: /* extension space */
1908 case 11: /* cmpez */
1912 case 15: /* single<->double conversion */
1914 gen_op_vfp_fcvtsd();
1916 gen_op_vfp_fcvtds();
1918 case 16: /* fuito */
1921 case 17: /* fsito */
1924 case 24: /* ftoui */
1927 case 25: /* ftouiz */
1930 case 26: /* ftosi */
1933 case 27: /* ftosiz */
1936 default: /* undefined */
1937 printf ("rn:%d\n", rn
);
1941 default: /* undefined */
1942 printf ("op:%d\n", op
);
1946 /* Write back the result. */
1947 if (op
== 15 && (rn
>= 8 && rn
<= 11))
1948 ; /* Comparison, do nothing. */
1949 else if (op
== 15 && rn
> 17)
1950 /* Integer result. */
1951 gen_mov_vreg_F0(0, rd
);
1952 else if (op
== 15 && rn
== 15)
1954 gen_mov_vreg_F0(!dp
, rd
);
1956 gen_mov_vreg_F0(dp
, rd
);
1958 /* break out of the loop if we have finished */
1962 if (op
== 15 && delta_m
== 0) {
1963 /* single source one-many */
1965 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
1967 gen_mov_vreg_F0(dp
, rd
);
1971 /* Setup the next operands. */
1973 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
1977 /* One source operand. */
1978 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
1980 gen_mov_F0_vreg(dp
, rm
);
1982 /* Two source operands. */
1983 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
1985 gen_mov_F0_vreg(dp
, rn
);
1987 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
1989 gen_mov_F1_vreg(dp
, rm
);
1997 if (dp
&& (insn
& (1 << 22))) {
1998 /* two-register transfer */
1999 rn
= (insn
>> 16) & 0xf;
2000 rd
= (insn
>> 12) & 0xf;
2002 if (insn
& (1 << 5))
2006 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2008 if (insn
& ARM_CP_RW_BIT
) {
2011 gen_mov_F0_vreg(1, rm
);
2013 gen_movl_reg_T0(s
, rd
);
2014 gen_movl_reg_T1(s
, rn
);
2016 gen_mov_F0_vreg(0, rm
);
2018 gen_movl_reg_T0(s
, rn
);
2019 gen_mov_F0_vreg(0, rm
+ 1);
2021 gen_movl_reg_T0(s
, rd
);
2026 gen_movl_T0_reg(s
, rd
);
2027 gen_movl_T1_reg(s
, rn
);
2029 gen_mov_vreg_F0(1, rm
);
2031 gen_movl_T0_reg(s
, rn
);
2033 gen_mov_vreg_F0(0, rm
);
2034 gen_movl_T0_reg(s
, rd
);
2036 gen_mov_vreg_F0(0, rm
+ 1);
2041 rn
= (insn
>> 16) & 0xf;
2043 rd
= (insn
>> 12) & 0xf;
2045 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
2046 gen_movl_T1_reg(s
, rn
);
2047 if ((insn
& 0x01200000) == 0x01000000) {
2048 /* Single load/store */
2049 offset
= (insn
& 0xff) << 2;
2050 if ((insn
& (1 << 23)) == 0)
2052 gen_op_addl_T1_im(offset
);
2053 if (insn
& (1 << 20)) {
2055 gen_mov_vreg_F0(dp
, rd
);
2057 gen_mov_F0_vreg(dp
, rd
);
2061 /* load/store multiple */
2063 n
= (insn
>> 1) & 0x7f;
2067 if (insn
& (1 << 24)) /* pre-decrement */
2068 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
2074 for (i
= 0; i
< n
; i
++) {
2075 if (insn
& ARM_CP_RW_BIT
) {
2078 gen_mov_vreg_F0(dp
, rd
+ i
);
2081 gen_mov_F0_vreg(dp
, rd
+ i
);
2084 gen_op_addl_T1_im(offset
);
2086 if (insn
& (1 << 21)) {
2088 if (insn
& (1 << 24))
2089 offset
= -offset
* n
;
2090 else if (dp
&& (insn
& 1))
2096 gen_op_addl_T1_im(offset
);
2097 gen_movl_reg_T1(s
, rn
);
2103 /* Should never happen. */
/* Emit code that transfers control to guest address 'dest' at the end of a
 * translation block.  When the destination lies on the same guest page as
 * this TB (same TARGET_PAGE_MASK region), a direct chained-TB jump is
 * emitted; otherwise r15 is simply loaded with 'dest'.
 * NOTE(review): this chunk is a lossy extraction -- interior source lines
 * (braces, the assignment of s->tb into 'tb', the exit-tb op) are elided,
 * so comments below describe only the visible fragments. */
2109 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
2111 TranslationBlock
*tb
;
/* Same-page check: direct chaining is only valid within one guest page. */
2114 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
/* Patchable direct jumps; slot selected by 'n' (0 or 1). */
2116 gen_op_goto_tb0(TBPARAM(tb
));
2118 gen_op_goto_tb1(TBPARAM(tb
));
2119 gen_op_movl_T0_im(dest
);
2120 gen_op_movl_r15_T0();
/* Return (long)tb + n so the caller of the generated code knows which
 * chaining slot was taken. */
2121 gen_op_movl_T0_im((long)tb
+ n
);
/* Cross-page (non-chained) path: just load the destination into r15. */
2124 gen_op_movl_T0_im(dest
);
2125 gen_op_movl_r15_T0();
/* Emit an unconditional jump to guest address 'dest' and mark the TB as
 * finished.  Under single-stepping an indirect jump is used instead of TB
 * chaining so the debug exception still fires.
 * NOTE(review): interior lines are elided by the extraction (braces, the
 * op that moves T0 into r15 on the single-step path); only the visible
 * fragments are commented. */
2131 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
/* Single-stepping is the cold path, hence the expect-false hint. */
2133 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
2134 /* An indirect jump so that we still trigger the debug exception. */
2137 gen_op_movl_T0_im(dest
);
/* Normal path: chained/direct jump via gen_goto_tb, slot 0. */
2140 gen_goto_tb(s
, 0, dest
);
/* Translation of this TB stops here. */
2141 s
->is_jmp
= DISAS_TB_JUMP
;
/* Helper for the 16x16 signed multiply instructions (SMULxy family):
 * 'x' and 'y' select which halfword of each operand is used.
 * NOTE(review): the extraction elided the surrounding if (x)/if (y) tests
 * and the shift/multiply ops; presumably the sarl ops below sign-extend
 * the selected top halfwords of T0/T1 -- confirm against full source. */
2145 static inline void gen_mulxy(int x
, int y
)
2148 gen_op_sarl_T0_im(16);
2152 gen_op_sarl_T1_im(16);
/* NOTE(review): lossy extraction -- the per-flag mask assignments and the
 * privileged/undefined-bit masking bodies are elided; only the visible
 * fragments are commented below. */
2158 /* Return the mask of PSR bits set by a MSR instruction. */
2159 static uint32_t msr_mask(DisasContext
*s
, int flags
, int spsr
) {
/* flags bits 0..3 select the c/x/s/f fields of the PSR respectively. */
2163 if (flags
& (1 << 0))
2165 if (flags
& (1 << 1))
2167 if (flags
& (1 << 2))
2169 if (flags
& (1 << 3))
2171 /* Mask out undefined bits. */
2173 /* Mask out state bits. */
/* ~0x01000020 clears the J (bit 24) and T (bit 5) execution-state bits,
 * which MSR may not write. */
2175 mask
&= ~0x01000020;
2176 /* Mask out privileged bits. */
/* Write T0 into the CPSR or SPSR (selected by 'spsr'), restricted to the
 * bits in 'mask'.  Returns nonzero when the access is not permitted
 * (e.g. SPSR access outside a mode that has one).
 * NOTE(review): the extraction elided the permission checks, braces and
 * return statements; only the visible fragments are commented. */
2182 /* Returns nonzero if access to the PSR is not permitted. */
2183 static int gen_set_psr_T0(DisasContext
*s
, uint32_t mask
, int spsr
)
2186 /* ??? This is also undefined in system mode. */
/* SPSR path. */
2189 gen_op_movl_spsr_T0(mask
);
/* CPSR path. */
2191 gen_op_movl_cpsr_T0(mask
);
/* Emit an exception return: store T0 (the return address) into r15, copy
 * the current mode's SPSR back into the CPSR (full 32-bit mask), and stop
 * translation so CPU state is re-evaluated.
 * NOTE(review): braces elided by the lossy extraction. */
2197 static void gen_exception_return(DisasContext
*s
)
/* r15 <- T0 (return address). */
2199 gen_op_movl_reg_TN
[0][15]();
/* CPSR <- SPSR, restoring the pre-exception processor state. */
2200 gen_op_movl_T0_spsr();
2201 gen_op_movl_cpsr_T0(0xffffffff);
/* CPSR changed: end the TB and force a state update. */
2202 s
->is_jmp
= DISAS_UPDATE
;
2205 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
2207 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
2209 insn
= ldl_code(s
->pc
);
2214 /* Unconditional instructions. */
2215 if ((insn
& 0x0d70f000) == 0x0550f000)
2217 else if ((insn
& 0x0e000000) == 0x0a000000) {
2218 /* branch link and change to thumb (blx <offset>) */
2221 val
= (uint32_t)s
->pc
;
2222 gen_op_movl_T0_im(val
);
2223 gen_movl_reg_T0(s
, 14);
2224 /* Sign-extend the 24-bit offset */
2225 offset
= (((int32_t)insn
) << 8) >> 8;
2226 /* offset * 4 + bit24 * 2 + (thumb bit) */
2227 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
2228 /* pipeline offset */
2230 gen_op_movl_T0_im(val
);
2233 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
2234 /* Coprocessor double register transfer. */
2235 } else if ((insn
& 0x0f000010) == 0x0e000010) {
2236 /* Additional coprocessor register transfer. */
2237 } else if ((insn
& 0x0ff10010) == 0x01000000) {
2238 /* cps (privileged) */
2239 } else if ((insn
& 0x0ffffdff) == 0x01010000) {
2241 if (insn
& (1 << 9)) {
2242 /* BE8 mode not implemented. */
2250 /* if not always execute, we generate a conditional jump to
2252 s
->condlabel
= gen_new_label();
2253 gen_test_cc
[cond
^ 1](s
->condlabel
);
2255 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2256 //s->is_jmp = DISAS_JUMP_NEXT;
2258 if ((insn
& 0x0f900000) == 0x03000000) {
2259 if ((insn
& 0x0fb0f000) != 0x0320f000)
2261 /* CPSR = immediate */
2263 shift
= ((insn
>> 8) & 0xf) * 2;
2265 val
= (val
>> shift
) | (val
<< (32 - shift
));
2266 gen_op_movl_T0_im(val
);
2267 i
= ((insn
& (1 << 22)) != 0);
2268 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
2270 } else if ((insn
& 0x0f900000) == 0x01000000
2271 && (insn
& 0x00000090) != 0x00000090) {
2272 /* miscellaneous instructions */
2273 op1
= (insn
>> 21) & 3;
2274 sh
= (insn
>> 4) & 0xf;
2277 case 0x0: /* move program status register */
2280 gen_movl_T0_reg(s
, rm
);
2281 i
= ((op1
& 2) != 0);
2282 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
2286 rd
= (insn
>> 12) & 0xf;
2290 gen_op_movl_T0_spsr();
2292 gen_op_movl_T0_cpsr();
2294 gen_movl_reg_T0(s
, rd
);
2299 /* branch/exchange thumb (bx). */
2300 gen_movl_T0_reg(s
, rm
);
2302 } else if (op1
== 3) {
2304 rd
= (insn
>> 12) & 0xf;
2305 gen_movl_T0_reg(s
, rm
);
2307 gen_movl_reg_T0(s
, rd
);
2315 /* Trivial implementation equivalent to bx. */
2316 gen_movl_T0_reg(s
, rm
);
2326 /* branch link/exchange thumb (blx) */
2327 val
= (uint32_t)s
->pc
;
2328 gen_op_movl_T0_im(val
);
2329 gen_movl_reg_T0(s
, 14);
2330 gen_movl_T0_reg(s
, rm
);
2333 case 0x5: /* saturating add/subtract */
2334 rd
= (insn
>> 12) & 0xf;
2335 rn
= (insn
>> 16) & 0xf;
2336 gen_movl_T0_reg(s
, rm
);
2337 gen_movl_T1_reg(s
, rn
);
2339 gen_op_double_T1_saturate();
2341 gen_op_subl_T0_T1_saturate();
2343 gen_op_addl_T0_T1_saturate();
2344 gen_movl_reg_T0(s
, rd
);
2347 gen_op_movl_T0_im((long)s
->pc
- 4);
2348 gen_op_movl_reg_TN
[0][15]();
2350 s
->is_jmp
= DISAS_JUMP
;
2352 case 0x8: /* signed multiply */
2356 rs
= (insn
>> 8) & 0xf;
2357 rn
= (insn
>> 12) & 0xf;
2358 rd
= (insn
>> 16) & 0xf;
2360 /* (32 * 16) >> 16 */
2361 gen_movl_T0_reg(s
, rm
);
2362 gen_movl_T1_reg(s
, rs
);
2364 gen_op_sarl_T1_im(16);
2367 gen_op_imulw_T0_T1();
2368 if ((sh
& 2) == 0) {
2369 gen_movl_T1_reg(s
, rn
);
2370 gen_op_addl_T0_T1_setq();
2372 gen_movl_reg_T0(s
, rd
);
2375 gen_movl_T0_reg(s
, rm
);
2376 gen_movl_T1_reg(s
, rs
);
2377 gen_mulxy(sh
& 2, sh
& 4);
2379 gen_op_signbit_T1_T0();
2380 gen_op_addq_T0_T1(rn
, rd
);
2381 gen_movl_reg_T0(s
, rn
);
2382 gen_movl_reg_T1(s
, rd
);
2385 gen_movl_T1_reg(s
, rn
);
2386 gen_op_addl_T0_T1_setq();
2388 gen_movl_reg_T0(s
, rd
);
2395 } else if (((insn
& 0x0e000000) == 0 &&
2396 (insn
& 0x00000090) != 0x90) ||
2397 ((insn
& 0x0e000000) == (1 << 25))) {
2398 int set_cc
, logic_cc
, shiftop
;
2400 op1
= (insn
>> 21) & 0xf;
2401 set_cc
= (insn
>> 20) & 1;
2402 logic_cc
= table_logic_cc
[op1
] & set_cc
;
2404 /* data processing instruction */
2405 if (insn
& (1 << 25)) {
2406 /* immediate operand */
2408 shift
= ((insn
>> 8) & 0xf) * 2;
2410 val
= (val
>> shift
) | (val
<< (32 - shift
));
2411 gen_op_movl_T1_im(val
);
2412 if (logic_cc
&& shift
)
2417 gen_movl_T1_reg(s
, rm
);
2418 shiftop
= (insn
>> 5) & 3;
2419 if (!(insn
& (1 << 4))) {
2420 shift
= (insn
>> 7) & 0x1f;
2423 gen_shift_T1_im_cc
[shiftop
](shift
);
2425 gen_shift_T1_im
[shiftop
](shift
);
2427 } else if (shiftop
!= 0) {
2429 gen_shift_T1_0_cc
[shiftop
]();
2431 gen_shift_T1_0
[shiftop
]();
2435 rs
= (insn
>> 8) & 0xf;
2436 gen_movl_T0_reg(s
, rs
);
2438 gen_shift_T1_T0_cc
[shiftop
]();
2440 gen_shift_T1_T0
[shiftop
]();
2444 if (op1
!= 0x0f && op1
!= 0x0d) {
2445 rn
= (insn
>> 16) & 0xf;
2446 gen_movl_T0_reg(s
, rn
);
2448 rd
= (insn
>> 12) & 0xf;
2451 gen_op_andl_T0_T1();
2452 gen_movl_reg_T0(s
, rd
);
2454 gen_op_logic_T0_cc();
2457 gen_op_xorl_T0_T1();
2458 gen_movl_reg_T0(s
, rd
);
2460 gen_op_logic_T0_cc();
2463 if (set_cc
&& rd
== 15) {
2464 /* SUBS r15, ... is used for exception return. */
2467 gen_op_subl_T0_T1_cc();
2468 gen_exception_return(s
);
2471 gen_op_subl_T0_T1_cc();
2473 gen_op_subl_T0_T1();
2474 gen_movl_reg_T0(s
, rd
);
2479 gen_op_rsbl_T0_T1_cc();
2481 gen_op_rsbl_T0_T1();
2482 gen_movl_reg_T0(s
, rd
);
2486 gen_op_addl_T0_T1_cc();
2488 gen_op_addl_T0_T1();
2489 gen_movl_reg_T0(s
, rd
);
2493 gen_op_adcl_T0_T1_cc();
2495 gen_op_adcl_T0_T1();
2496 gen_movl_reg_T0(s
, rd
);
2500 gen_op_sbcl_T0_T1_cc();
2502 gen_op_sbcl_T0_T1();
2503 gen_movl_reg_T0(s
, rd
);
2507 gen_op_rscl_T0_T1_cc();
2509 gen_op_rscl_T0_T1();
2510 gen_movl_reg_T0(s
, rd
);
2514 gen_op_andl_T0_T1();
2515 gen_op_logic_T0_cc();
2520 gen_op_xorl_T0_T1();
2521 gen_op_logic_T0_cc();
2526 gen_op_subl_T0_T1_cc();
2531 gen_op_addl_T0_T1_cc();
2536 gen_movl_reg_T0(s
, rd
);
2538 gen_op_logic_T0_cc();
2541 if (logic_cc
&& rd
== 15) {
2542 /* MOVS r15, ... is used for exception return. */
2545 gen_op_movl_T0_T1();
2546 gen_exception_return(s
);
2548 gen_movl_reg_T1(s
, rd
);
2550 gen_op_logic_T1_cc();
2554 gen_op_bicl_T0_T1();
2555 gen_movl_reg_T0(s
, rd
);
2557 gen_op_logic_T0_cc();
2562 gen_movl_reg_T1(s
, rd
);
2564 gen_op_logic_T1_cc();
2568 /* other instructions */
2569 op1
= (insn
>> 24) & 0xf;
2573 /* multiplies, extra load/stores */
2574 sh
= (insn
>> 5) & 3;
2577 rd
= (insn
>> 16) & 0xf;
2578 rn
= (insn
>> 12) & 0xf;
2579 rs
= (insn
>> 8) & 0xf;
2581 if (((insn
>> 22) & 3) == 0) {
2583 gen_movl_T0_reg(s
, rs
);
2584 gen_movl_T1_reg(s
, rm
);
2586 if (insn
& (1 << 21)) {
2587 gen_movl_T1_reg(s
, rn
);
2588 gen_op_addl_T0_T1();
2590 if (insn
& (1 << 20))
2591 gen_op_logic_T0_cc();
2592 gen_movl_reg_T0(s
, rd
);
2595 gen_movl_T0_reg(s
, rs
);
2596 gen_movl_T1_reg(s
, rm
);
2597 if (insn
& (1 << 22))
2598 gen_op_imull_T0_T1();
2600 gen_op_mull_T0_T1();
2601 if (insn
& (1 << 21)) /* mult accumulate */
2602 gen_op_addq_T0_T1(rn
, rd
);
2603 if (!(insn
& (1 << 23))) { /* double accumulate */
2605 gen_op_addq_lo_T0_T1(rn
);
2606 gen_op_addq_lo_T0_T1(rd
);
2608 if (insn
& (1 << 20))
2610 gen_movl_reg_T0(s
, rn
);
2611 gen_movl_reg_T1(s
, rd
);
2614 rn
= (insn
>> 16) & 0xf;
2615 rd
= (insn
>> 12) & 0xf;
2616 if (insn
& (1 << 23)) {
2617 /* load/store exclusive */
2620 /* SWP instruction */
2623 gen_movl_T0_reg(s
, rm
);
2624 gen_movl_T1_reg(s
, rn
);
2625 if (insn
& (1 << 22)) {
2630 gen_movl_reg_T0(s
, rd
);
2636 /* Misc load/store */
2637 rn
= (insn
>> 16) & 0xf;
2638 rd
= (insn
>> 12) & 0xf;
2639 gen_movl_T1_reg(s
, rn
);
2640 if (insn
& (1 << 24))
2641 gen_add_datah_offset(s
, insn
, 0);
2643 if (insn
& (1 << 20)) {
2658 } else if (sh
& 2) {
2662 gen_movl_T0_reg(s
, rd
);
2664 gen_op_addl_T1_im(4);
2665 gen_movl_T0_reg(s
, rd
+ 1);
2671 gen_movl_reg_T0(s
, rd
);
2672 gen_op_addl_T1_im(4);
2677 address_offset
= -4;
2680 gen_movl_T0_reg(s
, rd
);
2684 /* Perform base writeback before the loaded value to
2685 ensure correct behavior with overlapping index registers.
2686 ldrd with base writeback is is undefined if the
2687 destination and index registers overlap. */
2688 if (!(insn
& (1 << 24))) {
2689 gen_add_datah_offset(s
, insn
, address_offset
);
2690 gen_movl_reg_T1(s
, rn
);
2691 } else if (insn
& (1 << 21)) {
2693 gen_op_addl_T1_im(address_offset
);
2694 gen_movl_reg_T1(s
, rn
);
2697 /* Complete the load. */
2698 gen_movl_reg_T0(s
, rd
);
2706 /* Check for undefined extension instructions
2707 * per the ARM Bible IE:
2708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
2710 sh
= (0xf << 20) | (0xf << 4);
2711 if (op1
== 0x7 && ((insn
& sh
) == sh
))
2715 /* load/store byte/word */
2716 rn
= (insn
>> 16) & 0xf;
2717 rd
= (insn
>> 12) & 0xf;
2718 gen_movl_T1_reg(s
, rn
);
2719 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
2720 if (insn
& (1 << 24))
2721 gen_add_data_offset(s
, insn
);
2722 if (insn
& (1 << 20)) {
2725 #if defined(CONFIG_USER_ONLY)
2726 if (insn
& (1 << 22))
2731 if (insn
& (1 << 22)) {
2735 gen_op_ldub_kernel();
2740 gen_op_ldl_kernel();
2745 gen_movl_T0_reg(s
, rd
);
2746 #if defined(CONFIG_USER_ONLY)
2747 if (insn
& (1 << 22))
2752 if (insn
& (1 << 22)) {
2756 gen_op_stb_kernel();
2761 gen_op_stl_kernel();
2765 if (!(insn
& (1 << 24))) {
2766 gen_add_data_offset(s
, insn
);
2767 gen_movl_reg_T1(s
, rn
);
2768 } else if (insn
& (1 << 21))
2769 gen_movl_reg_T1(s
, rn
); {
2771 if (insn
& (1 << 20)) {
2772 /* Complete the load. */
2776 gen_movl_reg_T0(s
, rd
);
2782 int j
, n
, user
, loaded_base
;
2783 /* load/store multiple words */
2784 /* XXX: store correct base if write back */
2786 if (insn
& (1 << 22)) {
2788 goto illegal_op
; /* only usable in supervisor mode */
2790 if ((insn
& (1 << 15)) == 0)
2793 rn
= (insn
>> 16) & 0xf;
2794 gen_movl_T1_reg(s
, rn
);
2796 /* compute total size */
2800 if (insn
& (1 << i
))
2803 /* XXX: test invalid n == 0 case ? */
2804 if (insn
& (1 << 23)) {
2805 if (insn
& (1 << 24)) {
2807 gen_op_addl_T1_im(4);
2809 /* post increment */
2812 if (insn
& (1 << 24)) {
2814 gen_op_addl_T1_im(-(n
* 4));
2816 /* post decrement */
2818 gen_op_addl_T1_im(-((n
- 1) * 4));
2823 if (insn
& (1 << i
)) {
2824 if (insn
& (1 << 20)) {
2830 gen_op_movl_user_T0(i
);
2831 } else if (i
== rn
) {
2832 gen_op_movl_T2_T0();
2835 gen_movl_reg_T0(s
, i
);
2840 /* special case: r15 = PC + 12 */
2841 val
= (long)s
->pc
+ 8;
2842 gen_op_movl_TN_im
[0](val
);
2844 gen_op_movl_T0_user(i
);
2846 gen_movl_T0_reg(s
, i
);
2851 /* no need to add after the last transfer */
2853 gen_op_addl_T1_im(4);
2856 if (insn
& (1 << 21)) {
2858 if (insn
& (1 << 23)) {
2859 if (insn
& (1 << 24)) {
2862 /* post increment */
2863 gen_op_addl_T1_im(4);
2866 if (insn
& (1 << 24)) {
2869 gen_op_addl_T1_im(-((n
- 1) * 4));
2871 /* post decrement */
2872 gen_op_addl_T1_im(-(n
* 4));
2875 gen_movl_reg_T1(s
, rn
);
2878 gen_op_movl_T0_T2();
2879 gen_movl_reg_T0(s
, rn
);
2881 if ((insn
& (1 << 22)) && !user
) {
2882 /* Restore CPSR from SPSR. */
2883 gen_op_movl_T0_spsr();
2884 gen_op_movl_cpsr_T0(0xffffffff);
2885 s
->is_jmp
= DISAS_UPDATE
;
2894 /* branch (and link) */
2895 val
= (int32_t)s
->pc
;
2896 if (insn
& (1 << 24)) {
2897 gen_op_movl_T0_im(val
);
2898 gen_op_movl_reg_TN
[0][14]();
2900 offset
= (((int32_t)insn
<< 8) >> 8);
2901 val
+= (offset
<< 2) + 4;
2909 op1
= (insn
>> 8) & 0xf;
2910 if (arm_feature(env
, ARM_FEATURE_XSCALE
) &&
2911 ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << op1
)))
2915 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
2916 if (disas_iwmmxt_insn(env
, s
, insn
))
2918 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
2919 if (disas_dsp_insn(env
, s
, insn
))
2926 if (disas_cp_insn (env
, s
, insn
))
2931 if (disas_vfp_insn (env
, s
, insn
))
2935 if (disas_cp15_insn (env
, s
, insn
))
2939 /* unknown coprocessor. */
2945 gen_op_movl_T0_im((long)s
->pc
);
2946 gen_op_movl_reg_TN
[0][15]();
2948 s
->is_jmp
= DISAS_JUMP
;
2952 gen_op_movl_T0_im((long)s
->pc
- 4);
2953 gen_op_movl_reg_TN
[0][15]();
2954 gen_op_undef_insn();
2955 s
->is_jmp
= DISAS_JUMP
;
2961 static void disas_thumb_insn(DisasContext
*s
)
2963 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
2967 insn
= lduw_code(s
->pc
);
2970 switch (insn
>> 12) {
2973 op
= (insn
>> 11) & 3;
2976 rn
= (insn
>> 3) & 7;
2977 gen_movl_T0_reg(s
, rn
);
2978 if (insn
& (1 << 10)) {
2980 gen_op_movl_T1_im((insn
>> 6) & 7);
2983 rm
= (insn
>> 6) & 7;
2984 gen_movl_T1_reg(s
, rm
);
2986 if (insn
& (1 << 9))
2987 gen_op_subl_T0_T1_cc();
2989 gen_op_addl_T0_T1_cc();
2990 gen_movl_reg_T0(s
, rd
);
2992 /* shift immediate */
2993 rm
= (insn
>> 3) & 7;
2994 shift
= (insn
>> 6) & 0x1f;
2995 gen_movl_T0_reg(s
, rm
);
2996 gen_shift_T0_im_thumb
[op
](shift
);
2997 gen_movl_reg_T0(s
, rd
);
3001 /* arithmetic large immediate */
3002 op
= (insn
>> 11) & 3;
3003 rd
= (insn
>> 8) & 0x7;
3005 gen_op_movl_T0_im(insn
& 0xff);
3007 gen_movl_T0_reg(s
, rd
);
3008 gen_op_movl_T1_im(insn
& 0xff);
3012 gen_op_logic_T0_cc();
3015 gen_op_subl_T0_T1_cc();
3018 gen_op_addl_T0_T1_cc();
3021 gen_op_subl_T0_T1_cc();
3025 gen_movl_reg_T0(s
, rd
);
3028 if (insn
& (1 << 11)) {
3029 rd
= (insn
>> 8) & 7;
3030 /* load pc-relative. Bit 1 of PC is ignored. */
3031 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
3032 val
&= ~(uint32_t)2;
3033 gen_op_movl_T1_im(val
);
3035 gen_movl_reg_T0(s
, rd
);
3038 if (insn
& (1 << 10)) {
3039 /* data processing extended or blx */
3040 rd
= (insn
& 7) | ((insn
>> 4) & 8);
3041 rm
= (insn
>> 3) & 0xf;
3042 op
= (insn
>> 8) & 3;
3045 gen_movl_T0_reg(s
, rd
);
3046 gen_movl_T1_reg(s
, rm
);
3047 gen_op_addl_T0_T1();
3048 gen_movl_reg_T0(s
, rd
);
3051 gen_movl_T0_reg(s
, rd
);
3052 gen_movl_T1_reg(s
, rm
);
3053 gen_op_subl_T0_T1_cc();
3055 case 2: /* mov/cpy */
3056 gen_movl_T0_reg(s
, rm
);
3057 gen_movl_reg_T0(s
, rd
);
3059 case 3:/* branch [and link] exchange thumb register */
3060 if (insn
& (1 << 7)) {
3061 val
= (uint32_t)s
->pc
| 1;
3062 gen_op_movl_T1_im(val
);
3063 gen_movl_reg_T1(s
, 14);
3065 gen_movl_T0_reg(s
, rm
);
3072 /* data processing register */
3074 rm
= (insn
>> 3) & 7;
3075 op
= (insn
>> 6) & 0xf;
3076 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
3077 /* the shift/rotate ops want the operands backwards */
3086 if (op
== 9) /* neg */
3087 gen_op_movl_T0_im(0);
3088 else if (op
!= 0xf) /* mvn doesn't read its first operand */
3089 gen_movl_T0_reg(s
, rd
);
3091 gen_movl_T1_reg(s
, rm
);
3094 gen_op_andl_T0_T1();
3095 gen_op_logic_T0_cc();
3098 gen_op_xorl_T0_T1();
3099 gen_op_logic_T0_cc();
3102 gen_op_shll_T1_T0_cc();
3103 gen_op_logic_T1_cc();
3106 gen_op_shrl_T1_T0_cc();
3107 gen_op_logic_T1_cc();
3110 gen_op_sarl_T1_T0_cc();
3111 gen_op_logic_T1_cc();
3114 gen_op_adcl_T0_T1_cc();
3117 gen_op_sbcl_T0_T1_cc();
3120 gen_op_rorl_T1_T0_cc();
3121 gen_op_logic_T1_cc();
3124 gen_op_andl_T0_T1();
3125 gen_op_logic_T0_cc();
3129 gen_op_subl_T0_T1_cc();
3132 gen_op_subl_T0_T1_cc();
3136 gen_op_addl_T0_T1_cc();
3141 gen_op_logic_T0_cc();
3144 gen_op_mull_T0_T1();
3145 gen_op_logic_T0_cc();
3148 gen_op_bicl_T0_T1();
3149 gen_op_logic_T0_cc();
3153 gen_op_logic_T1_cc();
3160 gen_movl_reg_T1(s
, rm
);
3162 gen_movl_reg_T0(s
, rd
);
3167 /* load/store register offset. */
3169 rn
= (insn
>> 3) & 7;
3170 rm
= (insn
>> 6) & 7;
3171 op
= (insn
>> 9) & 7;
3172 gen_movl_T1_reg(s
, rn
);
3173 gen_movl_T2_reg(s
, rm
);
3174 gen_op_addl_T1_T2();
3176 if (op
< 3) /* store */
3177 gen_movl_T0_reg(s
, rd
);
3205 if (op
>= 3) /* load */
3206 gen_movl_reg_T0(s
, rd
);
3210 /* load/store word immediate offset */
3212 rn
= (insn
>> 3) & 7;
3213 gen_movl_T1_reg(s
, rn
);
3214 val
= (insn
>> 4) & 0x7c;
3215 gen_op_movl_T2_im(val
);
3216 gen_op_addl_T1_T2();
3218 if (insn
& (1 << 11)) {
3221 gen_movl_reg_T0(s
, rd
);
3224 gen_movl_T0_reg(s
, rd
);
3230 /* load/store byte immediate offset */
3232 rn
= (insn
>> 3) & 7;
3233 gen_movl_T1_reg(s
, rn
);
3234 val
= (insn
>> 6) & 0x1f;
3235 gen_op_movl_T2_im(val
);
3236 gen_op_addl_T1_T2();
3238 if (insn
& (1 << 11)) {
3241 gen_movl_reg_T0(s
, rd
);
3244 gen_movl_T0_reg(s
, rd
);
3250 /* load/store halfword immediate offset */
3252 rn
= (insn
>> 3) & 7;
3253 gen_movl_T1_reg(s
, rn
);
3254 val
= (insn
>> 5) & 0x3e;
3255 gen_op_movl_T2_im(val
);
3256 gen_op_addl_T1_T2();
3258 if (insn
& (1 << 11)) {
3261 gen_movl_reg_T0(s
, rd
);
3264 gen_movl_T0_reg(s
, rd
);
3270 /* load/store from stack */
3271 rd
= (insn
>> 8) & 7;
3272 gen_movl_T1_reg(s
, 13);
3273 val
= (insn
& 0xff) * 4;
3274 gen_op_movl_T2_im(val
);
3275 gen_op_addl_T1_T2();
3277 if (insn
& (1 << 11)) {
3280 gen_movl_reg_T0(s
, rd
);
3283 gen_movl_T0_reg(s
, rd
);
3289 /* add to high reg */
3290 rd
= (insn
>> 8) & 7;
3291 if (insn
& (1 << 11)) {
3293 gen_movl_T0_reg(s
, 13);
3295 /* PC. bit 1 is ignored. */
3296 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
3298 val
= (insn
& 0xff) * 4;
3299 gen_op_movl_T1_im(val
);
3300 gen_op_addl_T0_T1();
3301 gen_movl_reg_T0(s
, rd
);
3306 op
= (insn
>> 8) & 0xf;
3309 /* adjust stack pointer */
3310 gen_movl_T1_reg(s
, 13);
3311 val
= (insn
& 0x7f) * 4;
3312 if (insn
& (1 << 7))
3313 val
= -(int32_t)val
;
3314 gen_op_movl_T2_im(val
);
3315 gen_op_addl_T1_T2();
3316 gen_movl_reg_T1(s
, 13);
3319 case 4: case 5: case 0xc: case 0xd:
3321 gen_movl_T1_reg(s
, 13);
3322 if (insn
& (1 << 8))
3326 for (i
= 0; i
< 8; i
++) {
3327 if (insn
& (1 << i
))
3330 if ((insn
& (1 << 11)) == 0) {
3331 gen_op_movl_T2_im(-offset
);
3332 gen_op_addl_T1_T2();
3334 gen_op_movl_T2_im(4);
3335 for (i
= 0; i
< 8; i
++) {
3336 if (insn
& (1 << i
)) {
3337 if (insn
& (1 << 11)) {
3340 gen_movl_reg_T0(s
, i
);
3343 gen_movl_T0_reg(s
, i
);
3346 /* advance to the next address. */
3347 gen_op_addl_T1_T2();
3350 if (insn
& (1 << 8)) {
3351 if (insn
& (1 << 11)) {
3354 /* don't set the pc until the rest of the instruction
3358 gen_movl_T0_reg(s
, 14);
3361 gen_op_addl_T1_T2();
3363 if ((insn
& (1 << 11)) == 0) {
3364 gen_op_movl_T2_im(-offset
);
3365 gen_op_addl_T1_T2();
3367 /* write back the new stack pointer */
3368 gen_movl_reg_T1(s
, 13);
3369 /* set the new PC value */
3370 if ((insn
& 0x0900) == 0x0900)
3374 case 0xe: /* bkpt */
3375 gen_op_movl_T0_im((long)s
->pc
- 2);
3376 gen_op_movl_reg_TN
[0][15]();
3378 s
->is_jmp
= DISAS_JUMP
;
3387 /* load/store multiple */
3388 rn
= (insn
>> 8) & 0x7;
3389 gen_movl_T1_reg(s
, rn
);
3390 gen_op_movl_T2_im(4);
3391 for (i
= 0; i
< 8; i
++) {
3392 if (insn
& (1 << i
)) {
3393 if (insn
& (1 << 11)) {
3396 gen_movl_reg_T0(s
, i
);
3399 gen_movl_T0_reg(s
, i
);
3402 /* advance to the next address */
3403 gen_op_addl_T1_T2();
3406 /* Base register writeback. */
3407 if ((insn
& (1 << rn
)) == 0)
3408 gen_movl_reg_T1(s
, rn
);
3412 /* conditional branch or swi */
3413 cond
= (insn
>> 8) & 0xf;
3419 gen_op_movl_T0_im((long)s
->pc
| 1);
3420 /* Don't set r15. */
3421 gen_op_movl_reg_TN
[0][15]();
3423 s
->is_jmp
= DISAS_JUMP
;
3426 /* generate a conditional jump to next instruction */
3427 s
->condlabel
= gen_new_label();
3428 gen_test_cc
[cond
^ 1](s
->condlabel
);
3430 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
3431 //s->is_jmp = DISAS_JUMP_NEXT;
3432 gen_movl_T1_reg(s
, 15);
3434 /* jump to the offset */
3435 val
= (uint32_t)s
->pc
+ 2;
3436 offset
= ((int32_t)insn
<< 24) >> 24;
3442 /* unconditional branch */
3443 if (insn
& (1 << 11)) {
3444 /* Second half of blx. */
3445 offset
= ((insn
& 0x7ff) << 1);
3446 gen_movl_T0_reg(s
, 14);
3447 gen_op_movl_T1_im(offset
);
3448 gen_op_addl_T0_T1();
3449 gen_op_movl_T1_im(0xfffffffc);
3450 gen_op_andl_T0_T1();
3452 val
= (uint32_t)s
->pc
;
3453 gen_op_movl_T1_im(val
| 1);
3454 gen_movl_reg_T1(s
, 14);
3458 val
= (uint32_t)s
->pc
;
3459 offset
= ((int32_t)insn
<< 21) >> 21;
3460 val
+= (offset
<< 1) + 2;
3465 /* branch and link [and switch to arm] */
3466 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
3467 /* Instruction spans a page boundary. Implement it as two
3468 16-bit instructions in case the second half causes an
3470 offset
= ((int32_t)insn
<< 21) >> 9;
3471 val
= s
->pc
+ 2 + offset
;
3472 gen_op_movl_T0_im(val
);
3473 gen_movl_reg_T0(s
, 14);
3476 if (insn
& (1 << 11)) {
3477 /* Second half of bl. */
3478 offset
= ((insn
& 0x7ff) << 1) | 1;
3479 gen_movl_T0_reg(s
, 14);
3480 gen_op_movl_T1_im(offset
);
3481 gen_op_addl_T0_T1();
3483 val
= (uint32_t)s
->pc
;
3484 gen_op_movl_T1_im(val
| 1);
3485 gen_movl_reg_T1(s
, 14);
3489 offset
= ((int32_t)insn
<< 21) >> 10;
3490 insn
= lduw_code(s
->pc
);
3491 offset
|= insn
& 0x7ff;
3493 val
= (uint32_t)s
->pc
+ 2;
3494 gen_op_movl_T1_im(val
| 1);
3495 gen_movl_reg_T1(s
, 14);
3498 if (insn
& (1 << 12)) {
3503 val
&= ~(uint32_t)2;
3504 gen_op_movl_T0_im(val
);
3510 gen_op_movl_T0_im((long)s
->pc
- 2);
3511 gen_op_movl_reg_TN
[0][15]();
3512 gen_op_undef_insn();
3513 s
->is_jmp
= DISAS_JUMP
;
3516 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
3517 basic block 'tb'. If search_pc is TRUE, also generate PC
3518 information for each intermediate instruction. */
3519 static inline int gen_intermediate_code_internal(CPUState
*env
,
3520 TranslationBlock
*tb
,
3523 DisasContext dc1
, *dc
= &dc1
;
3524 uint16_t *gen_opc_end
;
3526 target_ulong pc_start
;
3527 uint32_t next_page_start
;
3529 /* generate intermediate code */
3534 gen_opc_ptr
= gen_opc_buf
;
3535 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3536 gen_opparam_ptr
= gen_opparam_buf
;
3538 dc
->is_jmp
= DISAS_NEXT
;
3540 dc
->singlestep_enabled
= env
->singlestep_enabled
;
3542 dc
->thumb
= env
->thumb
;
3544 #if !defined(CONFIG_USER_ONLY)
3545 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
3547 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
3551 if (env
->nb_breakpoints
> 0) {
3552 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
3553 if (env
->breakpoints
[j
] == dc
->pc
) {
3554 gen_op_movl_T0_im((long)dc
->pc
);
3555 gen_op_movl_reg_TN
[0][15]();
3557 dc
->is_jmp
= DISAS_JUMP
;
3563 j
= gen_opc_ptr
- gen_opc_buf
;
3567 gen_opc_instr_start
[lj
++] = 0;
3569 gen_opc_pc
[lj
] = dc
->pc
;
3570 gen_opc_instr_start
[lj
] = 1;
3574 disas_thumb_insn(dc
);
3576 disas_arm_insn(env
, dc
);
3578 if (dc
->condjmp
&& !dc
->is_jmp
) {
3579 gen_set_label(dc
->condlabel
);
3582 /* Terminate the TB on memory ops if watchpoints are present. */
3583 /* FIXME: This should be replacd by the deterministic execution
3584 * IRQ raising bits. */
3585 if (dc
->is_mem
&& env
->nb_watchpoints
)
3588 /* Translation stops when a conditional branch is enoutered.
3589 * Otherwise the subsequent code could get translated several times.
3590 * Also stop translation when a page boundary is reached. This
3591 * ensures prefech aborts occur at the right place. */
3592 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
3593 !env
->singlestep_enabled
&&
3594 dc
->pc
< next_page_start
);
3595 /* At this stage dc->condjmp will only be set when the skipped
3596 * instruction was a conditional branch, and the PC has already been
3598 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
3599 /* Make sure the pc is updated, and raise a debug exception. */
3602 gen_set_label(dc
->condlabel
);
3604 if (dc
->condjmp
|| !dc
->is_jmp
) {
3605 gen_op_movl_T0_im((long)dc
->pc
);
3606 gen_op_movl_reg_TN
[0][15]();
3611 switch(dc
->is_jmp
) {
3613 gen_goto_tb(dc
, 1, dc
->pc
);
3618 /* indicate that the hash table must be used to find the next TB */
3623 /* nothing more to generate */
3627 gen_set_label(dc
->condlabel
);
3628 gen_goto_tb(dc
, 1, dc
->pc
);
3632 *gen_opc_ptr
= INDEX_op_end
;
3635 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
3636 fprintf(logfile
, "----------------\n");
3637 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
3638 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
3639 fprintf(logfile
, "\n");
3640 if (loglevel
& (CPU_LOG_TB_OP
)) {
3641 fprintf(logfile
, "OP:\n");
3642 dump_ops(gen_opc_buf
, gen_opparam_buf
);
3643 fprintf(logfile
, "\n");
3648 j
= gen_opc_ptr
- gen_opc_buf
;
3651 gen_opc_instr_start
[lj
++] = 0;
3654 tb
->size
= dc
->pc
- pc_start
;
/* Generate intermediate code for TB 'tb' without per-op PC tracking
 * (search_pc == 0).  Thin wrapper around the internal worker.
 * NOTE(review): the enclosing braces were elided by the lossy extraction. */
3659 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
3661 return gen_intermediate_code_internal(env
, tb
, 0);
/* Generate intermediate code for TB 'tb' with per-op PC information
 * recorded (search_pc == 1), used when restoring CPU state from a TB.
 * NOTE(review): the enclosing braces were elided by the lossy extraction. */
3664 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
3666 return gen_intermediate_code_internal(env
, tb
, 1);
/* Human-readable names for the 16 possible CPSR mode field values,
 * indexed by (psr & 0xf); unassigned encodings print as "???".
 * Used by cpu_dump_state below.
 * NOTE(review): the closing brace of this initializer was elided by the
 * lossy extraction. */
3669 static const char *cpu_mode_names
[16] = {
3670 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
3671 "???", "???", "???", "und", "???", "???", "???", "sys"
3673 void cpu_dump_state(CPUState
*env
, FILE *f
,
3674 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
3683 /* ??? This assumes float64 and double have the same layout.
3684 Oh well, it's only debug dumps. */
3692 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
3694 cpu_fprintf(f
, "\n");
3696 cpu_fprintf(f
, " ");
3698 psr
= cpsr_read(env
);
3699 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
3701 psr
& (1 << 31) ? 'N' : '-',
3702 psr
& (1 << 30) ? 'Z' : '-',
3703 psr
& (1 << 29) ? 'C' : '-',
3704 psr
& (1 << 28) ? 'V' : '-',
3705 psr
& CPSR_T
? 'T' : 'A',
3706 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
3708 for (i
= 0; i
< 16; i
++) {
3709 d
.d
= env
->vfp
.regs
[i
];
3713 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
3714 i
* 2, (int)s0
.i
, s0
.s
,
3715 i
* 2 + 1, (int)s1
.i
, s1
.s
,
3716 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
3719 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);