4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext
{
41 /* Nonzero if this instruction has been conditionally skipped. */
43 /* The label that will be jumped to when the instruction is skipped. */
45 struct TranslationBlock
*tb
;
46 int singlestep_enabled
;
48 #if !defined(CONFIG_USER_ONLY)
53 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) (s->user)
59 #define DISAS_JUMP_NEXT 4
61 #ifdef USE_DIRECT_JUMP
64 #define TBPARAM(x) (long)(x)
67 /* XXX: move that elsewhere */
68 static uint16_t *gen_opc_ptr
;
69 static uint32_t *gen_opparam_ptr
;
74 #define DEF(s, n, copy_size) INDEX_op_ ## s,
82 static GenOpFunc1
*gen_test_cc
[14] = {
99 const uint8_t table_logic_cc
[16] = {
118 static GenOpFunc1
*gen_shift_T1_im
[4] = {
125 static GenOpFunc
*gen_shift_T1_0
[4] = {
132 static GenOpFunc1
*gen_shift_T2_im
[4] = {
139 static GenOpFunc
*gen_shift_T2_0
[4] = {
146 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
147 gen_op_shll_T1_im_cc
,
148 gen_op_shrl_T1_im_cc
,
149 gen_op_sarl_T1_im_cc
,
150 gen_op_rorl_T1_im_cc
,
153 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
160 static GenOpFunc
*gen_shift_T1_T0
[4] = {
167 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
168 gen_op_shll_T1_T0_cc
,
169 gen_op_shrl_T1_T0_cc
,
170 gen_op_sarl_T1_T0_cc
,
171 gen_op_rorl_T1_T0_cc
,
174 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
231 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
270 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
276 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
277 gen_op_shll_T0_im_thumb
,
278 gen_op_shrl_T0_im_thumb
,
279 gen_op_sarl_T0_im_thumb
,
282 static inline void gen_bx(DisasContext
*s
)
284 s
->is_jmp
= DISAS_UPDATE
;
289 #if defined(CONFIG_USER_ONLY)
290 #define gen_ldst(name, s) gen_op_##name##_raw()
292 #define gen_ldst(name, s) do { \
294 gen_op_##name##_user(); \
296 gen_op_##name##_kernel(); \
300 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
305 /* normaly, since we updated PC, we need only to add one insn */
307 val
= (long)s
->pc
+ 2;
309 val
= (long)s
->pc
+ 4;
310 gen_op_movl_TN_im
[t
](val
);
312 gen_op_movl_TN_reg
[t
][reg
]();
316 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
318 gen_movl_TN_reg(s
, reg
, 0);
321 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
323 gen_movl_TN_reg(s
, reg
, 1);
326 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
328 gen_movl_TN_reg(s
, reg
, 2);
331 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
333 gen_op_movl_reg_TN
[t
][reg
]();
335 s
->is_jmp
= DISAS_JUMP
;
339 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
341 gen_movl_reg_TN(s
, reg
, 0);
344 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
346 gen_movl_reg_TN(s
, reg
, 1);
349 /* Force a TB lookup after an instruction that changes the CPU state. */
350 static inline void gen_lookup_tb(DisasContext
*s
)
352 gen_op_movl_T0_im(s
->pc
);
353 gen_movl_reg_T0(s
, 15);
354 s
->is_jmp
= DISAS_UPDATE
;
357 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
359 int val
, rm
, shift
, shiftop
;
361 if (!(insn
& (1 << 25))) {
364 if (!(insn
& (1 << 23)))
367 gen_op_addl_T1_im(val
);
371 shift
= (insn
>> 7) & 0x1f;
372 gen_movl_T2_reg(s
, rm
);
373 shiftop
= (insn
>> 5) & 3;
375 gen_shift_T2_im
[shiftop
](shift
);
376 } else if (shiftop
!= 0) {
377 gen_shift_T2_0
[shiftop
]();
379 if (!(insn
& (1 << 23)))
386 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
)
390 if (insn
& (1 << 22)) {
392 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
393 if (!(insn
& (1 << 23)))
396 gen_op_addl_T1_im(val
);
400 gen_movl_T2_reg(s
, rm
);
401 if (!(insn
& (1 << 23)))
/* Define gen_vfp_<name>(dp): emit the double-precision op when DP is
   nonzero, otherwise the single-precision op.
   NOTE(review): reconstructed from a mangled extract — the if/else
   skeleton was inferred from the visible ##d / ##s expansions. */
#define VFP_OP(name)                      \
static inline void gen_vfp_##name(int dp) \
{                                         \
    if (dp)                               \
        gen_op_vfp_##name##d();           \
    else                                  \
        gen_op_vfp_##name##s();           \
}
436 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
439 gen_ldst(vfp_ldd
, s
);
441 gen_ldst(vfp_lds
, s
);
444 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
447 gen_ldst(vfp_std
, s
);
449 gen_ldst(vfp_sts
, s
);
453 vfp_reg_offset (int dp
, int reg
)
456 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
458 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
459 + offsetof(CPU_DoubleU
, l
.upper
);
461 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
462 + offsetof(CPU_DoubleU
, l
.lower
);
/* Copy VFP register REG into F0; DP selects double (d) vs single (s)
   precision.
   NOTE(review): if/else skeleton reconstructed from a mangled extract. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}
/* Copy VFP register REG into F1; DP selects double (d) vs single (s)
   precision.
   NOTE(review): if/else skeleton reconstructed from a mangled extract. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}
/* Copy F0 into VFP register REG; DP selects double (d) vs single (s)
   precision.
   NOTE(review): if/else skeleton reconstructed from a mangled extract. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
489 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
490 instruction is not defined. */
491 static int disas_cp15_insn(DisasContext
*s
, uint32_t insn
)
495 /* ??? Some cp15 registers are accessible from userspace. */
499 if ((insn
& 0x0fff0fff) == 0x0e070f90
500 || (insn
& 0x0fff0fff) == 0x0e070f58) {
501 /* Wait for interrupt. */
502 gen_op_movl_T0_im((long)s
->pc
);
503 gen_op_movl_reg_TN
[0][15]();
505 s
->is_jmp
= DISAS_JUMP
;
508 rd
= (insn
>> 12) & 0xf;
509 if (insn
& (1 << 20)) {
510 gen_op_movl_T0_cp15(insn
);
511 /* If the destination register is r15 then sets condition codes. */
513 gen_movl_reg_T0(s
, rd
);
515 gen_movl_T0_reg(s
, rd
);
516 gen_op_movl_cp15_T0(insn
);
522 /* Disassemble a VFP instruction. Returns nonzero if an error occured
523 (ie. an undefined instruction). */
524 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
526 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
529 dp
= ((insn
& 0xf00) == 0xb00);
530 switch ((insn
>> 24) & 0xf) {
532 if (insn
& (1 << 4)) {
533 /* single register transfer */
534 if ((insn
& 0x6f) != 0x00)
536 rd
= (insn
>> 12) & 0xf;
540 rn
= (insn
>> 16) & 0xf;
541 /* Get the existing value even for arm->vfp moves because
542 we only set half the register. */
543 gen_mov_F0_vreg(1, rn
);
545 if (insn
& (1 << 20)) {
547 if (insn
& (1 << 21))
548 gen_movl_reg_T1(s
, rd
);
550 gen_movl_reg_T0(s
, rd
);
553 if (insn
& (1 << 21))
554 gen_movl_T1_reg(s
, rd
);
556 gen_movl_T0_reg(s
, rd
);
558 gen_mov_vreg_F0(dp
, rn
);
561 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
562 if (insn
& (1 << 20)) {
564 if (insn
& (1 << 21)) {
565 /* system register */
572 gen_op_vfp_movl_T0_fpscr_flags();
574 gen_op_vfp_movl_T0_fpscr();
580 gen_mov_F0_vreg(0, rn
);
584 /* Set the 4 flag bits in the CPSR. */
585 gen_op_movl_cpsr_T0(0xf0000000);
587 gen_movl_reg_T0(s
, rd
);
590 gen_movl_T0_reg(s
, rd
);
591 if (insn
& (1 << 21)) {
592 /* system register */
595 /* Writes are ignored. */
598 gen_op_vfp_movl_fpscr_T0();
599 /* This could change vector settings, so jump to
600 the next instuction. */
608 gen_mov_vreg_F0(0, rn
);
613 /* data processing */
614 /* The opcode is in bits 23, 21, 20 and 6. */
615 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
619 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
621 /* rn is register number */
624 rn
= (insn
>> 16) & 0xf;
627 if (op
== 15 && (rn
== 15 || rn
> 17)) {
628 /* Integer or single precision destination. */
629 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
631 if (insn
& (1 << 22))
633 rd
= (insn
>> 12) & 0xf;
636 if (op
== 15 && (rn
== 16 || rn
== 17)) {
637 /* Integer source. */
638 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
645 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
646 if (op
== 15 && rn
== 15) {
647 /* Double precision destination. */
648 if (insn
& (1 << 22))
650 rd
= (insn
>> 12) & 0xf;
652 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
653 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
656 veclen
= env
->vfp
.vec_len
;
657 if (op
== 15 && rn
> 3)
660 /* Shut up compiler warnings. */
671 /* Figure out what type of vector operation this is. */
672 if ((rd
& bank_mask
) == 0) {
677 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
679 delta_d
= env
->vfp
.vec_stride
+ 1;
681 if ((rm
& bank_mask
) == 0) {
682 /* mixed scalar/vector */
691 /* Load the initial operands. */
697 gen_mov_F0_vreg(0, rm
);
702 gen_mov_F0_vreg(dp
, rd
);
703 gen_mov_F1_vreg(dp
, rm
);
707 /* Compare with zero */
708 gen_mov_F0_vreg(dp
, rd
);
712 /* One source operand. */
713 gen_mov_F0_vreg(dp
, rm
);
716 /* Two source operands. */
717 gen_mov_F0_vreg(dp
, rn
);
718 gen_mov_F1_vreg(dp
, rm
);
722 /* Perform the calculation. */
724 case 0: /* mac: fd + (fn * fm) */
726 gen_mov_F1_vreg(dp
, rd
);
729 case 1: /* nmac: fd - (fn * fm) */
732 gen_mov_F1_vreg(dp
, rd
);
735 case 2: /* msc: -fd + (fn * fm) */
737 gen_mov_F1_vreg(dp
, rd
);
740 case 3: /* nmsc: -fd - (fn * fm) */
742 gen_mov_F1_vreg(dp
, rd
);
746 case 4: /* mul: fn * fm */
749 case 5: /* nmul: -(fn * fm) */
753 case 6: /* add: fn + fm */
756 case 7: /* sub: fn - fm */
759 case 8: /* div: fn / fm */
762 case 15: /* extension space */
789 case 15: /* single<->double conversion */
804 case 25: /* ftouiz */
810 case 27: /* ftosiz */
813 default: /* undefined */
814 printf ("rn:%d\n", rn
);
818 default: /* undefined */
819 printf ("op:%d\n", op
);
823 /* Write back the result. */
824 if (op
== 15 && (rn
>= 8 && rn
<= 11))
825 ; /* Comparison, do nothing. */
826 else if (op
== 15 && rn
> 17)
827 /* Integer result. */
828 gen_mov_vreg_F0(0, rd
);
829 else if (op
== 15 && rn
== 15)
831 gen_mov_vreg_F0(!dp
, rd
);
833 gen_mov_vreg_F0(dp
, rd
);
835 /* break out of the loop if we have finished */
839 if (op
== 15 && delta_m
== 0) {
840 /* single source one-many */
842 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
844 gen_mov_vreg_F0(dp
, rd
);
848 /* Setup the next operands. */
850 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
854 /* One source operand. */
855 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
857 gen_mov_F0_vreg(dp
, rm
);
859 /* Two source operands. */
860 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
862 gen_mov_F0_vreg(dp
, rn
);
864 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
866 gen_mov_F1_vreg(dp
, rm
);
874 if (dp
&& (insn
& (1 << 22))) {
875 /* two-register transfer */
876 rn
= (insn
>> 16) & 0xf;
877 rd
= (insn
>> 12) & 0xf;
883 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
885 if (insn
& (1 << 20)) {
888 gen_mov_F0_vreg(1, rm
);
890 gen_movl_reg_T0(s
, rd
);
891 gen_movl_reg_T1(s
, rn
);
893 gen_mov_F0_vreg(0, rm
);
895 gen_movl_reg_T0(s
, rn
);
896 gen_mov_F0_vreg(0, rm
+ 1);
898 gen_movl_reg_T0(s
, rd
);
903 gen_movl_T0_reg(s
, rd
);
904 gen_movl_T1_reg(s
, rn
);
906 gen_mov_vreg_F0(1, rm
);
908 gen_movl_T0_reg(s
, rn
);
910 gen_mov_vreg_F0(0, rm
);
911 gen_movl_T0_reg(s
, rd
);
913 gen_mov_vreg_F0(0, rm
+ 1);
918 rn
= (insn
>> 16) & 0xf;
920 rd
= (insn
>> 12) & 0xf;
922 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
923 gen_movl_T1_reg(s
, rn
);
924 if ((insn
& 0x01200000) == 0x01000000) {
925 /* Single load/store */
926 offset
= (insn
& 0xff) << 2;
927 if ((insn
& (1 << 23)) == 0)
929 gen_op_addl_T1_im(offset
);
930 if (insn
& (1 << 20)) {
932 gen_mov_vreg_F0(dp
, rd
);
934 gen_mov_F0_vreg(dp
, rd
);
938 /* load/store multiple */
940 n
= (insn
>> 1) & 0x7f;
944 if (insn
& (1 << 24)) /* pre-decrement */
945 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
951 for (i
= 0; i
< n
; i
++) {
952 if (insn
& (1 << 20)) {
955 gen_mov_vreg_F0(dp
, rd
+ i
);
958 gen_mov_F0_vreg(dp
, rd
+ i
);
961 gen_op_addl_T1_im(offset
);
963 if (insn
& (1 << 21)) {
965 if (insn
& (1 << 24))
966 offset
= -offset
* n
;
967 else if (dp
&& (insn
& 1))
973 gen_op_addl_T1_im(offset
);
974 gen_movl_reg_T1(s
, rn
);
980 /* Should never happen. */
986 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
988 TranslationBlock
*tb
;
991 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
993 gen_op_goto_tb0(TBPARAM(tb
));
995 gen_op_goto_tb1(TBPARAM(tb
));
996 gen_op_movl_T0_im(dest
);
997 gen_op_movl_r15_T0();
998 gen_op_movl_T0_im((long)tb
+ n
);
1001 gen_op_movl_T0_im(dest
);
1002 gen_op_movl_r15_T0();
1008 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
1010 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
1011 /* An indirect jump so that we still trigger the debug exception. */
1014 gen_op_movl_T0_im(dest
);
1017 gen_goto_tb(s
, 0, dest
);
1018 s
->is_jmp
= DISAS_TB_JUMP
;
1022 static inline void gen_mulxy(int x
, int y
)
1025 gen_op_sarl_T0_im(16);
1029 gen_op_sarl_T1_im(16);
1035 /* Return the mask of PSR bits set by a MSR instruction. */
1036 static uint32_t msr_mask(DisasContext
*s
, int flags
) {
1040 if (flags
& (1 << 0))
1042 if (flags
& (1 << 1))
1044 if (flags
& (1 << 2))
1046 if (flags
& (1 << 3))
1048 /* Mask out undefined bits and state bits. */
1050 /* Mask out privileged bits. */
1056 /* Returns nonzero if access to the PSR is not permitted. */
1057 static int gen_set_psr_T0(DisasContext
*s
, uint32_t mask
, int spsr
)
1060 /* ??? This is also undefined in system mode. */
1063 gen_op_movl_spsr_T0(mask
);
1065 gen_op_movl_cpsr_T0(mask
);
1071 static void gen_exception_return(DisasContext
*s
)
1073 gen_op_movl_reg_TN
[0][15]();
1074 gen_op_movl_T0_spsr();
1075 gen_op_movl_cpsr_T0(0xffffffff);
1076 s
->is_jmp
= DISAS_UPDATE
;
1079 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
1081 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
1083 insn
= ldl_code(s
->pc
);
1088 /* Unconditional instructions. */
1089 if ((insn
& 0x0d70f000) == 0x0550f000)
1091 else if ((insn
& 0x0e000000) == 0x0a000000) {
1092 /* branch link and change to thumb (blx <offset>) */
1095 val
= (uint32_t)s
->pc
;
1096 gen_op_movl_T0_im(val
);
1097 gen_movl_reg_T0(s
, 14);
1098 /* Sign-extend the 24-bit offset */
1099 offset
= (((int32_t)insn
) << 8) >> 8;
1100 /* offset * 4 + bit24 * 2 + (thumb bit) */
1101 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
1102 /* pipeline offset */
1104 gen_op_movl_T0_im(val
);
1107 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
1108 /* Coprocessor double register transfer. */
1109 } else if ((insn
& 0x0f000010) == 0x0e000010) {
1110 /* Additional coprocessor register transfer. */
1111 } else if ((insn
& 0x0ff10010) == 0x01000000) {
1112 /* cps (privileged) */
1113 } else if ((insn
& 0x0ffffdff) == 0x01010000) {
1115 if (insn
& (1 << 9)) {
1116 /* BE8 mode not implemented. */
1124 /* if not always execute, we generate a conditional jump to
1126 s
->condlabel
= gen_new_label();
1127 gen_test_cc
[cond
^ 1](s
->condlabel
);
1129 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1130 //s->is_jmp = DISAS_JUMP_NEXT;
1132 if ((insn
& 0x0f900000) == 0x03000000) {
1133 if ((insn
& 0x0fb0f000) != 0x0320f000)
1135 /* CPSR = immediate */
1137 shift
= ((insn
>> 8) & 0xf) * 2;
1139 val
= (val
>> shift
) | (val
<< (32 - shift
));
1140 gen_op_movl_T0_im(val
);
1141 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf),
1142 (insn
& (1 << 22)) != 0))
1144 } else if ((insn
& 0x0f900000) == 0x01000000
1145 && (insn
& 0x00000090) != 0x00000090) {
1146 /* miscellaneous instructions */
1147 op1
= (insn
>> 21) & 3;
1148 sh
= (insn
>> 4) & 0xf;
1151 case 0x0: /* move program status register */
1154 gen_movl_T0_reg(s
, rm
);
1155 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf),
1160 rd
= (insn
>> 12) & 0xf;
1164 gen_op_movl_T0_spsr();
1166 gen_op_movl_T0_cpsr();
1168 gen_movl_reg_T0(s
, rd
);
1173 /* branch/exchange thumb (bx). */
1174 gen_movl_T0_reg(s
, rm
);
1176 } else if (op1
== 3) {
1178 rd
= (insn
>> 12) & 0xf;
1179 gen_movl_T0_reg(s
, rm
);
1181 gen_movl_reg_T0(s
, rd
);
1189 /* Trivial implementation equivalent to bx. */
1190 gen_movl_T0_reg(s
, rm
);
1200 /* branch link/exchange thumb (blx) */
1201 val
= (uint32_t)s
->pc
;
1202 gen_op_movl_T0_im(val
);
1203 gen_movl_reg_T0(s
, 14);
1204 gen_movl_T0_reg(s
, rm
);
1207 case 0x5: /* saturating add/subtract */
1208 rd
= (insn
>> 12) & 0xf;
1209 rn
= (insn
>> 16) & 0xf;
1210 gen_movl_T0_reg(s
, rm
);
1211 gen_movl_T1_reg(s
, rn
);
1213 gen_op_double_T1_saturate();
1215 gen_op_subl_T0_T1_saturate();
1217 gen_op_addl_T0_T1_saturate();
1218 gen_movl_reg_T0(s
, rd
);
1221 gen_op_movl_T0_im((long)s
->pc
- 4);
1222 gen_op_movl_reg_TN
[0][15]();
1224 s
->is_jmp
= DISAS_JUMP
;
1226 case 0x8: /* signed multiply */
1230 rs
= (insn
>> 8) & 0xf;
1231 rn
= (insn
>> 12) & 0xf;
1232 rd
= (insn
>> 16) & 0xf;
1234 /* (32 * 16) >> 16 */
1235 gen_movl_T0_reg(s
, rm
);
1236 gen_movl_T1_reg(s
, rs
);
1238 gen_op_sarl_T1_im(16);
1241 gen_op_imulw_T0_T1();
1242 if ((sh
& 2) == 0) {
1243 gen_movl_T1_reg(s
, rn
);
1244 gen_op_addl_T0_T1_setq();
1246 gen_movl_reg_T0(s
, rd
);
1249 gen_movl_T0_reg(s
, rm
);
1250 gen_movl_T1_reg(s
, rs
);
1251 gen_mulxy(sh
& 2, sh
& 4);
1253 gen_op_signbit_T1_T0();
1254 gen_op_addq_T0_T1(rn
, rd
);
1255 gen_movl_reg_T0(s
, rn
);
1256 gen_movl_reg_T1(s
, rd
);
1259 gen_movl_T1_reg(s
, rn
);
1260 gen_op_addl_T0_T1_setq();
1262 gen_movl_reg_T0(s
, rd
);
1269 } else if (((insn
& 0x0e000000) == 0 &&
1270 (insn
& 0x00000090) != 0x90) ||
1271 ((insn
& 0x0e000000) == (1 << 25))) {
1272 int set_cc
, logic_cc
, shiftop
;
1274 op1
= (insn
>> 21) & 0xf;
1275 set_cc
= (insn
>> 20) & 1;
1276 logic_cc
= table_logic_cc
[op1
] & set_cc
;
1278 /* data processing instruction */
1279 if (insn
& (1 << 25)) {
1280 /* immediate operand */
1282 shift
= ((insn
>> 8) & 0xf) * 2;
1284 val
= (val
>> shift
) | (val
<< (32 - shift
));
1285 gen_op_movl_T1_im(val
);
1286 if (logic_cc
&& shift
)
1291 gen_movl_T1_reg(s
, rm
);
1292 shiftop
= (insn
>> 5) & 3;
1293 if (!(insn
& (1 << 4))) {
1294 shift
= (insn
>> 7) & 0x1f;
1297 gen_shift_T1_im_cc
[shiftop
](shift
);
1299 gen_shift_T1_im
[shiftop
](shift
);
1301 } else if (shiftop
!= 0) {
1303 gen_shift_T1_0_cc
[shiftop
]();
1305 gen_shift_T1_0
[shiftop
]();
1309 rs
= (insn
>> 8) & 0xf;
1310 gen_movl_T0_reg(s
, rs
);
1312 gen_shift_T1_T0_cc
[shiftop
]();
1314 gen_shift_T1_T0
[shiftop
]();
1318 if (op1
!= 0x0f && op1
!= 0x0d) {
1319 rn
= (insn
>> 16) & 0xf;
1320 gen_movl_T0_reg(s
, rn
);
1322 rd
= (insn
>> 12) & 0xf;
1325 gen_op_andl_T0_T1();
1326 gen_movl_reg_T0(s
, rd
);
1328 gen_op_logic_T0_cc();
1331 gen_op_xorl_T0_T1();
1332 gen_movl_reg_T0(s
, rd
);
1334 gen_op_logic_T0_cc();
1337 if (set_cc
&& rd
== 15) {
1338 /* SUBS r15, ... is used for exception return. */
1341 gen_op_subl_T0_T1_cc();
1342 gen_exception_return(s
);
1345 gen_op_subl_T0_T1_cc();
1347 gen_op_subl_T0_T1();
1348 gen_movl_reg_T0(s
, rd
);
1353 gen_op_rsbl_T0_T1_cc();
1355 gen_op_rsbl_T0_T1();
1356 gen_movl_reg_T0(s
, rd
);
1360 gen_op_addl_T0_T1_cc();
1362 gen_op_addl_T0_T1();
1363 gen_movl_reg_T0(s
, rd
);
1367 gen_op_adcl_T0_T1_cc();
1369 gen_op_adcl_T0_T1();
1370 gen_movl_reg_T0(s
, rd
);
1374 gen_op_sbcl_T0_T1_cc();
1376 gen_op_sbcl_T0_T1();
1377 gen_movl_reg_T0(s
, rd
);
1381 gen_op_rscl_T0_T1_cc();
1383 gen_op_rscl_T0_T1();
1384 gen_movl_reg_T0(s
, rd
);
1388 gen_op_andl_T0_T1();
1389 gen_op_logic_T0_cc();
1394 gen_op_xorl_T0_T1();
1395 gen_op_logic_T0_cc();
1400 gen_op_subl_T0_T1_cc();
1405 gen_op_addl_T0_T1_cc();
1410 gen_movl_reg_T0(s
, rd
);
1412 gen_op_logic_T0_cc();
1415 if (logic_cc
&& rd
== 15) {
1416 /* MOVS r15, ... is used for exception return. */
1419 gen_op_movl_T0_T1();
1420 gen_exception_return(s
);
1422 gen_movl_reg_T1(s
, rd
);
1424 gen_op_logic_T1_cc();
1428 gen_op_bicl_T0_T1();
1429 gen_movl_reg_T0(s
, rd
);
1431 gen_op_logic_T0_cc();
1436 gen_movl_reg_T1(s
, rd
);
1438 gen_op_logic_T1_cc();
1442 /* other instructions */
1443 op1
= (insn
>> 24) & 0xf;
1447 /* multiplies, extra load/stores */
1448 sh
= (insn
>> 5) & 3;
1451 rd
= (insn
>> 16) & 0xf;
1452 rn
= (insn
>> 12) & 0xf;
1453 rs
= (insn
>> 8) & 0xf;
1455 if (((insn
>> 22) & 3) == 0) {
1457 gen_movl_T0_reg(s
, rs
);
1458 gen_movl_T1_reg(s
, rm
);
1460 if (insn
& (1 << 21)) {
1461 gen_movl_T1_reg(s
, rn
);
1462 gen_op_addl_T0_T1();
1464 if (insn
& (1 << 20))
1465 gen_op_logic_T0_cc();
1466 gen_movl_reg_T0(s
, rd
);
1469 gen_movl_T0_reg(s
, rs
);
1470 gen_movl_T1_reg(s
, rm
);
1471 if (insn
& (1 << 22))
1472 gen_op_imull_T0_T1();
1474 gen_op_mull_T0_T1();
1475 if (insn
& (1 << 21)) /* mult accumulate */
1476 gen_op_addq_T0_T1(rn
, rd
);
1477 if (!(insn
& (1 << 23))) { /* double accumulate */
1479 gen_op_addq_lo_T0_T1(rn
);
1480 gen_op_addq_lo_T0_T1(rd
);
1482 if (insn
& (1 << 20))
1484 gen_movl_reg_T0(s
, rn
);
1485 gen_movl_reg_T1(s
, rd
);
1488 rn
= (insn
>> 16) & 0xf;
1489 rd
= (insn
>> 12) & 0xf;
1490 if (insn
& (1 << 23)) {
1491 /* load/store exclusive */
1494 /* SWP instruction */
1497 gen_movl_T0_reg(s
, rm
);
1498 gen_movl_T1_reg(s
, rn
);
1499 if (insn
& (1 << 22)) {
1504 gen_movl_reg_T0(s
, rd
);
1508 /* Misc load/store */
1509 rn
= (insn
>> 16) & 0xf;
1510 rd
= (insn
>> 12) & 0xf;
1511 gen_movl_T1_reg(s
, rn
);
1512 if (insn
& (1 << 24))
1513 gen_add_datah_offset(s
, insn
);
1514 if (insn
& (1 << 20)) {
1528 gen_movl_reg_T0(s
, rd
);
1529 } else if (sh
& 2) {
1533 gen_movl_T0_reg(s
, rd
);
1535 gen_op_addl_T1_im(4);
1536 gen_movl_T0_reg(s
, rd
+ 1);
1538 if ((insn
& (1 << 24)) || (insn
& (1 << 20)))
1539 gen_op_addl_T1_im(-4);
1543 gen_movl_reg_T0(s
, rd
);
1544 gen_op_addl_T1_im(4);
1546 gen_movl_reg_T0(s
, rd
+ 1);
1547 if ((insn
& (1 << 24)) || (insn
& (1 << 20)))
1548 gen_op_addl_T1_im(-4);
1552 gen_movl_T0_reg(s
, rd
);
1555 if (!(insn
& (1 << 24))) {
1556 gen_add_datah_offset(s
, insn
);
1557 gen_movl_reg_T1(s
, rn
);
1558 } else if (insn
& (1 << 21)) {
1559 gen_movl_reg_T1(s
, rn
);
1567 /* load/store byte/word */
1568 rn
= (insn
>> 16) & 0xf;
1569 rd
= (insn
>> 12) & 0xf;
1570 gen_movl_T1_reg(s
, rn
);
1571 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
1572 if (insn
& (1 << 24))
1573 gen_add_data_offset(s
, insn
);
1574 if (insn
& (1 << 20)) {
1576 #if defined(CONFIG_USER_ONLY)
1577 if (insn
& (1 << 22))
1582 if (insn
& (1 << 22)) {
1586 gen_op_ldub_kernel();
1591 gen_op_ldl_kernel();
1597 gen_movl_reg_T0(s
, rd
);
1600 gen_movl_T0_reg(s
, rd
);
1601 #if defined(CONFIG_USER_ONLY)
1602 if (insn
& (1 << 22))
1607 if (insn
& (1 << 22)) {
1611 gen_op_stb_kernel();
1616 gen_op_stl_kernel();
1620 if (!(insn
& (1 << 24))) {
1621 gen_add_data_offset(s
, insn
);
1622 gen_movl_reg_T1(s
, rn
);
1623 } else if (insn
& (1 << 21))
1624 gen_movl_reg_T1(s
, rn
); {
1630 int j
, n
, user
, loaded_base
;
1631 /* load/store multiple words */
1632 /* XXX: store correct base if write back */
1634 if (insn
& (1 << 22)) {
1636 goto illegal_op
; /* only usable in supervisor mode */
1638 if ((insn
& (1 << 15)) == 0)
1641 rn
= (insn
>> 16) & 0xf;
1642 gen_movl_T1_reg(s
, rn
);
1644 /* compute total size */
1648 if (insn
& (1 << i
))
1651 /* XXX: test invalid n == 0 case ? */
1652 if (insn
& (1 << 23)) {
1653 if (insn
& (1 << 24)) {
1655 gen_op_addl_T1_im(4);
1657 /* post increment */
1660 if (insn
& (1 << 24)) {
1662 gen_op_addl_T1_im(-(n
* 4));
1664 /* post decrement */
1666 gen_op_addl_T1_im(-((n
- 1) * 4));
1671 if (insn
& (1 << i
)) {
1672 if (insn
& (1 << 20)) {
1678 gen_op_movl_user_T0(i
);
1679 } else if (i
== rn
) {
1680 gen_op_movl_T2_T0();
1683 gen_movl_reg_T0(s
, i
);
1688 /* special case: r15 = PC + 12 */
1689 val
= (long)s
->pc
+ 8;
1690 gen_op_movl_TN_im
[0](val
);
1692 gen_op_movl_T0_user(i
);
1694 gen_movl_T0_reg(s
, i
);
1699 /* no need to add after the last transfer */
1701 gen_op_addl_T1_im(4);
1704 if (insn
& (1 << 21)) {
1706 if (insn
& (1 << 23)) {
1707 if (insn
& (1 << 24)) {
1710 /* post increment */
1711 gen_op_addl_T1_im(4);
1714 if (insn
& (1 << 24)) {
1717 gen_op_addl_T1_im(-((n
- 1) * 4));
1719 /* post decrement */
1720 gen_op_addl_T1_im(-(n
* 4));
1723 gen_movl_reg_T1(s
, rn
);
1726 gen_op_movl_T0_T2();
1727 gen_movl_reg_T0(s
, rn
);
1729 if ((insn
& (1 << 22)) && !user
) {
1730 /* Restore CPSR from SPSR. */
1731 gen_op_movl_T0_spsr();
1732 gen_op_movl_cpsr_T0(0xffffffff);
1733 s
->is_jmp
= DISAS_UPDATE
;
1742 /* branch (and link) */
1743 val
= (int32_t)s
->pc
;
1744 if (insn
& (1 << 24)) {
1745 gen_op_movl_T0_im(val
);
1746 gen_op_movl_reg_TN
[0][14]();
1748 offset
= (((int32_t)insn
<< 8) >> 8);
1749 val
+= (offset
<< 2) + 4;
1757 op1
= (insn
>> 8) & 0xf;
1761 if (disas_vfp_insn (env
, s
, insn
))
1765 if (disas_cp15_insn (s
, insn
))
1769 /* unknown coprocessor. */
1775 gen_op_movl_T0_im((long)s
->pc
);
1776 gen_op_movl_reg_TN
[0][15]();
1778 s
->is_jmp
= DISAS_JUMP
;
1782 gen_op_movl_T0_im((long)s
->pc
- 4);
1783 gen_op_movl_reg_TN
[0][15]();
1784 gen_op_undef_insn();
1785 s
->is_jmp
= DISAS_JUMP
;
1791 static void disas_thumb_insn(DisasContext
*s
)
1793 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
1797 insn
= lduw_code(s
->pc
);
1800 switch (insn
>> 12) {
1803 op
= (insn
>> 11) & 3;
1806 rn
= (insn
>> 3) & 7;
1807 gen_movl_T0_reg(s
, rn
);
1808 if (insn
& (1 << 10)) {
1810 gen_op_movl_T1_im((insn
>> 6) & 7);
1813 rm
= (insn
>> 6) & 7;
1814 gen_movl_T1_reg(s
, rm
);
1816 if (insn
& (1 << 9))
1817 gen_op_subl_T0_T1_cc();
1819 gen_op_addl_T0_T1_cc();
1820 gen_movl_reg_T0(s
, rd
);
1822 /* shift immediate */
1823 rm
= (insn
>> 3) & 7;
1824 shift
= (insn
>> 6) & 0x1f;
1825 gen_movl_T0_reg(s
, rm
);
1826 gen_shift_T0_im_thumb
[op
](shift
);
1827 gen_movl_reg_T0(s
, rd
);
1831 /* arithmetic large immediate */
1832 op
= (insn
>> 11) & 3;
1833 rd
= (insn
>> 8) & 0x7;
1835 gen_op_movl_T0_im(insn
& 0xff);
1837 gen_movl_T0_reg(s
, rd
);
1838 gen_op_movl_T1_im(insn
& 0xff);
1842 gen_op_logic_T0_cc();
1845 gen_op_subl_T0_T1_cc();
1848 gen_op_addl_T0_T1_cc();
1851 gen_op_subl_T0_T1_cc();
1855 gen_movl_reg_T0(s
, rd
);
1858 if (insn
& (1 << 11)) {
1859 rd
= (insn
>> 8) & 7;
1860 /* load pc-relative. Bit 1 of PC is ignored. */
1861 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
1862 val
&= ~(uint32_t)2;
1863 gen_op_movl_T1_im(val
);
1865 gen_movl_reg_T0(s
, rd
);
1868 if (insn
& (1 << 10)) {
1869 /* data processing extended or blx */
1870 rd
= (insn
& 7) | ((insn
>> 4) & 8);
1871 rm
= (insn
>> 3) & 0xf;
1872 op
= (insn
>> 8) & 3;
1875 gen_movl_T0_reg(s
, rd
);
1876 gen_movl_T1_reg(s
, rm
);
1877 gen_op_addl_T0_T1();
1878 gen_movl_reg_T0(s
, rd
);
1881 gen_movl_T0_reg(s
, rd
);
1882 gen_movl_T1_reg(s
, rm
);
1883 gen_op_subl_T0_T1_cc();
1885 case 2: /* mov/cpy */
1886 gen_movl_T0_reg(s
, rm
);
1887 gen_movl_reg_T0(s
, rd
);
1889 case 3:/* branch [and link] exchange thumb register */
1890 if (insn
& (1 << 7)) {
1891 val
= (uint32_t)s
->pc
| 1;
1892 gen_op_movl_T1_im(val
);
1893 gen_movl_reg_T1(s
, 14);
1895 gen_movl_T0_reg(s
, rm
);
1902 /* data processing register */
1904 rm
= (insn
>> 3) & 7;
1905 op
= (insn
>> 6) & 0xf;
1906 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
1907 /* the shift/rotate ops want the operands backwards */
1916 if (op
== 9) /* neg */
1917 gen_op_movl_T0_im(0);
1918 else if (op
!= 0xf) /* mvn doesn't read its first operand */
1919 gen_movl_T0_reg(s
, rd
);
1921 gen_movl_T1_reg(s
, rm
);
1924 gen_op_andl_T0_T1();
1925 gen_op_logic_T0_cc();
1928 gen_op_xorl_T0_T1();
1929 gen_op_logic_T0_cc();
1932 gen_op_shll_T1_T0_cc();
1935 gen_op_shrl_T1_T0_cc();
1938 gen_op_sarl_T1_T0_cc();
1941 gen_op_adcl_T0_T1_cc();
1944 gen_op_sbcl_T0_T1_cc();
1947 gen_op_rorl_T1_T0_cc();
1950 gen_op_andl_T0_T1();
1951 gen_op_logic_T0_cc();
1955 gen_op_subl_T0_T1_cc();
1958 gen_op_subl_T0_T1_cc();
1962 gen_op_addl_T0_T1_cc();
1967 gen_op_logic_T0_cc();
1970 gen_op_mull_T0_T1();
1971 gen_op_logic_T0_cc();
1974 gen_op_bicl_T0_T1();
1975 gen_op_logic_T0_cc();
1979 gen_op_logic_T1_cc();
1986 gen_movl_reg_T1(s
, rm
);
1988 gen_movl_reg_T0(s
, rd
);
1993 /* load/store register offset. */
1995 rn
= (insn
>> 3) & 7;
1996 rm
= (insn
>> 6) & 7;
1997 op
= (insn
>> 9) & 7;
1998 gen_movl_T1_reg(s
, rn
);
1999 gen_movl_T2_reg(s
, rm
);
2000 gen_op_addl_T1_T2();
2002 if (op
< 3) /* store */
2003 gen_movl_T0_reg(s
, rd
);
2031 if (op
>= 3) /* load */
2032 gen_movl_reg_T0(s
, rd
);
2036 /* load/store word immediate offset */
2038 rn
= (insn
>> 3) & 7;
2039 gen_movl_T1_reg(s
, rn
);
2040 val
= (insn
>> 4) & 0x7c;
2041 gen_op_movl_T2_im(val
);
2042 gen_op_addl_T1_T2();
2044 if (insn
& (1 << 11)) {
2047 gen_movl_reg_T0(s
, rd
);
2050 gen_movl_T0_reg(s
, rd
);
2056 /* load/store byte immediate offset */
2058 rn
= (insn
>> 3) & 7;
2059 gen_movl_T1_reg(s
, rn
);
2060 val
= (insn
>> 6) & 0x1f;
2061 gen_op_movl_T2_im(val
);
2062 gen_op_addl_T1_T2();
2064 if (insn
& (1 << 11)) {
2067 gen_movl_reg_T0(s
, rd
);
2070 gen_movl_T0_reg(s
, rd
);
2076 /* load/store halfword immediate offset */
2078 rn
= (insn
>> 3) & 7;
2079 gen_movl_T1_reg(s
, rn
);
2080 val
= (insn
>> 5) & 0x3e;
2081 gen_op_movl_T2_im(val
);
2082 gen_op_addl_T1_T2();
2084 if (insn
& (1 << 11)) {
2087 gen_movl_reg_T0(s
, rd
);
2090 gen_movl_T0_reg(s
, rd
);
2096 /* load/store from stack */
2097 rd
= (insn
>> 8) & 7;
2098 gen_movl_T1_reg(s
, 13);
2099 val
= (insn
& 0xff) * 4;
2100 gen_op_movl_T2_im(val
);
2101 gen_op_addl_T1_T2();
2103 if (insn
& (1 << 11)) {
2106 gen_movl_reg_T0(s
, rd
);
2109 gen_movl_T0_reg(s
, rd
);
2115 /* add to high reg */
2116 rd
= (insn
>> 8) & 7;
2117 if (insn
& (1 << 11)) {
2119 gen_movl_T0_reg(s
, 13);
2121 /* PC. bit 1 is ignored. */
2122 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
2124 val
= (insn
& 0xff) * 4;
2125 gen_op_movl_T1_im(val
);
2126 gen_op_addl_T0_T1();
2127 gen_movl_reg_T0(s
, rd
);
2132 op
= (insn
>> 8) & 0xf;
2135 /* adjust stack pointer */
2136 gen_movl_T1_reg(s
, 13);
2137 val
= (insn
& 0x7f) * 4;
2138 if (insn
& (1 << 7))
2139 val
= -(int32_t)val
;
2140 gen_op_movl_T2_im(val
);
2141 gen_op_addl_T1_T2();
2142 gen_movl_reg_T1(s
, 13);
2145 case 4: case 5: case 0xc: case 0xd:
2147 gen_movl_T1_reg(s
, 13);
2148 if (insn
& (1 << 8))
2152 for (i
= 0; i
< 8; i
++) {
2153 if (insn
& (1 << i
))
2156 if ((insn
& (1 << 11)) == 0) {
2157 gen_op_movl_T2_im(-offset
);
2158 gen_op_addl_T1_T2();
2160 gen_op_movl_T2_im(4);
2161 for (i
= 0; i
< 8; i
++) {
2162 if (insn
& (1 << i
)) {
2163 if (insn
& (1 << 11)) {
2166 gen_movl_reg_T0(s
, i
);
2169 gen_movl_T0_reg(s
, i
);
2172 /* advance to the next address. */
2173 gen_op_addl_T1_T2();
2176 if (insn
& (1 << 8)) {
2177 if (insn
& (1 << 11)) {
2180 /* don't set the pc until the rest of the instruction
2184 gen_movl_T0_reg(s
, 14);
2187 gen_op_addl_T1_T2();
2189 if ((insn
& (1 << 11)) == 0) {
2190 gen_op_movl_T2_im(-offset
);
2191 gen_op_addl_T1_T2();
2193 /* write back the new stack pointer */
2194 gen_movl_reg_T1(s
, 13);
2195 /* set the new PC value */
2196 if ((insn
& 0x0900) == 0x0900)
2200 case 0xe: /* bkpt */
2201 gen_op_movl_T0_im((long)s
->pc
- 2);
2202 gen_op_movl_reg_TN
[0][15]();
2204 s
->is_jmp
= DISAS_JUMP
;
2213 /* load/store multiple */
2214 rn
= (insn
>> 8) & 0x7;
2215 gen_movl_T1_reg(s
, rn
);
2216 gen_op_movl_T2_im(4);
2217 for (i
= 0; i
< 8; i
++) {
2218 if (insn
& (1 << i
)) {
2219 if (insn
& (1 << 11)) {
2222 gen_movl_reg_T0(s
, i
);
2225 gen_movl_T0_reg(s
, i
);
2228 /* advance to the next address */
2229 gen_op_addl_T1_T2();
2232 /* Base register writeback. */
2233 if ((insn
& (1 << rn
)) == 0)
2234 gen_movl_reg_T1(s
, rn
);
2238 /* conditional branch or swi */
2239 cond
= (insn
>> 8) & 0xf;
2245 gen_op_movl_T0_im((long)s
->pc
| 1);
2246 /* Don't set r15. */
2247 gen_op_movl_reg_TN
[0][15]();
2249 s
->is_jmp
= DISAS_JUMP
;
2252 /* generate a conditional jump to next instruction */
2253 s
->condlabel
= gen_new_label();
2254 gen_test_cc
[cond
^ 1](s
->condlabel
);
2256 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2257 //s->is_jmp = DISAS_JUMP_NEXT;
2258 gen_movl_T1_reg(s
, 15);
2260 /* jump to the offset */
2261 val
= (uint32_t)s
->pc
+ 2;
2262 offset
= ((int32_t)insn
<< 24) >> 24;
2268 /* unconditional branch */
2269 if (insn
& (1 << 11))
2270 goto undef
; /* Second half of a blx */
2271 val
= (uint32_t)s
->pc
;
2272 offset
= ((int32_t)insn
<< 21) >> 21;
2273 val
+= (offset
<< 1) + 2;
2278 /* branch and link [and switch to arm] */
2279 offset
= ((int32_t)insn
<< 21) >> 10;
2280 insn
= lduw_code(s
->pc
);
2281 offset
|= insn
& 0x7ff;
2283 val
= (uint32_t)s
->pc
+ 2;
2284 gen_op_movl_T1_im(val
| 1);
2285 gen_movl_reg_T1(s
, 14);
2288 if (insn
& (1 << 12)) {
2293 val
&= ~(uint32_t)2;
2294 gen_op_movl_T0_im(val
);
2300 gen_op_movl_T0_im((long)s
->pc
- 2);
2301 gen_op_movl_reg_TN
[0][15]();
2302 gen_op_undef_insn();
2303 s
->is_jmp
= DISAS_JUMP
;
2306 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2307 basic block 'tb'. If search_pc is TRUE, also generate PC
2308 information for each intermediate instruction. */
2309 static inline int gen_intermediate_code_internal(CPUState
*env
,
2310 TranslationBlock
*tb
,
2313 DisasContext dc1
, *dc
= &dc1
;
2314 uint16_t *gen_opc_end
;
2316 target_ulong pc_start
;
2317 uint32_t next_page_start
;
2319 /* generate intermediate code */
2324 gen_opc_ptr
= gen_opc_buf
;
2325 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2326 gen_opparam_ptr
= gen_opparam_buf
;
2328 dc
->is_jmp
= DISAS_NEXT
;
2330 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2332 dc
->thumb
= env
->thumb
;
2333 #if !defined(CONFIG_USER_ONLY)
2334 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
2336 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2340 if (env
->nb_breakpoints
> 0) {
2341 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
2342 if (env
->breakpoints
[j
] == dc
->pc
) {
2343 gen_op_movl_T0_im((long)dc
->pc
);
2344 gen_op_movl_reg_TN
[0][15]();
2346 dc
->is_jmp
= DISAS_JUMP
;
2352 j
= gen_opc_ptr
- gen_opc_buf
;
2356 gen_opc_instr_start
[lj
++] = 0;
2358 gen_opc_pc
[lj
] = dc
->pc
;
2359 gen_opc_instr_start
[lj
] = 1;
2363 disas_thumb_insn(dc
);
2365 disas_arm_insn(env
, dc
);
2367 if (dc
->condjmp
&& !dc
->is_jmp
) {
2368 gen_set_label(dc
->condlabel
);
2371 /* Translation stops when a conditional branch is encountered.
2372 * Otherwise the subsequent code could get translated several times.
2373 * Also stop translation when a page boundary is reached. This
2374 * ensures prefetch aborts occur at the right place. */
2375 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
2376 !env
->singlestep_enabled
&&
2377 dc
->pc
< next_page_start
);
2378 /* At this stage dc->condjmp will only be set when the skipped
2379 * instruction was a conditional branch, and the PC has already been
2381 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
2382 /* Make sure the pc is updated, and raise a debug exception. */
2385 gen_set_label(dc
->condlabel
);
2387 if (dc
->condjmp
|| !dc
->is_jmp
) {
2388 gen_op_movl_T0_im((long)dc
->pc
);
2389 gen_op_movl_reg_TN
[0][15]();
2394 switch(dc
->is_jmp
) {
2396 gen_goto_tb(dc
, 1, dc
->pc
);
2401 /* indicate that the hash table must be used to find the next TB */
2406 /* nothing more to generate */
2410 gen_set_label(dc
->condlabel
);
2411 gen_goto_tb(dc
, 1, dc
->pc
);
2415 *gen_opc_ptr
= INDEX_op_end
;
2418 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
2419 fprintf(logfile
, "----------------\n");
2420 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
2421 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
2422 fprintf(logfile
, "\n");
2423 if (loglevel
& (CPU_LOG_TB_OP
)) {
2424 fprintf(logfile
, "OP:\n");
2425 dump_ops(gen_opc_buf
, gen_opparam_buf
);
2426 fprintf(logfile
, "\n");
2431 j
= gen_opc_ptr
- gen_opc_buf
;
2434 gen_opc_instr_start
[lj
++] = 0;
2437 tb
->size
= dc
->pc
- pc_start
;
2442 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
2444 return gen_intermediate_code_internal(env
, tb
, 0);
2447 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
2449 return gen_intermediate_code_internal(env
, tb
, 1);
2452 void cpu_reset(CPUARMState
*env
)
2454 #if defined (CONFIG_USER_ONLY)
2455 env
->uncached_cpsr
= ARM_CPU_MODE_USR
;
2457 /* SVC mode with interrupts disabled. */
2458 env
->uncached_cpsr
= ARM_CPU_MODE_SVC
| CPSR_A
| CPSR_F
| CPSR_I
;
2463 CPUARMState
*cpu_arm_init(void)
2467 env
= qemu_mallocz(sizeof(CPUARMState
));
2476 void cpu_arm_close(CPUARMState
*env
)
/* Printable names for the 16 possible CPSR mode-field encodings;
   "???" marks encodings with no defined mode name here.  Indexed by
   (psr & 0xf) in cpu_dump_state.  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
2485 void cpu_dump_state(CPUState
*env
, FILE *f
,
2486 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
2498 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
2500 cpu_fprintf(f
, "\n");
2502 cpu_fprintf(f
, " ");
2504 psr
= cpsr_read(env
);
2505 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2507 psr
& (1 << 31) ? 'N' : '-',
2508 psr
& (1 << 30) ? 'Z' : '-',
2509 psr
& (1 << 29) ? 'C' : '-',
2510 psr
& (1 << 28) ? 'V' : '-',
2511 psr
& CPSR_T
? 'T' : 'A',
2512 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
2514 for (i
= 0; i
< 16; i
++) {
2515 d
.d
= env
->vfp
.regs
[i
];
2518 cpu_fprintf(f
, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2519 i
* 2, (int)s0
.i
, s0
.s
,
2520 i
* 2 + 1, (int)s0
.i
, s0
.s
,
2521 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
2524 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.fpscr
);