4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 /* internal defines */
32 typedef struct DisasContext
{
35 /* Nonzero if this instruction has been conditionally skipped. */
37 /* The label that will be jumped to when the instruction is skipped. */
39 struct TranslationBlock
*tb
;
40 int singlestep_enabled
;
44 #define DISAS_JUMP_NEXT 4
46 #ifdef USE_DIRECT_JUMP
49 #define TBPARAM(x) (long)(x)
52 /* XXX: move that elsewhere */
53 static uint16_t *gen_opc_ptr
;
54 static uint32_t *gen_opparam_ptr
;
59 #define DEF(s, n, copy_size) INDEX_op_ ## s,
67 static GenOpFunc1
*gen_test_cc
[14] = {
84 const uint8_t table_logic_cc
[16] = {
103 static GenOpFunc1
*gen_shift_T1_im
[4] = {
110 static GenOpFunc
*gen_shift_T1_0
[4] = {
117 static GenOpFunc1
*gen_shift_T2_im
[4] = {
124 static GenOpFunc
*gen_shift_T2_0
[4] = {
131 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
132 gen_op_shll_T1_im_cc
,
133 gen_op_shrl_T1_im_cc
,
134 gen_op_sarl_T1_im_cc
,
135 gen_op_rorl_T1_im_cc
,
138 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
145 static GenOpFunc
*gen_shift_T1_T0
[4] = {
152 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
153 gen_op_shll_T1_T0_cc
,
154 gen_op_shrl_T1_T0_cc
,
155 gen_op_sarl_T1_T0_cc
,
156 gen_op_rorl_T1_T0_cc
,
159 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
216 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
255 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
261 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
262 gen_op_shll_T0_im_thumb
,
263 gen_op_shrl_T0_im_thumb
,
264 gen_op_sarl_T0_im_thumb
,
267 static inline void gen_bx(DisasContext
*s
)
269 s
->is_jmp
= DISAS_UPDATE
;
273 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
278 /* normaly, since we updated PC, we need only to add one insn */
280 val
= (long)s
->pc
+ 2;
282 val
= (long)s
->pc
+ 4;
283 gen_op_movl_TN_im
[t
](val
);
285 gen_op_movl_TN_reg
[t
][reg
]();
289 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
291 gen_movl_TN_reg(s
, reg
, 0);
294 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
296 gen_movl_TN_reg(s
, reg
, 1);
299 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
301 gen_movl_TN_reg(s
, reg
, 2);
304 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
306 gen_op_movl_reg_TN
[t
][reg
]();
308 s
->is_jmp
= DISAS_JUMP
;
312 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
314 gen_movl_reg_TN(s
, reg
, 0);
317 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
319 gen_movl_reg_TN(s
, reg
, 1);
322 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
324 int val
, rm
, shift
, shiftop
;
326 if (!(insn
& (1 << 25))) {
329 if (!(insn
& (1 << 23)))
332 gen_op_addl_T1_im(val
);
336 shift
= (insn
>> 7) & 0x1f;
337 gen_movl_T2_reg(s
, rm
);
338 shiftop
= (insn
>> 5) & 3;
340 gen_shift_T2_im
[shiftop
](shift
);
341 } else if (shiftop
!= 0) {
342 gen_shift_T2_0
[shiftop
]();
344 if (!(insn
& (1 << 23)))
351 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
)
355 if (insn
& (1 << 22)) {
357 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
358 if (!(insn
& (1 << 23)))
361 gen_op_addl_T1_im(val
);
365 gen_movl_T2_reg(s
, rm
);
366 if (!(insn
& (1 << 23)))
/*
 * Define gen_vfp_<name>(dp): emit the VFP arithmetic op for the
 * current precision -- the ...d op when dp != 0 (double precision),
 * the ...s op otherwise (single precision).
 *
 * NOTE(review): the brace and if/else continuation lines were lost in
 * extraction and have been inferred from the d/s op-name suffixes --
 * confirm against the original source.
 */
#define VFP_OP(name)                      \
static inline void gen_vfp_##name(int dp) \
{                                         \
    if (dp)                               \
        gen_op_vfp_##name##d();           \
    else                                  \
        gen_op_vfp_##name##s();           \
}
404 vfp_reg_offset (int dp
, int reg
)
407 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
409 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
410 + offsetof(CPU_DoubleU
, l
.upper
);
412 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
413 + offsetof(CPU_DoubleU
, l
.lower
);
/*
 * Copy VFP register 'reg' into the F0 working value
 * (double precision when dp != 0, single precision otherwise).
 *
 * NOTE(review): the if (dp)/else lines were lost in extraction and
 * have been inferred from the d/s op-name suffixes -- confirm.
 */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}
/*
 * Copy VFP register 'reg' into the F1 working value
 * (double precision when dp != 0, single precision otherwise).
 *
 * NOTE(review): the if (dp)/else lines were lost in extraction and
 * have been inferred from the d/s op-name suffixes -- confirm.
 */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}
/*
 * Copy the F0 working value back into VFP register 'reg'
 * (double precision when dp != 0, single precision otherwise).
 *
 * NOTE(review): the if (dp)/else lines were lost in extraction and
 * have been inferred from the d/s op-name suffixes -- confirm.
 */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
440 /* Disassemble a VFP instruction. Returns nonzero if an error occured
441 (ie. an undefined instruction). */
442 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
444 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
447 dp
= ((insn
& 0xf00) == 0xb00);
448 switch ((insn
>> 24) & 0xf) {
450 if (insn
& (1 << 4)) {
451 /* single register transfer */
452 if ((insn
& 0x6f) != 0x00)
454 rd
= (insn
>> 12) & 0xf;
458 rn
= (insn
>> 16) & 0xf;
459 /* Get the existing value even for arm->vfp moves because
460 we only set half the register. */
461 gen_mov_F0_vreg(1, rn
);
463 if (insn
& (1 << 20)) {
465 if (insn
& (1 << 21))
466 gen_movl_reg_T1(s
, rd
);
468 gen_movl_reg_T0(s
, rd
);
471 if (insn
& (1 << 21))
472 gen_movl_T1_reg(s
, rd
);
474 gen_movl_T0_reg(s
, rd
);
476 gen_mov_vreg_F0(dp
, rn
);
479 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
480 if (insn
& (1 << 20)) {
482 if (insn
& (1 << 21)) {
483 /* system register */
490 gen_op_vfp_movl_T0_fpscr_flags();
492 gen_op_vfp_movl_T0_fpscr();
498 gen_mov_F0_vreg(0, rn
);
502 /* This will only set the 4 flag bits */
503 gen_op_movl_psr_T0();
505 gen_movl_reg_T0(s
, rd
);
508 gen_movl_T0_reg(s
, rd
);
509 if (insn
& (1 << 21)) {
510 /* system register */
513 /* Writes are ignored. */
516 gen_op_vfp_movl_fpscr_T0();
517 /* This could change vector settings, so jump to
518 the next instuction. */
519 gen_op_movl_T0_im(s
->pc
);
520 gen_movl_reg_T0(s
, 15);
521 s
->is_jmp
= DISAS_UPDATE
;
528 gen_mov_vreg_F0(0, rn
);
533 /* data processing */
534 /* The opcode is in bits 23, 21, 20 and 6. */
535 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
539 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
541 /* rn is register number */
544 rn
= (insn
>> 16) & 0xf;
547 if (op
== 15 && (rn
== 15 || rn
> 17)) {
548 /* Integer or single precision destination. */
549 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
551 if (insn
& (1 << 22))
553 rd
= (insn
>> 12) & 0xf;
556 if (op
== 15 && (rn
== 16 || rn
== 17)) {
557 /* Integer source. */
558 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
565 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
566 if (op
== 15 && rn
== 15) {
567 /* Double precision destination. */
568 if (insn
& (1 << 22))
570 rd
= (insn
>> 12) & 0xf;
572 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
573 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
576 veclen
= env
->vfp
.vec_len
;
577 if (op
== 15 && rn
> 3)
580 /* Shut up compiler warnings. */
591 /* Figure out what type of vector operation this is. */
592 if ((rd
& bank_mask
) == 0) {
597 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
599 delta_d
= env
->vfp
.vec_stride
+ 1;
601 if ((rm
& bank_mask
) == 0) {
602 /* mixed scalar/vector */
611 /* Load the initial operands. */
617 gen_mov_F0_vreg(0, rm
);
622 gen_mov_F0_vreg(dp
, rd
);
623 gen_mov_F1_vreg(dp
, rm
);
627 /* Compare with zero */
628 gen_mov_F0_vreg(dp
, rd
);
632 /* One source operand. */
633 gen_mov_F0_vreg(dp
, rm
);
636 /* Two source operands. */
637 gen_mov_F0_vreg(dp
, rn
);
638 gen_mov_F1_vreg(dp
, rm
);
642 /* Perform the calculation. */
644 case 0: /* mac: fd + (fn * fm) */
646 gen_mov_F1_vreg(dp
, rd
);
649 case 1: /* nmac: fd - (fn * fm) */
652 gen_mov_F1_vreg(dp
, rd
);
655 case 2: /* msc: -fd + (fn * fm) */
657 gen_mov_F1_vreg(dp
, rd
);
660 case 3: /* nmsc: -fd - (fn * fm) */
662 gen_mov_F1_vreg(dp
, rd
);
666 case 4: /* mul: fn * fm */
669 case 5: /* nmul: -(fn * fm) */
673 case 6: /* add: fn + fm */
676 case 7: /* sub: fn - fm */
679 case 8: /* div: fn / fm */
682 case 15: /* extension space */
709 case 15: /* single<->double conversion */
724 case 25: /* ftouiz */
730 case 27: /* ftosiz */
733 default: /* undefined */
734 printf ("rn:%d\n", rn
);
738 default: /* undefined */
739 printf ("op:%d\n", op
);
743 /* Write back the result. */
744 if (op
== 15 && (rn
>= 8 && rn
<= 11))
745 ; /* Comparison, do nothing. */
746 else if (op
== 15 && rn
> 17)
747 /* Integer result. */
748 gen_mov_vreg_F0(0, rd
);
749 else if (op
== 15 && rn
== 15)
751 gen_mov_vreg_F0(!dp
, rd
);
753 gen_mov_vreg_F0(dp
, rd
);
755 /* break out of the loop if we have finished */
759 if (op
== 15 && delta_m
== 0) {
760 /* single source one-many */
762 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
764 gen_mov_vreg_F0(dp
, rd
);
768 /* Setup the next operands. */
770 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
774 /* One source operand. */
775 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
777 gen_mov_F0_vreg(dp
, rm
);
779 /* Two source operands. */
780 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
782 gen_mov_F0_vreg(dp
, rn
);
784 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
786 gen_mov_F1_vreg(dp
, rm
);
794 if (dp
&& (insn
& (1 << 22))) {
795 /* two-register transfer */
796 rn
= (insn
>> 16) & 0xf;
797 rd
= (insn
>> 12) & 0xf;
803 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
805 if (insn
& (1 << 20)) {
808 gen_mov_F0_vreg(1, rm
);
810 gen_movl_reg_T0(s
, rd
);
811 gen_movl_reg_T1(s
, rn
);
813 gen_mov_F0_vreg(0, rm
);
815 gen_movl_reg_T0(s
, rn
);
816 gen_mov_F0_vreg(0, rm
+ 1);
818 gen_movl_reg_T0(s
, rd
);
823 gen_movl_T0_reg(s
, rd
);
824 gen_movl_T1_reg(s
, rn
);
826 gen_mov_vreg_F0(1, rm
);
828 gen_movl_T0_reg(s
, rn
);
830 gen_mov_vreg_F0(0, rm
);
831 gen_movl_T0_reg(s
, rd
);
833 gen_mov_vreg_F0(0, rm
+ 1);
838 rn
= (insn
>> 16) & 0xf;
840 rd
= (insn
>> 12) & 0xf;
842 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
843 gen_movl_T1_reg(s
, rn
);
844 if ((insn
& 0x01200000) == 0x01000000) {
845 /* Single load/store */
846 offset
= (insn
& 0xff) << 2;
847 if ((insn
& (1 << 23)) == 0)
849 gen_op_addl_T1_im(offset
);
850 if (insn
& (1 << 20)) {
852 gen_mov_vreg_F0(dp
, rd
);
854 gen_mov_F0_vreg(dp
, rd
);
858 /* load/store multiple */
860 n
= (insn
>> 1) & 0x7f;
864 if (insn
& (1 << 24)) /* pre-decrement */
865 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
871 for (i
= 0; i
< n
; i
++) {
872 if (insn
& (1 << 20)) {
875 gen_mov_vreg_F0(dp
, rd
+ i
);
878 gen_mov_F0_vreg(dp
, rd
+ i
);
881 gen_op_addl_T1_im(offset
);
883 if (insn
& (1 << 21)) {
885 if (insn
& (1 << 24))
886 offset
= -offset
* n
;
887 else if (dp
&& (insn
& 1))
893 gen_op_addl_T1_im(offset
);
894 gen_movl_reg_T1(s
, rn
);
900 /* Should never happen. */
906 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
908 TranslationBlock
*tb
;
911 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
913 gen_op_goto_tb0(TBPARAM(tb
));
915 gen_op_goto_tb1(TBPARAM(tb
));
916 gen_op_movl_T0_im(dest
);
917 gen_op_movl_r15_T0();
918 gen_op_movl_T0_im((long)tb
+ n
);
921 gen_op_movl_T0_im(dest
);
922 gen_op_movl_r15_T0();
928 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
930 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
931 /* An indirect jump so that we still trigger the debug exception. */
934 gen_op_movl_T0_im(dest
);
937 gen_goto_tb(s
, 0, dest
);
938 s
->is_jmp
= DISAS_TB_JUMP
;
942 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
944 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
951 /* Unconditional instructions. */
952 if ((insn
& 0x0d70f000) == 0x0550f000)
954 else if ((insn
& 0x0e000000) == 0x0a000000) {
955 /* branch link and change to thumb (blx <offset>) */
958 val
= (uint32_t)s
->pc
;
959 gen_op_movl_T0_im(val
);
960 gen_movl_reg_T0(s
, 14);
961 /* Sign-extend the 24-bit offset */
962 offset
= (((int32_t)insn
) << 8) >> 8;
963 /* offset * 4 + bit24 * 2 + (thumb bit) */
964 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
965 /* pipeline offset */
967 gen_op_movl_T0_im(val
);
970 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
971 /* Coprocessor double register transfer. */
972 } else if ((insn
& 0x0f000010) == 0x0e000010) {
973 /* Additional coprocessor register transfer. */
978 /* if not always execute, we generate a conditional jump to
980 s
->condlabel
= gen_new_label();
981 gen_test_cc
[cond
^ 1](s
->condlabel
);
983 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
984 //s->is_jmp = DISAS_JUMP_NEXT;
986 if ((insn
& 0x0f900000) == 0x03000000) {
987 if ((insn
& 0x0ff0f000) != 0x0360f000)
989 /* CPSR = immediate */
991 shift
= ((insn
>> 8) & 0xf) * 2;
993 val
= (val
>> shift
) | (val
<< (32 - shift
));
994 gen_op_movl_T0_im(val
);
995 if (insn
& (1 << 19))
996 gen_op_movl_psr_T0();
997 } else if ((insn
& 0x0f900000) == 0x01000000
998 && (insn
& 0x00000090) != 0x00000090) {
999 /* miscellaneous instructions */
1000 op1
= (insn
>> 21) & 3;
1001 sh
= (insn
>> 4) & 0xf;
1004 case 0x0: /* move program status register */
1006 /* SPSR not accessible in user mode */
1011 gen_movl_T0_reg(s
, rm
);
1012 if (insn
& (1 << 19))
1013 gen_op_movl_psr_T0();
1016 rd
= (insn
>> 12) & 0xf;
1017 gen_op_movl_T0_psr();
1018 gen_movl_reg_T0(s
, rd
);
1023 /* branch/exchange thumb (bx). */
1024 gen_movl_T0_reg(s
, rm
);
1026 } else if (op1
== 3) {
1028 rd
= (insn
>> 12) & 0xf;
1029 gen_movl_T0_reg(s
, rm
);
1031 gen_movl_reg_T0(s
, rd
);
1040 /* branch link/exchange thumb (blx) */
1041 val
= (uint32_t)s
->pc
;
1042 gen_op_movl_T0_im(val
);
1043 gen_movl_reg_T0(s
, 14);
1044 gen_movl_T0_reg(s
, rm
);
1047 case 0x5: /* saturating add/subtract */
1048 rd
= (insn
>> 12) & 0xf;
1049 rn
= (insn
>> 16) & 0xf;
1050 gen_movl_T0_reg(s
, rm
);
1051 gen_movl_T1_reg(s
, rn
);
1053 gen_op_double_T1_saturate();
1055 gen_op_subl_T0_T1_saturate();
1057 gen_op_addl_T0_T1_saturate();
1058 gen_movl_reg_T0(s
, rd
);
1060 case 0x8: /* signed multiply */
1064 rs
= (insn
>> 8) & 0xf;
1065 rn
= (insn
>> 12) & 0xf;
1066 rd
= (insn
>> 16) & 0xf;
1068 /* (32 * 16) >> 16 */
1069 gen_movl_T0_reg(s
, rm
);
1070 gen_movl_T1_reg(s
, rs
);
1072 gen_op_sarl_T1_im(16);
1075 gen_op_imulw_T0_T1();
1076 if ((sh
& 2) == 0) {
1077 gen_movl_T1_reg(s
, rn
);
1078 gen_op_addl_T0_T1_setq();
1080 gen_movl_reg_T0(s
, rd
);
1083 gen_movl_T0_reg(s
, rm
);
1085 gen_op_sarl_T0_im(16);
1088 gen_movl_T1_reg(s
, rs
);
1090 gen_op_sarl_T1_im(16);
1094 gen_op_imull_T0_T1();
1095 gen_op_addq_T0_T1(rn
, rd
);
1096 gen_movl_reg_T0(s
, rn
);
1097 gen_movl_reg_T1(s
, rd
);
1101 gen_movl_T1_reg(s
, rn
);
1102 gen_op_addl_T0_T1_setq();
1104 gen_movl_reg_T0(s
, rd
);
1111 } else if (((insn
& 0x0e000000) == 0 &&
1112 (insn
& 0x00000090) != 0x90) ||
1113 ((insn
& 0x0e000000) == (1 << 25))) {
1114 int set_cc
, logic_cc
, shiftop
;
1116 op1
= (insn
>> 21) & 0xf;
1117 set_cc
= (insn
>> 20) & 1;
1118 logic_cc
= table_logic_cc
[op1
] & set_cc
;
1120 /* data processing instruction */
1121 if (insn
& (1 << 25)) {
1122 /* immediate operand */
1124 shift
= ((insn
>> 8) & 0xf) * 2;
1126 val
= (val
>> shift
) | (val
<< (32 - shift
));
1127 gen_op_movl_T1_im(val
);
1128 if (logic_cc
&& shift
)
1133 gen_movl_T1_reg(s
, rm
);
1134 shiftop
= (insn
>> 5) & 3;
1135 if (!(insn
& (1 << 4))) {
1136 shift
= (insn
>> 7) & 0x1f;
1139 gen_shift_T1_im_cc
[shiftop
](shift
);
1141 gen_shift_T1_im
[shiftop
](shift
);
1143 } else if (shiftop
!= 0) {
1145 gen_shift_T1_0_cc
[shiftop
]();
1147 gen_shift_T1_0
[shiftop
]();
1151 rs
= (insn
>> 8) & 0xf;
1152 gen_movl_T0_reg(s
, rs
);
1154 gen_shift_T1_T0_cc
[shiftop
]();
1156 gen_shift_T1_T0
[shiftop
]();
1160 if (op1
!= 0x0f && op1
!= 0x0d) {
1161 rn
= (insn
>> 16) & 0xf;
1162 gen_movl_T0_reg(s
, rn
);
1164 rd
= (insn
>> 12) & 0xf;
1167 gen_op_andl_T0_T1();
1168 gen_movl_reg_T0(s
, rd
);
1170 gen_op_logic_T0_cc();
1173 gen_op_xorl_T0_T1();
1174 gen_movl_reg_T0(s
, rd
);
1176 gen_op_logic_T0_cc();
1180 gen_op_subl_T0_T1_cc();
1182 gen_op_subl_T0_T1();
1183 gen_movl_reg_T0(s
, rd
);
1187 gen_op_rsbl_T0_T1_cc();
1189 gen_op_rsbl_T0_T1();
1190 gen_movl_reg_T0(s
, rd
);
1194 gen_op_addl_T0_T1_cc();
1196 gen_op_addl_T0_T1();
1197 gen_movl_reg_T0(s
, rd
);
1201 gen_op_adcl_T0_T1_cc();
1203 gen_op_adcl_T0_T1();
1204 gen_movl_reg_T0(s
, rd
);
1208 gen_op_sbcl_T0_T1_cc();
1210 gen_op_sbcl_T0_T1();
1211 gen_movl_reg_T0(s
, rd
);
1215 gen_op_rscl_T0_T1_cc();
1217 gen_op_rscl_T0_T1();
1218 gen_movl_reg_T0(s
, rd
);
1222 gen_op_andl_T0_T1();
1223 gen_op_logic_T0_cc();
1228 gen_op_xorl_T0_T1();
1229 gen_op_logic_T0_cc();
1234 gen_op_subl_T0_T1_cc();
1239 gen_op_addl_T0_T1_cc();
1244 gen_movl_reg_T0(s
, rd
);
1246 gen_op_logic_T0_cc();
1249 gen_movl_reg_T1(s
, rd
);
1251 gen_op_logic_T1_cc();
1254 gen_op_bicl_T0_T1();
1255 gen_movl_reg_T0(s
, rd
);
1257 gen_op_logic_T0_cc();
1262 gen_movl_reg_T1(s
, rd
);
1264 gen_op_logic_T1_cc();
1268 /* other instructions */
1269 op1
= (insn
>> 24) & 0xf;
1273 /* multiplies, extra load/stores */
1274 sh
= (insn
>> 5) & 3;
1277 rd
= (insn
>> 16) & 0xf;
1278 rn
= (insn
>> 12) & 0xf;
1279 rs
= (insn
>> 8) & 0xf;
1281 if (((insn
>> 22) & 3) == 0) {
1283 gen_movl_T0_reg(s
, rs
);
1284 gen_movl_T1_reg(s
, rm
);
1286 if (insn
& (1 << 21)) {
1287 gen_movl_T1_reg(s
, rn
);
1288 gen_op_addl_T0_T1();
1290 if (insn
& (1 << 20))
1291 gen_op_logic_T0_cc();
1292 gen_movl_reg_T0(s
, rd
);
1295 gen_movl_T0_reg(s
, rs
);
1296 gen_movl_T1_reg(s
, rm
);
1297 if (insn
& (1 << 22))
1298 gen_op_imull_T0_T1();
1300 gen_op_mull_T0_T1();
1301 if (insn
& (1 << 21)) /* mult accumulate */
1302 gen_op_addq_T0_T1(rn
, rd
);
1303 if (!(insn
& (1 << 23))) { /* double accumulate */
1304 gen_op_addq_lo_T0_T1(rn
);
1305 gen_op_addq_lo_T0_T1(rd
);
1307 if (insn
& (1 << 20))
1309 gen_movl_reg_T0(s
, rn
);
1310 gen_movl_reg_T1(s
, rd
);
1313 rn
= (insn
>> 16) & 0xf;
1314 rd
= (insn
>> 12) & 0xf;
1315 if (insn
& (1 << 23)) {
1316 /* load/store exclusive */
1319 /* SWP instruction */
1322 gen_movl_T0_reg(s
, rm
);
1323 gen_movl_T1_reg(s
, rn
);
1324 if (insn
& (1 << 22)) {
1325 gen_op_swpb_T0_T1();
1327 gen_op_swpl_T0_T1();
1329 gen_movl_reg_T0(s
, rd
);
1333 /* Misc load/store */
1334 rn
= (insn
>> 16) & 0xf;
1335 rd
= (insn
>> 12) & 0xf;
1336 gen_movl_T1_reg(s
, rn
);
1337 if (insn
& (1 << 24))
1338 gen_add_datah_offset(s
, insn
);
1339 if (insn
& (1 << 20)) {
1343 gen_op_lduw_T0_T1();
1346 gen_op_ldsb_T0_T1();
1350 gen_op_ldsw_T0_T1();
1353 gen_movl_reg_T0(s
, rd
);
1354 } else if (sh
& 2) {
1358 gen_movl_T0_reg(s
, rd
);
1360 gen_op_addl_T1_im(4);
1361 gen_movl_T0_reg(s
, rd
+ 1);
1363 if ((insn
& (1 << 24)) || (insn
& (1 << 20)))
1364 gen_op_addl_T1_im(-4);
1368 gen_movl_reg_T0(s
, rd
);
1369 gen_op_addl_T1_im(4);
1371 gen_movl_reg_T0(s
, rd
+ 1);
1372 if ((insn
& (1 << 24)) || (insn
& (1 << 20)))
1373 gen_op_addl_T1_im(-4);
1377 gen_movl_T0_reg(s
, rd
);
1380 if (!(insn
& (1 << 24))) {
1381 gen_add_datah_offset(s
, insn
);
1382 gen_movl_reg_T1(s
, rn
);
1383 } else if (insn
& (1 << 21)) {
1384 gen_movl_reg_T1(s
, rn
);
1392 /* load/store byte/word */
1393 rn
= (insn
>> 16) & 0xf;
1394 rd
= (insn
>> 12) & 0xf;
1395 gen_movl_T1_reg(s
, rn
);
1396 if (insn
& (1 << 24))
1397 gen_add_data_offset(s
, insn
);
1398 if (insn
& (1 << 20)) {
1400 if (insn
& (1 << 22))
1401 gen_op_ldub_T0_T1();
1407 gen_movl_reg_T0(s
, rd
);
1410 gen_movl_T0_reg(s
, rd
);
1411 if (insn
& (1 << 22))
1416 if (!(insn
& (1 << 24))) {
1417 gen_add_data_offset(s
, insn
);
1418 gen_movl_reg_T1(s
, rn
);
1419 } else if (insn
& (1 << 21))
1420 gen_movl_reg_T1(s
, rn
); {
1427 /* load/store multiple words */
1428 /* XXX: store correct base if write back */
1429 if (insn
& (1 << 22))
1430 goto illegal_op
; /* only usable in supervisor mode */
1431 rn
= (insn
>> 16) & 0xf;
1432 gen_movl_T1_reg(s
, rn
);
1434 /* compute total size */
1437 if (insn
& (1 << i
))
1440 /* XXX: test invalid n == 0 case ? */
1441 if (insn
& (1 << 23)) {
1442 if (insn
& (1 << 24)) {
1444 gen_op_addl_T1_im(4);
1446 /* post increment */
1449 if (insn
& (1 << 24)) {
1451 gen_op_addl_T1_im(-(n
* 4));
1453 /* post decrement */
1455 gen_op_addl_T1_im(-((n
- 1) * 4));
1460 if (insn
& (1 << i
)) {
1461 if (insn
& (1 << 20)) {
1467 gen_movl_reg_T0(s
, i
);
1471 /* special case: r15 = PC + 12 */
1472 val
= (long)s
->pc
+ 8;
1473 gen_op_movl_TN_im
[0](val
);
1475 gen_movl_T0_reg(s
, i
);
1480 /* no need to add after the last transfer */
1482 gen_op_addl_T1_im(4);
1485 if (insn
& (1 << 21)) {
1487 if (insn
& (1 << 23)) {
1488 if (insn
& (1 << 24)) {
1491 /* post increment */
1492 gen_op_addl_T1_im(4);
1495 if (insn
& (1 << 24)) {
1498 gen_op_addl_T1_im(-((n
- 1) * 4));
1500 /* post decrement */
1501 gen_op_addl_T1_im(-(n
* 4));
1504 gen_movl_reg_T1(s
, rn
);
1513 /* branch (and link) */
1514 val
= (int32_t)s
->pc
;
1515 if (insn
& (1 << 24)) {
1516 gen_op_movl_T0_im(val
);
1517 gen_op_movl_reg_TN
[0][14]();
1519 offset
= (((int32_t)insn
<< 8) >> 8);
1520 val
+= (offset
<< 2) + 4;
1528 op1
= (insn
>> 8) & 0xf;
1532 if (disas_vfp_insn (env
, s
, insn
))
1536 /* unknown coprocessor. */
1542 gen_op_movl_T0_im((long)s
->pc
);
1543 gen_op_movl_reg_TN
[0][15]();
1545 s
->is_jmp
= DISAS_JUMP
;
1549 gen_op_movl_T0_im((long)s
->pc
- 4);
1550 gen_op_movl_reg_TN
[0][15]();
1551 gen_op_undef_insn();
1552 s
->is_jmp
= DISAS_JUMP
;
1558 static void disas_thumb_insn(DisasContext
*s
)
1560 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
1567 switch (insn
>> 12) {
1570 op
= (insn
>> 11) & 3;
1573 rn
= (insn
>> 3) & 7;
1574 gen_movl_T0_reg(s
, rn
);
1575 if (insn
& (1 << 10)) {
1577 gen_op_movl_T1_im((insn
>> 6) & 7);
1580 rm
= (insn
>> 6) & 7;
1581 gen_movl_T1_reg(s
, rm
);
1583 if (insn
& (1 << 9))
1584 gen_op_subl_T0_T1_cc();
1586 gen_op_addl_T0_T1_cc();
1587 gen_movl_reg_T0(s
, rd
);
1589 /* shift immediate */
1590 rm
= (insn
>> 3) & 7;
1591 shift
= (insn
>> 6) & 0x1f;
1592 gen_movl_T0_reg(s
, rm
);
1593 gen_shift_T0_im_thumb
[op
](shift
);
1594 gen_movl_reg_T0(s
, rd
);
1598 /* arithmetic large immediate */
1599 op
= (insn
>> 11) & 3;
1600 rd
= (insn
>> 8) & 0x7;
1602 gen_op_movl_T0_im(insn
& 0xff);
1604 gen_movl_T0_reg(s
, rd
);
1605 gen_op_movl_T1_im(insn
& 0xff);
1609 gen_op_logic_T0_cc();
1612 gen_op_subl_T0_T1_cc();
1615 gen_op_addl_T0_T1_cc();
1618 gen_op_subl_T0_T1_cc();
1622 gen_movl_reg_T0(s
, rd
);
1625 if (insn
& (1 << 11)) {
1626 rd
= (insn
>> 8) & 7;
1627 /* load pc-relative. Bit 1 of PC is ignored. */
1628 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
1629 val
&= ~(uint32_t)2;
1630 gen_op_movl_T1_im(val
);
1632 gen_movl_reg_T0(s
, rd
);
1635 if (insn
& (1 << 10)) {
1636 /* data processing extended or blx */
1637 rd
= (insn
& 7) | ((insn
>> 4) & 8);
1638 rm
= (insn
>> 3) & 0xf;
1639 op
= (insn
>> 8) & 3;
1642 gen_movl_T0_reg(s
, rd
);
1643 gen_movl_T1_reg(s
, rm
);
1644 gen_op_addl_T0_T1();
1645 gen_movl_reg_T0(s
, rd
);
1648 gen_movl_T0_reg(s
, rd
);
1649 gen_movl_T1_reg(s
, rm
);
1650 gen_op_subl_T0_T1_cc();
1652 case 2: /* mov/cpy */
1653 gen_movl_T0_reg(s
, rm
);
1654 gen_movl_reg_T0(s
, rd
);
1656 case 3:/* branch [and link] exchange thumb register */
1657 if (insn
& (1 << 7)) {
1658 val
= (uint32_t)s
->pc
| 1;
1659 gen_op_movl_T1_im(val
);
1660 gen_movl_reg_T1(s
, 14);
1662 gen_movl_T0_reg(s
, rm
);
1669 /* data processing register */
1671 rm
= (insn
>> 3) & 7;
1672 op
= (insn
>> 6) & 0xf;
1673 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
1674 /* the shift/rotate ops want the operands backwards */
1683 if (op
== 9) /* neg */
1684 gen_op_movl_T0_im(0);
1685 else if (op
!= 0xf) /* mvn doesn't read its first operand */
1686 gen_movl_T0_reg(s
, rd
);
1688 gen_movl_T1_reg(s
, rm
);
1691 gen_op_andl_T0_T1();
1692 gen_op_logic_T0_cc();
1695 gen_op_xorl_T0_T1();
1696 gen_op_logic_T0_cc();
1699 gen_op_shll_T1_T0_cc();
1702 gen_op_shrl_T1_T0_cc();
1705 gen_op_sarl_T1_T0_cc();
1708 gen_op_adcl_T0_T1_cc();
1711 gen_op_sbcl_T0_T1_cc();
1714 gen_op_rorl_T1_T0_cc();
1717 gen_op_andl_T0_T1();
1718 gen_op_logic_T0_cc();
1722 gen_op_subl_T0_T1_cc();
1725 gen_op_subl_T0_T1_cc();
1729 gen_op_addl_T0_T1_cc();
1734 gen_op_logic_T0_cc();
1737 gen_op_mull_T0_T1();
1738 gen_op_logic_T0_cc();
1741 gen_op_bicl_T0_T1();
1742 gen_op_logic_T0_cc();
1746 gen_op_logic_T1_cc();
1753 gen_movl_reg_T1(s
, rm
);
1755 gen_movl_reg_T0(s
, rd
);
1760 /* load/store register offset. */
1762 rn
= (insn
>> 3) & 7;
1763 rm
= (insn
>> 6) & 7;
1764 op
= (insn
>> 9) & 7;
1765 gen_movl_T1_reg(s
, rn
);
1766 gen_movl_T2_reg(s
, rm
);
1767 gen_op_addl_T1_T2();
1769 if (op
< 3) /* store */
1770 gen_movl_T0_reg(s
, rd
);
1783 gen_op_ldsb_T0_T1();
1789 gen_op_lduw_T0_T1();
1792 gen_op_ldub_T0_T1();
1795 gen_op_ldsw_T0_T1();
1798 if (op
>= 3) /* load */
1799 gen_movl_reg_T0(s
, rd
);
1803 /* load/store word immediate offset */
1805 rn
= (insn
>> 3) & 7;
1806 gen_movl_T1_reg(s
, rn
);
1807 val
= (insn
>> 4) & 0x7c;
1808 gen_op_movl_T2_im(val
);
1809 gen_op_addl_T1_T2();
1811 if (insn
& (1 << 11)) {
1814 gen_movl_reg_T0(s
, rd
);
1817 gen_movl_T0_reg(s
, rd
);
1823 /* load/store byte immediate offset */
1825 rn
= (insn
>> 3) & 7;
1826 gen_movl_T1_reg(s
, rn
);
1827 val
= (insn
>> 6) & 0x1f;
1828 gen_op_movl_T2_im(val
);
1829 gen_op_addl_T1_T2();
1831 if (insn
& (1 << 11)) {
1833 gen_op_ldub_T0_T1();
1834 gen_movl_reg_T0(s
, rd
);
1837 gen_movl_T0_reg(s
, rd
);
1843 /* load/store halfword immediate offset */
1845 rn
= (insn
>> 3) & 7;
1846 gen_movl_T1_reg(s
, rn
);
1847 val
= (insn
>> 5) & 0x3e;
1848 gen_op_movl_T2_im(val
);
1849 gen_op_addl_T1_T2();
1851 if (insn
& (1 << 11)) {
1853 gen_op_lduw_T0_T1();
1854 gen_movl_reg_T0(s
, rd
);
1857 gen_movl_T0_reg(s
, rd
);
1863 /* load/store from stack */
1864 rd
= (insn
>> 8) & 7;
1865 gen_movl_T1_reg(s
, 13);
1866 val
= (insn
& 0xff) * 4;
1867 gen_op_movl_T2_im(val
);
1868 gen_op_addl_T1_T2();
1870 if (insn
& (1 << 11)) {
1873 gen_movl_reg_T0(s
, rd
);
1876 gen_movl_T0_reg(s
, rd
);
1882 /* add to high reg */
1883 rd
= (insn
>> 8) & 7;
1884 if (insn
& (1 << 11)) {
1886 gen_movl_T0_reg(s
, 13);
1888 /* PC. bit 1 is ignored. */
1889 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
1891 val
= (insn
& 0xff) * 4;
1892 gen_op_movl_T1_im(val
);
1893 gen_op_addl_T0_T1();
1894 gen_movl_reg_T0(s
, rd
);
1899 op
= (insn
>> 8) & 0xf;
1902 /* adjust stack pointer */
1903 gen_movl_T1_reg(s
, 13);
1904 val
= (insn
& 0x7f) * 4;
1905 if (insn
& (1 << 7))
1906 val
= -(int32_t)val
;
1907 gen_op_movl_T2_im(val
);
1908 gen_op_addl_T1_T2();
1909 gen_movl_reg_T1(s
, 13);
1912 case 4: case 5: case 0xc: case 0xd:
1914 gen_movl_T1_reg(s
, 13);
1915 if (insn
& (1 << 8))
1919 for (i
= 0; i
< 8; i
++) {
1920 if (insn
& (1 << i
))
1923 if ((insn
& (1 << 11)) == 0) {
1924 gen_op_movl_T2_im(-offset
);
1925 gen_op_addl_T1_T2();
1927 gen_op_movl_T2_im(4);
1928 for (i
= 0; i
< 8; i
++) {
1929 if (insn
& (1 << i
)) {
1930 if (insn
& (1 << 11)) {
1933 gen_movl_reg_T0(s
, i
);
1936 gen_movl_T0_reg(s
, i
);
1939 /* advance to the next address. */
1940 gen_op_addl_T1_T2();
1943 if (insn
& (1 << 8)) {
1944 if (insn
& (1 << 11)) {
1947 /* don't set the pc until the rest of the instruction
1951 gen_movl_T0_reg(s
, 14);
1954 gen_op_addl_T1_T2();
1956 if ((insn
& (1 << 11)) == 0) {
1957 gen_op_movl_T2_im(-offset
);
1958 gen_op_addl_T1_T2();
1960 /* write back the new stack pointer */
1961 gen_movl_reg_T1(s
, 13);
1962 /* set the new PC value */
1963 if ((insn
& 0x0900) == 0x0900)
1973 /* load/store multiple */
1974 rn
= (insn
>> 8) & 0x7;
1975 gen_movl_T1_reg(s
, rn
);
1976 gen_op_movl_T2_im(4);
1977 for (i
= 0; i
< 8; i
++) {
1978 if (insn
& (1 << i
)) {
1979 if (insn
& (1 << 11)) {
1982 gen_movl_reg_T0(s
, i
);
1985 gen_movl_T0_reg(s
, i
);
1988 /* advance to the next address */
1989 gen_op_addl_T1_T2();
1992 /* Base register writeback. */
1993 gen_movl_reg_T1(s
, rn
);
1997 /* conditional branch or swi */
1998 cond
= (insn
>> 8) & 0xf;
2004 gen_op_movl_T0_im((long)s
->pc
| 1);
2005 /* Don't set r15. */
2006 gen_op_movl_reg_TN
[0][15]();
2008 s
->is_jmp
= DISAS_JUMP
;
2011 /* generate a conditional jump to next instruction */
2012 s
->condlabel
= gen_new_label();
2013 gen_test_cc
[cond
^ 1](s
->condlabel
);
2015 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2016 //s->is_jmp = DISAS_JUMP_NEXT;
2017 gen_movl_T1_reg(s
, 15);
2019 /* jump to the offset */
2020 val
= (uint32_t)s
->pc
+ 2;
2021 offset
= ((int32_t)insn
<< 24) >> 24;
2027 /* unconditional branch */
2028 if (insn
& (1 << 11))
2029 goto undef
; /* Second half of a blx */
2030 val
= (uint32_t)s
->pc
;
2031 offset
= ((int32_t)insn
<< 21) >> 21;
2032 val
+= (offset
<< 1) + 2;
2037 /* branch and link [and switch to arm] */
2038 offset
= ((int32_t)insn
<< 21) >> 10;
2040 offset
|= insn
& 0x7ff;
2042 val
= (uint32_t)s
->pc
+ 2;
2043 gen_op_movl_T1_im(val
| 1);
2044 gen_movl_reg_T1(s
, 14);
2047 if (insn
& (1 << 12)) {
2052 val
&= ~(uint32_t)2;
2053 gen_op_movl_T0_im(val
);
2059 gen_op_movl_T0_im((long)s
->pc
- 2);
2060 gen_op_movl_reg_TN
[0][15]();
2061 gen_op_undef_insn();
2062 s
->is_jmp
= DISAS_JUMP
;
2065 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2066 basic block 'tb'. If search_pc is TRUE, also generate PC
2067 information for each intermediate instruction. */
2068 static inline int gen_intermediate_code_internal(CPUState
*env
,
2069 TranslationBlock
*tb
,
2072 DisasContext dc1
, *dc
= &dc1
;
2073 uint16_t *gen_opc_end
;
2075 target_ulong pc_start
;
2077 /* generate intermediate code */
2082 gen_opc_ptr
= gen_opc_buf
;
2083 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2084 gen_opparam_ptr
= gen_opparam_buf
;
2086 dc
->is_jmp
= DISAS_NEXT
;
2088 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2090 dc
->thumb
= env
->thumb
;
2094 if (env
->nb_breakpoints
> 0) {
2095 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
2096 if (env
->breakpoints
[j
] == dc
->pc
) {
2097 gen_op_movl_T0_im((long)dc
->pc
);
2098 gen_op_movl_reg_TN
[0][15]();
2100 dc
->is_jmp
= DISAS_JUMP
;
2106 j
= gen_opc_ptr
- gen_opc_buf
;
2110 gen_opc_instr_start
[lj
++] = 0;
2112 gen_opc_pc
[lj
] = dc
->pc
;
2113 gen_opc_instr_start
[lj
] = 1;
2117 disas_thumb_insn(dc
);
2119 disas_arm_insn(env
, dc
);
2121 if (dc
->condjmp
&& !dc
->is_jmp
) {
2122 gen_set_label(dc
->condlabel
);
2125 /* Translation stops when a conditional branch is enoutered.
2126 * Otherwise the subsequent code could get translated several times.
2128 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
2129 !env
->singlestep_enabled
&&
2130 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32));
2131 /* It this stage dc->condjmp will only be set when the skipped
2132 * instruction was a conditional branch, and teh PC has already been
2134 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
2135 /* Make sure the pc is updated, and raise a debug exception. */
2138 gen_set_label(dc
->condlabel
);
2140 if (dc
->condjmp
|| !dc
->is_jmp
) {
2141 gen_op_movl_T0_im((long)dc
->pc
);
2142 gen_op_movl_reg_TN
[0][15]();
2147 switch(dc
->is_jmp
) {
2149 gen_goto_tb(dc
, 1, dc
->pc
);
2154 /* indicate that the hash table must be used to find the next TB */
2159 /* nothing more to generate */
2163 gen_set_label(dc
->condlabel
);
2164 gen_goto_tb(dc
, 1, dc
->pc
);
2168 *gen_opc_ptr
= INDEX_op_end
;
2171 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
2172 fprintf(logfile
, "----------------\n");
2173 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
2174 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
2175 fprintf(logfile
, "\n");
2176 if (loglevel
& (CPU_LOG_TB_OP
)) {
2177 fprintf(logfile
, "OP:\n");
2178 dump_ops(gen_opc_buf
, gen_opparam_buf
);
2179 fprintf(logfile
, "\n");
2184 tb
->size
= dc
->pc
- pc_start
;
2188 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
2190 return gen_intermediate_code_internal(env
, tb
, 0);
2193 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
2195 return gen_intermediate_code_internal(env
, tb
, 1);
2198 CPUARMState
*cpu_arm_init(void)
2204 env
= malloc(sizeof(CPUARMState
));
2207 memset(env
, 0, sizeof(CPUARMState
));
2208 cpu_single_env
= env
;
2212 void cpu_arm_close(CPUARMState
*env
)
2217 void cpu_dump_state(CPUState
*env
, FILE *f
,
2218 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
2229 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
2231 cpu_fprintf(f
, "\n");
2233 cpu_fprintf(f
, " ");
2235 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c\n",
2237 env
->cpsr
& (1 << 31) ? 'N' : '-',
2238 env
->cpsr
& (1 << 30) ? 'Z' : '-',
2239 env
->cpsr
& (1 << 29) ? 'C' : '-',
2240 env
->cpsr
& (1 << 28) ? 'V' : '-',
2241 env
->thumb
? 'T' : 'A');
2243 for (i
= 0; i
< 16; i
++) {
2244 d
.d
= env
->vfp
.regs
[i
];
2247 cpu_fprintf(f
, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2248 i
* 2, (int)s0
.i
, s0
.s
,
2249 i
* 2 + 1, (int)s0
.i
, s0
.s
,
2250 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
2253 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.fpscr
);
2256 target_ulong
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
2261 #if defined(CONFIG_USER_ONLY)
2263 int cpu_arm_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
2264 int is_user
, int is_softmmu
)
2266 env
->cp15_6
= address
;
2268 env
->exception_index
= EXCP_PREFETCH_ABORT
;
2270 env
->exception_index
= EXCP_DATA_ABORT
;
2277 #error not implemented