[qemu/mini2440.git] / target-arm / translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext {
39 target_ulong pc;
40 int is_jmp;
41 /* Nonzero if this instruction has been conditionally skipped. */
42 int condjmp;
43 /* The label that will be jumped to when the instruction is skipped. */
44 int condlabel;
45 struct TranslationBlock *tb;
46 int singlestep_enabled;
47 int thumb;
48 #if !defined(CONFIG_USER_ONLY)
49 int user;
50 #endif
51 } DisasContext;
53 #if defined(CONFIG_USER_ONLY)
54 #define IS_USER(s) 1
55 #else
56 #define IS_USER(s) (s->user)
57 #endif
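/* In user-mode-only emulation every access is a user access; otherwise the
   privilege level recorded in the DisasContext at translation time is used. */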
59 #define DISAS_JUMP_NEXT 4
61 #ifdef USE_DIRECT_JUMP
62 #define TBPARAM(x)
63 #else
64 #define TBPARAM(x) (long)(x)
65 #endif
67 /* XXX: move that elsewhere */
68 static uint16_t *gen_opc_ptr;
69 static uint32_t *gen_opparam_ptr;
70 extern FILE *logfile;
71 extern int loglevel;
73 enum {
74 #define DEF(s, n, copy_size) INDEX_op_ ## s,
75 #include "opc.h"
76 #undef DEF
77 NB_OPS,
80 #include "gen-op.h"
82 static GenOpFunc1 *gen_test_cc[14] = {
83 gen_op_test_eq,
84 gen_op_test_ne,
85 gen_op_test_cs,
86 gen_op_test_cc,
87 gen_op_test_mi,
88 gen_op_test_pl,
89 gen_op_test_vs,
90 gen_op_test_vc,
91 gen_op_test_hi,
92 gen_op_test_ls,
93 gen_op_test_ge,
94 gen_op_test_lt,
95 gen_op_test_gt,
96 gen_op_test_le,
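/* For each data processing opcode, nonzero if it is a logical operation,
   i.e. one whose flags are derived from the result rather than computed by
   the arithmetic helper itself. */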
99 const uint8_t table_logic_cc[16] = {
100 1, /* and */
101 1, /* xor */
102 0, /* sub */
103 0, /* rsb */
104 0, /* add */
105 0, /* adc */
106 0, /* sbc */
107 0, /* rsc */
108 1, /* andl */
109 1, /* xorl */
110 0, /* cmp */
111 0, /* cmn */
112 1, /* orr */
113 1, /* mov */
114 1, /* bic */
115 1, /* mvn */
118 static GenOpFunc1 *gen_shift_T1_im[4] = {
119 gen_op_shll_T1_im,
120 gen_op_shrl_T1_im,
121 gen_op_sarl_T1_im,
122 gen_op_rorl_T1_im,
125 static GenOpFunc *gen_shift_T1_0[4] = {
126 NULL,
127 gen_op_shrl_T1_0,
128 gen_op_sarl_T1_0,
129 gen_op_rrxl_T1,
132 static GenOpFunc1 *gen_shift_T2_im[4] = {
133 gen_op_shll_T2_im,
134 gen_op_shrl_T2_im,
135 gen_op_sarl_T2_im,
136 gen_op_rorl_T2_im,
139 static GenOpFunc *gen_shift_T2_0[4] = {
140 NULL,
141 gen_op_shrl_T2_0,
142 gen_op_sarl_T2_0,
143 gen_op_rrxl_T2,
146 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
147 gen_op_shll_T1_im_cc,
148 gen_op_shrl_T1_im_cc,
149 gen_op_sarl_T1_im_cc,
150 gen_op_rorl_T1_im_cc,
153 static GenOpFunc *gen_shift_T1_0_cc[4] = {
154 NULL,
155 gen_op_shrl_T1_0_cc,
156 gen_op_sarl_T1_0_cc,
157 gen_op_rrxl_T1_cc,
160 static GenOpFunc *gen_shift_T1_T0[4] = {
161 gen_op_shll_T1_T0,
162 gen_op_shrl_T1_T0,
163 gen_op_sarl_T1_T0,
164 gen_op_rorl_T1_T0,
167 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
168 gen_op_shll_T1_T0_cc,
169 gen_op_shrl_T1_T0_cc,
170 gen_op_sarl_T1_T0_cc,
171 gen_op_rorl_T1_T0_cc,
174 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
176 gen_op_movl_T0_r0,
177 gen_op_movl_T0_r1,
178 gen_op_movl_T0_r2,
179 gen_op_movl_T0_r3,
180 gen_op_movl_T0_r4,
181 gen_op_movl_T0_r5,
182 gen_op_movl_T0_r6,
183 gen_op_movl_T0_r7,
184 gen_op_movl_T0_r8,
185 gen_op_movl_T0_r9,
186 gen_op_movl_T0_r10,
187 gen_op_movl_T0_r11,
188 gen_op_movl_T0_r12,
189 gen_op_movl_T0_r13,
190 gen_op_movl_T0_r14,
191 gen_op_movl_T0_r15,
194 gen_op_movl_T1_r0,
195 gen_op_movl_T1_r1,
196 gen_op_movl_T1_r2,
197 gen_op_movl_T1_r3,
198 gen_op_movl_T1_r4,
199 gen_op_movl_T1_r5,
200 gen_op_movl_T1_r6,
201 gen_op_movl_T1_r7,
202 gen_op_movl_T1_r8,
203 gen_op_movl_T1_r9,
204 gen_op_movl_T1_r10,
205 gen_op_movl_T1_r11,
206 gen_op_movl_T1_r12,
207 gen_op_movl_T1_r13,
208 gen_op_movl_T1_r14,
209 gen_op_movl_T1_r15,
212 gen_op_movl_T2_r0,
213 gen_op_movl_T2_r1,
214 gen_op_movl_T2_r2,
215 gen_op_movl_T2_r3,
216 gen_op_movl_T2_r4,
217 gen_op_movl_T2_r5,
218 gen_op_movl_T2_r6,
219 gen_op_movl_T2_r7,
220 gen_op_movl_T2_r8,
221 gen_op_movl_T2_r9,
222 gen_op_movl_T2_r10,
223 gen_op_movl_T2_r11,
224 gen_op_movl_T2_r12,
225 gen_op_movl_T2_r13,
226 gen_op_movl_T2_r14,
227 gen_op_movl_T2_r15,
231 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
233 gen_op_movl_r0_T0,
234 gen_op_movl_r1_T0,
235 gen_op_movl_r2_T0,
236 gen_op_movl_r3_T0,
237 gen_op_movl_r4_T0,
238 gen_op_movl_r5_T0,
239 gen_op_movl_r6_T0,
240 gen_op_movl_r7_T0,
241 gen_op_movl_r8_T0,
242 gen_op_movl_r9_T0,
243 gen_op_movl_r10_T0,
244 gen_op_movl_r11_T0,
245 gen_op_movl_r12_T0,
246 gen_op_movl_r13_T0,
247 gen_op_movl_r14_T0,
248 gen_op_movl_r15_T0,
251 gen_op_movl_r0_T1,
252 gen_op_movl_r1_T1,
253 gen_op_movl_r2_T1,
254 gen_op_movl_r3_T1,
255 gen_op_movl_r4_T1,
256 gen_op_movl_r5_T1,
257 gen_op_movl_r6_T1,
258 gen_op_movl_r7_T1,
259 gen_op_movl_r8_T1,
260 gen_op_movl_r9_T1,
261 gen_op_movl_r10_T1,
262 gen_op_movl_r11_T1,
263 gen_op_movl_r12_T1,
264 gen_op_movl_r13_T1,
265 gen_op_movl_r14_T1,
266 gen_op_movl_r15_T1,
270 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
271 gen_op_movl_T0_im,
272 gen_op_movl_T1_im,
273 gen_op_movl_T2_im,
276 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
277 gen_op_shll_T0_im_thumb,
278 gen_op_shrl_T0_im_thumb,
279 gen_op_sarl_T0_im_thumb,
282 static inline void gen_bx(DisasContext *s)
284 s->is_jmp = DISAS_UPDATE;
285 gen_op_bx_T0();
289 #if defined(CONFIG_USER_ONLY)
290 #define gen_ldst(name, s) gen_op_##name##_raw()
291 #else
292 #define gen_ldst(name, s) do { \
293 if (IS_USER(s)) \
294 gen_op_##name##_user(); \
295 else \
296 gen_op_##name##_kernel(); \
297 } while (0)
298 #endif
300 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
302 int val;
304 if (reg == 15) {
305 /* normally, since we updated PC, we need only to add one insn */
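/* Reading r15 yields the address of the current instruction plus 8 in ARM
   state (plus 4 in Thumb); s->pc already points past this instruction, so
   only one more instruction width needs to be added here. */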
306 if (s->thumb)
307 val = (long)s->pc + 2;
308 else
309 val = (long)s->pc + 4;
310 gen_op_movl_TN_im[t](val);
311 } else {
312 gen_op_movl_TN_reg[t][reg]();
316 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
318 gen_movl_TN_reg(s, reg, 0);
321 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
323 gen_movl_TN_reg(s, reg, 1);
326 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
328 gen_movl_TN_reg(s, reg, 2);
331 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
333 gen_op_movl_reg_TN[t][reg]();
334 if (reg == 15) {
335 s->is_jmp = DISAS_JUMP;
339 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
341 gen_movl_reg_TN(s, reg, 0);
344 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
346 gen_movl_reg_TN(s, reg, 1);
349 /* Force a TB lookup after an instruction that changes the CPU state. */
350 static inline void gen_lookup_tb(DisasContext *s)
352 gen_op_movl_T0_im(s->pc);
353 gen_movl_reg_T0(s, 15);
354 s->is_jmp = DISAS_UPDATE;
357 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
359 int val, rm, shift, shiftop;
361 if (!(insn & (1 << 25))) {
362 /* immediate */
363 val = insn & 0xfff;
364 if (!(insn & (1 << 23)))
365 val = -val;
366 if (val != 0)
367 gen_op_addl_T1_im(val);
368 } else {
369 /* shift/register */
370 rm = (insn) & 0xf;
371 shift = (insn >> 7) & 0x1f;
372 gen_movl_T2_reg(s, rm);
373 shiftop = (insn >> 5) & 3;
374 if (shift != 0) {
375 gen_shift_T2_im[shiftop](shift);
376 } else if (shiftop != 0) {
377 gen_shift_T2_0[shiftop]();
379 if (!(insn & (1 << 23)))
380 gen_op_subl_T1_T2();
381 else
382 gen_op_addl_T1_T2();
386 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
388 int val, rm;
390 if (insn & (1 << 22)) {
391 /* immediate */
392 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
393 if (!(insn & (1 << 23)))
394 val = -val;
395 if (val != 0)
396 gen_op_addl_T1_im(val);
397 } else {
398 /* register */
399 rm = (insn) & 0xf;
400 gen_movl_T2_reg(s, rm);
401 if (!(insn & (1 << 23)))
402 gen_op_subl_T1_T2();
403 else
404 gen_op_addl_T1_T2();
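/* VFP_OP(name) expands to a small wrapper that emits either the
   double-precision (dp) or the single-precision generated op for 'name'. */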
408 #define VFP_OP(name) \
409 static inline void gen_vfp_##name(int dp) \
411 if (dp) \
412 gen_op_vfp_##name##d(); \
413 else \
414 gen_op_vfp_##name##s(); \
417 VFP_OP(add)
418 VFP_OP(sub)
419 VFP_OP(mul)
420 VFP_OP(div)
421 VFP_OP(neg)
422 VFP_OP(abs)
423 VFP_OP(sqrt)
424 VFP_OP(cmp)
425 VFP_OP(cmpe)
426 VFP_OP(F1_ld0)
427 VFP_OP(uito)
428 VFP_OP(sito)
429 VFP_OP(toui)
430 VFP_OP(touiz)
431 VFP_OP(tosi)
432 VFP_OP(tosiz)
434 #undef VFP_OP
436 static inline void gen_vfp_ld(DisasContext *s, int dp)
438 if (dp)
439 gen_ldst(vfp_ldd, s);
440 else
441 gen_ldst(vfp_lds, s);
444 static inline void gen_vfp_st(DisasContext *s, int dp)
446 if (dp)
447 gen_ldst(vfp_std, s);
448 else
449 gen_ldst(vfp_sts, s);
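/* Single-precision VFP registers are stored as the two halves of the
   double-precision register file: an odd-numbered single maps to the upper
   word of regs[reg >> 1] and an even-numbered one to the lower word. */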
452 static inline long
453 vfp_reg_offset (int dp, int reg)
455 if (dp)
456 return offsetof(CPUARMState, vfp.regs[reg]);
457 else if (reg & 1) {
458 return offsetof(CPUARMState, vfp.regs[reg >> 1])
459 + offsetof(CPU_DoubleU, l.upper);
460 } else {
461 return offsetof(CPUARMState, vfp.regs[reg >> 1])
462 + offsetof(CPU_DoubleU, l.lower);
465 static inline void gen_mov_F0_vreg(int dp, int reg)
467 if (dp)
468 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
469 else
470 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
473 static inline void gen_mov_F1_vreg(int dp, int reg)
475 if (dp)
476 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
477 else
478 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
481 static inline void gen_mov_vreg_F0(int dp, int reg)
483 if (dp)
484 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
485 else
486 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
489 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
490 instruction is not defined. */
491 static int disas_cp15_insn(DisasContext *s, uint32_t insn)
493 uint32_t rd;
495 /* ??? Some cp15 registers are accessible from userspace. */
496 if (IS_USER(s)) {
497 return 1;
499 if ((insn & 0x0fff0fff) == 0x0e070f90
500 || (insn & 0x0fff0fff) == 0x0e070f58) {
501 /* Wait for interrupt. */
502 gen_op_movl_T0_im((long)s->pc);
503 gen_op_movl_reg_TN[0][15]();
504 gen_op_wfi();
505 s->is_jmp = DISAS_JUMP;
506 return 0;
508 rd = (insn >> 12) & 0xf;
509 if (insn & (1 << 20)) {
510 gen_op_movl_T0_cp15(insn);
511 /* If the destination register is r15 then the condition codes are set. */
512 if (rd != 15)
513 gen_movl_reg_T0(s, rd);
514 } else {
515 gen_movl_T0_reg(s, rd);
516 gen_op_movl_cp15_T0(insn);
518 gen_lookup_tb(s);
519 return 0;
522 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
523 (i.e. an undefined instruction). */
524 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
526 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
527 int dp, veclen;
529 if (!arm_feature(env, ARM_FEATURE_VFP))
530 return 1;
532 if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
533 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
534 if ((insn & 0x0fe00fff) != 0x0ee00a10)
535 return 1;
536 rn = (insn >> 16) & 0xf;
537 if (rn != 0 && rn != 8)
538 return 1;
540 dp = ((insn & 0xf00) == 0xb00);
541 switch ((insn >> 24) & 0xf) {
542 case 0xe:
543 if (insn & (1 << 4)) {
544 /* single register transfer */
545 if ((insn & 0x6f) != 0x00)
546 return 1;
547 rd = (insn >> 12) & 0xf;
548 if (dp) {
549 if (insn & 0x80)
550 return 1;
551 rn = (insn >> 16) & 0xf;
552 /* Get the existing value even for arm->vfp moves because
553 we only set half the register. */
554 gen_mov_F0_vreg(1, rn);
555 gen_op_vfp_mrrd();
556 if (insn & (1 << 20)) {
557 /* vfp->arm */
558 if (insn & (1 << 21))
559 gen_movl_reg_T1(s, rd);
560 else
561 gen_movl_reg_T0(s, rd);
562 } else {
563 /* arm->vfp */
564 if (insn & (1 << 21))
565 gen_movl_T1_reg(s, rd);
566 else
567 gen_movl_T0_reg(s, rd);
568 gen_op_vfp_mdrr();
569 gen_mov_vreg_F0(dp, rn);
571 } else {
572 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
573 if (insn & (1 << 20)) {
574 /* vfp->arm */
575 if (insn & (1 << 21)) {
576 /* system register */
577 rn >>= 1;
578 switch (rn) {
579 case ARM_VFP_FPSID:
580 case ARM_VFP_FPEXC:
581 case ARM_VFP_FPINST:
582 case ARM_VFP_FPINST2:
583 gen_op_vfp_movl_T0_xreg(rn);
584 break;
585 case ARM_VFP_FPSCR:
586 if (rd == 15)
587 gen_op_vfp_movl_T0_fpscr_flags();
588 else
589 gen_op_vfp_movl_T0_fpscr();
590 break;
591 default:
592 return 1;
594 } else {
595 gen_mov_F0_vreg(0, rn);
596 gen_op_vfp_mrs();
598 if (rd == 15) {
599 /* Set the 4 flag bits in the CPSR. */
600 gen_op_movl_cpsr_T0(0xf0000000);
601 } else
602 gen_movl_reg_T0(s, rd);
603 } else {
604 /* arm->vfp */
605 gen_movl_T0_reg(s, rd);
606 if (insn & (1 << 21)) {
607 rn >>= 1;
608 /* system register */
609 switch (rn) {
610 case ARM_VFP_FPSID:
611 /* Writes are ignored. */
612 break;
613 case ARM_VFP_FPSCR:
614 gen_op_vfp_movl_fpscr_T0();
615 gen_lookup_tb(s);
616 break;
617 case ARM_VFP_FPEXC:
618 gen_op_vfp_movl_xreg_T0(rn);
619 gen_lookup_tb(s);
620 break;
621 case ARM_VFP_FPINST:
622 case ARM_VFP_FPINST2:
623 gen_op_vfp_movl_xreg_T0(rn);
624 break;
625 default:
626 return 1;
628 } else {
629 gen_op_vfp_msr();
630 gen_mov_vreg_F0(0, rn);
634 } else {
635 /* data processing */
636 /* The opcode is in bits 23, 21, 20 and 6. */
637 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
638 if (dp) {
639 if (op == 15) {
640 /* rn is opcode */
641 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
642 } else {
643 /* rn is register number */
644 if (insn & (1 << 7))
645 return 1;
646 rn = (insn >> 16) & 0xf;
649 if (op == 15 && (rn == 15 || rn > 17)) {
650 /* Integer or single precision destination. */
651 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
652 } else {
653 if (insn & (1 << 22))
654 return 1;
655 rd = (insn >> 12) & 0xf;
658 if (op == 15 && (rn == 16 || rn == 17)) {
659 /* Integer source. */
660 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
661 } else {
662 if (insn & (1 << 5))
663 return 1;
664 rm = insn & 0xf;
666 } else {
667 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
668 if (op == 15 && rn == 15) {
669 /* Double precision destination. */
670 if (insn & (1 << 22))
671 return 1;
672 rd = (insn >> 12) & 0xf;
673 } else
674 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
675 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
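/* VFP short-vector mode: when FPSCR.LEN is nonzero an arithmetic op is
   repeated across a register bank using FPSCR.STRIDE, unless the destination
   lies in bank 0, in which case it stays scalar.  A source operand in bank 0
   is treated as a scalar mixed into the vector operation. */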
678 veclen = env->vfp.vec_len;
679 if (op == 15 && rn > 3)
680 veclen = 0;
682 /* Shut up compiler warnings. */
683 delta_m = 0;
684 delta_d = 0;
685 bank_mask = 0;
687 if (veclen > 0) {
688 if (dp)
689 bank_mask = 0xc;
690 else
691 bank_mask = 0x18;
693 /* Figure out what type of vector operation this is. */
694 if ((rd & bank_mask) == 0) {
695 /* scalar */
696 veclen = 0;
697 } else {
698 if (dp)
699 delta_d = (env->vfp.vec_stride >> 1) + 1;
700 else
701 delta_d = env->vfp.vec_stride + 1;
703 if ((rm & bank_mask) == 0) {
704 /* mixed scalar/vector */
705 delta_m = 0;
706 } else {
707 /* vector */
708 delta_m = delta_d;
713 /* Load the initial operands. */
714 if (op == 15) {
715 switch (rn) {
716 case 16:
717 case 17:
718 /* Integer source */
719 gen_mov_F0_vreg(0, rm);
720 break;
721 case 8:
722 case 9:
723 /* Compare */
724 gen_mov_F0_vreg(dp, rd);
725 gen_mov_F1_vreg(dp, rm);
726 break;
727 case 10:
728 case 11:
729 /* Compare with zero */
730 gen_mov_F0_vreg(dp, rd);
731 gen_vfp_F1_ld0(dp);
732 break;
733 default:
734 /* One source operand. */
735 gen_mov_F0_vreg(dp, rm);
737 } else {
738 /* Two source operands. */
739 gen_mov_F0_vreg(dp, rn);
740 gen_mov_F1_vreg(dp, rm);
743 for (;;) {
744 /* Perform the calculation. */
745 switch (op) {
746 case 0: /* mac: fd + (fn * fm) */
747 gen_vfp_mul(dp);
748 gen_mov_F1_vreg(dp, rd);
749 gen_vfp_add(dp);
750 break;
751 case 1: /* nmac: fd - (fn * fm) */
752 gen_vfp_mul(dp);
753 gen_vfp_neg(dp);
754 gen_mov_F1_vreg(dp, rd);
755 gen_vfp_add(dp);
756 break;
757 case 2: /* msc: -fd + (fn * fm) */
758 gen_vfp_mul(dp);
759 gen_mov_F1_vreg(dp, rd);
760 gen_vfp_sub(dp);
761 break;
762 case 3: /* nmsc: -fd - (fn * fm) */
763 gen_vfp_mul(dp);
764 gen_mov_F1_vreg(dp, rd);
765 gen_vfp_add(dp);
766 gen_vfp_neg(dp);
767 break;
768 case 4: /* mul: fn * fm */
769 gen_vfp_mul(dp);
770 break;
771 case 5: /* nmul: -(fn * fm) */
772 gen_vfp_mul(dp);
773 gen_vfp_neg(dp);
774 break;
775 case 6: /* add: fn + fm */
776 gen_vfp_add(dp);
777 break;
778 case 7: /* sub: fn - fm */
779 gen_vfp_sub(dp);
780 break;
781 case 8: /* div: fn / fm */
782 gen_vfp_div(dp);
783 break;
784 case 15: /* extension space */
785 switch (rn) {
786 case 0: /* cpy */
787 /* no-op */
788 break;
789 case 1: /* abs */
790 gen_vfp_abs(dp);
791 break;
792 case 2: /* neg */
793 gen_vfp_neg(dp);
794 break;
795 case 3: /* sqrt */
796 gen_vfp_sqrt(dp);
797 break;
798 case 8: /* cmp */
799 gen_vfp_cmp(dp);
800 break;
801 case 9: /* cmpe */
802 gen_vfp_cmpe(dp);
803 break;
804 case 10: /* cmpz */
805 gen_vfp_cmp(dp);
806 break;
807 case 11: /* cmpez */
808 gen_vfp_F1_ld0(dp);
809 gen_vfp_cmpe(dp);
810 break;
811 case 15: /* single<->double conversion */
812 if (dp)
813 gen_op_vfp_fcvtsd();
814 else
815 gen_op_vfp_fcvtds();
816 break;
817 case 16: /* fuito */
818 gen_vfp_uito(dp);
819 break;
820 case 17: /* fsito */
821 gen_vfp_sito(dp);
822 break;
823 case 24: /* ftoui */
824 gen_vfp_toui(dp);
825 break;
826 case 25: /* ftouiz */
827 gen_vfp_touiz(dp);
828 break;
829 case 26: /* ftosi */
830 gen_vfp_tosi(dp);
831 break;
832 case 27: /* ftosiz */
833 gen_vfp_tosiz(dp);
834 break;
835 default: /* undefined */
836 printf ("rn:%d\n", rn);
837 return 1;
839 break;
840 default: /* undefined */
841 printf ("op:%d\n", op);
842 return 1;
845 /* Write back the result. */
846 if (op == 15 && (rn >= 8 && rn <= 11))
847 ; /* Comparison, do nothing. */
848 else if (op == 15 && rn > 17)
849 /* Integer result. */
850 gen_mov_vreg_F0(0, rd);
851 else if (op == 15 && rn == 15)
852 /* conversion */
853 gen_mov_vreg_F0(!dp, rd);
854 else
855 gen_mov_vreg_F0(dp, rd);
857 /* break out of the loop if we have finished */
858 if (veclen == 0)
859 break;
861 if (op == 15 && delta_m == 0) {
862 /* single source one-many */
863 while (veclen--) {
864 rd = ((rd + delta_d) & (bank_mask - 1))
865 | (rd & bank_mask);
866 gen_mov_vreg_F0(dp, rd);
868 break;
870 /* Set up the next operands. */
871 veclen--;
872 rd = ((rd + delta_d) & (bank_mask - 1))
873 | (rd & bank_mask);
875 if (op == 15) {
876 /* One source operand. */
877 rm = ((rm + delta_m) & (bank_mask - 1))
878 | (rm & bank_mask);
879 gen_mov_F0_vreg(dp, rm);
880 } else {
881 /* Two source operands. */
882 rn = ((rn + delta_d) & (bank_mask - 1))
883 | (rn & bank_mask);
884 gen_mov_F0_vreg(dp, rn);
885 if (delta_m) {
886 rm = ((rm + delta_m) & (bank_mask - 1))
887 | (rm & bank_mask);
888 gen_mov_F1_vreg(dp, rm);
893 break;
894 case 0xc:
895 case 0xd:
896 if (dp && (insn & (1 << 22))) {
897 /* two-register transfer */
898 rn = (insn >> 16) & 0xf;
899 rd = (insn >> 12) & 0xf;
900 if (dp) {
901 if (insn & (1 << 5))
902 return 1;
903 rm = insn & 0xf;
904 } else
905 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
907 if (insn & (1 << 20)) {
908 /* vfp->arm */
909 if (dp) {
910 gen_mov_F0_vreg(1, rm);
911 gen_op_vfp_mrrd();
912 gen_movl_reg_T0(s, rd);
913 gen_movl_reg_T1(s, rn);
914 } else {
915 gen_mov_F0_vreg(0, rm);
916 gen_op_vfp_mrs();
917 gen_movl_reg_T0(s, rn);
918 gen_mov_F0_vreg(0, rm + 1);
919 gen_op_vfp_mrs();
920 gen_movl_reg_T0(s, rd);
922 } else {
923 /* arm->vfp */
924 if (dp) {
925 gen_movl_T0_reg(s, rd);
926 gen_movl_T1_reg(s, rn);
927 gen_op_vfp_mdrr();
928 gen_mov_vreg_F0(1, rm);
929 } else {
930 gen_movl_T0_reg(s, rn);
931 gen_op_vfp_msr();
932 gen_mov_vreg_F0(0, rm);
933 gen_movl_T0_reg(s, rd);
934 gen_op_vfp_msr();
935 gen_mov_vreg_F0(0, rm + 1);
938 } else {
939 /* Load/store */
940 rn = (insn >> 16) & 0xf;
941 if (dp)
942 rd = (insn >> 12) & 0xf;
943 else
944 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
945 gen_movl_T1_reg(s, rn);
946 if ((insn & 0x01200000) == 0x01000000) {
947 /* Single load/store */
948 offset = (insn & 0xff) << 2;
949 if ((insn & (1 << 23)) == 0)
950 offset = -offset;
951 gen_op_addl_T1_im(offset);
952 if (insn & (1 << 20)) {
953 gen_vfp_ld(s, dp);
954 gen_mov_vreg_F0(dp, rd);
955 } else {
956 gen_mov_F0_vreg(dp, rd);
957 gen_vfp_st(s, dp);
959 } else {
960 /* load/store multiple */
961 if (dp)
962 n = (insn >> 1) & 0x7f;
963 else
964 n = insn & 0xff;
966 if (insn & (1 << 24)) /* pre-decrement */
967 gen_op_addl_T1_im(-((insn & 0xff) << 2));
969 if (dp)
970 offset = 8;
971 else
972 offset = 4;
973 for (i = 0; i < n; i++) {
974 if (insn & (1 << 20)) {
975 /* load */
976 gen_vfp_ld(s, dp);
977 gen_mov_vreg_F0(dp, rd + i);
978 } else {
979 /* store */
980 gen_mov_F0_vreg(dp, rd + i);
981 gen_vfp_st(s, dp);
983 gen_op_addl_T1_im(offset);
985 if (insn & (1 << 21)) {
986 /* writeback */
987 if (insn & (1 << 24))
988 offset = -offset * n;
989 else if (dp && (insn & 1))
990 offset = 4;
991 else
992 offset = 0;
994 if (offset != 0)
995 gen_op_addl_T1_im(offset);
996 gen_movl_reg_T1(s, rn);
1000 break;
1001 default:
1002 /* Should never happen. */
1003 return 1;
1005 return 0;
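/* Jump to 'dest'.  If the destination is on the same guest page as the
   current TB the generated code uses goto_tb so the two blocks can be
   chained directly; otherwise the PC is updated and control returns to the
   main loop to look up the next TB. */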
1008 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1010 TranslationBlock *tb;
1012 tb = s->tb;
1013 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
1014 if (n == 0)
1015 gen_op_goto_tb0(TBPARAM(tb));
1016 else
1017 gen_op_goto_tb1(TBPARAM(tb));
1018 gen_op_movl_T0_im(dest);
1019 gen_op_movl_r15_T0();
1020 gen_op_movl_T0_im((long)tb + n);
1021 gen_op_exit_tb();
1022 } else {
1023 gen_op_movl_T0_im(dest);
1024 gen_op_movl_r15_T0();
1025 gen_op_movl_T0_0();
1026 gen_op_exit_tb();
1030 static inline void gen_jmp (DisasContext *s, uint32_t dest)
1032 if (__builtin_expect(s->singlestep_enabled, 0)) {
1033 /* An indirect jump so that we still trigger the debug exception. */
1034 if (s->thumb)
1035 dest |= 1;
1036 gen_op_movl_T0_im(dest);
1037 gen_bx(s);
1038 } else {
1039 gen_goto_tb(s, 0, dest);
1040 s->is_jmp = DISAS_TB_JUMP;
1044 static inline void gen_mulxy(int x, int y)
1046 if (x)
1047 gen_op_sarl_T0_im(16);
1048 else
1049 gen_op_sxth_T0();
1050 if (y)
1051 gen_op_sarl_T1_im(16);
1052 else
1053 gen_op_sxth_T1();
1054 gen_op_mul_T0_T1();
1057 /* Return the mask of PSR bits set by a MSR instruction. */
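/* 'flags' is the 4-bit field mask from the instruction: bit 0 selects the
   control byte, bit 1 the extension byte, bit 2 the status byte and bit 3
   the flags byte of the PSR. */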
1058 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
1059 uint32_t mask;
1061 mask = 0;
1062 if (flags & (1 << 0))
1063 mask |= 0xff;
1064 if (flags & (1 << 1))
1065 mask |= 0xff00;
1066 if (flags & (1 << 2))
1067 mask |= 0xff0000;
1068 if (flags & (1 << 3))
1069 mask |= 0xff000000;
1070 /* Mask out undefined bits. */
1071 mask &= 0xf90f03ff;
1072 /* Mask out state bits. */
1073 if (!spsr)
1074 mask &= ~0x01000020;
1075 /* Mask out privileged bits. */
1076 if (IS_USER(s))
1077 mask &= 0xf80f0200;
1078 return mask;
1081 /* Returns nonzero if access to the PSR is not permitted. */
1082 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
1084 if (spsr) {
1085 /* ??? This is also undefined in system mode. */
1086 if (IS_USER(s))
1087 return 1;
1088 gen_op_movl_spsr_T0(mask);
1089 } else {
1090 gen_op_movl_cpsr_T0(mask);
1092 gen_lookup_tb(s);
1093 return 0;
1096 static void gen_exception_return(DisasContext *s)
1098 gen_op_movl_reg_TN[0][15]();
1099 gen_op_movl_T0_spsr();
1100 gen_op_movl_cpsr_T0(0xffffffff);
1101 s->is_jmp = DISAS_UPDATE;
1104 static void disas_arm_insn(CPUState * env, DisasContext *s)
1106 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1108 insn = ldl_code(s->pc);
1109 s->pc += 4;
1111 cond = insn >> 28;
1112 if (cond == 0xf){
1113 /* Unconditional instructions. */
1114 if ((insn & 0x0d70f000) == 0x0550f000)
1115 return; /* PLD */
1116 else if ((insn & 0x0e000000) == 0x0a000000) {
1117 /* branch link and change to thumb (blx <offset>) */
1118 int32_t offset;
1120 val = (uint32_t)s->pc;
1121 gen_op_movl_T0_im(val);
1122 gen_movl_reg_T0(s, 14);
1123 /* Sign-extend the 24-bit offset */
1124 offset = (((int32_t)insn) << 8) >> 8;
1125 /* offset * 4 + bit24 * 2 + (thumb bit) */
1126 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1127 /* pipeline offset */
1128 val += 4;
1129 gen_op_movl_T0_im(val);
1130 gen_bx(s);
1131 return;
1132 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1133 /* Coprocessor double register transfer. */
1134 } else if ((insn & 0x0f000010) == 0x0e000010) {
1135 /* Additional coprocessor register transfer. */
1136 } else if ((insn & 0x0ff10010) == 0x01000000) {
1137 /* cps (privileged) */
1138 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1139 /* setend */
1140 if (insn & (1 << 9)) {
1141 /* BE8 mode not implemented. */
1142 goto illegal_op;
1144 return;
1146 goto illegal_op;
1148 if (cond != 0xe) {
1149 /* if the condition is not "always", generate a conditional jump to the
1150 next instruction */
1151 s->condlabel = gen_new_label();
1152 gen_test_cc[cond ^ 1](s->condlabel);
1153 s->condjmp = 1;
1154 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1155 //s->is_jmp = DISAS_JUMP_NEXT;
1157 if ((insn & 0x0f900000) == 0x03000000) {
1158 if ((insn & 0x0fb0f000) != 0x0320f000)
1159 goto illegal_op;
1160 /* CPSR = immediate */
1161 val = insn & 0xff;
1162 shift = ((insn >> 8) & 0xf) * 2;
1163 if (shift)
1164 val = (val >> shift) | (val << (32 - shift));
1165 gen_op_movl_T0_im(val);
1166 i = ((insn & (1 << 22)) != 0);
1167 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1168 goto illegal_op;
1169 } else if ((insn & 0x0f900000) == 0x01000000
1170 && (insn & 0x00000090) != 0x00000090) {
1171 /* miscellaneous instructions */
1172 op1 = (insn >> 21) & 3;
1173 sh = (insn >> 4) & 0xf;
1174 rm = insn & 0xf;
1175 switch (sh) {
1176 case 0x0: /* move program status register */
1177 if (op1 & 1) {
1178 /* PSR = reg */
1179 gen_movl_T0_reg(s, rm);
1180 i = ((op1 & 2) != 0);
1181 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1182 goto illegal_op;
1183 } else {
1184 /* reg = PSR */
1185 rd = (insn >> 12) & 0xf;
1186 if (op1 & 2) {
1187 if (IS_USER(s))
1188 goto illegal_op;
1189 gen_op_movl_T0_spsr();
1190 } else {
1191 gen_op_movl_T0_cpsr();
1193 gen_movl_reg_T0(s, rd);
1195 break;
1196 case 0x1:
1197 if (op1 == 1) {
1198 /* branch/exchange thumb (bx). */
1199 gen_movl_T0_reg(s, rm);
1200 gen_bx(s);
1201 } else if (op1 == 3) {
1202 /* clz */
1203 rd = (insn >> 12) & 0xf;
1204 gen_movl_T0_reg(s, rm);
1205 gen_op_clz_T0();
1206 gen_movl_reg_T0(s, rd);
1207 } else {
1208 goto illegal_op;
1210 break;
1211 case 0x2:
1212 if (op1 == 1) {
1213 ARCH(5J); /* bxj */
1214 /* Trivial implementation equivalent to bx. */
1215 gen_movl_T0_reg(s, rm);
1216 gen_bx(s);
1217 } else {
1218 goto illegal_op;
1220 break;
1221 case 0x3:
1222 if (op1 != 1)
1223 goto illegal_op;
1225 /* branch link/exchange thumb (blx) */
1226 val = (uint32_t)s->pc;
1227 gen_op_movl_T0_im(val);
1228 gen_movl_reg_T0(s, 14);
1229 gen_movl_T0_reg(s, rm);
1230 gen_bx(s);
1231 break;
1232 case 0x5: /* saturating add/subtract */
1233 rd = (insn >> 12) & 0xf;
1234 rn = (insn >> 16) & 0xf;
1235 gen_movl_T0_reg(s, rm);
1236 gen_movl_T1_reg(s, rn);
1237 if (op1 & 2)
1238 gen_op_double_T1_saturate();
1239 if (op1 & 1)
1240 gen_op_subl_T0_T1_saturate();
1241 else
1242 gen_op_addl_T0_T1_saturate();
1243 gen_movl_reg_T0(s, rd);
1244 break;
1245 case 7: /* bkpt */
1246 gen_op_movl_T0_im((long)s->pc - 4);
1247 gen_op_movl_reg_TN[0][15]();
1248 gen_op_bkpt();
1249 s->is_jmp = DISAS_JUMP;
1250 break;
1251 case 0x8: /* signed multiply */
1252 case 0xa:
1253 case 0xc:
1254 case 0xe:
1255 rs = (insn >> 8) & 0xf;
1256 rn = (insn >> 12) & 0xf;
1257 rd = (insn >> 16) & 0xf;
1258 if (op1 == 1) {
1259 /* (32 * 16) >> 16 */
1260 gen_movl_T0_reg(s, rm);
1261 gen_movl_T1_reg(s, rs);
1262 if (sh & 4)
1263 gen_op_sarl_T1_im(16);
1264 else
1265 gen_op_sxth_T1();
1266 gen_op_imulw_T0_T1();
1267 if ((sh & 2) == 0) {
1268 gen_movl_T1_reg(s, rn);
1269 gen_op_addl_T0_T1_setq();
1271 gen_movl_reg_T0(s, rd);
1272 } else {
1273 /* 16 * 16 */
1274 gen_movl_T0_reg(s, rm);
1275 gen_movl_T1_reg(s, rs);
1276 gen_mulxy(sh & 2, sh & 4);
1277 if (op1 == 2) {
1278 gen_op_signbit_T1_T0();
1279 gen_op_addq_T0_T1(rn, rd);
1280 gen_movl_reg_T0(s, rn);
1281 gen_movl_reg_T1(s, rd);
1282 } else {
1283 if (op1 == 0) {
1284 gen_movl_T1_reg(s, rn);
1285 gen_op_addl_T0_T1_setq();
1287 gen_movl_reg_T0(s, rd);
1290 break;
1291 default:
1292 goto illegal_op;
1294 } else if (((insn & 0x0e000000) == 0 &&
1295 (insn & 0x00000090) != 0x90) ||
1296 ((insn & 0x0e000000) == (1 << 25))) {
1297 int set_cc, logic_cc, shiftop;
1299 op1 = (insn >> 21) & 0xf;
1300 set_cc = (insn >> 20) & 1;
1301 logic_cc = table_logic_cc[op1] & set_cc;
1303 /* data processing instruction */
1304 if (insn & (1 << 25)) {
1305 /* immediate operand */
1306 val = insn & 0xff;
1307 shift = ((insn >> 8) & 0xf) * 2;
1308 if (shift)
1309 val = (val >> shift) | (val << (32 - shift));
1310 gen_op_movl_T1_im(val);
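/* For logical ops the shifter carry-out of a rotated immediate is bit 31
   of the rotated value, so update CF from T1 when a rotation was applied. */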
1311 if (logic_cc && shift)
1312 gen_op_mov_CF_T1();
1313 } else {
1314 /* register */
1315 rm = (insn) & 0xf;
1316 gen_movl_T1_reg(s, rm);
1317 shiftop = (insn >> 5) & 3;
1318 if (!(insn & (1 << 4))) {
1319 shift = (insn >> 7) & 0x1f;
1320 if (shift != 0) {
1321 if (logic_cc) {
1322 gen_shift_T1_im_cc[shiftop](shift);
1323 } else {
1324 gen_shift_T1_im[shiftop](shift);
1326 } else if (shiftop != 0) {
1327 if (logic_cc) {
1328 gen_shift_T1_0_cc[shiftop]();
1329 } else {
1330 gen_shift_T1_0[shiftop]();
1333 } else {
1334 rs = (insn >> 8) & 0xf;
1335 gen_movl_T0_reg(s, rs);
1336 if (logic_cc) {
1337 gen_shift_T1_T0_cc[shiftop]();
1338 } else {
1339 gen_shift_T1_T0[shiftop]();
1343 if (op1 != 0x0f && op1 != 0x0d) {
1344 rn = (insn >> 16) & 0xf;
1345 gen_movl_T0_reg(s, rn);
1347 rd = (insn >> 12) & 0xf;
1348 switch(op1) {
1349 case 0x00:
1350 gen_op_andl_T0_T1();
1351 gen_movl_reg_T0(s, rd);
1352 if (logic_cc)
1353 gen_op_logic_T0_cc();
1354 break;
1355 case 0x01:
1356 gen_op_xorl_T0_T1();
1357 gen_movl_reg_T0(s, rd);
1358 if (logic_cc)
1359 gen_op_logic_T0_cc();
1360 break;
1361 case 0x02:
1362 if (set_cc && rd == 15) {
1363 /* SUBS r15, ... is used for exception return. */
1364 if (IS_USER(s))
1365 goto illegal_op;
1366 gen_op_subl_T0_T1_cc();
1367 gen_exception_return(s);
1368 } else {
1369 if (set_cc)
1370 gen_op_subl_T0_T1_cc();
1371 else
1372 gen_op_subl_T0_T1();
1373 gen_movl_reg_T0(s, rd);
1375 break;
1376 case 0x03:
1377 if (set_cc)
1378 gen_op_rsbl_T0_T1_cc();
1379 else
1380 gen_op_rsbl_T0_T1();
1381 gen_movl_reg_T0(s, rd);
1382 break;
1383 case 0x04:
1384 if (set_cc)
1385 gen_op_addl_T0_T1_cc();
1386 else
1387 gen_op_addl_T0_T1();
1388 gen_movl_reg_T0(s, rd);
1389 break;
1390 case 0x05:
1391 if (set_cc)
1392 gen_op_adcl_T0_T1_cc();
1393 else
1394 gen_op_adcl_T0_T1();
1395 gen_movl_reg_T0(s, rd);
1396 break;
1397 case 0x06:
1398 if (set_cc)
1399 gen_op_sbcl_T0_T1_cc();
1400 else
1401 gen_op_sbcl_T0_T1();
1402 gen_movl_reg_T0(s, rd);
1403 break;
1404 case 0x07:
1405 if (set_cc)
1406 gen_op_rscl_T0_T1_cc();
1407 else
1408 gen_op_rscl_T0_T1();
1409 gen_movl_reg_T0(s, rd);
1410 break;
1411 case 0x08:
1412 if (set_cc) {
1413 gen_op_andl_T0_T1();
1414 gen_op_logic_T0_cc();
1416 break;
1417 case 0x09:
1418 if (set_cc) {
1419 gen_op_xorl_T0_T1();
1420 gen_op_logic_T0_cc();
1422 break;
1423 case 0x0a:
1424 if (set_cc) {
1425 gen_op_subl_T0_T1_cc();
1427 break;
1428 case 0x0b:
1429 if (set_cc) {
1430 gen_op_addl_T0_T1_cc();
1432 break;
1433 case 0x0c:
1434 gen_op_orl_T0_T1();
1435 gen_movl_reg_T0(s, rd);
1436 if (logic_cc)
1437 gen_op_logic_T0_cc();
1438 break;
1439 case 0x0d:
1440 if (logic_cc && rd == 15) {
1441 /* MOVS r15, ... is used for exception return. */
1442 if (IS_USER(s))
1443 goto illegal_op;
1444 gen_op_movl_T0_T1();
1445 gen_exception_return(s);
1446 } else {
1447 gen_movl_reg_T1(s, rd);
1448 if (logic_cc)
1449 gen_op_logic_T1_cc();
1451 break;
1452 case 0x0e:
1453 gen_op_bicl_T0_T1();
1454 gen_movl_reg_T0(s, rd);
1455 if (logic_cc)
1456 gen_op_logic_T0_cc();
1457 break;
1458 default:
1459 case 0x0f:
1460 gen_op_notl_T1();
1461 gen_movl_reg_T1(s, rd);
1462 if (logic_cc)
1463 gen_op_logic_T1_cc();
1464 break;
1466 } else {
1467 /* other instructions */
1468 op1 = (insn >> 24) & 0xf;
1469 switch(op1) {
1470 case 0x0:
1471 case 0x1:
1472 /* multiplies, extra load/stores */
1473 sh = (insn >> 5) & 3;
1474 if (sh == 0) {
1475 if (op1 == 0x0) {
1476 rd = (insn >> 16) & 0xf;
1477 rn = (insn >> 12) & 0xf;
1478 rs = (insn >> 8) & 0xf;
1479 rm = (insn) & 0xf;
1480 if (((insn >> 22) & 3) == 0) {
1481 /* 32 bit mul */
1482 gen_movl_T0_reg(s, rs);
1483 gen_movl_T1_reg(s, rm);
1484 gen_op_mul_T0_T1();
1485 if (insn & (1 << 21)) {
1486 gen_movl_T1_reg(s, rn);
1487 gen_op_addl_T0_T1();
1489 if (insn & (1 << 20))
1490 gen_op_logic_T0_cc();
1491 gen_movl_reg_T0(s, rd);
1492 } else {
1493 /* 64 bit mul */
1494 gen_movl_T0_reg(s, rs);
1495 gen_movl_T1_reg(s, rm);
1496 if (insn & (1 << 22))
1497 gen_op_imull_T0_T1();
1498 else
1499 gen_op_mull_T0_T1();
1500 if (insn & (1 << 21)) /* mult accumulate */
1501 gen_op_addq_T0_T1(rn, rd);
1502 if (!(insn & (1 << 23))) { /* double accumulate */
1503 ARCH(6);
1504 gen_op_addq_lo_T0_T1(rn);
1505 gen_op_addq_lo_T0_T1(rd);
1507 if (insn & (1 << 20))
1508 gen_op_logicq_cc();
1509 gen_movl_reg_T0(s, rn);
1510 gen_movl_reg_T1(s, rd);
1512 } else {
1513 rn = (insn >> 16) & 0xf;
1514 rd = (insn >> 12) & 0xf;
1515 if (insn & (1 << 23)) {
1516 /* load/store exclusive */
1517 goto illegal_op;
1518 } else {
1519 /* SWP instruction */
1520 rm = (insn) & 0xf;
1522 gen_movl_T0_reg(s, rm);
1523 gen_movl_T1_reg(s, rn);
1524 if (insn & (1 << 22)) {
1525 gen_ldst(swpb, s);
1526 } else {
1527 gen_ldst(swpl, s);
1529 gen_movl_reg_T0(s, rd);
1532 } else {
1533 /* Misc load/store */
1534 rn = (insn >> 16) & 0xf;
1535 rd = (insn >> 12) & 0xf;
1536 gen_movl_T1_reg(s, rn);
1537 if (insn & (1 << 24))
1538 gen_add_datah_offset(s, insn);
1539 if (insn & (1 << 20)) {
1540 /* load */
1541 switch(sh) {
1542 case 1:
1543 gen_ldst(lduw, s);
1544 break;
1545 case 2:
1546 gen_ldst(ldsb, s);
1547 break;
1548 default:
1549 case 3:
1550 gen_ldst(ldsw, s);
1551 break;
1553 gen_movl_reg_T0(s, rd);
1554 } else if (sh & 2) {
1555 /* doubleword */
1556 if (sh & 1) {
1557 /* store */
1558 gen_movl_T0_reg(s, rd);
1559 gen_ldst(stl, s);
1560 gen_op_addl_T1_im(4);
1561 gen_movl_T0_reg(s, rd + 1);
1562 gen_ldst(stl, s);
1563 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1564 gen_op_addl_T1_im(-4);
1565 } else {
1566 /* load */
1567 gen_ldst(ldl, s);
1568 gen_movl_reg_T0(s, rd);
1569 gen_op_addl_T1_im(4);
1570 gen_ldst(ldl, s);
1571 gen_movl_reg_T0(s, rd + 1);
1572 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1573 gen_op_addl_T1_im(-4);
1575 } else {
1576 /* store */
1577 gen_movl_T0_reg(s, rd);
1578 gen_ldst(stw, s);
1580 if (!(insn & (1 << 24))) {
1581 gen_add_datah_offset(s, insn);
1582 gen_movl_reg_T1(s, rn);
1583 } else if (insn & (1 << 21)) {
1584 gen_movl_reg_T1(s, rn);
1587 break;
1588 case 0x4:
1589 case 0x5:
1590 case 0x6:
1591 case 0x7:
1592 /* load/store byte/word */
1593 rn = (insn >> 16) & 0xf;
1594 rd = (insn >> 12) & 0xf;
1595 gen_movl_T1_reg(s, rn);
1596 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1597 if (insn & (1 << 24))
1598 gen_add_data_offset(s, insn);
1599 if (insn & (1 << 20)) {
1600 /* load */
1601 #if defined(CONFIG_USER_ONLY)
1602 if (insn & (1 << 22))
1603 gen_op_ldub_raw();
1604 else
1605 gen_op_ldl_raw();
1606 #else
1607 if (insn & (1 << 22)) {
1608 if (i)
1609 gen_op_ldub_user();
1610 else
1611 gen_op_ldub_kernel();
1612 } else {
1613 if (i)
1614 gen_op_ldl_user();
1615 else
1616 gen_op_ldl_kernel();
1618 #endif
1619 if (rd == 15)
1620 gen_bx(s);
1621 else
1622 gen_movl_reg_T0(s, rd);
1623 } else {
1624 /* store */
1625 gen_movl_T0_reg(s, rd);
1626 #if defined(CONFIG_USER_ONLY)
1627 if (insn & (1 << 22))
1628 gen_op_stb_raw();
1629 else
1630 gen_op_stl_raw();
1631 #else
1632 if (insn & (1 << 22)) {
1633 if (i)
1634 gen_op_stb_user();
1635 else
1636 gen_op_stb_kernel();
1637 } else {
1638 if (i)
1639 gen_op_stl_user();
1640 else
1641 gen_op_stl_kernel();
1643 #endif
1645 if (!(insn & (1 << 24))) {
1646 gen_add_data_offset(s, insn);
1647 gen_movl_reg_T1(s, rn);
1648 } else if (insn & (1 << 21))
1649 gen_movl_reg_T1(s, rn);
1651 break;
1652 case 0x08:
1653 case 0x09:
1655 int j, n, user, loaded_base;
1656 /* load/store multiple words */
1657 /* XXX: store correct base if write back */
1658 user = 0;
1659 if (insn & (1 << 22)) {
1660 if (IS_USER(s))
1661 goto illegal_op; /* only usable in supervisor mode */
1663 if ((insn & (1 << 15)) == 0)
1664 user = 1;
1666 rn = (insn >> 16) & 0xf;
1667 gen_movl_T1_reg(s, rn);
1669 /* compute total size */
1670 loaded_base = 0;
1671 n = 0;
1672 for(i=0;i<16;i++) {
1673 if (insn & (1 << i))
1674 n++;
1676 /* XXX: test invalid n == 0 case ? */
1677 if (insn & (1 << 23)) {
1678 if (insn & (1 << 24)) {
1679 /* pre increment */
1680 gen_op_addl_T1_im(4);
1681 } else {
1682 /* post increment */
1684 } else {
1685 if (insn & (1 << 24)) {
1686 /* pre decrement */
1687 gen_op_addl_T1_im(-(n * 4));
1688 } else {
1689 /* post decrement */
1690 if (n != 1)
1691 gen_op_addl_T1_im(-((n - 1) * 4));
1694 j = 0;
1695 for(i=0;i<16;i++) {
1696 if (insn & (1 << i)) {
1697 if (insn & (1 << 20)) {
1698 /* load */
1699 gen_ldst(ldl, s);
1700 if (i == 15) {
1701 gen_bx(s);
1702 } else if (user) {
1703 gen_op_movl_user_T0(i);
1704 } else if (i == rn) {
1705 gen_op_movl_T2_T0();
1706 loaded_base = 1;
1707 } else {
1708 gen_movl_reg_T0(s, i);
1710 } else {
1711 /* store */
1712 if (i == 15) {
1713 /* special case: r15 = PC + 12 */
1714 val = (long)s->pc + 8;
1715 gen_op_movl_TN_im[0](val);
1716 } else if (user) {
1717 gen_op_movl_T0_user(i);
1718 } else {
1719 gen_movl_T0_reg(s, i);
1721 gen_ldst(stl, s);
1723 j++;
1724 /* no need to add after the last transfer */
1725 if (j != n)
1726 gen_op_addl_T1_im(4);
1729 if (insn & (1 << 21)) {
1730 /* write back */
1731 if (insn & (1 << 23)) {
1732 if (insn & (1 << 24)) {
1733 /* pre increment */
1734 } else {
1735 /* post increment */
1736 gen_op_addl_T1_im(4);
1738 } else {
1739 if (insn & (1 << 24)) {
1740 /* pre decrement */
1741 if (n != 1)
1742 gen_op_addl_T1_im(-((n - 1) * 4));
1743 } else {
1744 /* post decrement */
1745 gen_op_addl_T1_im(-(n * 4));
1748 gen_movl_reg_T1(s, rn);
1750 if (loaded_base) {
1751 gen_op_movl_T0_T2();
1752 gen_movl_reg_T0(s, rn);
1754 if ((insn & (1 << 22)) && !user) {
1755 /* Restore CPSR from SPSR. */
1756 gen_op_movl_T0_spsr();
1757 gen_op_movl_cpsr_T0(0xffffffff);
1758 s->is_jmp = DISAS_UPDATE;
1761 break;
1762 case 0xa:
1763 case 0xb:
1765 int32_t offset;
1767 /* branch (and link) */
1768 val = (int32_t)s->pc;
1769 if (insn & (1 << 24)) {
1770 gen_op_movl_T0_im(val);
1771 gen_op_movl_reg_TN[0][14]();
1773 offset = (((int32_t)insn << 8) >> 8);
1774 val += (offset << 2) + 4;
1775 gen_jmp(s, val);
1777 break;
1778 case 0xc:
1779 case 0xd:
1780 case 0xe:
1781 /* Coprocessor. */
1782 op1 = (insn >> 8) & 0xf;
1783 switch (op1) {
1784 case 10:
1785 case 11:
1786 if (disas_vfp_insn (env, s, insn))
1787 goto illegal_op;
1788 break;
1789 case 15:
1790 if (disas_cp15_insn (s, insn))
1791 goto illegal_op;
1792 break;
1793 default:
1794 /* unknown coprocessor. */
1795 goto illegal_op;
1797 break;
1798 case 0xf:
1799 /* swi */
1800 gen_op_movl_T0_im((long)s->pc);
1801 gen_op_movl_reg_TN[0][15]();
1802 gen_op_swi();
1803 s->is_jmp = DISAS_JUMP;
1804 break;
1805 default:
1806 illegal_op:
1807 gen_op_movl_T0_im((long)s->pc - 4);
1808 gen_op_movl_reg_TN[0][15]();
1809 gen_op_undef_insn();
1810 s->is_jmp = DISAS_JUMP;
1811 break;
1816 static void disas_thumb_insn(DisasContext *s)
1818 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1819 int32_t offset;
1820 int i;
1822 insn = lduw_code(s->pc);
1823 s->pc += 2;
1825 switch (insn >> 12) {
1826 case 0: case 1:
1827 rd = insn & 7;
1828 op = (insn >> 11) & 3;
1829 if (op == 3) {
1830 /* add/subtract */
1831 rn = (insn >> 3) & 7;
1832 gen_movl_T0_reg(s, rn);
1833 if (insn & (1 << 10)) {
1834 /* immediate */
1835 gen_op_movl_T1_im((insn >> 6) & 7);
1836 } else {
1837 /* reg */
1838 rm = (insn >> 6) & 7;
1839 gen_movl_T1_reg(s, rm);
1841 if (insn & (1 << 9))
1842 gen_op_subl_T0_T1_cc();
1843 else
1844 gen_op_addl_T0_T1_cc();
1845 gen_movl_reg_T0(s, rd);
1846 } else {
1847 /* shift immediate */
1848 rm = (insn >> 3) & 7;
1849 shift = (insn >> 6) & 0x1f;
1850 gen_movl_T0_reg(s, rm);
1851 gen_shift_T0_im_thumb[op](shift);
1852 gen_movl_reg_T0(s, rd);
1854 break;
1855 case 2: case 3:
1856 /* arithmetic large immediate */
1857 op = (insn >> 11) & 3;
1858 rd = (insn >> 8) & 0x7;
1859 if (op == 0) {
1860 gen_op_movl_T0_im(insn & 0xff);
1861 } else {
1862 gen_movl_T0_reg(s, rd);
1863 gen_op_movl_T1_im(insn & 0xff);
1865 switch (op) {
1866 case 0: /* mov */
1867 gen_op_logic_T0_cc();
1868 break;
1869 case 1: /* cmp */
1870 gen_op_subl_T0_T1_cc();
1871 break;
1872 case 2: /* add */
1873 gen_op_addl_T0_T1_cc();
1874 break;
1875 case 3: /* sub */
1876 gen_op_subl_T0_T1_cc();
1877 break;
1879 if (op != 1)
1880 gen_movl_reg_T0(s, rd);
1881 break;
1882 case 4:
1883 if (insn & (1 << 11)) {
1884 rd = (insn >> 8) & 7;
1885 /* load pc-relative. Bit 1 of PC is ignored. */
1886 val = s->pc + 2 + ((insn & 0xff) * 4);
1887 val &= ~(uint32_t)2;
1888 gen_op_movl_T1_im(val);
1889 gen_ldst(ldl, s);
1890 gen_movl_reg_T0(s, rd);
1891 break;
1893 if (insn & (1 << 10)) {
1894 /* data processing extended or blx */
1895 rd = (insn & 7) | ((insn >> 4) & 8);
1896 rm = (insn >> 3) & 0xf;
1897 op = (insn >> 8) & 3;
1898 switch (op) {
1899 case 0: /* add */
1900 gen_movl_T0_reg(s, rd);
1901 gen_movl_T1_reg(s, rm);
1902 gen_op_addl_T0_T1();
1903 gen_movl_reg_T0(s, rd);
1904 break;
1905 case 1: /* cmp */
1906 gen_movl_T0_reg(s, rd);
1907 gen_movl_T1_reg(s, rm);
1908 gen_op_subl_T0_T1_cc();
1909 break;
1910 case 2: /* mov/cpy */
1911 gen_movl_T0_reg(s, rm);
1912 gen_movl_reg_T0(s, rd);
1913 break;
1914 case 3:/* branch [and link] exchange thumb register */
1915 if (insn & (1 << 7)) {
1916 val = (uint32_t)s->pc | 1;
1917 gen_op_movl_T1_im(val);
1918 gen_movl_reg_T1(s, 14);
1920 gen_movl_T0_reg(s, rm);
1921 gen_bx(s);
1922 break;
1924 break;
1927 /* data processing register */
1928 rd = insn & 7;
1929 rm = (insn >> 3) & 7;
1930 op = (insn >> 6) & 0xf;
1931 if (op == 2 || op == 3 || op == 4 || op == 7) {
1932 /* the shift/rotate ops want the operands backwards */
1933 val = rm;
1934 rm = rd;
1935 rd = val;
1936 val = 1;
1937 } else {
1938 val = 0;
1941 if (op == 9) /* neg */
1942 gen_op_movl_T0_im(0);
1943 else if (op != 0xf) /* mvn doesn't read its first operand */
1944 gen_movl_T0_reg(s, rd);
1946 gen_movl_T1_reg(s, rm);
1947 switch (op) {
1948 case 0x0: /* and */
1949 gen_op_andl_T0_T1();
1950 gen_op_logic_T0_cc();
1951 break;
1952 case 0x1: /* eor */
1953 gen_op_xorl_T0_T1();
1954 gen_op_logic_T0_cc();
1955 break;
1956 case 0x2: /* lsl */
1957 gen_op_shll_T1_T0_cc();
1958 gen_op_logic_T1_cc();
1959 break;
1960 case 0x3: /* lsr */
1961 gen_op_shrl_T1_T0_cc();
1962 gen_op_logic_T1_cc();
1963 break;
1964 case 0x4: /* asr */
1965 gen_op_sarl_T1_T0_cc();
1966 gen_op_logic_T1_cc();
1967 break;
1968 case 0x5: /* adc */
1969 gen_op_adcl_T0_T1_cc();
1970 break;
1971 case 0x6: /* sbc */
1972 gen_op_sbcl_T0_T1_cc();
1973 break;
1974 case 0x7: /* ror */
1975 gen_op_rorl_T1_T0_cc();
1976 gen_op_logic_T1_cc();
1977 break;
1978 case 0x8: /* tst */
1979 gen_op_andl_T0_T1();
1980 gen_op_logic_T0_cc();
1981 rd = 16;
1982 break;
1983 case 0x9: /* neg */
1984 gen_op_subl_T0_T1_cc();
1985 break;
1986 case 0xa: /* cmp */
1987 gen_op_subl_T0_T1_cc();
1988 rd = 16;
1989 break;
1990 case 0xb: /* cmn */
1991 gen_op_addl_T0_T1_cc();
1992 rd = 16;
1993 break;
1994 case 0xc: /* orr */
1995 gen_op_orl_T0_T1();
1996 gen_op_logic_T0_cc();
1997 break;
1998 case 0xd: /* mul */
1999 gen_op_mull_T0_T1();
2000 gen_op_logic_T0_cc();
2001 break;
2002 case 0xe: /* bic */
2003 gen_op_bicl_T0_T1();
2004 gen_op_logic_T0_cc();
2005 break;
2006 case 0xf: /* mvn */
2007 gen_op_notl_T1();
2008 gen_op_logic_T1_cc();
2009 val = 1;
2010 rm = rd;
2011 break;
2013 if (rd != 16) {
2014 if (val)
2015 gen_movl_reg_T1(s, rm);
2016 else
2017 gen_movl_reg_T0(s, rd);
2019 break;
2021 case 5:
2022 /* load/store register offset. */
2023 rd = insn & 7;
2024 rn = (insn >> 3) & 7;
2025 rm = (insn >> 6) & 7;
2026 op = (insn >> 9) & 7;
2027 gen_movl_T1_reg(s, rn);
2028 gen_movl_T2_reg(s, rm);
2029 gen_op_addl_T1_T2();
2031 if (op < 3) /* store */
2032 gen_movl_T0_reg(s, rd);
2034 switch (op) {
2035 case 0: /* str */
2036 gen_ldst(stl, s);
2037 break;
2038 case 1: /* strh */
2039 gen_ldst(stw, s);
2040 break;
2041 case 2: /* strb */
2042 gen_ldst(stb, s);
2043 break;
2044 case 3: /* ldrsb */
2045 gen_ldst(ldsb, s);
2046 break;
2047 case 4: /* ldr */
2048 gen_ldst(ldl, s);
2049 break;
2050 case 5: /* ldrh */
2051 gen_ldst(lduw, s);
2052 break;
2053 case 6: /* ldrb */
2054 gen_ldst(ldub, s);
2055 break;
2056 case 7: /* ldrsh */
2057 gen_ldst(ldsw, s);
2058 break;
2060 if (op >= 3) /* load */
2061 gen_movl_reg_T0(s, rd);
2062 break;
2064 case 6:
2065 /* load/store word immediate offset */
2066 rd = insn & 7;
2067 rn = (insn >> 3) & 7;
2068 gen_movl_T1_reg(s, rn);
2069 val = (insn >> 4) & 0x7c;
2070 gen_op_movl_T2_im(val);
2071 gen_op_addl_T1_T2();
2073 if (insn & (1 << 11)) {
2074 /* load */
2075 gen_ldst(ldl, s);
2076 gen_movl_reg_T0(s, rd);
2077 } else {
2078 /* store */
2079 gen_movl_T0_reg(s, rd);
2080 gen_ldst(stl, s);
2082 break;
2084 case 7:
2085 /* load/store byte immediate offset */
2086 rd = insn & 7;
2087 rn = (insn >> 3) & 7;
2088 gen_movl_T1_reg(s, rn);
2089 val = (insn >> 6) & 0x1f;
2090 gen_op_movl_T2_im(val);
2091 gen_op_addl_T1_T2();
2093 if (insn & (1 << 11)) {
2094 /* load */
2095 gen_ldst(ldub, s);
2096 gen_movl_reg_T0(s, rd);
2097 } else {
2098 /* store */
2099 gen_movl_T0_reg(s, rd);
2100 gen_ldst(stb, s);
2102 break;
2104 case 8:
2105 /* load/store halfword immediate offset */
2106 rd = insn & 7;
2107 rn = (insn >> 3) & 7;
2108 gen_movl_T1_reg(s, rn);
2109 val = (insn >> 5) & 0x3e;
2110 gen_op_movl_T2_im(val);
2111 gen_op_addl_T1_T2();
2113 if (insn & (1 << 11)) {
2114 /* load */
2115 gen_ldst(lduw, s);
2116 gen_movl_reg_T0(s, rd);
2117 } else {
2118 /* store */
2119 gen_movl_T0_reg(s, rd);
2120 gen_ldst(stw, s);
2122 break;
2124 case 9:
2125 /* load/store from stack */
2126 rd = (insn >> 8) & 7;
2127 gen_movl_T1_reg(s, 13);
2128 val = (insn & 0xff) * 4;
2129 gen_op_movl_T2_im(val);
2130 gen_op_addl_T1_T2();
2132 if (insn & (1 << 11)) {
2133 /* load */
2134 gen_ldst(ldl, s);
2135 gen_movl_reg_T0(s, rd);
2136 } else {
2137 /* store */
2138 gen_movl_T0_reg(s, rd);
2139 gen_ldst(stl, s);
2141 break;
2143 case 10:
2144 /* add to high reg */
2145 rd = (insn >> 8) & 7;
2146 if (insn & (1 << 11)) {
2147 /* SP */
2148 gen_movl_T0_reg(s, 13);
2149 } else {
2150 /* PC. bit 1 is ignored. */
2151 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2153 val = (insn & 0xff) * 4;
2154 gen_op_movl_T1_im(val);
2155 gen_op_addl_T0_T1();
2156 gen_movl_reg_T0(s, rd);
2157 break;
2159 case 11:
2160 /* misc */
2161 op = (insn >> 8) & 0xf;
2162 switch (op) {
2163 case 0:
2164 /* adjust stack pointer */
2165 gen_movl_T1_reg(s, 13);
2166 val = (insn & 0x7f) * 4;
2167 if (insn & (1 << 7))
2168 val = -(int32_t)val;
2169 gen_op_movl_T2_im(val);
2170 gen_op_addl_T1_T2();
2171 gen_movl_reg_T1(s, 13);
2172 break;
2174 case 4: case 5: case 0xc: case 0xd:
2175 /* push/pop */
2176 gen_movl_T1_reg(s, 13);
2177 if (insn & (1 << 8))
2178 offset = 4;
2179 else
2180 offset = 0;
2181 for (i = 0; i < 8; i++) {
2182 if (insn & (1 << i))
2183 offset += 4;
2185 if ((insn & (1 << 11)) == 0) {
2186 gen_op_movl_T2_im(-offset);
2187 gen_op_addl_T1_T2();
2189 gen_op_movl_T2_im(4);
2190 for (i = 0; i < 8; i++) {
2191 if (insn & (1 << i)) {
2192 if (insn & (1 << 11)) {
2193 /* pop */
2194 gen_ldst(ldl, s);
2195 gen_movl_reg_T0(s, i);
2196 } else {
2197 /* push */
2198 gen_movl_T0_reg(s, i);
2199 gen_ldst(stl, s);
2201 /* advance to the next address. */
2202 gen_op_addl_T1_T2();
2205 if (insn & (1 << 8)) {
2206 if (insn & (1 << 11)) {
2207 /* pop pc */
2208 gen_ldst(ldl, s);
2209 /* don't set the pc until the rest of the instruction
2210 has completed */
2211 } else {
2212 /* push lr */
2213 gen_movl_T0_reg(s, 14);
2214 gen_ldst(stl, s);
2216 gen_op_addl_T1_T2();
2218 if ((insn & (1 << 11)) == 0) {
2219 gen_op_movl_T2_im(-offset);
2220 gen_op_addl_T1_T2();
2222 /* write back the new stack pointer */
2223 gen_movl_reg_T1(s, 13);
2224 /* set the new PC value */
2225 if ((insn & 0x0900) == 0x0900)
2226 gen_bx(s);
2227 break;
2229 case 0xe: /* bkpt */
2230 gen_op_movl_T0_im((long)s->pc - 2);
2231 gen_op_movl_reg_TN[0][15]();
2232 gen_op_bkpt();
2233 s->is_jmp = DISAS_JUMP;
2234 break;
2236 default:
2237 goto undef;
2239 break;
2241 case 12:
2242 /* load/store multiple */
2243 rn = (insn >> 8) & 0x7;
2244 gen_movl_T1_reg(s, rn);
2245 gen_op_movl_T2_im(4);
2246 for (i = 0; i < 8; i++) {
2247 if (insn & (1 << i)) {
2248 if (insn & (1 << 11)) {
2249 /* load */
2250 gen_ldst(ldl, s);
2251 gen_movl_reg_T0(s, i);
2252 } else {
2253 /* store */
2254 gen_movl_T0_reg(s, i);
2255 gen_ldst(stl, s);
2257 /* advance to the next address */
2258 gen_op_addl_T1_T2();
2261 /* Base register writeback. */
2262 if ((insn & (1 << rn)) == 0)
2263 gen_movl_reg_T1(s, rn);
2264 break;
2266 case 13:
2267 /* conditional branch or swi */
2268 cond = (insn >> 8) & 0xf;
2269 if (cond == 0xe)
2270 goto undef;
2272 if (cond == 0xf) {
2273 /* swi */
2274 gen_op_movl_T0_im((long)s->pc | 1);
2275 /* Don't set r15. */
2276 gen_op_movl_reg_TN[0][15]();
2277 gen_op_swi();
2278 s->is_jmp = DISAS_JUMP;
2279 break;
2281 /* generate a conditional jump to next instruction */
2282 s->condlabel = gen_new_label();
2283 gen_test_cc[cond ^ 1](s->condlabel);
2284 s->condjmp = 1;
2285 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2286 //s->is_jmp = DISAS_JUMP_NEXT;
2287 gen_movl_T1_reg(s, 15);
2289 /* jump to the offset */
2290 val = (uint32_t)s->pc + 2;
2291 offset = ((int32_t)insn << 24) >> 24;
2292 val += offset << 1;
2293 gen_jmp(s, val);
2294 break;
2296 case 14:
2297 /* unconditional branch */
2298 if (insn & (1 << 11)) {
2299 /* Second half of blx. */
2300 offset = ((insn & 0x7ff) << 1);
2301 gen_movl_T0_reg(s, 14);
2302 gen_op_movl_T1_im(offset);
2303 gen_op_addl_T0_T1();
2304 gen_op_movl_T1_im(0xfffffffc);
2305 gen_op_andl_T0_T1();
2307 val = (uint32_t)s->pc;
2308 gen_op_movl_T1_im(val | 1);
2309 gen_movl_reg_T1(s, 14);
2310 gen_bx(s);
2311 break;
2313 val = (uint32_t)s->pc;
2314 offset = ((int32_t)insn << 21) >> 21;
2315 val += (offset << 1) + 2;
2316 gen_jmp(s, val);
2317 break;
2319 case 15:
2320 /* branch and link [and switch to arm] */
2321 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
2322 /* Instruction spans a page boundary. Implement it as two
2323 16-bit instructions in case the second half causes a
2324 prefetch abort. */
2325 offset = ((int32_t)insn << 21) >> 9;
2326 val = s->pc + 2 + offset;
2327 gen_op_movl_T0_im(val);
2328 gen_movl_reg_T0(s, 14);
2329 break;
2331 if (insn & (1 << 11)) {
2332 /* Second half of bl. */
2333 offset = ((insn & 0x7ff) << 1) | 1;
2334 gen_movl_T0_reg(s, 14);
2335 gen_op_movl_T1_im(offset);
2336 gen_op_addl_T0_T1();
2338 val = (uint32_t)s->pc;
2339 gen_op_movl_T1_im(val | 1);
2340 gen_movl_reg_T1(s, 14);
2341 gen_bx(s);
2342 break;
2344 offset = ((int32_t)insn << 21) >> 10;
2345 insn = lduw_code(s->pc);
2346 offset |= insn & 0x7ff;
2348 val = (uint32_t)s->pc + 2;
2349 gen_op_movl_T1_im(val | 1);
2350 gen_movl_reg_T1(s, 14);
2352 val += offset << 1;
2353 if (insn & (1 << 12)) {
2354 /* bl */
2355 gen_jmp(s, val);
2356 } else {
2357 /* blx */
2358 val &= ~(uint32_t)2;
2359 gen_op_movl_T0_im(val);
2360 gen_bx(s);
2363 return;
2364 undef:
2365 gen_op_movl_T0_im((long)s->pc - 2);
2366 gen_op_movl_reg_TN[0][15]();
2367 gen_op_undef_insn();
2368 s->is_jmp = DISAS_JUMP;
2371 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2372 basic block 'tb'. If search_pc is TRUE, also generate PC
2373 information for each intermediate instruction. */
2374 static inline int gen_intermediate_code_internal(CPUState *env,
2375 TranslationBlock *tb,
2376 int search_pc)
2378 DisasContext dc1, *dc = &dc1;
2379 uint16_t *gen_opc_end;
2380 int j, lj;
2381 target_ulong pc_start;
2382 uint32_t next_page_start;
2384 /* generate intermediate code */
2385 pc_start = tb->pc;
2387 dc->tb = tb;
2389 gen_opc_ptr = gen_opc_buf;
2390 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2391 gen_opparam_ptr = gen_opparam_buf;
2393 dc->is_jmp = DISAS_NEXT;
2394 dc->pc = pc_start;
2395 dc->singlestep_enabled = env->singlestep_enabled;
2396 dc->condjmp = 0;
2397 dc->thumb = env->thumb;
2398 #if !defined(CONFIG_USER_ONLY)
2399 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
2400 #endif
2401 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2402 nb_gen_labels = 0;
2403 lj = -1;
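/* 'lj' tracks the last entry of gen_opc_pc/gen_opc_instr_start filled in
   when search_pc is set; this mapping lets an exception in the middle of a
   TB be traced back to the guest PC of the faulting instruction. */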
2404 do {
2405 if (env->nb_breakpoints > 0) {
2406 for(j = 0; j < env->nb_breakpoints; j++) {
2407 if (env->breakpoints[j] == dc->pc) {
2408 gen_op_movl_T0_im((long)dc->pc);
2409 gen_op_movl_reg_TN[0][15]();
2410 gen_op_debug();
2411 dc->is_jmp = DISAS_JUMP;
2412 break;
2416 if (search_pc) {
2417 j = gen_opc_ptr - gen_opc_buf;
2418 if (lj < j) {
2419 lj++;
2420 while (lj < j)
2421 gen_opc_instr_start[lj++] = 0;
2423 gen_opc_pc[lj] = dc->pc;
2424 gen_opc_instr_start[lj] = 1;
2427 if (env->thumb)
2428 disas_thumb_insn(dc);
2429 else
2430 disas_arm_insn(env, dc);
2432 if (dc->condjmp && !dc->is_jmp) {
2433 gen_set_label(dc->condlabel);
2434 dc->condjmp = 0;
2436 /* Translation stops when a conditional branch is encountered.
2437 * Otherwise the subsequent code could get translated several times.
2438 * Also stop translation when a page boundary is reached. This
2439 * ensures prefetch aborts occur at the right place. */
2440 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2441 !env->singlestep_enabled &&
2442 dc->pc < next_page_start);
2443 /* At this stage dc->condjmp will only be set when the skipped
2444 * instruction was a conditional branch, and the PC has already been
2445 * written. */
2446 if (__builtin_expect(env->singlestep_enabled, 0)) {
2447 /* Make sure the pc is updated, and raise a debug exception. */
2448 if (dc->condjmp) {
2449 gen_op_debug();
2450 gen_set_label(dc->condlabel);
2452 if (dc->condjmp || !dc->is_jmp) {
2453 gen_op_movl_T0_im((long)dc->pc);
2454 gen_op_movl_reg_TN[0][15]();
2455 dc->condjmp = 0;
2457 gen_op_debug();
2458 } else {
2459 switch(dc->is_jmp) {
2460 case DISAS_NEXT:
2461 gen_goto_tb(dc, 1, dc->pc);
2462 break;
2463 default:
2464 case DISAS_JUMP:
2465 case DISAS_UPDATE:
2466 /* indicate that the hash table must be used to find the next TB */
2467 gen_op_movl_T0_0();
2468 gen_op_exit_tb();
2469 break;
2470 case DISAS_TB_JUMP:
2471 /* nothing more to generate */
2472 break;
2474 if (dc->condjmp) {
2475 gen_set_label(dc->condlabel);
2476 gen_goto_tb(dc, 1, dc->pc);
2477 dc->condjmp = 0;
2480 *gen_opc_ptr = INDEX_op_end;
2482 #ifdef DEBUG_DISAS
2483 if (loglevel & CPU_LOG_TB_IN_ASM) {
2484 fprintf(logfile, "----------------\n");
2485 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2486 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2487 fprintf(logfile, "\n");
2488 if (loglevel & (CPU_LOG_TB_OP)) {
2489 fprintf(logfile, "OP:\n");
2490 dump_ops(gen_opc_buf, gen_opparam_buf);
2491 fprintf(logfile, "\n");
2494 #endif
2495 if (search_pc) {
2496 j = gen_opc_ptr - gen_opc_buf;
2497 lj++;
2498 while (lj <= j)
2499 gen_opc_instr_start[lj++] = 0;
2500 tb->size = 0;
2501 } else {
2502 tb->size = dc->pc - pc_start;
2504 return 0;
2507 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2509 return gen_intermediate_code_internal(env, tb, 0);
2512 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2514 return gen_intermediate_code_internal(env, tb, 1);
2517 static const char *cpu_mode_names[16] = {
2518 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
2519 "???", "???", "???", "und", "???", "???", "???", "sys"
2521 void cpu_dump_state(CPUState *env, FILE *f,
2522 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2523 int flags)
2525 int i;
2526 union {
2527 uint32_t i;
2528 float s;
2529 } s0, s1;
2530 CPU_DoubleU d;
2531 uint32_t psr;
2533 for(i=0;i<16;i++) {
2534 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2535 if ((i % 4) == 3)
2536 cpu_fprintf(f, "\n");
2537 else
2538 cpu_fprintf(f, " ");
2540 psr = cpsr_read(env);
2541 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
2542 psr,
2543 psr & (1 << 31) ? 'N' : '-',
2544 psr & (1 << 30) ? 'Z' : '-',
2545 psr & (1 << 29) ? 'C' : '-',
2546 psr & (1 << 28) ? 'V' : '-',
2547 psr & CPSR_T ? 'T' : 'A',
2548 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2550 for (i = 0; i < 16; i++) {
2551 d.d = env->vfp.regs[i];
2552 s0.i = d.l.lower;
2553 s1.i = d.l.upper;
2554 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2555 i * 2, (int)s0.i, s0.s,
2556 i * 2 + 1, (int)s1.i, s1.s,
2557 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2558 d.d);
2560 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);