SD card emulation (initial implementation by Andrzej Zaborowski).
[qemu/mini2440.git] / target-arm / translate.c
blob1631fcd3126e4387fb2ef33b3ee1c4424dcd21b3
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext {
39 target_ulong pc;
40 int is_jmp;
41 /* Nonzero if this instruction has been conditionally skipped. */
42 int condjmp;
43 /* The label that will be jumped to when the instruction is skipped. */
44 int condlabel;
45 struct TranslationBlock *tb;
46 int singlestep_enabled;
47 int thumb;
48 int is_mem;
49 #if !defined(CONFIG_USER_ONLY)
50 int user;
51 #endif
52 } DisasContext;
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(s) 1
56 #else
57 #define IS_USER(s) (s->user)
58 #endif
60 #define DISAS_JUMP_NEXT 4
62 #ifdef USE_DIRECT_JUMP
63 #define TBPARAM(x)
64 #else
65 #define TBPARAM(x) (long)(x)
66 #endif
68 /* XXX: move that elsewhere */
69 static uint16_t *gen_opc_ptr;
70 static uint32_t *gen_opparam_ptr;
71 extern FILE *logfile;
72 extern int loglevel;
74 enum {
75 #define DEF(s, n, copy_size) INDEX_op_ ## s,
76 #include "opc.h"
77 #undef DEF
78 NB_OPS,
81 #include "gen-op.h"
83 static GenOpFunc1 *gen_test_cc[14] = {
84 gen_op_test_eq,
85 gen_op_test_ne,
86 gen_op_test_cs,
87 gen_op_test_cc,
88 gen_op_test_mi,
89 gen_op_test_pl,
90 gen_op_test_vs,
91 gen_op_test_vc,
92 gen_op_test_hi,
93 gen_op_test_ls,
94 gen_op_test_ge,
95 gen_op_test_lt,
96 gen_op_test_gt,
97 gen_op_test_le,
100 const uint8_t table_logic_cc[16] = {
101 1, /* and */
102 1, /* xor */
103 0, /* sub */
104 0, /* rsb */
105 0, /* add */
106 0, /* adc */
107 0, /* sbc */
108 0, /* rsc */
109 1, /* andl */
110 1, /* xorl */
111 0, /* cmp */
112 0, /* cmn */
113 1, /* orr */
114 1, /* mov */
115 1, /* bic */
116 1, /* mvn */
119 static GenOpFunc1 *gen_shift_T1_im[4] = {
120 gen_op_shll_T1_im,
121 gen_op_shrl_T1_im,
122 gen_op_sarl_T1_im,
123 gen_op_rorl_T1_im,
126 static GenOpFunc *gen_shift_T1_0[4] = {
127 NULL,
128 gen_op_shrl_T1_0,
129 gen_op_sarl_T1_0,
130 gen_op_rrxl_T1,
133 static GenOpFunc1 *gen_shift_T2_im[4] = {
134 gen_op_shll_T2_im,
135 gen_op_shrl_T2_im,
136 gen_op_sarl_T2_im,
137 gen_op_rorl_T2_im,
140 static GenOpFunc *gen_shift_T2_0[4] = {
141 NULL,
142 gen_op_shrl_T2_0,
143 gen_op_sarl_T2_0,
144 gen_op_rrxl_T2,
147 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
148 gen_op_shll_T1_im_cc,
149 gen_op_shrl_T1_im_cc,
150 gen_op_sarl_T1_im_cc,
151 gen_op_rorl_T1_im_cc,
154 static GenOpFunc *gen_shift_T1_0_cc[4] = {
155 NULL,
156 gen_op_shrl_T1_0_cc,
157 gen_op_sarl_T1_0_cc,
158 gen_op_rrxl_T1_cc,
161 static GenOpFunc *gen_shift_T1_T0[4] = {
162 gen_op_shll_T1_T0,
163 gen_op_shrl_T1_T0,
164 gen_op_sarl_T1_T0,
165 gen_op_rorl_T1_T0,
168 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
169 gen_op_shll_T1_T0_cc,
170 gen_op_shrl_T1_T0_cc,
171 gen_op_sarl_T1_T0_cc,
172 gen_op_rorl_T1_T0_cc,
175 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
177 gen_op_movl_T0_r0,
178 gen_op_movl_T0_r1,
179 gen_op_movl_T0_r2,
180 gen_op_movl_T0_r3,
181 gen_op_movl_T0_r4,
182 gen_op_movl_T0_r5,
183 gen_op_movl_T0_r6,
184 gen_op_movl_T0_r7,
185 gen_op_movl_T0_r8,
186 gen_op_movl_T0_r9,
187 gen_op_movl_T0_r10,
188 gen_op_movl_T0_r11,
189 gen_op_movl_T0_r12,
190 gen_op_movl_T0_r13,
191 gen_op_movl_T0_r14,
192 gen_op_movl_T0_r15,
195 gen_op_movl_T1_r0,
196 gen_op_movl_T1_r1,
197 gen_op_movl_T1_r2,
198 gen_op_movl_T1_r3,
199 gen_op_movl_T1_r4,
200 gen_op_movl_T1_r5,
201 gen_op_movl_T1_r6,
202 gen_op_movl_T1_r7,
203 gen_op_movl_T1_r8,
204 gen_op_movl_T1_r9,
205 gen_op_movl_T1_r10,
206 gen_op_movl_T1_r11,
207 gen_op_movl_T1_r12,
208 gen_op_movl_T1_r13,
209 gen_op_movl_T1_r14,
210 gen_op_movl_T1_r15,
213 gen_op_movl_T2_r0,
214 gen_op_movl_T2_r1,
215 gen_op_movl_T2_r2,
216 gen_op_movl_T2_r3,
217 gen_op_movl_T2_r4,
218 gen_op_movl_T2_r5,
219 gen_op_movl_T2_r6,
220 gen_op_movl_T2_r7,
221 gen_op_movl_T2_r8,
222 gen_op_movl_T2_r9,
223 gen_op_movl_T2_r10,
224 gen_op_movl_T2_r11,
225 gen_op_movl_T2_r12,
226 gen_op_movl_T2_r13,
227 gen_op_movl_T2_r14,
228 gen_op_movl_T2_r15,
232 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
234 gen_op_movl_r0_T0,
235 gen_op_movl_r1_T0,
236 gen_op_movl_r2_T0,
237 gen_op_movl_r3_T0,
238 gen_op_movl_r4_T0,
239 gen_op_movl_r5_T0,
240 gen_op_movl_r6_T0,
241 gen_op_movl_r7_T0,
242 gen_op_movl_r8_T0,
243 gen_op_movl_r9_T0,
244 gen_op_movl_r10_T0,
245 gen_op_movl_r11_T0,
246 gen_op_movl_r12_T0,
247 gen_op_movl_r13_T0,
248 gen_op_movl_r14_T0,
249 gen_op_movl_r15_T0,
252 gen_op_movl_r0_T1,
253 gen_op_movl_r1_T1,
254 gen_op_movl_r2_T1,
255 gen_op_movl_r3_T1,
256 gen_op_movl_r4_T1,
257 gen_op_movl_r5_T1,
258 gen_op_movl_r6_T1,
259 gen_op_movl_r7_T1,
260 gen_op_movl_r8_T1,
261 gen_op_movl_r9_T1,
262 gen_op_movl_r10_T1,
263 gen_op_movl_r11_T1,
264 gen_op_movl_r12_T1,
265 gen_op_movl_r13_T1,
266 gen_op_movl_r14_T1,
267 gen_op_movl_r15_T1,
271 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
272 gen_op_movl_T0_im,
273 gen_op_movl_T1_im,
274 gen_op_movl_T2_im,
277 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
278 gen_op_shll_T0_im_thumb,
279 gen_op_shrl_T0_im_thumb,
280 gen_op_sarl_T0_im_thumb,
283 static inline void gen_bx(DisasContext *s)
285 s->is_jmp = DISAS_UPDATE;
286 gen_op_bx_T0();
/* Emit the load/store op 'name', picking the user or kernel MMU variant
   from the translation context, and mark the TB as containing a memory
   access.  User-only builds have a single flat address space.  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
302 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
304 int val;
306 if (reg == 15) {
307 /* normaly, since we updated PC, we need only to add one insn */
308 if (s->thumb)
309 val = (long)s->pc + 2;
310 else
311 val = (long)s->pc + 4;
312 gen_op_movl_TN_im[t](val);
313 } else {
314 gen_op_movl_TN_reg[t][reg]();
318 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
320 gen_movl_TN_reg(s, reg, 0);
323 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
325 gen_movl_TN_reg(s, reg, 1);
328 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
330 gen_movl_TN_reg(s, reg, 2);
333 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
335 gen_op_movl_reg_TN[t][reg]();
336 if (reg == 15) {
337 s->is_jmp = DISAS_JUMP;
341 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
343 gen_movl_reg_TN(s, reg, 0);
346 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
348 gen_movl_reg_TN(s, reg, 1);
351 /* Force a TB lookup after an instruction that changes the CPU state. */
352 static inline void gen_lookup_tb(DisasContext *s)
354 gen_op_movl_T0_im(s->pc);
355 gen_movl_reg_T0(s, 15);
356 s->is_jmp = DISAS_UPDATE;
359 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
361 int val, rm, shift, shiftop;
363 if (!(insn & (1 << 25))) {
364 /* immediate */
365 val = insn & 0xfff;
366 if (!(insn & (1 << 23)))
367 val = -val;
368 if (val != 0)
369 gen_op_addl_T1_im(val);
370 } else {
371 /* shift/register */
372 rm = (insn) & 0xf;
373 shift = (insn >> 7) & 0x1f;
374 gen_movl_T2_reg(s, rm);
375 shiftop = (insn >> 5) & 3;
376 if (shift != 0) {
377 gen_shift_T2_im[shiftop](shift);
378 } else if (shiftop != 0) {
379 gen_shift_T2_0[shiftop]();
381 if (!(insn & (1 << 23)))
382 gen_op_subl_T1_T2();
383 else
384 gen_op_addl_T1_T2();
388 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
389 int extra)
391 int val, rm;
393 if (insn & (1 << 22)) {
394 /* immediate */
395 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
396 if (!(insn & (1 << 23)))
397 val = -val;
398 val += extra;
399 if (val != 0)
400 gen_op_addl_T1_im(val);
401 } else {
402 /* register */
403 if (extra)
404 gen_op_addl_T1_im(extra);
405 rm = (insn) & 0xf;
406 gen_movl_T2_reg(s, rm);
407 if (!(insn & (1 << 23)))
408 gen_op_subl_T1_T2();
409 else
410 gen_op_addl_T1_T2();
/* Generate a helper gen_vfp_<name>(dp) that emits the double- or
   single-precision variant of a VFP op depending on 'dp'.  */
#define VFP_OP(name)                      \
static inline void gen_vfp_##name(int dp) \
{                                         \
    if (dp)                               \
        gen_op_vfp_##name##d();           \
    else                                  \
        gen_op_vfp_##name##s();           \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)

#undef VFP_OP
442 static inline void gen_vfp_ld(DisasContext *s, int dp)
444 if (dp)
445 gen_ldst(vfp_ldd, s);
446 else
447 gen_ldst(vfp_lds, s);
450 static inline void gen_vfp_st(DisasContext *s, int dp)
452 if (dp)
453 gen_ldst(vfp_std, s);
454 else
455 gen_ldst(vfp_sts, s);
458 static inline long
459 vfp_reg_offset (int dp, int reg)
461 if (dp)
462 return offsetof(CPUARMState, vfp.regs[reg]);
463 else if (reg & 1) {
464 return offsetof(CPUARMState, vfp.regs[reg >> 1])
465 + offsetof(CPU_DoubleU, l.upper);
466 } else {
467 return offsetof(CPUARMState, vfp.regs[reg >> 1])
468 + offsetof(CPU_DoubleU, l.lower);
/* Move between the VFP working registers F0/F1 and an architectural
   VFP register, in the precision selected by 'dp'.  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
495 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
496 instruction is not defined. */
497 static int disas_cp15_insn(DisasContext *s, uint32_t insn)
499 uint32_t rd;
501 /* ??? Some cp15 registers are accessible from userspace. */
502 if (IS_USER(s)) {
503 return 1;
505 if ((insn & 0x0fff0fff) == 0x0e070f90
506 || (insn & 0x0fff0fff) == 0x0e070f58) {
507 /* Wait for interrupt. */
508 gen_op_movl_T0_im((long)s->pc);
509 gen_op_movl_reg_TN[0][15]();
510 gen_op_wfi();
511 s->is_jmp = DISAS_JUMP;
512 return 0;
514 rd = (insn >> 12) & 0xf;
515 if (insn & (1 << 20)) {
516 gen_op_movl_T0_cp15(insn);
517 /* If the destination register is r15 then sets condition codes. */
518 if (rd != 15)
519 gen_movl_reg_T0(s, rd);
520 } else {
521 gen_movl_T0_reg(s, rd);
522 gen_op_movl_cp15_T0(insn);
524 gen_lookup_tb(s);
525 return 0;
528 /* Disassemble a VFP instruction. Returns nonzero if an error occured
529 (ie. an undefined instruction). */
530 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
532 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
533 int dp, veclen;
535 if (!arm_feature(env, ARM_FEATURE_VFP))
536 return 1;
538 if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
539 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
540 if ((insn & 0x0fe00fff) != 0x0ee00a10)
541 return 1;
542 rn = (insn >> 16) & 0xf;
543 if (rn != 0 && rn != 8)
544 return 1;
546 dp = ((insn & 0xf00) == 0xb00);
547 switch ((insn >> 24) & 0xf) {
548 case 0xe:
549 if (insn & (1 << 4)) {
550 /* single register transfer */
551 if ((insn & 0x6f) != 0x00)
552 return 1;
553 rd = (insn >> 12) & 0xf;
554 if (dp) {
555 if (insn & 0x80)
556 return 1;
557 rn = (insn >> 16) & 0xf;
558 /* Get the existing value even for arm->vfp moves because
559 we only set half the register. */
560 gen_mov_F0_vreg(1, rn);
561 gen_op_vfp_mrrd();
562 if (insn & (1 << 20)) {
563 /* vfp->arm */
564 if (insn & (1 << 21))
565 gen_movl_reg_T1(s, rd);
566 else
567 gen_movl_reg_T0(s, rd);
568 } else {
569 /* arm->vfp */
570 if (insn & (1 << 21))
571 gen_movl_T1_reg(s, rd);
572 else
573 gen_movl_T0_reg(s, rd);
574 gen_op_vfp_mdrr();
575 gen_mov_vreg_F0(dp, rn);
577 } else {
578 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
579 if (insn & (1 << 20)) {
580 /* vfp->arm */
581 if (insn & (1 << 21)) {
582 /* system register */
583 rn >>= 1;
584 switch (rn) {
585 case ARM_VFP_FPSID:
586 case ARM_VFP_FPEXC:
587 case ARM_VFP_FPINST:
588 case ARM_VFP_FPINST2:
589 gen_op_vfp_movl_T0_xreg(rn);
590 break;
591 case ARM_VFP_FPSCR:
592 if (rd == 15)
593 gen_op_vfp_movl_T0_fpscr_flags();
594 else
595 gen_op_vfp_movl_T0_fpscr();
596 break;
597 default:
598 return 1;
600 } else {
601 gen_mov_F0_vreg(0, rn);
602 gen_op_vfp_mrs();
604 if (rd == 15) {
605 /* Set the 4 flag bits in the CPSR. */
606 gen_op_movl_cpsr_T0(0xf0000000);
607 } else
608 gen_movl_reg_T0(s, rd);
609 } else {
610 /* arm->vfp */
611 gen_movl_T0_reg(s, rd);
612 if (insn & (1 << 21)) {
613 rn >>= 1;
614 /* system register */
615 switch (rn) {
616 case ARM_VFP_FPSID:
617 /* Writes are ignored. */
618 break;
619 case ARM_VFP_FPSCR:
620 gen_op_vfp_movl_fpscr_T0();
621 gen_lookup_tb(s);
622 break;
623 case ARM_VFP_FPEXC:
624 gen_op_vfp_movl_xreg_T0(rn);
625 gen_lookup_tb(s);
626 break;
627 case ARM_VFP_FPINST:
628 case ARM_VFP_FPINST2:
629 gen_op_vfp_movl_xreg_T0(rn);
630 break;
631 default:
632 return 1;
634 } else {
635 gen_op_vfp_msr();
636 gen_mov_vreg_F0(0, rn);
640 } else {
641 /* data processing */
642 /* The opcode is in bits 23, 21, 20 and 6. */
643 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
644 if (dp) {
645 if (op == 15) {
646 /* rn is opcode */
647 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
648 } else {
649 /* rn is register number */
650 if (insn & (1 << 7))
651 return 1;
652 rn = (insn >> 16) & 0xf;
655 if (op == 15 && (rn == 15 || rn > 17)) {
656 /* Integer or single precision destination. */
657 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
658 } else {
659 if (insn & (1 << 22))
660 return 1;
661 rd = (insn >> 12) & 0xf;
664 if (op == 15 && (rn == 16 || rn == 17)) {
665 /* Integer source. */
666 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
667 } else {
668 if (insn & (1 << 5))
669 return 1;
670 rm = insn & 0xf;
672 } else {
673 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
674 if (op == 15 && rn == 15) {
675 /* Double precision destination. */
676 if (insn & (1 << 22))
677 return 1;
678 rd = (insn >> 12) & 0xf;
679 } else
680 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
681 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
684 veclen = env->vfp.vec_len;
685 if (op == 15 && rn > 3)
686 veclen = 0;
688 /* Shut up compiler warnings. */
689 delta_m = 0;
690 delta_d = 0;
691 bank_mask = 0;
693 if (veclen > 0) {
694 if (dp)
695 bank_mask = 0xc;
696 else
697 bank_mask = 0x18;
699 /* Figure out what type of vector operation this is. */
700 if ((rd & bank_mask) == 0) {
701 /* scalar */
702 veclen = 0;
703 } else {
704 if (dp)
705 delta_d = (env->vfp.vec_stride >> 1) + 1;
706 else
707 delta_d = env->vfp.vec_stride + 1;
709 if ((rm & bank_mask) == 0) {
710 /* mixed scalar/vector */
711 delta_m = 0;
712 } else {
713 /* vector */
714 delta_m = delta_d;
719 /* Load the initial operands. */
720 if (op == 15) {
721 switch (rn) {
722 case 16:
723 case 17:
724 /* Integer source */
725 gen_mov_F0_vreg(0, rm);
726 break;
727 case 8:
728 case 9:
729 /* Compare */
730 gen_mov_F0_vreg(dp, rd);
731 gen_mov_F1_vreg(dp, rm);
732 break;
733 case 10:
734 case 11:
735 /* Compare with zero */
736 gen_mov_F0_vreg(dp, rd);
737 gen_vfp_F1_ld0(dp);
738 break;
739 default:
740 /* One source operand. */
741 gen_mov_F0_vreg(dp, rm);
743 } else {
744 /* Two source operands. */
745 gen_mov_F0_vreg(dp, rn);
746 gen_mov_F1_vreg(dp, rm);
749 for (;;) {
750 /* Perform the calculation. */
751 switch (op) {
752 case 0: /* mac: fd + (fn * fm) */
753 gen_vfp_mul(dp);
754 gen_mov_F1_vreg(dp, rd);
755 gen_vfp_add(dp);
756 break;
757 case 1: /* nmac: fd - (fn * fm) */
758 gen_vfp_mul(dp);
759 gen_vfp_neg(dp);
760 gen_mov_F1_vreg(dp, rd);
761 gen_vfp_add(dp);
762 break;
763 case 2: /* msc: -fd + (fn * fm) */
764 gen_vfp_mul(dp);
765 gen_mov_F1_vreg(dp, rd);
766 gen_vfp_sub(dp);
767 break;
768 case 3: /* nmsc: -fd - (fn * fm) */
769 gen_vfp_mul(dp);
770 gen_mov_F1_vreg(dp, rd);
771 gen_vfp_add(dp);
772 gen_vfp_neg(dp);
773 break;
774 case 4: /* mul: fn * fm */
775 gen_vfp_mul(dp);
776 break;
777 case 5: /* nmul: -(fn * fm) */
778 gen_vfp_mul(dp);
779 gen_vfp_neg(dp);
780 break;
781 case 6: /* add: fn + fm */
782 gen_vfp_add(dp);
783 break;
784 case 7: /* sub: fn - fm */
785 gen_vfp_sub(dp);
786 break;
787 case 8: /* div: fn / fm */
788 gen_vfp_div(dp);
789 break;
790 case 15: /* extension space */
791 switch (rn) {
792 case 0: /* cpy */
793 /* no-op */
794 break;
795 case 1: /* abs */
796 gen_vfp_abs(dp);
797 break;
798 case 2: /* neg */
799 gen_vfp_neg(dp);
800 break;
801 case 3: /* sqrt */
802 gen_vfp_sqrt(dp);
803 break;
804 case 8: /* cmp */
805 gen_vfp_cmp(dp);
806 break;
807 case 9: /* cmpe */
808 gen_vfp_cmpe(dp);
809 break;
810 case 10: /* cmpz */
811 gen_vfp_cmp(dp);
812 break;
813 case 11: /* cmpez */
814 gen_vfp_F1_ld0(dp);
815 gen_vfp_cmpe(dp);
816 break;
817 case 15: /* single<->double conversion */
818 if (dp)
819 gen_op_vfp_fcvtsd();
820 else
821 gen_op_vfp_fcvtds();
822 break;
823 case 16: /* fuito */
824 gen_vfp_uito(dp);
825 break;
826 case 17: /* fsito */
827 gen_vfp_sito(dp);
828 break;
829 case 24: /* ftoui */
830 gen_vfp_toui(dp);
831 break;
832 case 25: /* ftouiz */
833 gen_vfp_touiz(dp);
834 break;
835 case 26: /* ftosi */
836 gen_vfp_tosi(dp);
837 break;
838 case 27: /* ftosiz */
839 gen_vfp_tosiz(dp);
840 break;
841 default: /* undefined */
842 printf ("rn:%d\n", rn);
843 return 1;
845 break;
846 default: /* undefined */
847 printf ("op:%d\n", op);
848 return 1;
851 /* Write back the result. */
852 if (op == 15 && (rn >= 8 && rn <= 11))
853 ; /* Comparison, do nothing. */
854 else if (op == 15 && rn > 17)
855 /* Integer result. */
856 gen_mov_vreg_F0(0, rd);
857 else if (op == 15 && rn == 15)
858 /* conversion */
859 gen_mov_vreg_F0(!dp, rd);
860 else
861 gen_mov_vreg_F0(dp, rd);
863 /* break out of the loop if we have finished */
864 if (veclen == 0)
865 break;
867 if (op == 15 && delta_m == 0) {
868 /* single source one-many */
869 while (veclen--) {
870 rd = ((rd + delta_d) & (bank_mask - 1))
871 | (rd & bank_mask);
872 gen_mov_vreg_F0(dp, rd);
874 break;
876 /* Setup the next operands. */
877 veclen--;
878 rd = ((rd + delta_d) & (bank_mask - 1))
879 | (rd & bank_mask);
881 if (op == 15) {
882 /* One source operand. */
883 rm = ((rm + delta_m) & (bank_mask - 1))
884 | (rm & bank_mask);
885 gen_mov_F0_vreg(dp, rm);
886 } else {
887 /* Two source operands. */
888 rn = ((rn + delta_d) & (bank_mask - 1))
889 | (rn & bank_mask);
890 gen_mov_F0_vreg(dp, rn);
891 if (delta_m) {
892 rm = ((rm + delta_m) & (bank_mask - 1))
893 | (rm & bank_mask);
894 gen_mov_F1_vreg(dp, rm);
899 break;
900 case 0xc:
901 case 0xd:
902 if (dp && (insn & (1 << 22))) {
903 /* two-register transfer */
904 rn = (insn >> 16) & 0xf;
905 rd = (insn >> 12) & 0xf;
906 if (dp) {
907 if (insn & (1 << 5))
908 return 1;
909 rm = insn & 0xf;
910 } else
911 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
913 if (insn & (1 << 20)) {
914 /* vfp->arm */
915 if (dp) {
916 gen_mov_F0_vreg(1, rm);
917 gen_op_vfp_mrrd();
918 gen_movl_reg_T0(s, rd);
919 gen_movl_reg_T1(s, rn);
920 } else {
921 gen_mov_F0_vreg(0, rm);
922 gen_op_vfp_mrs();
923 gen_movl_reg_T0(s, rn);
924 gen_mov_F0_vreg(0, rm + 1);
925 gen_op_vfp_mrs();
926 gen_movl_reg_T0(s, rd);
928 } else {
929 /* arm->vfp */
930 if (dp) {
931 gen_movl_T0_reg(s, rd);
932 gen_movl_T1_reg(s, rn);
933 gen_op_vfp_mdrr();
934 gen_mov_vreg_F0(1, rm);
935 } else {
936 gen_movl_T0_reg(s, rn);
937 gen_op_vfp_msr();
938 gen_mov_vreg_F0(0, rm);
939 gen_movl_T0_reg(s, rd);
940 gen_op_vfp_msr();
941 gen_mov_vreg_F0(0, rm + 1);
944 } else {
945 /* Load/store */
946 rn = (insn >> 16) & 0xf;
947 if (dp)
948 rd = (insn >> 12) & 0xf;
949 else
950 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
951 gen_movl_T1_reg(s, rn);
952 if ((insn & 0x01200000) == 0x01000000) {
953 /* Single load/store */
954 offset = (insn & 0xff) << 2;
955 if ((insn & (1 << 23)) == 0)
956 offset = -offset;
957 gen_op_addl_T1_im(offset);
958 if (insn & (1 << 20)) {
959 gen_vfp_ld(s, dp);
960 gen_mov_vreg_F0(dp, rd);
961 } else {
962 gen_mov_F0_vreg(dp, rd);
963 gen_vfp_st(s, dp);
965 } else {
966 /* load/store multiple */
967 if (dp)
968 n = (insn >> 1) & 0x7f;
969 else
970 n = insn & 0xff;
972 if (insn & (1 << 24)) /* pre-decrement */
973 gen_op_addl_T1_im(-((insn & 0xff) << 2));
975 if (dp)
976 offset = 8;
977 else
978 offset = 4;
979 for (i = 0; i < n; i++) {
980 if (insn & (1 << 20)) {
981 /* load */
982 gen_vfp_ld(s, dp);
983 gen_mov_vreg_F0(dp, rd + i);
984 } else {
985 /* store */
986 gen_mov_F0_vreg(dp, rd + i);
987 gen_vfp_st(s, dp);
989 gen_op_addl_T1_im(offset);
991 if (insn & (1 << 21)) {
992 /* writeback */
993 if (insn & (1 << 24))
994 offset = -offset * n;
995 else if (dp && (insn & 1))
996 offset = 4;
997 else
998 offset = 0;
1000 if (offset != 0)
1001 gen_op_addl_T1_im(offset);
1002 gen_movl_reg_T1(s, rn);
1006 break;
1007 default:
1008 /* Should never happen. */
1009 return 1;
1011 return 0;
1014 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
1016 TranslationBlock *tb;
1018 tb = s->tb;
1019 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
1020 if (n == 0)
1021 gen_op_goto_tb0(TBPARAM(tb));
1022 else
1023 gen_op_goto_tb1(TBPARAM(tb));
1024 gen_op_movl_T0_im(dest);
1025 gen_op_movl_r15_T0();
1026 gen_op_movl_T0_im((long)tb + n);
1027 gen_op_exit_tb();
1028 } else {
1029 gen_op_movl_T0_im(dest);
1030 gen_op_movl_r15_T0();
1031 gen_op_movl_T0_0();
1032 gen_op_exit_tb();
1036 static inline void gen_jmp (DisasContext *s, uint32_t dest)
1038 if (__builtin_expect(s->singlestep_enabled, 0)) {
1039 /* An indirect jump so that we still trigger the debug exception. */
1040 if (s->thumb)
1041 dest |= 1;
1042 gen_op_movl_T0_im(dest);
1043 gen_bx(s);
1044 } else {
1045 gen_goto_tb(s, 0, dest);
1046 s->is_jmp = DISAS_TB_JUMP;
/* 16x16 -> 32 signed multiply (SMULxy family): select the top (x/y
   nonzero) or bottom (sign-extended) halfword of T0/T1, then multiply
   into T0.  */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
1063 /* Return the mask of PSR bits set by a MSR instruction. */
1064 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
1065 uint32_t mask;
1067 mask = 0;
1068 if (flags & (1 << 0))
1069 mask |= 0xff;
1070 if (flags & (1 << 1))
1071 mask |= 0xff00;
1072 if (flags & (1 << 2))
1073 mask |= 0xff0000;
1074 if (flags & (1 << 3))
1075 mask |= 0xff000000;
1076 /* Mask out undefined bits. */
1077 mask &= 0xf90f03ff;
1078 /* Mask out state bits. */
1079 if (!spsr)
1080 mask &= ~0x01000020;
1081 /* Mask out privileged bits. */
1082 if (IS_USER(s))
1083 mask &= 0xf80f0200;
1084 return mask;
1087 /* Returns nonzero if access to the PSR is not permitted. */
1088 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
1090 if (spsr) {
1091 /* ??? This is also undefined in system mode. */
1092 if (IS_USER(s))
1093 return 1;
1094 gen_op_movl_spsr_T0(mask);
1095 } else {
1096 gen_op_movl_cpsr_T0(mask);
1098 gen_lookup_tb(s);
1099 return 0;
1102 static void gen_exception_return(DisasContext *s)
1104 gen_op_movl_reg_TN[0][15]();
1105 gen_op_movl_T0_spsr();
1106 gen_op_movl_cpsr_T0(0xffffffff);
1107 s->is_jmp = DISAS_UPDATE;
1110 static void disas_arm_insn(CPUState * env, DisasContext *s)
1112 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1114 insn = ldl_code(s->pc);
1115 s->pc += 4;
1117 cond = insn >> 28;
1118 if (cond == 0xf){
1119 /* Unconditional instructions. */
1120 if ((insn & 0x0d70f000) == 0x0550f000)
1121 return; /* PLD */
1122 else if ((insn & 0x0e000000) == 0x0a000000) {
1123 /* branch link and change to thumb (blx <offset>) */
1124 int32_t offset;
1126 val = (uint32_t)s->pc;
1127 gen_op_movl_T0_im(val);
1128 gen_movl_reg_T0(s, 14);
1129 /* Sign-extend the 24-bit offset */
1130 offset = (((int32_t)insn) << 8) >> 8;
1131 /* offset * 4 + bit24 * 2 + (thumb bit) */
1132 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1133 /* pipeline offset */
1134 val += 4;
1135 gen_op_movl_T0_im(val);
1136 gen_bx(s);
1137 return;
1138 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1139 /* Coprocessor double register transfer. */
1140 } else if ((insn & 0x0f000010) == 0x0e000010) {
1141 /* Additional coprocessor register transfer. */
1142 } else if ((insn & 0x0ff10010) == 0x01000000) {
1143 /* cps (privileged) */
1144 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1145 /* setend */
1146 if (insn & (1 << 9)) {
1147 /* BE8 mode not implemented. */
1148 goto illegal_op;
1150 return;
1152 goto illegal_op;
1154 if (cond != 0xe) {
1155 /* if not always execute, we generate a conditional jump to
1156 next instruction */
1157 s->condlabel = gen_new_label();
1158 gen_test_cc[cond ^ 1](s->condlabel);
1159 s->condjmp = 1;
1160 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1161 //s->is_jmp = DISAS_JUMP_NEXT;
1163 if ((insn & 0x0f900000) == 0x03000000) {
1164 if ((insn & 0x0fb0f000) != 0x0320f000)
1165 goto illegal_op;
1166 /* CPSR = immediate */
1167 val = insn & 0xff;
1168 shift = ((insn >> 8) & 0xf) * 2;
1169 if (shift)
1170 val = (val >> shift) | (val << (32 - shift));
1171 gen_op_movl_T0_im(val);
1172 i = ((insn & (1 << 22)) != 0);
1173 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1174 goto illegal_op;
1175 } else if ((insn & 0x0f900000) == 0x01000000
1176 && (insn & 0x00000090) != 0x00000090) {
1177 /* miscellaneous instructions */
1178 op1 = (insn >> 21) & 3;
1179 sh = (insn >> 4) & 0xf;
1180 rm = insn & 0xf;
1181 switch (sh) {
1182 case 0x0: /* move program status register */
1183 if (op1 & 1) {
1184 /* PSR = reg */
1185 gen_movl_T0_reg(s, rm);
1186 i = ((op1 & 2) != 0);
1187 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1188 goto illegal_op;
1189 } else {
1190 /* reg = PSR */
1191 rd = (insn >> 12) & 0xf;
1192 if (op1 & 2) {
1193 if (IS_USER(s))
1194 goto illegal_op;
1195 gen_op_movl_T0_spsr();
1196 } else {
1197 gen_op_movl_T0_cpsr();
1199 gen_movl_reg_T0(s, rd);
1201 break;
1202 case 0x1:
1203 if (op1 == 1) {
1204 /* branch/exchange thumb (bx). */
1205 gen_movl_T0_reg(s, rm);
1206 gen_bx(s);
1207 } else if (op1 == 3) {
1208 /* clz */
1209 rd = (insn >> 12) & 0xf;
1210 gen_movl_T0_reg(s, rm);
1211 gen_op_clz_T0();
1212 gen_movl_reg_T0(s, rd);
1213 } else {
1214 goto illegal_op;
1216 break;
1217 case 0x2:
1218 if (op1 == 1) {
1219 ARCH(5J); /* bxj */
1220 /* Trivial implementation equivalent to bx. */
1221 gen_movl_T0_reg(s, rm);
1222 gen_bx(s);
1223 } else {
1224 goto illegal_op;
1226 break;
1227 case 0x3:
1228 if (op1 != 1)
1229 goto illegal_op;
1231 /* branch link/exchange thumb (blx) */
1232 val = (uint32_t)s->pc;
1233 gen_op_movl_T0_im(val);
1234 gen_movl_reg_T0(s, 14);
1235 gen_movl_T0_reg(s, rm);
1236 gen_bx(s);
1237 break;
1238 case 0x5: /* saturating add/subtract */
1239 rd = (insn >> 12) & 0xf;
1240 rn = (insn >> 16) & 0xf;
1241 gen_movl_T0_reg(s, rm);
1242 gen_movl_T1_reg(s, rn);
1243 if (op1 & 2)
1244 gen_op_double_T1_saturate();
1245 if (op1 & 1)
1246 gen_op_subl_T0_T1_saturate();
1247 else
1248 gen_op_addl_T0_T1_saturate();
1249 gen_movl_reg_T0(s, rd);
1250 break;
1251 case 7: /* bkpt */
1252 gen_op_movl_T0_im((long)s->pc - 4);
1253 gen_op_movl_reg_TN[0][15]();
1254 gen_op_bkpt();
1255 s->is_jmp = DISAS_JUMP;
1256 break;
1257 case 0x8: /* signed multiply */
1258 case 0xa:
1259 case 0xc:
1260 case 0xe:
1261 rs = (insn >> 8) & 0xf;
1262 rn = (insn >> 12) & 0xf;
1263 rd = (insn >> 16) & 0xf;
1264 if (op1 == 1) {
1265 /* (32 * 16) >> 16 */
1266 gen_movl_T0_reg(s, rm);
1267 gen_movl_T1_reg(s, rs);
1268 if (sh & 4)
1269 gen_op_sarl_T1_im(16);
1270 else
1271 gen_op_sxth_T1();
1272 gen_op_imulw_T0_T1();
1273 if ((sh & 2) == 0) {
1274 gen_movl_T1_reg(s, rn);
1275 gen_op_addl_T0_T1_setq();
1277 gen_movl_reg_T0(s, rd);
1278 } else {
1279 /* 16 * 16 */
1280 gen_movl_T0_reg(s, rm);
1281 gen_movl_T1_reg(s, rs);
1282 gen_mulxy(sh & 2, sh & 4);
1283 if (op1 == 2) {
1284 gen_op_signbit_T1_T0();
1285 gen_op_addq_T0_T1(rn, rd);
1286 gen_movl_reg_T0(s, rn);
1287 gen_movl_reg_T1(s, rd);
1288 } else {
1289 if (op1 == 0) {
1290 gen_movl_T1_reg(s, rn);
1291 gen_op_addl_T0_T1_setq();
1293 gen_movl_reg_T0(s, rd);
1296 break;
1297 default:
1298 goto illegal_op;
1300 } else if (((insn & 0x0e000000) == 0 &&
1301 (insn & 0x00000090) != 0x90) ||
1302 ((insn & 0x0e000000) == (1 << 25))) {
1303 int set_cc, logic_cc, shiftop;
1305 op1 = (insn >> 21) & 0xf;
1306 set_cc = (insn >> 20) & 1;
1307 logic_cc = table_logic_cc[op1] & set_cc;
1309 /* data processing instruction */
1310 if (insn & (1 << 25)) {
1311 /* immediate operand */
1312 val = insn & 0xff;
1313 shift = ((insn >> 8) & 0xf) * 2;
1314 if (shift)
1315 val = (val >> shift) | (val << (32 - shift));
1316 gen_op_movl_T1_im(val);
1317 if (logic_cc && shift)
1318 gen_op_mov_CF_T1();
1319 } else {
1320 /* register */
1321 rm = (insn) & 0xf;
1322 gen_movl_T1_reg(s, rm);
1323 shiftop = (insn >> 5) & 3;
1324 if (!(insn & (1 << 4))) {
1325 shift = (insn >> 7) & 0x1f;
1326 if (shift != 0) {
1327 if (logic_cc) {
1328 gen_shift_T1_im_cc[shiftop](shift);
1329 } else {
1330 gen_shift_T1_im[shiftop](shift);
1332 } else if (shiftop != 0) {
1333 if (logic_cc) {
1334 gen_shift_T1_0_cc[shiftop]();
1335 } else {
1336 gen_shift_T1_0[shiftop]();
1339 } else {
1340 rs = (insn >> 8) & 0xf;
1341 gen_movl_T0_reg(s, rs);
1342 if (logic_cc) {
1343 gen_shift_T1_T0_cc[shiftop]();
1344 } else {
1345 gen_shift_T1_T0[shiftop]();
1349 if (op1 != 0x0f && op1 != 0x0d) {
1350 rn = (insn >> 16) & 0xf;
1351 gen_movl_T0_reg(s, rn);
1353 rd = (insn >> 12) & 0xf;
1354 switch(op1) {
1355 case 0x00:
1356 gen_op_andl_T0_T1();
1357 gen_movl_reg_T0(s, rd);
1358 if (logic_cc)
1359 gen_op_logic_T0_cc();
1360 break;
1361 case 0x01:
1362 gen_op_xorl_T0_T1();
1363 gen_movl_reg_T0(s, rd);
1364 if (logic_cc)
1365 gen_op_logic_T0_cc();
1366 break;
1367 case 0x02:
1368 if (set_cc && rd == 15) {
1369 /* SUBS r15, ... is used for exception return. */
1370 if (IS_USER(s))
1371 goto illegal_op;
1372 gen_op_subl_T0_T1_cc();
1373 gen_exception_return(s);
1374 } else {
1375 if (set_cc)
1376 gen_op_subl_T0_T1_cc();
1377 else
1378 gen_op_subl_T0_T1();
1379 gen_movl_reg_T0(s, rd);
1381 break;
1382 case 0x03:
1383 if (set_cc)
1384 gen_op_rsbl_T0_T1_cc();
1385 else
1386 gen_op_rsbl_T0_T1();
1387 gen_movl_reg_T0(s, rd);
1388 break;
1389 case 0x04:
1390 if (set_cc)
1391 gen_op_addl_T0_T1_cc();
1392 else
1393 gen_op_addl_T0_T1();
1394 gen_movl_reg_T0(s, rd);
1395 break;
1396 case 0x05:
1397 if (set_cc)
1398 gen_op_adcl_T0_T1_cc();
1399 else
1400 gen_op_adcl_T0_T1();
1401 gen_movl_reg_T0(s, rd);
1402 break;
1403 case 0x06:
1404 if (set_cc)
1405 gen_op_sbcl_T0_T1_cc();
1406 else
1407 gen_op_sbcl_T0_T1();
1408 gen_movl_reg_T0(s, rd);
1409 break;
1410 case 0x07:
1411 if (set_cc)
1412 gen_op_rscl_T0_T1_cc();
1413 else
1414 gen_op_rscl_T0_T1();
1415 gen_movl_reg_T0(s, rd);
1416 break;
1417 case 0x08:
1418 if (set_cc) {
1419 gen_op_andl_T0_T1();
1420 gen_op_logic_T0_cc();
1422 break;
1423 case 0x09:
1424 if (set_cc) {
1425 gen_op_xorl_T0_T1();
1426 gen_op_logic_T0_cc();
1428 break;
1429 case 0x0a:
1430 if (set_cc) {
1431 gen_op_subl_T0_T1_cc();
1433 break;
1434 case 0x0b:
1435 if (set_cc) {
1436 gen_op_addl_T0_T1_cc();
1438 break;
1439 case 0x0c:
1440 gen_op_orl_T0_T1();
1441 gen_movl_reg_T0(s, rd);
1442 if (logic_cc)
1443 gen_op_logic_T0_cc();
1444 break;
1445 case 0x0d:
1446 if (logic_cc && rd == 15) {
1447 /* MOVS r15, ... is used for exception return. */
1448 if (IS_USER(s))
1449 goto illegal_op;
1450 gen_op_movl_T0_T1();
1451 gen_exception_return(s);
1452 } else {
1453 gen_movl_reg_T1(s, rd);
1454 if (logic_cc)
1455 gen_op_logic_T1_cc();
1457 break;
1458 case 0x0e:
1459 gen_op_bicl_T0_T1();
1460 gen_movl_reg_T0(s, rd);
1461 if (logic_cc)
1462 gen_op_logic_T0_cc();
1463 break;
1464 default:
1465 case 0x0f:
1466 gen_op_notl_T1();
1467 gen_movl_reg_T1(s, rd);
1468 if (logic_cc)
1469 gen_op_logic_T1_cc();
1470 break;
1472 } else {
1473 /* other instructions */
1474 op1 = (insn >> 24) & 0xf;
1475 switch(op1) {
1476 case 0x0:
1477 case 0x1:
1478 /* multiplies, extra load/stores */
1479 sh = (insn >> 5) & 3;
1480 if (sh == 0) {
1481 if (op1 == 0x0) {
1482 rd = (insn >> 16) & 0xf;
1483 rn = (insn >> 12) & 0xf;
1484 rs = (insn >> 8) & 0xf;
1485 rm = (insn) & 0xf;
1486 if (((insn >> 22) & 3) == 0) {
1487 /* 32 bit mul */
1488 gen_movl_T0_reg(s, rs);
1489 gen_movl_T1_reg(s, rm);
1490 gen_op_mul_T0_T1();
1491 if (insn & (1 << 21)) {
1492 gen_movl_T1_reg(s, rn);
1493 gen_op_addl_T0_T1();
1495 if (insn & (1 << 20))
1496 gen_op_logic_T0_cc();
1497 gen_movl_reg_T0(s, rd);
1498 } else {
1499 /* 64 bit mul */
1500 gen_movl_T0_reg(s, rs);
1501 gen_movl_T1_reg(s, rm);
1502 if (insn & (1 << 22))
1503 gen_op_imull_T0_T1();
1504 else
1505 gen_op_mull_T0_T1();
1506 if (insn & (1 << 21)) /* mult accumulate */
1507 gen_op_addq_T0_T1(rn, rd);
1508 if (!(insn & (1 << 23))) { /* double accumulate */
1509 ARCH(6);
1510 gen_op_addq_lo_T0_T1(rn);
1511 gen_op_addq_lo_T0_T1(rd);
1513 if (insn & (1 << 20))
1514 gen_op_logicq_cc();
1515 gen_movl_reg_T0(s, rn);
1516 gen_movl_reg_T1(s, rd);
1518 } else {
1519 rn = (insn >> 16) & 0xf;
1520 rd = (insn >> 12) & 0xf;
1521 if (insn & (1 << 23)) {
1522 /* load/store exclusive */
1523 goto illegal_op;
1524 } else {
1525 /* SWP instruction */
1526 rm = (insn) & 0xf;
1528 gen_movl_T0_reg(s, rm);
1529 gen_movl_T1_reg(s, rn);
1530 if (insn & (1 << 22)) {
1531 gen_ldst(swpb, s);
1532 } else {
1533 gen_ldst(swpl, s);
1535 gen_movl_reg_T0(s, rd);
1538 } else {
1539 int address_offset;
1540 int load;
1541 /* Misc load/store */
1542 rn = (insn >> 16) & 0xf;
1543 rd = (insn >> 12) & 0xf;
1544 gen_movl_T1_reg(s, rn);
1545 if (insn & (1 << 24))
1546 gen_add_datah_offset(s, insn, 0);
1547 address_offset = 0;
1548 if (insn & (1 << 20)) {
1549 /* load */
1550 switch(sh) {
1551 case 1:
1552 gen_ldst(lduw, s);
1553 break;
1554 case 2:
1555 gen_ldst(ldsb, s);
1556 break;
1557 default:
1558 case 3:
1559 gen_ldst(ldsw, s);
1560 break;
1562 load = 1;
1563 } else if (sh & 2) {
1564 /* doubleword */
1565 if (sh & 1) {
1566 /* store */
1567 gen_movl_T0_reg(s, rd);
1568 gen_ldst(stl, s);
1569 gen_op_addl_T1_im(4);
1570 gen_movl_T0_reg(s, rd + 1);
1571 gen_ldst(stl, s);
1572 load = 0;
1573 } else {
1574 /* load */
1575 gen_ldst(ldl, s);
1576 gen_movl_reg_T0(s, rd);
1577 gen_op_addl_T1_im(4);
1578 gen_ldst(ldl, s);
1579 rd++;
1580 load = 1;
1582 address_offset = -4;
1583 } else {
1584 /* store */
1585 gen_movl_T0_reg(s, rd);
1586 gen_ldst(stw, s);
1587 load = 0;
1589 /* Perform base writeback before the loaded value to
1590 ensure correct behavior with overlapping index registers.
1591 ldrd with base writeback is is undefined if the
1592 destination and index registers overlap. */
1593 if (!(insn & (1 << 24))) {
1594 gen_add_datah_offset(s, insn, address_offset);
1595 gen_movl_reg_T1(s, rn);
1596 } else if (insn & (1 << 21)) {
1597 if (address_offset)
1598 gen_op_addl_T1_im(address_offset);
1599 gen_movl_reg_T1(s, rn);
1601 if (load) {
1602 /* Complete the load. */
1603 gen_movl_reg_T0(s, rd);
1606 break;
1607 case 0x4:
1608 case 0x5:
1609 case 0x6:
1610 case 0x7:
1611 /* Check for undefined extension instructions
1612 * per the ARM Bible IE:
1613 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
1615 sh = (0xf << 20) | (0xf << 4);
1616 if (op1 == 0x7 && ((insn & sh) == sh))
1618 goto illegal_op;
1620 /* load/store byte/word */
1621 rn = (insn >> 16) & 0xf;
1622 rd = (insn >> 12) & 0xf;
1623 gen_movl_T1_reg(s, rn);
1624 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1625 if (insn & (1 << 24))
1626 gen_add_data_offset(s, insn);
1627 if (insn & (1 << 20)) {
1628 /* load */
1629 s->is_mem = 1;
1630 #if defined(CONFIG_USER_ONLY)
1631 if (insn & (1 << 22))
1632 gen_op_ldub_raw();
1633 else
1634 gen_op_ldl_raw();
1635 #else
1636 if (insn & (1 << 22)) {
1637 if (i)
1638 gen_op_ldub_user();
1639 else
1640 gen_op_ldub_kernel();
1641 } else {
1642 if (i)
1643 gen_op_ldl_user();
1644 else
1645 gen_op_ldl_kernel();
1647 #endif
1648 } else {
1649 /* store */
1650 gen_movl_T0_reg(s, rd);
1651 #if defined(CONFIG_USER_ONLY)
1652 if (insn & (1 << 22))
1653 gen_op_stb_raw();
1654 else
1655 gen_op_stl_raw();
1656 #else
1657 if (insn & (1 << 22)) {
1658 if (i)
1659 gen_op_stb_user();
1660 else
1661 gen_op_stb_kernel();
1662 } else {
1663 if (i)
1664 gen_op_stl_user();
1665 else
1666 gen_op_stl_kernel();
1668 #endif
1670 if (!(insn & (1 << 24))) {
1671 gen_add_data_offset(s, insn);
1672 gen_movl_reg_T1(s, rn);
1673 } else if (insn & (1 << 21))
1674 gen_movl_reg_T1(s, rn); {
1676 if (insn & (1 << 20)) {
1677 /* Complete the load. */
1678 if (rd == 15)
1679 gen_bx(s);
1680 else
1681 gen_movl_reg_T0(s, rd);
1683 break;
1684 case 0x08:
1685 case 0x09:
1687 int j, n, user, loaded_base;
1688 /* load/store multiple words */
1689 /* XXX: store correct base if write back */
1690 user = 0;
1691 if (insn & (1 << 22)) {
1692 if (IS_USER(s))
1693 goto illegal_op; /* only usable in supervisor mode */
1695 if ((insn & (1 << 15)) == 0)
1696 user = 1;
1698 rn = (insn >> 16) & 0xf;
1699 gen_movl_T1_reg(s, rn);
1701 /* compute total size */
1702 loaded_base = 0;
1703 n = 0;
1704 for(i=0;i<16;i++) {
1705 if (insn & (1 << i))
1706 n++;
1708 /* XXX: test invalid n == 0 case ? */
1709 if (insn & (1 << 23)) {
1710 if (insn & (1 << 24)) {
1711 /* pre increment */
1712 gen_op_addl_T1_im(4);
1713 } else {
1714 /* post increment */
1716 } else {
1717 if (insn & (1 << 24)) {
1718 /* pre decrement */
1719 gen_op_addl_T1_im(-(n * 4));
1720 } else {
1721 /* post decrement */
1722 if (n != 1)
1723 gen_op_addl_T1_im(-((n - 1) * 4));
1726 j = 0;
1727 for(i=0;i<16;i++) {
1728 if (insn & (1 << i)) {
1729 if (insn & (1 << 20)) {
1730 /* load */
1731 gen_ldst(ldl, s);
1732 if (i == 15) {
1733 gen_bx(s);
1734 } else if (user) {
1735 gen_op_movl_user_T0(i);
1736 } else if (i == rn) {
1737 gen_op_movl_T2_T0();
1738 loaded_base = 1;
1739 } else {
1740 gen_movl_reg_T0(s, i);
1742 } else {
1743 /* store */
1744 if (i == 15) {
1745 /* special case: r15 = PC + 12 */
1746 val = (long)s->pc + 8;
1747 gen_op_movl_TN_im[0](val);
1748 } else if (user) {
1749 gen_op_movl_T0_user(i);
1750 } else {
1751 gen_movl_T0_reg(s, i);
1753 gen_ldst(stl, s);
1755 j++;
1756 /* no need to add after the last transfer */
1757 if (j != n)
1758 gen_op_addl_T1_im(4);
1761 if (insn & (1 << 21)) {
1762 /* write back */
1763 if (insn & (1 << 23)) {
1764 if (insn & (1 << 24)) {
1765 /* pre increment */
1766 } else {
1767 /* post increment */
1768 gen_op_addl_T1_im(4);
1770 } else {
1771 if (insn & (1 << 24)) {
1772 /* pre decrement */
1773 if (n != 1)
1774 gen_op_addl_T1_im(-((n - 1) * 4));
1775 } else {
1776 /* post decrement */
1777 gen_op_addl_T1_im(-(n * 4));
1780 gen_movl_reg_T1(s, rn);
1782 if (loaded_base) {
1783 gen_op_movl_T0_T2();
1784 gen_movl_reg_T0(s, rn);
1786 if ((insn & (1 << 22)) && !user) {
1787 /* Restore CPSR from SPSR. */
1788 gen_op_movl_T0_spsr();
1789 gen_op_movl_cpsr_T0(0xffffffff);
1790 s->is_jmp = DISAS_UPDATE;
1793 break;
1794 case 0xa:
1795 case 0xb:
1797 int32_t offset;
1799 /* branch (and link) */
1800 val = (int32_t)s->pc;
1801 if (insn & (1 << 24)) {
1802 gen_op_movl_T0_im(val);
1803 gen_op_movl_reg_TN[0][14]();
1805 offset = (((int32_t)insn << 8) >> 8);
1806 val += (offset << 2) + 4;
1807 gen_jmp(s, val);
1809 break;
1810 case 0xc:
1811 case 0xd:
1812 case 0xe:
1813 /* Coprocessor. */
1814 op1 = (insn >> 8) & 0xf;
1815 switch (op1) {
1816 case 10:
1817 case 11:
1818 if (disas_vfp_insn (env, s, insn))
1819 goto illegal_op;
1820 break;
1821 case 15:
1822 if (disas_cp15_insn (s, insn))
1823 goto illegal_op;
1824 break;
1825 default:
1826 /* unknown coprocessor. */
1827 goto illegal_op;
1829 break;
1830 case 0xf:
1831 /* swi */
1832 gen_op_movl_T0_im((long)s->pc);
1833 gen_op_movl_reg_TN[0][15]();
1834 gen_op_swi();
1835 s->is_jmp = DISAS_JUMP;
1836 break;
1837 default:
1838 illegal_op:
1839 gen_op_movl_T0_im((long)s->pc - 4);
1840 gen_op_movl_reg_TN[0][15]();
1841 gen_op_undef_insn();
1842 s->is_jmp = DISAS_JUMP;
1843 break;
1848 static void disas_thumb_insn(DisasContext *s)
1850 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1851 int32_t offset;
1852 int i;
1854 insn = lduw_code(s->pc);
1855 s->pc += 2;
1857 switch (insn >> 12) {
1858 case 0: case 1:
1859 rd = insn & 7;
1860 op = (insn >> 11) & 3;
1861 if (op == 3) {
1862 /* add/subtract */
1863 rn = (insn >> 3) & 7;
1864 gen_movl_T0_reg(s, rn);
1865 if (insn & (1 << 10)) {
1866 /* immediate */
1867 gen_op_movl_T1_im((insn >> 6) & 7);
1868 } else {
1869 /* reg */
1870 rm = (insn >> 6) & 7;
1871 gen_movl_T1_reg(s, rm);
1873 if (insn & (1 << 9))
1874 gen_op_subl_T0_T1_cc();
1875 else
1876 gen_op_addl_T0_T1_cc();
1877 gen_movl_reg_T0(s, rd);
1878 } else {
1879 /* shift immediate */
1880 rm = (insn >> 3) & 7;
1881 shift = (insn >> 6) & 0x1f;
1882 gen_movl_T0_reg(s, rm);
1883 gen_shift_T0_im_thumb[op](shift);
1884 gen_movl_reg_T0(s, rd);
1886 break;
1887 case 2: case 3:
1888 /* arithmetic large immediate */
1889 op = (insn >> 11) & 3;
1890 rd = (insn >> 8) & 0x7;
1891 if (op == 0) {
1892 gen_op_movl_T0_im(insn & 0xff);
1893 } else {
1894 gen_movl_T0_reg(s, rd);
1895 gen_op_movl_T1_im(insn & 0xff);
1897 switch (op) {
1898 case 0: /* mov */
1899 gen_op_logic_T0_cc();
1900 break;
1901 case 1: /* cmp */
1902 gen_op_subl_T0_T1_cc();
1903 break;
1904 case 2: /* add */
1905 gen_op_addl_T0_T1_cc();
1906 break;
1907 case 3: /* sub */
1908 gen_op_subl_T0_T1_cc();
1909 break;
1911 if (op != 1)
1912 gen_movl_reg_T0(s, rd);
1913 break;
1914 case 4:
1915 if (insn & (1 << 11)) {
1916 rd = (insn >> 8) & 7;
1917 /* load pc-relative. Bit 1 of PC is ignored. */
1918 val = s->pc + 2 + ((insn & 0xff) * 4);
1919 val &= ~(uint32_t)2;
1920 gen_op_movl_T1_im(val);
1921 gen_ldst(ldl, s);
1922 gen_movl_reg_T0(s, rd);
1923 break;
1925 if (insn & (1 << 10)) {
1926 /* data processing extended or blx */
1927 rd = (insn & 7) | ((insn >> 4) & 8);
1928 rm = (insn >> 3) & 0xf;
1929 op = (insn >> 8) & 3;
1930 switch (op) {
1931 case 0: /* add */
1932 gen_movl_T0_reg(s, rd);
1933 gen_movl_T1_reg(s, rm);
1934 gen_op_addl_T0_T1();
1935 gen_movl_reg_T0(s, rd);
1936 break;
1937 case 1: /* cmp */
1938 gen_movl_T0_reg(s, rd);
1939 gen_movl_T1_reg(s, rm);
1940 gen_op_subl_T0_T1_cc();
1941 break;
1942 case 2: /* mov/cpy */
1943 gen_movl_T0_reg(s, rm);
1944 gen_movl_reg_T0(s, rd);
1945 break;
1946 case 3:/* branch [and link] exchange thumb register */
1947 if (insn & (1 << 7)) {
1948 val = (uint32_t)s->pc | 1;
1949 gen_op_movl_T1_im(val);
1950 gen_movl_reg_T1(s, 14);
1952 gen_movl_T0_reg(s, rm);
1953 gen_bx(s);
1954 break;
1956 break;
1959 /* data processing register */
1960 rd = insn & 7;
1961 rm = (insn >> 3) & 7;
1962 op = (insn >> 6) & 0xf;
1963 if (op == 2 || op == 3 || op == 4 || op == 7) {
1964 /* the shift/rotate ops want the operands backwards */
1965 val = rm;
1966 rm = rd;
1967 rd = val;
1968 val = 1;
1969 } else {
1970 val = 0;
1973 if (op == 9) /* neg */
1974 gen_op_movl_T0_im(0);
1975 else if (op != 0xf) /* mvn doesn't read its first operand */
1976 gen_movl_T0_reg(s, rd);
1978 gen_movl_T1_reg(s, rm);
1979 switch (op) {
1980 case 0x0: /* and */
1981 gen_op_andl_T0_T1();
1982 gen_op_logic_T0_cc();
1983 break;
1984 case 0x1: /* eor */
1985 gen_op_xorl_T0_T1();
1986 gen_op_logic_T0_cc();
1987 break;
1988 case 0x2: /* lsl */
1989 gen_op_shll_T1_T0_cc();
1990 gen_op_logic_T1_cc();
1991 break;
1992 case 0x3: /* lsr */
1993 gen_op_shrl_T1_T0_cc();
1994 gen_op_logic_T1_cc();
1995 break;
1996 case 0x4: /* asr */
1997 gen_op_sarl_T1_T0_cc();
1998 gen_op_logic_T1_cc();
1999 break;
2000 case 0x5: /* adc */
2001 gen_op_adcl_T0_T1_cc();
2002 break;
2003 case 0x6: /* sbc */
2004 gen_op_sbcl_T0_T1_cc();
2005 break;
2006 case 0x7: /* ror */
2007 gen_op_rorl_T1_T0_cc();
2008 gen_op_logic_T1_cc();
2009 break;
2010 case 0x8: /* tst */
2011 gen_op_andl_T0_T1();
2012 gen_op_logic_T0_cc();
2013 rd = 16;
2014 break;
2015 case 0x9: /* neg */
2016 gen_op_subl_T0_T1_cc();
2017 break;
2018 case 0xa: /* cmp */
2019 gen_op_subl_T0_T1_cc();
2020 rd = 16;
2021 break;
2022 case 0xb: /* cmn */
2023 gen_op_addl_T0_T1_cc();
2024 rd = 16;
2025 break;
2026 case 0xc: /* orr */
2027 gen_op_orl_T0_T1();
2028 gen_op_logic_T0_cc();
2029 break;
2030 case 0xd: /* mul */
2031 gen_op_mull_T0_T1();
2032 gen_op_logic_T0_cc();
2033 break;
2034 case 0xe: /* bic */
2035 gen_op_bicl_T0_T1();
2036 gen_op_logic_T0_cc();
2037 break;
2038 case 0xf: /* mvn */
2039 gen_op_notl_T1();
2040 gen_op_logic_T1_cc();
2041 val = 1;
2042 rm = rd;
2043 break;
2045 if (rd != 16) {
2046 if (val)
2047 gen_movl_reg_T1(s, rm);
2048 else
2049 gen_movl_reg_T0(s, rd);
2051 break;
2053 case 5:
2054 /* load/store register offset. */
2055 rd = insn & 7;
2056 rn = (insn >> 3) & 7;
2057 rm = (insn >> 6) & 7;
2058 op = (insn >> 9) & 7;
2059 gen_movl_T1_reg(s, rn);
2060 gen_movl_T2_reg(s, rm);
2061 gen_op_addl_T1_T2();
2063 if (op < 3) /* store */
2064 gen_movl_T0_reg(s, rd);
2066 switch (op) {
2067 case 0: /* str */
2068 gen_ldst(stl, s);
2069 break;
2070 case 1: /* strh */
2071 gen_ldst(stw, s);
2072 break;
2073 case 2: /* strb */
2074 gen_ldst(stb, s);
2075 break;
2076 case 3: /* ldrsb */
2077 gen_ldst(ldsb, s);
2078 break;
2079 case 4: /* ldr */
2080 gen_ldst(ldl, s);
2081 break;
2082 case 5: /* ldrh */
2083 gen_ldst(lduw, s);
2084 break;
2085 case 6: /* ldrb */
2086 gen_ldst(ldub, s);
2087 break;
2088 case 7: /* ldrsh */
2089 gen_ldst(ldsw, s);
2090 break;
2092 if (op >= 3) /* load */
2093 gen_movl_reg_T0(s, rd);
2094 break;
2096 case 6:
2097 /* load/store word immediate offset */
2098 rd = insn & 7;
2099 rn = (insn >> 3) & 7;
2100 gen_movl_T1_reg(s, rn);
2101 val = (insn >> 4) & 0x7c;
2102 gen_op_movl_T2_im(val);
2103 gen_op_addl_T1_T2();
2105 if (insn & (1 << 11)) {
2106 /* load */
2107 gen_ldst(ldl, s);
2108 gen_movl_reg_T0(s, rd);
2109 } else {
2110 /* store */
2111 gen_movl_T0_reg(s, rd);
2112 gen_ldst(stl, s);
2114 break;
2116 case 7:
2117 /* load/store byte immediate offset */
2118 rd = insn & 7;
2119 rn = (insn >> 3) & 7;
2120 gen_movl_T1_reg(s, rn);
2121 val = (insn >> 6) & 0x1f;
2122 gen_op_movl_T2_im(val);
2123 gen_op_addl_T1_T2();
2125 if (insn & (1 << 11)) {
2126 /* load */
2127 gen_ldst(ldub, s);
2128 gen_movl_reg_T0(s, rd);
2129 } else {
2130 /* store */
2131 gen_movl_T0_reg(s, rd);
2132 gen_ldst(stb, s);
2134 break;
2136 case 8:
2137 /* load/store halfword immediate offset */
2138 rd = insn & 7;
2139 rn = (insn >> 3) & 7;
2140 gen_movl_T1_reg(s, rn);
2141 val = (insn >> 5) & 0x3e;
2142 gen_op_movl_T2_im(val);
2143 gen_op_addl_T1_T2();
2145 if (insn & (1 << 11)) {
2146 /* load */
2147 gen_ldst(lduw, s);
2148 gen_movl_reg_T0(s, rd);
2149 } else {
2150 /* store */
2151 gen_movl_T0_reg(s, rd);
2152 gen_ldst(stw, s);
2154 break;
2156 case 9:
2157 /* load/store from stack */
2158 rd = (insn >> 8) & 7;
2159 gen_movl_T1_reg(s, 13);
2160 val = (insn & 0xff) * 4;
2161 gen_op_movl_T2_im(val);
2162 gen_op_addl_T1_T2();
2164 if (insn & (1 << 11)) {
2165 /* load */
2166 gen_ldst(ldl, s);
2167 gen_movl_reg_T0(s, rd);
2168 } else {
2169 /* store */
2170 gen_movl_T0_reg(s, rd);
2171 gen_ldst(stl, s);
2173 break;
2175 case 10:
2176 /* add to high reg */
2177 rd = (insn >> 8) & 7;
2178 if (insn & (1 << 11)) {
2179 /* SP */
2180 gen_movl_T0_reg(s, 13);
2181 } else {
2182 /* PC. bit 1 is ignored. */
2183 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2185 val = (insn & 0xff) * 4;
2186 gen_op_movl_T1_im(val);
2187 gen_op_addl_T0_T1();
2188 gen_movl_reg_T0(s, rd);
2189 break;
2191 case 11:
2192 /* misc */
2193 op = (insn >> 8) & 0xf;
2194 switch (op) {
2195 case 0:
2196 /* adjust stack pointer */
2197 gen_movl_T1_reg(s, 13);
2198 val = (insn & 0x7f) * 4;
2199 if (insn & (1 << 7))
2200 val = -(int32_t)val;
2201 gen_op_movl_T2_im(val);
2202 gen_op_addl_T1_T2();
2203 gen_movl_reg_T1(s, 13);
2204 break;
2206 case 4: case 5: case 0xc: case 0xd:
2207 /* push/pop */
2208 gen_movl_T1_reg(s, 13);
2209 if (insn & (1 << 8))
2210 offset = 4;
2211 else
2212 offset = 0;
2213 for (i = 0; i < 8; i++) {
2214 if (insn & (1 << i))
2215 offset += 4;
2217 if ((insn & (1 << 11)) == 0) {
2218 gen_op_movl_T2_im(-offset);
2219 gen_op_addl_T1_T2();
2221 gen_op_movl_T2_im(4);
2222 for (i = 0; i < 8; i++) {
2223 if (insn & (1 << i)) {
2224 if (insn & (1 << 11)) {
2225 /* pop */
2226 gen_ldst(ldl, s);
2227 gen_movl_reg_T0(s, i);
2228 } else {
2229 /* push */
2230 gen_movl_T0_reg(s, i);
2231 gen_ldst(stl, s);
2233 /* advance to the next address. */
2234 gen_op_addl_T1_T2();
2237 if (insn & (1 << 8)) {
2238 if (insn & (1 << 11)) {
2239 /* pop pc */
2240 gen_ldst(ldl, s);
2241 /* don't set the pc until the rest of the instruction
2242 has completed */
2243 } else {
2244 /* push lr */
2245 gen_movl_T0_reg(s, 14);
2246 gen_ldst(stl, s);
2248 gen_op_addl_T1_T2();
2250 if ((insn & (1 << 11)) == 0) {
2251 gen_op_movl_T2_im(-offset);
2252 gen_op_addl_T1_T2();
2254 /* write back the new stack pointer */
2255 gen_movl_reg_T1(s, 13);
2256 /* set the new PC value */
2257 if ((insn & 0x0900) == 0x0900)
2258 gen_bx(s);
2259 break;
2261 case 0xe: /* bkpt */
2262 gen_op_movl_T0_im((long)s->pc - 2);
2263 gen_op_movl_reg_TN[0][15]();
2264 gen_op_bkpt();
2265 s->is_jmp = DISAS_JUMP;
2266 break;
2268 default:
2269 goto undef;
2271 break;
2273 case 12:
2274 /* load/store multiple */
2275 rn = (insn >> 8) & 0x7;
2276 gen_movl_T1_reg(s, rn);
2277 gen_op_movl_T2_im(4);
2278 for (i = 0; i < 8; i++) {
2279 if (insn & (1 << i)) {
2280 if (insn & (1 << 11)) {
2281 /* load */
2282 gen_ldst(ldl, s);
2283 gen_movl_reg_T0(s, i);
2284 } else {
2285 /* store */
2286 gen_movl_T0_reg(s, i);
2287 gen_ldst(stl, s);
2289 /* advance to the next address */
2290 gen_op_addl_T1_T2();
2293 /* Base register writeback. */
2294 if ((insn & (1 << rn)) == 0)
2295 gen_movl_reg_T1(s, rn);
2296 break;
2298 case 13:
2299 /* conditional branch or swi */
2300 cond = (insn >> 8) & 0xf;
2301 if (cond == 0xe)
2302 goto undef;
2304 if (cond == 0xf) {
2305 /* swi */
2306 gen_op_movl_T0_im((long)s->pc | 1);
2307 /* Don't set r15. */
2308 gen_op_movl_reg_TN[0][15]();
2309 gen_op_swi();
2310 s->is_jmp = DISAS_JUMP;
2311 break;
2313 /* generate a conditional jump to next instruction */
2314 s->condlabel = gen_new_label();
2315 gen_test_cc[cond ^ 1](s->condlabel);
2316 s->condjmp = 1;
2317 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2318 //s->is_jmp = DISAS_JUMP_NEXT;
2319 gen_movl_T1_reg(s, 15);
2321 /* jump to the offset */
2322 val = (uint32_t)s->pc + 2;
2323 offset = ((int32_t)insn << 24) >> 24;
2324 val += offset << 1;
2325 gen_jmp(s, val);
2326 break;
2328 case 14:
2329 /* unconditional branch */
2330 if (insn & (1 << 11)) {
2331 /* Second half of blx. */
2332 offset = ((insn & 0x7ff) << 1);
2333 gen_movl_T0_reg(s, 14);
2334 gen_op_movl_T1_im(offset);
2335 gen_op_addl_T0_T1();
2336 gen_op_movl_T1_im(0xfffffffc);
2337 gen_op_andl_T0_T1();
2339 val = (uint32_t)s->pc;
2340 gen_op_movl_T1_im(val | 1);
2341 gen_movl_reg_T1(s, 14);
2342 gen_bx(s);
2343 break;
2345 val = (uint32_t)s->pc;
2346 offset = ((int32_t)insn << 21) >> 21;
2347 val += (offset << 1) + 2;
2348 gen_jmp(s, val);
2349 break;
2351 case 15:
2352 /* branch and link [and switch to arm] */
2353 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
2354 /* Instruction spans a page boundary. Implement it as two
2355 16-bit instructions in case the second half causes an
2356 prefetch abort. */
2357 offset = ((int32_t)insn << 21) >> 9;
2358 val = s->pc + 2 + offset;
2359 gen_op_movl_T0_im(val);
2360 gen_movl_reg_T0(s, 14);
2361 break;
2363 if (insn & (1 << 11)) {
2364 /* Second half of bl. */
2365 offset = ((insn & 0x7ff) << 1) | 1;
2366 gen_movl_T0_reg(s, 14);
2367 gen_op_movl_T1_im(offset);
2368 gen_op_addl_T0_T1();
2370 val = (uint32_t)s->pc;
2371 gen_op_movl_T1_im(val | 1);
2372 gen_movl_reg_T1(s, 14);
2373 gen_bx(s);
2374 break;
2376 offset = ((int32_t)insn << 21) >> 10;
2377 insn = lduw_code(s->pc);
2378 offset |= insn & 0x7ff;
2380 val = (uint32_t)s->pc + 2;
2381 gen_op_movl_T1_im(val | 1);
2382 gen_movl_reg_T1(s, 14);
2384 val += offset << 1;
2385 if (insn & (1 << 12)) {
2386 /* bl */
2387 gen_jmp(s, val);
2388 } else {
2389 /* blx */
2390 val &= ~(uint32_t)2;
2391 gen_op_movl_T0_im(val);
2392 gen_bx(s);
2395 return;
2396 undef:
2397 gen_op_movl_T0_im((long)s->pc - 2);
2398 gen_op_movl_reg_TN[0][15]();
2399 gen_op_undef_insn();
2400 s->is_jmp = DISAS_JUMP;
2403 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2404 basic block 'tb'. If search_pc is TRUE, also generate PC
2405 information for each intermediate instruction. */
2406 static inline int gen_intermediate_code_internal(CPUState *env,
2407 TranslationBlock *tb,
2408 int search_pc)
2410 DisasContext dc1, *dc = &dc1;
2411 uint16_t *gen_opc_end;
2412 int j, lj;
2413 target_ulong pc_start;
2414 uint32_t next_page_start;
2416 /* generate intermediate code */
2417 pc_start = tb->pc;
2419 dc->tb = tb;
2421 gen_opc_ptr = gen_opc_buf;
2422 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2423 gen_opparam_ptr = gen_opparam_buf;
2425 dc->is_jmp = DISAS_NEXT;
2426 dc->pc = pc_start;
2427 dc->singlestep_enabled = env->singlestep_enabled;
2428 dc->condjmp = 0;
2429 dc->thumb = env->thumb;
2430 dc->is_mem = 0;
2431 #if !defined(CONFIG_USER_ONLY)
2432 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
2433 #endif
2434 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2435 nb_gen_labels = 0;
2436 lj = -1;
2437 do {
2438 if (env->nb_breakpoints > 0) {
2439 for(j = 0; j < env->nb_breakpoints; j++) {
2440 if (env->breakpoints[j] == dc->pc) {
2441 gen_op_movl_T0_im((long)dc->pc);
2442 gen_op_movl_reg_TN[0][15]();
2443 gen_op_debug();
2444 dc->is_jmp = DISAS_JUMP;
2445 break;
2449 if (search_pc) {
2450 j = gen_opc_ptr - gen_opc_buf;
2451 if (lj < j) {
2452 lj++;
2453 while (lj < j)
2454 gen_opc_instr_start[lj++] = 0;
2456 gen_opc_pc[lj] = dc->pc;
2457 gen_opc_instr_start[lj] = 1;
2460 if (env->thumb)
2461 disas_thumb_insn(dc);
2462 else
2463 disas_arm_insn(env, dc);
2465 if (dc->condjmp && !dc->is_jmp) {
2466 gen_set_label(dc->condlabel);
2467 dc->condjmp = 0;
2469 /* Terminate the TB on memory ops if watchpoints are present. */
2470 /* FIXME: This should be replacd by the deterministic execution
2471 * IRQ raising bits. */
2472 if (dc->is_mem && env->nb_watchpoints)
2473 break;
2475 /* Translation stops when a conditional branch is enoutered.
2476 * Otherwise the subsequent code could get translated several times.
2477 * Also stop translation when a page boundary is reached. This
2478 * ensures prefech aborts occur at the right place. */
2479 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2480 !env->singlestep_enabled &&
2481 dc->pc < next_page_start);
2482 /* At this stage dc->condjmp will only be set when the skipped
2483 * instruction was a conditional branch, and the PC has already been
2484 * written. */
2485 if (__builtin_expect(env->singlestep_enabled, 0)) {
2486 /* Make sure the pc is updated, and raise a debug exception. */
2487 if (dc->condjmp) {
2488 gen_op_debug();
2489 gen_set_label(dc->condlabel);
2491 if (dc->condjmp || !dc->is_jmp) {
2492 gen_op_movl_T0_im((long)dc->pc);
2493 gen_op_movl_reg_TN[0][15]();
2494 dc->condjmp = 0;
2496 gen_op_debug();
2497 } else {
2498 switch(dc->is_jmp) {
2499 case DISAS_NEXT:
2500 gen_goto_tb(dc, 1, dc->pc);
2501 break;
2502 default:
2503 case DISAS_JUMP:
2504 case DISAS_UPDATE:
2505 /* indicate that the hash table must be used to find the next TB */
2506 gen_op_movl_T0_0();
2507 gen_op_exit_tb();
2508 break;
2509 case DISAS_TB_JUMP:
2510 /* nothing more to generate */
2511 break;
2513 if (dc->condjmp) {
2514 gen_set_label(dc->condlabel);
2515 gen_goto_tb(dc, 1, dc->pc);
2516 dc->condjmp = 0;
2519 *gen_opc_ptr = INDEX_op_end;
2521 #ifdef DEBUG_DISAS
2522 if (loglevel & CPU_LOG_TB_IN_ASM) {
2523 fprintf(logfile, "----------------\n");
2524 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2525 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2526 fprintf(logfile, "\n");
2527 if (loglevel & (CPU_LOG_TB_OP)) {
2528 fprintf(logfile, "OP:\n");
2529 dump_ops(gen_opc_buf, gen_opparam_buf);
2530 fprintf(logfile, "\n");
2533 #endif
2534 if (search_pc) {
2535 j = gen_opc_ptr - gen_opc_buf;
2536 lj++;
2537 while (lj <= j)
2538 gen_opc_instr_start[lj++] = 0;
2539 tb->size = 0;
2540 } else {
2541 tb->size = dc->pc - pc_start;
2543 return 0;
2546 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2548 return gen_intermediate_code_internal(env, tb, 0);
2551 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2553 return gen_intermediate_code_internal(env, tb, 1);
/* Printable names for the CPSR mode field (low 4 bits of the mode
   number), indexed for cpu_dump_state.  "???" marks reserved modes.  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
2560 void cpu_dump_state(CPUState *env, FILE *f,
2561 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2562 int flags)
2564 int i;
2565 union {
2566 uint32_t i;
2567 float s;
2568 } s0, s1;
2569 CPU_DoubleU d;
2570 /* ??? This assumes float64 and double have the same layout.
2571 Oh well, it's only debug dumps. */
2572 union {
2573 float64 f64;
2574 double d;
2575 } d0;
2576 uint32_t psr;
2578 for(i=0;i<16;i++) {
2579 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2580 if ((i % 4) == 3)
2581 cpu_fprintf(f, "\n");
2582 else
2583 cpu_fprintf(f, " ");
2585 psr = cpsr_read(env);
2586 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
2587 psr,
2588 psr & (1 << 31) ? 'N' : '-',
2589 psr & (1 << 30) ? 'Z' : '-',
2590 psr & (1 << 29) ? 'C' : '-',
2591 psr & (1 << 28) ? 'V' : '-',
2592 psr & CPSR_T ? 'T' : 'A',
2593 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2595 for (i = 0; i < 16; i++) {
2596 d.d = env->vfp.regs[i];
2597 s0.i = d.l.lower;
2598 s1.i = d.l.upper;
2599 d0.f64 = d.d;
2600 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2601 i * 2, (int)s0.i, s0.s,
2602 i * 2 + 1, (int)s1.i, s1.s,
2603 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2604 d0.d);
2606 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);