ARM thumb fixes
[qemu/qemu_0_9_1_stable.git] / target-arm / translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
31 /* internal defines */
32 typedef struct DisasContext {
33 target_ulong pc;
34 int is_jmp;
35 /* Nonzero if this instruction has been conditionally skipped. */
36 int condjmp;
37 /* The label that will be jumped to when the instruction is skipped. */
38 int condlabel;
39 struct TranslationBlock *tb;
40 int singlestep_enabled;
41 int thumb;
42 } DisasContext;
44 #define DISAS_JUMP_NEXT 4
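/* The other is_jmp codes (DISAS_NEXT, DISAS_JUMP, DISAS_UPDATE,
   DISAS_TB_JUMP) are defined elsewhere: DISAS_NEXT falls through to the
   next instruction, DISAS_JUMP/DISAS_UPDATE force an exit through
   gen_op_exit_tb() because the PC or other CPU state has changed, and
   DISAS_TB_JUMP means a direct jump to another TB has already been
   emitted.  See the switch at the end of gen_intermediate_code_internal(). */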
46 /* XXX: move that elsewhere */
47 static uint16_t *gen_opc_ptr;
48 static uint32_t *gen_opparam_ptr;
49 extern FILE *logfile;
50 extern int loglevel;
52 enum {
53 #define DEF(s, n, copy_size) INDEX_op_ ## s,
54 #include "opc.h"
55 #undef DEF
56 NB_OPS,
59 #include "gen-op.h"
61 static GenOpFunc1 *gen_test_cc[14] = {
62 gen_op_test_eq,
63 gen_op_test_ne,
64 gen_op_test_cs,
65 gen_op_test_cc,
66 gen_op_test_mi,
67 gen_op_test_pl,
68 gen_op_test_vs,
69 gen_op_test_vc,
70 gen_op_test_hi,
71 gen_op_test_ls,
72 gen_op_test_ge,
73 gen_op_test_lt,
74 gen_op_test_gt,
75 gen_op_test_le,
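/* For each of the 16 ARM data processing opcodes, nonzero if it is a
   logical operation, i.e. one whose N and Z flags are computed from the
   result and whose carry flag comes from the shifter rather than from
   the ALU. */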
78 const uint8_t table_logic_cc[16] = {
79 1, /* and */
80 1, /* xor */
81 0, /* sub */
82 0, /* rsb */
83 0, /* add */
84 0, /* adc */
85 0, /* sbc */
86 0, /* rsc */
87 1, /* andl */
88 1, /* xorl */
89 0, /* cmp */
90 0, /* cmn */
91 1, /* orr */
92 1, /* mov */
93 1, /* bic */
94 1, /* mvn */
97 static GenOpFunc1 *gen_shift_T1_im[4] = {
98 gen_op_shll_T1_im,
99 gen_op_shrl_T1_im,
100 gen_op_sarl_T1_im,
101 gen_op_rorl_T1_im,
104 static GenOpFunc *gen_shift_T1_0[4] = {
105 NULL,
106 gen_op_shrl_T1_0,
107 gen_op_sarl_T1_0,
108 gen_op_rrxl_T1,
111 static GenOpFunc1 *gen_shift_T2_im[4] = {
112 gen_op_shll_T2_im,
113 gen_op_shrl_T2_im,
114 gen_op_sarl_T2_im,
115 gen_op_rorl_T2_im,
118 static GenOpFunc *gen_shift_T2_0[4] = {
119 NULL,
120 gen_op_shrl_T2_0,
121 gen_op_sarl_T2_0,
122 gen_op_rrxl_T2,
125 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
126 gen_op_shll_T1_im_cc,
127 gen_op_shrl_T1_im_cc,
128 gen_op_sarl_T1_im_cc,
129 gen_op_rorl_T1_im_cc,
132 static GenOpFunc *gen_shift_T1_0_cc[4] = {
133 NULL,
134 gen_op_shrl_T1_0_cc,
135 gen_op_sarl_T1_0_cc,
136 gen_op_rrxl_T1_cc,
139 static GenOpFunc *gen_shift_T1_T0[4] = {
140 gen_op_shll_T1_T0,
141 gen_op_shrl_T1_T0,
142 gen_op_sarl_T1_T0,
143 gen_op_rorl_T1_T0,
146 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
147 gen_op_shll_T1_T0_cc,
148 gen_op_shrl_T1_T0_cc,
149 gen_op_sarl_T1_T0_cc,
150 gen_op_rorl_T1_T0_cc,
153 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
155 gen_op_movl_T0_r0,
156 gen_op_movl_T0_r1,
157 gen_op_movl_T0_r2,
158 gen_op_movl_T0_r3,
159 gen_op_movl_T0_r4,
160 gen_op_movl_T0_r5,
161 gen_op_movl_T0_r6,
162 gen_op_movl_T0_r7,
163 gen_op_movl_T0_r8,
164 gen_op_movl_T0_r9,
165 gen_op_movl_T0_r10,
166 gen_op_movl_T0_r11,
167 gen_op_movl_T0_r12,
168 gen_op_movl_T0_r13,
169 gen_op_movl_T0_r14,
170 gen_op_movl_T0_r15,
173 gen_op_movl_T1_r0,
174 gen_op_movl_T1_r1,
175 gen_op_movl_T1_r2,
176 gen_op_movl_T1_r3,
177 gen_op_movl_T1_r4,
178 gen_op_movl_T1_r5,
179 gen_op_movl_T1_r6,
180 gen_op_movl_T1_r7,
181 gen_op_movl_T1_r8,
182 gen_op_movl_T1_r9,
183 gen_op_movl_T1_r10,
184 gen_op_movl_T1_r11,
185 gen_op_movl_T1_r12,
186 gen_op_movl_T1_r13,
187 gen_op_movl_T1_r14,
188 gen_op_movl_T1_r15,
191 gen_op_movl_T2_r0,
192 gen_op_movl_T2_r1,
193 gen_op_movl_T2_r2,
194 gen_op_movl_T2_r3,
195 gen_op_movl_T2_r4,
196 gen_op_movl_T2_r5,
197 gen_op_movl_T2_r6,
198 gen_op_movl_T2_r7,
199 gen_op_movl_T2_r8,
200 gen_op_movl_T2_r9,
201 gen_op_movl_T2_r10,
202 gen_op_movl_T2_r11,
203 gen_op_movl_T2_r12,
204 gen_op_movl_T2_r13,
205 gen_op_movl_T2_r14,
206 gen_op_movl_T2_r15,
210 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
212 gen_op_movl_r0_T0,
213 gen_op_movl_r1_T0,
214 gen_op_movl_r2_T0,
215 gen_op_movl_r3_T0,
216 gen_op_movl_r4_T0,
217 gen_op_movl_r5_T0,
218 gen_op_movl_r6_T0,
219 gen_op_movl_r7_T0,
220 gen_op_movl_r8_T0,
221 gen_op_movl_r9_T0,
222 gen_op_movl_r10_T0,
223 gen_op_movl_r11_T0,
224 gen_op_movl_r12_T0,
225 gen_op_movl_r13_T0,
226 gen_op_movl_r14_T0,
227 gen_op_movl_r15_T0,
230 gen_op_movl_r0_T1,
231 gen_op_movl_r1_T1,
232 gen_op_movl_r2_T1,
233 gen_op_movl_r3_T1,
234 gen_op_movl_r4_T1,
235 gen_op_movl_r5_T1,
236 gen_op_movl_r6_T1,
237 gen_op_movl_r7_T1,
238 gen_op_movl_r8_T1,
239 gen_op_movl_r9_T1,
240 gen_op_movl_r10_T1,
241 gen_op_movl_r11_T1,
242 gen_op_movl_r12_T1,
243 gen_op_movl_r13_T1,
244 gen_op_movl_r14_T1,
245 gen_op_movl_r15_T1,
249 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
250 gen_op_movl_T0_im,
251 gen_op_movl_T1_im,
252 gen_op_movl_T2_im,
255 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
256 gen_op_shll_T0_im_thumb,
257 gen_op_shrl_T0_im_thumb,
258 gen_op_sarl_T0_im_thumb,
261 static inline void gen_bx(DisasContext *s)
263 s->is_jmp = DISAS_UPDATE;
264 gen_op_bx_T0();
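/* Copy register 'reg' into scratch register Tt.  Reads of r15 must yield
   the architectural PC (current instruction address + 8 in ARM state,
   + 4 in Thumb state); since s->pc has already been advanced past the
   current instruction, only one more instruction length is added here. */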
267 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
269 int val;
271 if (reg == 15) {
272 /* normally, since we have already advanced the PC past this insn, we only need to add one more insn */
273 if (s->thumb)
274 val = (long)s->pc + 2;
275 else
276 val = (long)s->pc + 4;
277 gen_op_movl_TN_im[t](val);
278 } else {
279 gen_op_movl_TN_reg[t][reg]();
283 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
285 gen_movl_TN_reg(s, reg, 0);
288 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
290 gen_movl_TN_reg(s, reg, 1);
293 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
295 gen_movl_TN_reg(s, reg, 2);
298 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
300 gen_op_movl_reg_TN[t][reg]();
301 if (reg == 15) {
302 s->is_jmp = DISAS_JUMP;
306 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
308 gen_movl_reg_TN(s, reg, 0);
311 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
313 gen_movl_reg_TN(s, reg, 1);
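/* Decode ARM addressing mode 2 (word/unsigned byte load/store) and apply
   the offset to the address in T1: bit 25 selects the register form,
   bit 23 (U) selects add vs subtract, bits 11:0 hold the immediate, and
   in the register form bits 11:7 and 6:5 give the shift amount and shift
   type applied to Rm. */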
316 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
318 int val, rm, shift, shiftop;
320 if (!(insn & (1 << 25))) {
321 /* immediate */
322 val = insn & 0xfff;
323 if (!(insn & (1 << 23)))
324 val = -val;
325 if (val != 0)
326 gen_op_addl_T1_im(val);
327 } else {
328 /* shift/register */
329 rm = (insn) & 0xf;
330 shift = (insn >> 7) & 0x1f;
331 gen_movl_T2_reg(s, rm);
332 shiftop = (insn >> 5) & 3;
333 if (shift != 0) {
334 gen_shift_T2_im[shiftop](shift);
335 } else if (shiftop != 0) {
336 gen_shift_T2_0[shiftop]();
338 if (!(insn & (1 << 23)))
339 gen_op_subl_T1_T2();
340 else
341 gen_op_addl_T1_T2();
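/* Decode ARM addressing mode 3 (halfword/signed byte/doubleword
   load/store): bit 22 selects an 8-bit immediate split across bits 11:8
   and 3:0, otherwise a plain register offset is used; bit 23 again
   selects add vs subtract. */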
345 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
347 int val, rm;
349 if (insn & (1 << 22)) {
350 /* immediate */
351 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
352 if (!(insn & (1 << 23)))
353 val = -val;
354 if (val != 0)
355 gen_op_addl_T1_im(val);
356 } else {
357 /* register */
358 rm = (insn) & 0xf;
359 gen_movl_T2_reg(s, rm);
360 if (!(insn & (1 << 23)))
361 gen_op_subl_T1_T2();
362 else
363 gen_op_addl_T1_T2();
367 #define VFP_OP(name) \
368 static inline void gen_vfp_##name(int dp) \
370 if (dp) \
371 gen_op_vfp_##name##d(); \
372 else \
373 gen_op_vfp_##name##s(); \
376 VFP_OP(add)
377 VFP_OP(sub)
378 VFP_OP(mul)
379 VFP_OP(div)
380 VFP_OP(neg)
381 VFP_OP(abs)
382 VFP_OP(sqrt)
383 VFP_OP(cmp)
384 VFP_OP(cmpe)
385 VFP_OP(F1_ld0)
386 VFP_OP(uito)
387 VFP_OP(sito)
388 VFP_OP(toui)
389 VFP_OP(touiz)
390 VFP_OP(tosi)
391 VFP_OP(tosiz)
392 VFP_OP(ld)
393 VFP_OP(st)
395 #undef VFP_OP
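/* Byte offset of a VFP register within CPUARMState.  Single precision
   registers are stored as the 32-bit halves of the double precision
   registers, so an odd-numbered single register maps to the upper word
   of d(reg >> 1) and an even-numbered one to the lower word. */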
397 static inline long
398 vfp_reg_offset (int dp, int reg)
400 if (dp)
401 return offsetof(CPUARMState, vfp.regs[reg]);
402 else if (reg & 1) {
403 return offsetof(CPUARMState, vfp.regs[reg >> 1])
404 + offsetof(CPU_DoubleU, l.upper);
405 } else {
406 return offsetof(CPUARMState, vfp.regs[reg >> 1])
407 + offsetof(CPU_DoubleU, l.lower);
410 static inline void gen_mov_F0_vreg(int dp, int reg)
412 if (dp)
413 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
414 else
415 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
418 static inline void gen_mov_F1_vreg(int dp, int reg)
420 if (dp)
421 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
422 else
423 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
426 static inline void gen_mov_vreg_F0(int dp, int reg)
428 if (dp)
429 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
430 else
431 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
434 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
435    (i.e. an undefined instruction). */
436 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
438 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
439 int dp, veclen;
441 dp = ((insn & 0xf00) == 0xb00);
442 switch ((insn >> 24) & 0xf) {
443 case 0xe:
444 if (insn & (1 << 4)) {
445 /* single register transfer */
446 if ((insn & 0x6f) != 0x00)
447 return 1;
448 rd = (insn >> 12) & 0xf;
449 if (dp) {
450 if (insn & 0x80)
451 return 1;
452 rn = (insn >> 16) & 0xf;
453 /* Get the existing value even for arm->vfp moves because
454 we only set half the register. */
455 gen_mov_F0_vreg(1, rn);
456 gen_op_vfp_mrrd();
457 if (insn & (1 << 20)) {
458 /* vfp->arm */
459 if (insn & (1 << 21))
460 gen_movl_reg_T1(s, rd);
461 else
462 gen_movl_reg_T0(s, rd);
463 } else {
464 /* arm->vfp */
465 if (insn & (1 << 21))
466 gen_movl_T1_reg(s, rd);
467 else
468 gen_movl_T0_reg(s, rd);
469 gen_op_vfp_mdrr();
470 gen_mov_vreg_F0(dp, rn);
472 } else {
473 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
474 if (insn & (1 << 20)) {
475 /* vfp->arm */
476 if (insn & (1 << 21)) {
477 /* system register */
478 switch (rn) {
479 case 0: /* fpsid */
480 n = 0x0091A0000;
481 break;
482 case 2: /* fpscr */
483 if (rd == 15)
484 gen_op_vfp_movl_T0_fpscr_flags();
485 else
486 gen_op_vfp_movl_T0_fpscr();
487 break;
488 default:
489 return 1;
491 } else {
492 gen_mov_F0_vreg(0, rn);
493 gen_op_vfp_mrs();
495 if (rd == 15) {
496 /* This will only set the 4 flag bits */
497 gen_op_movl_psr_T0();
498 } else
499 gen_movl_reg_T0(s, rd);
500 } else {
501 /* arm->vfp */
502 gen_movl_T0_reg(s, rd);
503 if (insn & (1 << 21)) {
504 /* system register */
505 switch (rn) {
506 case 0: /* fpsid */
507 /* Writes are ignored. */
508 break;
509 case 2: /* fpscr */
510 gen_op_vfp_movl_fpscr_T0();
511 /* This could change vector settings, so jump to
512 the next instruction. */
513 gen_op_movl_T0_im(s->pc);
514 gen_movl_reg_T0(s, 15);
515 s->is_jmp = DISAS_UPDATE;
516 break;
517 default:
518 return 1;
520 } else {
521 gen_op_vfp_msr();
522 gen_mov_vreg_F0(0, rn);
526 } else {
527 /* data processing */
528 /* The opcode is in bits 23, 21, 20 and 6. */
529 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
530 if (dp) {
531 if (op == 15) {
532 /* rn is opcode */
533 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
534 } else {
535 /* rn is register number */
536 if (insn & (1 << 7))
537 return 1;
538 rn = (insn >> 16) & 0xf;
541 if (op == 15 && (rn == 15 || rn > 17)) {
542 /* Integer or single precision destination. */
543 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
544 } else {
545 if (insn & (1 << 22))
546 return 1;
547 rd = (insn >> 12) & 0xf;
550 if (op == 15 && (rn == 16 || rn == 17)) {
551 /* Integer source. */
552 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
553 } else {
554 if (insn & (1 << 5))
555 return 1;
556 rm = insn & 0xf;
558 } else {
559 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
560 if (op == 15 && rn == 15) {
561 /* Double precision destination. */
562 if (insn & (1 << 22))
563 return 1;
564 rd = (insn >> 12) & 0xf;
565 } else
566 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
567 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
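/* VFP short vector handling: if env->vfp.vec_len is nonzero and the
   destination register lies in a vector bank, the operation below is
   repeated veclen times, stepping rd/rn (and rm, unless rm is a scalar)
   by the configured stride while wrapping within the register bank
   selected by bank_mask. */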
570 veclen = env->vfp.vec_len;
571 if (op == 15 && rn > 3)
572 veclen = 0;
574 /* Shut up compiler warnings. */
575 delta_m = 0;
576 delta_d = 0;
577 bank_mask = 0;
579 if (veclen > 0) {
580 if (dp)
581 bank_mask = 0xc;
582 else
583 bank_mask = 0x18;
585 /* Figure out what type of vector operation this is. */
586 if ((rd & bank_mask) == 0) {
587 /* scalar */
588 veclen = 0;
589 } else {
590 if (dp)
591 delta_d = (env->vfp.vec_stride >> 1) + 1;
592 else
593 delta_d = env->vfp.vec_stride + 1;
595 if ((rm & bank_mask) == 0) {
596 /* mixed scalar/vector */
597 delta_m = 0;
598 } else {
599 /* vector */
600 delta_m = delta_d;
605 /* Load the initial operands. */
606 if (op == 15) {
607 switch (rn) {
608 case 16:
609 case 17:
610 /* Integer source */
611 gen_mov_F0_vreg(0, rm);
612 break;
613 case 8:
614 case 9:
615 /* Compare */
616 gen_mov_F0_vreg(dp, rd);
617 gen_mov_F1_vreg(dp, rm);
618 break;
619 case 10:
620 case 11:
621 /* Compare with zero */
622 gen_mov_F0_vreg(dp, rd);
623 gen_vfp_F1_ld0(dp);
624 break;
625 default:
626 /* One source operand. */
627 gen_mov_F0_vreg(dp, rm);
629 } else {
630 /* Two source operands. */
631 gen_mov_F0_vreg(dp, rn);
632 gen_mov_F1_vreg(dp, rm);
635 for (;;) {
636 /* Perform the calculation. */
637 switch (op) {
638 case 0: /* mac: fd + (fn * fm) */
639 gen_vfp_mul(dp);
640 gen_mov_F1_vreg(dp, rd);
641 gen_vfp_add(dp);
642 break;
643 case 1: /* nmac: fd - (fn * fm) */
644 gen_vfp_mul(dp);
645 gen_vfp_neg(dp);
646 gen_mov_F1_vreg(dp, rd);
647 gen_vfp_add(dp);
648 break;
649 case 2: /* msc: -fd + (fn * fm) */
650 gen_vfp_mul(dp);
651 gen_mov_F1_vreg(dp, rd);
652 gen_vfp_sub(dp);
653 break;
654 case 3: /* nmsc: -fd - (fn * fm) */
655 gen_vfp_mul(dp);
656 gen_mov_F1_vreg(dp, rd);
657 gen_vfp_add(dp);
658 gen_vfp_neg(dp);
659 break;
660 case 4: /* mul: fn * fm */
661 gen_vfp_mul(dp);
662 break;
663 case 5: /* nmul: -(fn * fm) */
664 gen_vfp_mul(dp);
665 gen_vfp_neg(dp);
666 break;
667 case 6: /* add: fn + fm */
668 gen_vfp_add(dp);
669 break;
670 case 7: /* sub: fn - fm */
671 gen_vfp_sub(dp);
672 break;
673 case 8: /* div: fn / fm */
674 gen_vfp_div(dp);
675 break;
676 case 15: /* extension space */
677 switch (rn) {
678 case 0: /* cpy */
679 /* no-op */
680 break;
681 case 1: /* abs */
682 gen_vfp_abs(dp);
683 break;
684 case 2: /* neg */
685 gen_vfp_neg(dp);
686 break;
687 case 3: /* sqrt */
688 gen_vfp_sqrt(dp);
689 break;
690 case 8: /* cmp */
691 gen_vfp_cmp(dp);
692 break;
693 case 9: /* cmpe */
694 gen_vfp_cmpe(dp);
695 break;
696 case 10: /* cmpz */
697 gen_vfp_cmp(dp);
698 break;
699 case 11: /* cmpez */
700 gen_vfp_F1_ld0(dp);
701 gen_vfp_cmpe(dp);
702 break;
703 case 15: /* single<->double conversion */
704 if (dp)
705 gen_op_vfp_fcvtsd();
706 else
707 gen_op_vfp_fcvtds();
708 break;
709 case 16: /* fuito */
710 gen_vfp_uito(dp);
711 break;
712 case 17: /* fsito */
713 gen_vfp_sito(dp);
714 break;
715 case 24: /* ftoui */
716 gen_vfp_toui(dp);
717 break;
718 case 25: /* ftouiz */
719 gen_vfp_touiz(dp);
720 break;
721 case 26: /* ftosi */
722 gen_vfp_tosi(dp);
723 break;
724 case 27: /* ftosiz */
725 gen_vfp_tosiz(dp);
726 break;
727 default: /* undefined */
728 printf ("rn:%d\n", rn);
729 return 1;
731 break;
732 default: /* undefined */
733 printf ("op:%d\n", op);
734 return 1;
737 /* Write back the result. */
738 if (op == 15 && (rn >= 8 && rn <= 11))
739 ; /* Comparison, do nothing. */
740 else if (op == 15 && rn > 17)
741 /* Integer result. */
742 gen_mov_vreg_F0(0, rd);
743 else if (op == 15 && rn == 15)
744 /* conversion */
745 gen_mov_vreg_F0(!dp, rd);
746 else
747 gen_mov_vreg_F0(dp, rd);
749 /* break out of the loop if we have finished */
750 if (veclen == 0)
751 break;
753 if (op == 15 && delta_m == 0) {
754 /* single source one-many */
755 while (veclen--) {
756 rd = ((rd + delta_d) & (bank_mask - 1))
757 | (rd & bank_mask);
758 gen_mov_vreg_F0(dp, rd);
760 break;
762 /* Setup the next operands. */
763 veclen--;
764 rd = ((rd + delta_d) & (bank_mask - 1))
765 | (rd & bank_mask);
767 if (op == 15) {
768 /* One source operand. */
769 rm = ((rm + delta_m) & (bank_mask - 1))
770 | (rm & bank_mask);
771 gen_mov_F0_vreg(dp, rm);
772 } else {
773 /* Two source operands. */
774 rn = ((rn + delta_d) & (bank_mask - 1))
775 | (rn & bank_mask);
776 gen_mov_F0_vreg(dp, rn);
777 if (delta_m) {
778 rm = ((rm + delta_m) & (bank_mask - 1))
779 | (rm & bank_mask);
780 gen_mov_F1_vreg(dp, rm);
785 break;
786 case 0xc:
787 case 0xd:
788 if (dp && (insn & (1 << 22))) {
789 /* two-register transfer */
790 rn = (insn >> 16) & 0xf;
791 rd = (insn >> 12) & 0xf;
792 if (dp) {
793 if (insn & (1 << 5))
794 return 1;
795 rm = insn & 0xf;
796 } else
797 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
799 if (insn & (1 << 20)) {
800 /* vfp->arm */
801 if (dp) {
802 gen_mov_F0_vreg(1, rm);
803 gen_op_vfp_mrrd();
804 gen_movl_reg_T0(s, rd);
805 gen_movl_reg_T1(s, rn);
806 } else {
807 gen_mov_F0_vreg(0, rm);
808 gen_op_vfp_mrs();
809 gen_movl_reg_T0(s, rn);
810 gen_mov_F0_vreg(0, rm + 1);
811 gen_op_vfp_mrs();
812 gen_movl_reg_T0(s, rd);
814 } else {
815 /* arm->vfp */
816 if (dp) {
817 gen_movl_T0_reg(s, rd);
818 gen_movl_T1_reg(s, rn);
819 gen_op_vfp_mdrr();
820 gen_mov_vreg_F0(1, rm);
821 } else {
822 gen_movl_T0_reg(s, rn);
823 gen_op_vfp_msr();
824 gen_mov_vreg_F0(0, rm);
825 gen_movl_T0_reg(s, rd);
826 gen_op_vfp_msr();
827 gen_mov_vreg_F0(0, rm + 1);
830 } else {
831 /* Load/store */
832 rn = (insn >> 16) & 0xf;
833 if (dp)
834 rd = (insn >> 12) & 0xf;
835 else
836 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
837 gen_movl_T1_reg(s, rn);
838 if ((insn & 0x01200000) == 0x01000000) {
839 /* Single load/store */
840 offset = (insn & 0xff) << 2;
841 if ((insn & (1 << 23)) == 0)
842 offset = -offset;
843 gen_op_addl_T1_im(offset);
844 if (insn & (1 << 20)) {
845 gen_vfp_ld(dp);
846 gen_mov_vreg_F0(dp, rd);
847 } else {
848 gen_mov_F0_vreg(dp, rd);
849 gen_vfp_st(dp);
851 } else {
852 /* load/store multiple */
853 if (dp)
854 n = (insn >> 1) & 0x7f;
855 else
856 n = insn & 0xff;
858 if (insn & (1 << 24)) /* pre-decrement */
859 gen_op_addl_T1_im(-((insn & 0xff) << 2));
861 if (dp)
862 offset = 8;
863 else
864 offset = 4;
865 for (i = 0; i < n; i++) {
866 if (insn & (1 << 20)) {
867 /* load */
868 gen_vfp_ld(dp);
869 gen_mov_vreg_F0(dp, rd + i);
870 } else {
871 /* store */
872 gen_mov_F0_vreg(dp, rd + i);
873 gen_vfp_st(dp);
875 gen_op_addl_T1_im(offset);
877 if (insn & (1 << 21)) {
878 /* writeback */
879 if (insn & (1 << 24))
880 offset = -offset * n;
881 else if (dp && (insn & 1))
882 offset = 4;
883 else
884 offset = 0;
886 if (offset != 0)
887 gen_op_addl_T1_im(offset);
888 gen_movl_reg_T1(s, rn);
892 break;
893 default:
894 /* Should never happen. */
895 return 1;
897 return 0;
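/* Emit an unconditional jump to 'dest'.  Normally this is a direct
   (chainable) jump to the target TB; under single-stepping an indirect
   jump via gen_bx() is used instead so that the debug exception is still
   raised after the branch. */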
900 static inline void gen_jmp (DisasContext *s, uint32_t dest)
902 if (__builtin_expect(s->singlestep_enabled, 0)) {
903 /* An indirect jump so that we still trigger the debug exception. */
904 if (s->thumb)
905 dest |= 1;
906 gen_op_movl_T0_im(dest);
907 gen_bx(s);
908 } else {
909 gen_op_jmp0((long)s->tb, dest);
910 s->is_jmp = DISAS_TB_JUMP;
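/* Translate one 32-bit ARM instruction.  The condition field is handled
   first (cond == 0xf selects the unconditional instruction space), then
   the instruction is dispatched on its major opcode bits. */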
914 static void disas_arm_insn(CPUState * env, DisasContext *s)
916 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
918 insn = ldl(s->pc);
919 s->pc += 4;
921 cond = insn >> 28;
922 if (cond == 0xf){
923 /* Unconditional instructions. */
924 if ((insn & 0x0d70f000) == 0x0550f000)
925 return; /* PLD */
926 else if ((insn & 0x0e000000) == 0x0a000000) {
927 /* branch link and change to thumb (blx <offset>) */
928 int32_t offset;
930 val = (uint32_t)s->pc;
931 gen_op_movl_T0_im(val);
932 gen_movl_reg_T0(s, 14);
933 /* Sign-extend the 24-bit offset */
934 offset = (((int32_t)insn) << 8) >> 8;
935 /* offset * 4 + bit24 * 2 + (thumb bit) */
936 val += (offset << 2) | ((insn >> 23) & 2) | 1;
937 /* pipeline offset */
938 val += 4;
939 gen_op_movl_T0_im(val);
940 gen_bx(s);
941 return;
942 } else if ((insn & 0x0fe00000) == 0x0c400000) {
943 /* Coprocessor double register transfer. */
944 } else if ((insn & 0x0f000010) == 0x0e000010) {
945 /* Additional coprocessor register transfer. */
947 goto illegal_op;
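/* Conditionally executed instruction: emit a test of the inverted
   condition that branches to condlabel, then translate the instruction
   body; condlabel is resolved afterwards in the main translation loop so
   a skipped instruction simply falls through to the next one. */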
949 if (cond != 0xe) {
950 /* if not always-execute, generate a conditional jump to the
951 next instruction */
952 s->condlabel = gen_new_label();
953 gen_test_cc[cond ^ 1](s->condlabel);
954 s->condjmp = 1;
955 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
956 //s->is_jmp = DISAS_JUMP_NEXT;
958 if ((insn & 0x0f900000) == 0x03000000) {
959 if ((insn & 0x0ff0f000) != 0x0360f000)
960 goto illegal_op;
961 /* CPSR = immediate */
962 val = insn & 0xff;
963 shift = ((insn >> 8) & 0xf) * 2;
964 if (shift)
965 val = (val >> shift) | (val << (32 - shift));
966 gen_op_movl_T0_im(val);
967 if (insn & (1 << 19))
968 gen_op_movl_psr_T0();
969 } else if ((insn & 0x0f900000) == 0x01000000
970 && (insn & 0x00000090) != 0x00000090) {
971 /* miscellaneous instructions */
972 op1 = (insn >> 21) & 3;
973 sh = (insn >> 4) & 0xf;
974 rm = insn & 0xf;
975 switch (sh) {
976 case 0x0: /* move program status register */
977 if (op1 & 2) {
978 /* SPSR not accessible in user mode */
979 goto illegal_op;
981 if (op1 & 1) {
982 /* CPSR = reg */
983 gen_movl_T0_reg(s, rm);
984 if (insn & (1 << 19))
985 gen_op_movl_psr_T0();
986 } else {
987 /* reg = CPSR */
988 rd = (insn >> 12) & 0xf;
989 gen_op_movl_T0_psr();
990 gen_movl_reg_T0(s, rd);
992 break;
993 case 0x1:
994 if (op1 == 1) {
995 /* branch/exchange thumb (bx). */
996 gen_movl_T0_reg(s, rm);
997 gen_bx(s);
998 } else if (op1 == 3) {
999 /* clz */
1000 rd = (insn >> 12) & 0xf;
1001 gen_movl_T0_reg(s, rm);
1002 gen_op_clz_T0();
1003 gen_movl_reg_T0(s, rd);
1004 } else {
1005 goto illegal_op;
1007 break;
1008 case 0x3:
1009 if (op1 != 1)
1010 goto illegal_op;
1012 /* branch link/exchange thumb (blx) */
1013 val = (uint32_t)s->pc;
1014 gen_op_movl_T0_im(val);
1015 gen_movl_reg_T0(s, 14);
1016 gen_movl_T0_reg(s, rm);
1017 gen_bx(s);
1018 break;
1019 case 0x5: /* saturating add/subtract */
1020 rd = (insn >> 12) & 0xf;
1021 rn = (insn >> 16) & 0xf;
1022 gen_movl_T0_reg(s, rn);
1023 if (op1 & 2) {
1024 gen_movl_T1_reg(s, rn);
1025 if (op1 & 1)
1026 gen_op_subl_T0_T1_saturate();
1027 else
1028 gen_op_addl_T0_T1_saturate();
1030 gen_movl_T1_reg(s, rm);
1031 if (op1 & 1)
1032 gen_op_subl_T0_T1_saturate();
1033 else
1034 gen_op_addl_T0_T1_saturate();
1035 gen_movl_reg_T0(s, rn);
1036 break;
1037 case 0x8: /* signed multiply */
1038 case 0xa:
1039 case 0xc:
1040 case 0xe:
1041 rs = (insn >> 8) & 0xf;
1042 rn = (insn >> 12) & 0xf;
1043 rd = (insn >> 16) & 0xf;
1044 if (op1 == 1) {
1045 /* (32 * 16) >> 16 */
1046 gen_movl_T0_reg(s, rm);
1047 gen_movl_T1_reg(s, rs);
1048 if (sh & 4)
1049 gen_op_sarl_T1_im(16);
1050 else
1051 gen_op_sxl_T1();
1052 gen_op_imulw_T0_T1();
1053 if ((sh & 2) == 0) {
1054 gen_movl_T1_reg(s, rn);
1055 gen_op_addl_T0_T1_setq();
1057 gen_movl_reg_T0(s, rd);
1058 } else {
1059 /* 16 * 16 */
1060 gen_movl_T0_reg(s, rm);
1061 if (sh & 2)
1062 gen_op_sarl_T0_im(16);
1063 else
1064 gen_op_sxl_T0();
1065 gen_movl_T1_reg(s, rs);
1066 if (sh & 4)
1067 gen_op_sarl_T1_im(16);
1068 else
1069 gen_op_sxl_T1();
1070 if (op1 == 2) {
1071 gen_op_imull_T0_T1();
1072 gen_op_addq_T0_T1(rn, rd);
1073 gen_movl_reg_T0(s, rn);
1074 gen_movl_reg_T1(s, rd);
1075 } else {
1076 gen_op_mul_T0_T1();
1077 if (op1 == 0) {
1078 gen_movl_T1_reg(s, rn);
1079 gen_op_addl_T0_T1_setq();
1081 gen_movl_reg_T0(s, rd);
1084 break;
1085 default:
1086 goto illegal_op;
1088 } else if (((insn & 0x0e000000) == 0 &&
1089 (insn & 0x00000090) != 0x90) ||
1090 ((insn & 0x0e000000) == (1 << 25))) {
1091 int set_cc, logic_cc, shiftop;
1093 op1 = (insn >> 21) & 0xf;
1094 set_cc = (insn >> 20) & 1;
1095 logic_cc = table_logic_cc[op1] & set_cc;
1097 /* data processing instruction */
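/* Immediate operand: an 8-bit value rotated right by twice the 4-bit
   rotate field.  For flag-setting logical operations with a nonzero
   rotation, the carry flag is also updated from the rotated immediate
   (the shifter carry-out). */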
1098 if (insn & (1 << 25)) {
1099 /* immediate operand */
1100 val = insn & 0xff;
1101 shift = ((insn >> 8) & 0xf) * 2;
1102 if (shift)
1103 val = (val >> shift) | (val << (32 - shift));
1104 gen_op_movl_T1_im(val);
1105 if (logic_cc && shift)
1106 gen_op_mov_CF_T1();
1107 } else {
1108 /* register */
1109 rm = (insn) & 0xf;
1110 gen_movl_T1_reg(s, rm);
1111 shiftop = (insn >> 5) & 3;
1112 if (!(insn & (1 << 4))) {
1113 shift = (insn >> 7) & 0x1f;
1114 if (shift != 0) {
1115 if (logic_cc) {
1116 gen_shift_T1_im_cc[shiftop](shift);
1117 } else {
1118 gen_shift_T1_im[shiftop](shift);
1120 } else if (shiftop != 0) {
1121 if (logic_cc) {
1122 gen_shift_T1_0_cc[shiftop]();
1123 } else {
1124 gen_shift_T1_0[shiftop]();
1127 } else {
1128 rs = (insn >> 8) & 0xf;
1129 gen_movl_T0_reg(s, rs);
1130 if (logic_cc) {
1131 gen_shift_T1_T0_cc[shiftop]();
1132 } else {
1133 gen_shift_T1_T0[shiftop]();
1137 if (op1 != 0x0f && op1 != 0x0d) {
1138 rn = (insn >> 16) & 0xf;
1139 gen_movl_T0_reg(s, rn);
1141 rd = (insn >> 12) & 0xf;
1142 switch(op1) {
1143 case 0x00:
1144 gen_op_andl_T0_T1();
1145 gen_movl_reg_T0(s, rd);
1146 if (logic_cc)
1147 gen_op_logic_T0_cc();
1148 break;
1149 case 0x01:
1150 gen_op_xorl_T0_T1();
1151 gen_movl_reg_T0(s, rd);
1152 if (logic_cc)
1153 gen_op_logic_T0_cc();
1154 break;
1155 case 0x02:
1156 if (set_cc)
1157 gen_op_subl_T0_T1_cc();
1158 else
1159 gen_op_subl_T0_T1();
1160 gen_movl_reg_T0(s, rd);
1161 break;
1162 case 0x03:
1163 if (set_cc)
1164 gen_op_rsbl_T0_T1_cc();
1165 else
1166 gen_op_rsbl_T0_T1();
1167 gen_movl_reg_T0(s, rd);
1168 break;
1169 case 0x04:
1170 if (set_cc)
1171 gen_op_addl_T0_T1_cc();
1172 else
1173 gen_op_addl_T0_T1();
1174 gen_movl_reg_T0(s, rd);
1175 break;
1176 case 0x05:
1177 if (set_cc)
1178 gen_op_adcl_T0_T1_cc();
1179 else
1180 gen_op_adcl_T0_T1();
1181 gen_movl_reg_T0(s, rd);
1182 break;
1183 case 0x06:
1184 if (set_cc)
1185 gen_op_sbcl_T0_T1_cc();
1186 else
1187 gen_op_sbcl_T0_T1();
1188 gen_movl_reg_T0(s, rd);
1189 break;
1190 case 0x07:
1191 if (set_cc)
1192 gen_op_rscl_T0_T1_cc();
1193 else
1194 gen_op_rscl_T0_T1();
1195 gen_movl_reg_T0(s, rd);
1196 break;
1197 case 0x08:
1198 if (set_cc) {
1199 gen_op_andl_T0_T1();
1200 gen_op_logic_T0_cc();
1202 break;
1203 case 0x09:
1204 if (set_cc) {
1205 gen_op_xorl_T0_T1();
1206 gen_op_logic_T0_cc();
1208 break;
1209 case 0x0a:
1210 if (set_cc) {
1211 gen_op_subl_T0_T1_cc();
1213 break;
1214 case 0x0b:
1215 if (set_cc) {
1216 gen_op_addl_T0_T1_cc();
1218 break;
1219 case 0x0c:
1220 gen_op_orl_T0_T1();
1221 gen_movl_reg_T0(s, rd);
1222 if (logic_cc)
1223 gen_op_logic_T0_cc();
1224 break;
1225 case 0x0d:
1226 gen_movl_reg_T1(s, rd);
1227 if (logic_cc)
1228 gen_op_logic_T1_cc();
1229 break;
1230 case 0x0e:
1231 gen_op_bicl_T0_T1();
1232 gen_movl_reg_T0(s, rd);
1233 if (logic_cc)
1234 gen_op_logic_T0_cc();
1235 break;
1236 default:
1237 case 0x0f:
1238 gen_op_notl_T1();
1239 gen_movl_reg_T1(s, rd);
1240 if (logic_cc)
1241 gen_op_logic_T1_cc();
1242 break;
1244 } else {
1245 /* other instructions */
1246 op1 = (insn >> 24) & 0xf;
1247 switch(op1) {
1248 case 0x0:
1249 case 0x1:
1250 /* multiplies, extra load/stores */
1251 sh = (insn >> 5) & 3;
1252 if (sh == 0) {
1253 if (op1 == 0x0) {
1254 rd = (insn >> 16) & 0xf;
1255 rn = (insn >> 12) & 0xf;
1256 rs = (insn >> 8) & 0xf;
1257 rm = (insn) & 0xf;
1258 if (((insn >> 22) & 3) == 0) {
1259 /* 32 bit mul */
1260 gen_movl_T0_reg(s, rs);
1261 gen_movl_T1_reg(s, rm);
1262 gen_op_mul_T0_T1();
1263 if (insn & (1 << 21)) {
1264 gen_movl_T1_reg(s, rn);
1265 gen_op_addl_T0_T1();
1267 if (insn & (1 << 20))
1268 gen_op_logic_T0_cc();
1269 gen_movl_reg_T0(s, rd);
1270 } else {
1271 /* 64 bit mul */
1272 gen_movl_T0_reg(s, rs);
1273 gen_movl_T1_reg(s, rm);
1274 if (insn & (1 << 22))
1275 gen_op_imull_T0_T1();
1276 else
1277 gen_op_mull_T0_T1();
1278 if (insn & (1 << 21)) /* mult accumulate */
1279 gen_op_addq_T0_T1(rn, rd);
1280 if (!(insn & (1 << 23))) { /* double accumulate */
1281 gen_op_addq_lo_T0_T1(rn);
1282 gen_op_addq_lo_T0_T1(rd);
1284 if (insn & (1 << 20))
1285 gen_op_logicq_cc();
1286 gen_movl_reg_T0(s, rn);
1287 gen_movl_reg_T1(s, rd);
1289 } else {
1290 rn = (insn >> 16) & 0xf;
1291 rd = (insn >> 12) & 0xf;
1292 if (insn & (1 << 23)) {
1293 /* load/store exclusive */
1294 goto illegal_op;
1295 } else {
1296 /* SWP instruction */
1297 rm = (insn) & 0xf;
1299 gen_movl_T0_reg(s, rm);
1300 gen_movl_T1_reg(s, rn);
1301 if (insn & (1 << 22)) {
1302 gen_op_swpb_T0_T1();
1303 } else {
1304 gen_op_swpl_T0_T1();
1306 gen_movl_reg_T0(s, rd);
1309 } else {
1310 /* Misc load/store */
1311 rn = (insn >> 16) & 0xf;
1312 rd = (insn >> 12) & 0xf;
1313 gen_movl_T1_reg(s, rn);
1314 if (insn & (1 << 24))
1315 gen_add_datah_offset(s, insn);
1316 if (insn & (1 << 20)) {
1317 /* load */
1318 switch(sh) {
1319 case 1:
1320 gen_op_lduw_T0_T1();
1321 break;
1322 case 2:
1323 gen_op_ldsb_T0_T1();
1324 break;
1325 default:
1326 case 3:
1327 gen_op_ldsw_T0_T1();
1328 break;
1330 gen_movl_reg_T0(s, rd);
1331 } else if (sh & 2) {
1332 /* doubleword */
1333 if (sh & 1) {
1334 /* store */
1335 gen_movl_T0_reg(s, rd);
1336 gen_op_stl_T0_T1();
1337 gen_op_addl_T1_im(4);
1338 gen_movl_T0_reg(s, rd + 1);
1339 gen_op_stl_T0_T1();
1340 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1341 gen_op_addl_T1_im(-4);
1342 } else {
1343 /* load */
1344 gen_op_ldl_T0_T1();
1345 gen_movl_reg_T0(s, rd);
1346 gen_op_addl_T1_im(4);
1347 gen_op_ldl_T0_T1();
1348 gen_movl_reg_T0(s, rd + 1);
1349 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1350 gen_op_addl_T1_im(-4);
1352 } else {
1353 /* store */
1354 gen_movl_T0_reg(s, rd);
1355 gen_op_stw_T0_T1();
1357 if (!(insn & (1 << 24))) {
1358 gen_add_datah_offset(s, insn);
1359 gen_movl_reg_T1(s, rn);
1360 } else if (insn & (1 << 21)) {
1361 gen_movl_reg_T1(s, rn);
1364 break;
1365 case 0x4:
1366 case 0x5:
1367 case 0x6:
1368 case 0x7:
1369 /* load/store byte/word */
1370 rn = (insn >> 16) & 0xf;
1371 rd = (insn >> 12) & 0xf;
1372 gen_movl_T1_reg(s, rn);
1373 if (insn & (1 << 24))
1374 gen_add_data_offset(s, insn);
1375 if (insn & (1 << 20)) {
1376 /* load */
1377 if (insn & (1 << 22))
1378 gen_op_ldub_T0_T1();
1379 else
1380 gen_op_ldl_T0_T1();
1381 if (rd == 15)
1382 gen_bx(s);
1383 else
1384 gen_movl_reg_T0(s, rd);
1385 } else {
1386 /* store */
1387 gen_movl_T0_reg(s, rd);
1388 if (insn & (1 << 22))
1389 gen_op_stb_T0_T1();
1390 else
1391 gen_op_stl_T0_T1();
1393 if (!(insn & (1 << 24))) {
1394 gen_add_data_offset(s, insn);
1395 gen_movl_reg_T1(s, rn);
1396 } else if (insn & (1 << 21))
1397 gen_movl_reg_T1(s, rn);
1399 break;
1400 case 0x08:
1401 case 0x09:
1403 int j, n;
1404 /* load/store multiple words */
1405 /* XXX: store correct base if write back */
1406 if (insn & (1 << 22))
1407 goto illegal_op; /* only usable in supervisor mode */
1408 rn = (insn >> 16) & 0xf;
1409 gen_movl_T1_reg(s, rn);
1411 /* compute total size */
1412 n = 0;
1413 for(i=0;i<16;i++) {
1414 if (insn & (1 << i))
1415 n++;
1417 /* XXX: test invalid n == 0 case ? */
1418 if (insn & (1 << 23)) {
1419 if (insn & (1 << 24)) {
1420 /* pre increment */
1421 gen_op_addl_T1_im(4);
1422 } else {
1423 /* post increment */
1425 } else {
1426 if (insn & (1 << 24)) {
1427 /* pre decrement */
1428 gen_op_addl_T1_im(-(n * 4));
1429 } else {
1430 /* post decrement */
1431 if (n != 1)
1432 gen_op_addl_T1_im(-((n - 1) * 4));
1435 j = 0;
1436 for(i=0;i<16;i++) {
1437 if (insn & (1 << i)) {
1438 if (insn & (1 << 20)) {
1439 /* load */
1440 gen_op_ldl_T0_T1();
1441 if (i == 15)
1442 gen_bx(s);
1443 else
1444 gen_movl_reg_T0(s, i);
1445 } else {
1446 /* store */
1447 if (i == 15) {
1448 /* special case: r15 = PC + 12 */
1449 val = (long)s->pc + 8;
1450 gen_op_movl_TN_im[0](val);
1451 } else {
1452 gen_movl_T0_reg(s, i);
1454 gen_op_stl_T0_T1();
1456 j++;
1457 /* no need to add after the last transfer */
1458 if (j != n)
1459 gen_op_addl_T1_im(4);
1462 if (insn & (1 << 21)) {
1463 /* write back */
1464 if (insn & (1 << 23)) {
1465 if (insn & (1 << 24)) {
1466 /* pre increment */
1467 } else {
1468 /* post increment */
1469 gen_op_addl_T1_im(4);
1471 } else {
1472 if (insn & (1 << 24)) {
1473 /* pre decrement */
1474 if (n != 1)
1475 gen_op_addl_T1_im(-((n - 1) * 4));
1476 } else {
1477 /* post decrement */
1478 gen_op_addl_T1_im(-(n * 4));
1481 gen_movl_reg_T1(s, rn);
1484 break;
1485 case 0xa:
1486 case 0xb:
1488 int32_t offset;
1490 /* branch (and link) */
1491 val = (int32_t)s->pc;
1492 if (insn & (1 << 24)) {
1493 gen_op_movl_T0_im(val);
1494 gen_op_movl_reg_TN[0][14]();
1496 offset = (((int32_t)insn << 8) >> 8);
1497 val += (offset << 2) + 4;
1498 gen_jmp(s, val);
1500 break;
1501 case 0xc:
1502 case 0xd:
1503 case 0xe:
1504 /* Coprocessor. */
1505 op1 = (insn >> 8) & 0xf;
1506 switch (op1) {
1507 case 10:
1508 case 11:
1509 if (disas_vfp_insn (env, s, insn))
1510 goto illegal_op;
1511 break;
1512 default:
1513 /* unknown coprocessor. */
1514 goto illegal_op;
1516 break;
1517 case 0xf:
1518 /* swi */
1519 gen_op_movl_T0_im((long)s->pc);
1520 gen_op_movl_reg_TN[0][15]();
1521 gen_op_swi();
1522 s->is_jmp = DISAS_JUMP;
1523 break;
1524 default:
1525 illegal_op:
1526 gen_op_movl_T0_im((long)s->pc - 4);
1527 gen_op_movl_reg_TN[0][15]();
1528 gen_op_undef_insn();
1529 s->is_jmp = DISAS_JUMP;
1530 break;
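/* Translate one 16-bit Thumb instruction.  The top bits of the halfword
   (insn >> 12, refined by a few sub-fields) select the Thumb instruction
   format handled by each case below. */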
1535 static void disas_thumb_insn(DisasContext *s)
1537 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1538 int32_t offset;
1539 int i;
1541 insn = lduw(s->pc);
1542 s->pc += 2;
1544 switch (insn >> 12) {
1545 case 0: case 1:
1546 rd = insn & 7;
1547 op = (insn >> 11) & 3;
1548 if (op == 3) {
1549 /* add/subtract */
1550 rn = (insn >> 3) & 7;
1551 gen_movl_T0_reg(s, rn);
1552 if (insn & (1 << 10)) {
1553 /* immediate */
1554 gen_op_movl_T1_im((insn >> 6) & 7);
1555 } else {
1556 /* reg */
1557 rm = (insn >> 6) & 7;
1558 gen_movl_T1_reg(s, rm);
1560 if (insn & (1 << 9))
1561 gen_op_subl_T0_T1_cc();
1562 else
1563 gen_op_addl_T0_T1_cc();
1564 gen_movl_reg_T0(s, rd);
1565 } else {
1566 /* shift immediate */
1567 rm = (insn >> 3) & 7;
1568 shift = (insn >> 6) & 0x1f;
1569 gen_movl_T0_reg(s, rm);
1570 gen_shift_T0_im_thumb[op](shift);
1571 gen_movl_reg_T0(s, rd);
1573 break;
1574 case 2: case 3:
1575 /* arithmetic large immediate */
1576 op = (insn >> 11) & 3;
1577 rd = (insn >> 8) & 0x7;
1578 if (op == 0) {
1579 gen_op_movl_T0_im(insn & 0xff);
1580 } else {
1581 gen_movl_T0_reg(s, rd);
1582 gen_op_movl_T1_im(insn & 0xff);
1584 switch (op) {
1585 case 0: /* mov */
1586 gen_op_logic_T0_cc();
1587 break;
1588 case 1: /* cmp */
1589 gen_op_subl_T0_T1_cc();
1590 break;
1591 case 2: /* add */
1592 gen_op_addl_T0_T1_cc();
1593 break;
1594 case 3: /* sub */
1595 gen_op_subl_T0_T1_cc();
1596 break;
1598 if (op != 1)
1599 gen_movl_reg_T0(s, rd);
1600 break;
1601 case 4:
1602 if (insn & (1 << 11)) {
1603 rd = (insn >> 8) & 7;
1604 /* load pc-relative. Bit 1 of PC is ignored. */
1605 val = s->pc + 2 + ((insn & 0xff) * 4);
1606 val &= ~(uint32_t)2;
1607 gen_op_movl_T1_im(val);
1608 gen_op_ldl_T0_T1();
1609 gen_movl_reg_T0(s, rd);
1610 break;
1612 if (insn & (1 << 10)) {
1613 /* data processing extended or blx */
1614 rd = (insn & 7) | ((insn >> 4) & 8);
1615 rm = (insn >> 3) & 0xf;
1616 op = (insn >> 8) & 3;
1617 switch (op) {
1618 case 0: /* add */
1619 gen_movl_T0_reg(s, rd);
1620 gen_movl_T1_reg(s, rm);
1621 gen_op_addl_T0_T1();
1622 gen_movl_reg_T0(s, rd);
1623 break;
1624 case 1: /* cmp */
1625 gen_movl_T0_reg(s, rd);
1626 gen_movl_T1_reg(s, rm);
1627 gen_op_subl_T0_T1_cc();
1628 break;
1629 case 2: /* mov/cpy */
1630 gen_movl_T0_reg(s, rm);
1631 gen_movl_reg_T0(s, rd);
1632 break;
1633 case 3:/* branch [and link] exchange thumb register */
1634 if (insn & (1 << 7)) {
1635 val = (uint32_t)s->pc | 1;
1636 gen_op_movl_T1_im(val);
1637 gen_movl_reg_T1(s, 14);
1639 gen_movl_T0_reg(s, rm);
1640 gen_bx(s);
1641 break;
1643 break;
1646 /* data processing register */
1647 rd = insn & 7;
1648 rm = (insn >> 3) & 7;
1649 op = (insn >> 6) & 0xf;
1650 if (op == 2 || op == 3 || op == 4 || op == 7) {
1651 /* the shift/rotate ops want the operands backwards */
1652 val = rm;
1653 rm = rd;
1654 rd = val;
1655 val = 1;
1656 } else {
1657 val = 0;
1660 if (op == 9) /* neg */
1661 gen_op_movl_T0_im(0);
1662 else if (op != 0xf) /* mvn doesn't read its first operand */
1663 gen_movl_T0_reg(s, rd);
1665 gen_movl_T1_reg(s, rm);
1666 switch (op) {
1667 case 0x0: /* and */
1668 gen_op_andl_T0_T1();
1669 gen_op_logic_T0_cc();
1670 break;
1671 case 0x1: /* eor */
1672 gen_op_xorl_T0_T1();
1673 gen_op_logic_T0_cc();
1674 break;
1675 case 0x2: /* lsl */
1676 gen_op_shll_T1_T0_cc();
1677 break;
1678 case 0x3: /* lsr */
1679 gen_op_shrl_T1_T0_cc();
1680 break;
1681 case 0x4: /* asr */
1682 gen_op_sarl_T1_T0_cc();
1683 break;
1684 case 0x5: /* adc */
1685 gen_op_adcl_T0_T1_cc();
1686 break;
1687 case 0x6: /* sbc */
1688 gen_op_sbcl_T0_T1_cc();
1689 break;
1690 case 0x7: /* ror */
1691 gen_op_rorl_T1_T0_cc();
1692 break;
1693 case 0x8: /* tst */
1694 gen_op_andl_T0_T1();
1695 gen_op_logic_T0_cc();
1696 rd = 16;
1697 break;
1698 case 0x9: /* neg */
1699 gen_op_subl_T0_T1_cc();
1700 break;
1701 case 0xa: /* cmp */
1702 gen_op_subl_T0_T1_cc();
1703 rd = 16;
1704 break;
1705 case 0xb: /* cmn */
1706 gen_op_addl_T0_T1_cc();
1707 rd = 16;
1708 break;
1709 case 0xc: /* orr */
1710 gen_op_orl_T0_T1();
1711 gen_op_logic_T0_cc();
1712 break;
1713 case 0xd: /* mul */
1714 gen_op_mull_T0_T1();
1715 gen_op_logic_T0_cc();
1716 break;
1717 case 0xe: /* bic */
1718 gen_op_bicl_T0_T1();
1719 gen_op_logic_T0_cc();
1720 break;
1721 case 0xf: /* mvn */
1722 gen_op_notl_T1();
1723 gen_op_logic_T1_cc();
1724 val = 1;
1725 rm = rd;
1726 break;
1728 if (rd != 16) {
1729 if (val)
1730 gen_movl_reg_T1(s, rm);
1731 else
1732 gen_movl_reg_T0(s, rd);
1734 break;
1736 case 5:
1737 /* load/store register offset. */
1738 rd = insn & 7;
1739 rn = (insn >> 3) & 7;
1740 rm = (insn >> 6) & 7;
1741 op = (insn >> 9) & 7;
1742 gen_movl_T1_reg(s, rn);
1743 gen_movl_T2_reg(s, rm);
1744 gen_op_addl_T1_T2();
1746 if (op < 3) /* store */
1747 gen_movl_T0_reg(s, rd);
1749 switch (op) {
1750 case 0: /* str */
1751 gen_op_stl_T0_T1();
1752 break;
1753 case 1: /* strh */
1754 gen_op_stw_T0_T1();
1755 break;
1756 case 2: /* strb */
1757 gen_op_stb_T0_T1();
1758 break;
1759 case 3: /* ldrsb */
1760 gen_op_ldsb_T0_T1();
1761 break;
1762 case 4: /* ldr */
1763 gen_op_ldl_T0_T1();
1764 break;
1765 case 5: /* ldrh */
1766 gen_op_lduw_T0_T1();
1767 break;
1768 case 6: /* ldrb */
1769 gen_op_ldub_T0_T1();
1770 break;
1771 case 7: /* ldrsh */
1772 gen_op_ldsw_T0_T1();
1773 break;
1775 if (op >= 3) /* load */
1776 gen_movl_reg_T0(s, rd);
1777 break;
1779 case 6:
1780 /* load/store word immediate offset */
1781 rd = insn & 7;
1782 rn = (insn >> 3) & 7;
1783 gen_movl_T1_reg(s, rn);
1784 val = (insn >> 4) & 0x7c;
1785 gen_op_movl_T2_im(val);
1786 gen_op_addl_T1_T2();
1788 if (insn & (1 << 11)) {
1789 /* load */
1790 gen_op_ldl_T0_T1();
1791 gen_movl_reg_T0(s, rd);
1792 } else {
1793 /* store */
1794 gen_movl_T0_reg(s, rd);
1795 gen_op_stl_T0_T1();
1797 break;
1799 case 7:
1800 /* load/store byte immediate offset */
1801 rd = insn & 7;
1802 rn = (insn >> 3) & 7;
1803 gen_movl_T1_reg(s, rn);
1804 val = (insn >> 6) & 0x1f;
1805 gen_op_movl_T2_im(val);
1806 gen_op_addl_T1_T2();
1808 if (insn & (1 << 11)) {
1809 /* load */
1810 gen_op_ldub_T0_T1();
1811 gen_movl_reg_T0(s, rd);
1812 } else {
1813 /* store */
1814 gen_movl_T0_reg(s, rd);
1815 gen_op_stb_T0_T1();
1817 break;
1819 case 8:
1820 /* load/store halfword immediate offset */
1821 rd = insn & 7;
1822 rn = (insn >> 3) & 7;
1823 gen_movl_T1_reg(s, rn);
1824 val = (insn >> 5) & 0x3e;
1825 gen_op_movl_T2_im(val);
1826 gen_op_addl_T1_T2();
1828 if (insn & (1 << 11)) {
1829 /* load */
1830 gen_op_lduw_T0_T1();
1831 gen_movl_reg_T0(s, rd);
1832 } else {
1833 /* store */
1834 gen_movl_T0_reg(s, rd);
1835 gen_op_stw_T0_T1();
1837 break;
1839 case 9:
1840 /* load/store from stack */
1841 rd = (insn >> 8) & 7;
1842 gen_movl_T1_reg(s, 13);
1843 val = (insn & 0xff) * 4;
1844 gen_op_movl_T2_im(val);
1845 gen_op_addl_T1_T2();
1847 if (insn & (1 << 11)) {
1848 /* load */
1849 gen_op_ldl_T0_T1();
1850 gen_movl_reg_T0(s, rd);
1851 } else {
1852 /* store */
1853 gen_movl_T0_reg(s, rd);
1854 gen_op_stl_T0_T1();
1856 break;
1858 case 10:
1859 /* add to high reg */
1860 rd = (insn >> 8) & 7;
1861 if (insn & (1 << 11)) {
1862 /* SP */
1863 gen_movl_T0_reg(s, 13);
1864 } else {
1865 /* PC. bit 1 is ignored. */
1866 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
1868 val = (insn & 0xff) * 4;
1869 gen_op_movl_T1_im(val);
1870 gen_op_addl_T0_T1();
1871 gen_movl_reg_T0(s, rd);
1872 break;
1874 case 11:
1875 /* misc */
1876 op = (insn >> 8) & 0xf;
1877 switch (op) {
1878 case 0:
1879 /* adjust stack pointer */
1880 gen_movl_T1_reg(s, 13);
1881 val = (insn & 0x7f) * 4;
1882 if (insn & (1 << 7))
1883 val = -(int32_t)val;
1884 gen_op_movl_T2_im(val);
1885 gen_op_addl_T1_T2();
1886 gen_movl_reg_T1(s, 13);
1887 break;
1889 case 4: case 5: case 0xc: case 0xd:
1890 /* push/pop */
1891 gen_movl_T1_reg(s, 13);
1892 if (insn & (1 << 8))
1893 offset = 4;
1894 else
1895 offset = 0;
1896 for (i = 0; i < 8; i++) {
1897 if (insn & (1 << i))
1898 offset += 4;
1900 if ((insn & (1 << 11)) == 0) {
1901 gen_op_movl_T2_im(-offset);
1902 gen_op_addl_T1_T2();
1904 gen_op_movl_T2_im(4);
1905 for (i = 0; i < 8; i++) {
1906 if (insn & (1 << i)) {
1907 if (insn & (1 << 11)) {
1908 /* pop */
1909 gen_op_ldl_T0_T1();
1910 gen_movl_reg_T0(s, i);
1911 } else {
1912 /* push */
1913 gen_movl_T0_reg(s, i);
1914 gen_op_stl_T0_T1();
1916 /* advance to the next address. */
1917 gen_op_addl_T1_T2();
1920 if (insn & (1 << 8)) {
1921 if (insn & (1 << 11)) {
1922 /* pop pc */
1923 gen_op_ldl_T0_T1();
1924 /* don't set the pc until the rest of the instruction
1925 has completed */
1926 } else {
1927 /* push lr */
1928 gen_movl_T0_reg(s, 14);
1929 gen_op_stl_T0_T1();
1931 gen_op_addl_T1_T2();
1933 if ((insn & (1 << 11)) == 0) {
1934 gen_op_movl_T2_im(-offset);
1935 gen_op_addl_T1_T2();
1937 /* write back the new stack pointer */
1938 gen_movl_reg_T1(s, 13);
1939 /* set the new PC value */
1940 if ((insn & 0x0900) == 0x0900)
1941 gen_bx(s);
1942 break;
1944 default:
1945 goto undef;
1947 break;
1949 case 12:
1950 /* load/store multiple */
1951 rn = (insn >> 8) & 0x7;
1952 gen_movl_T1_reg(s, rn);
1953 gen_op_movl_T2_im(4);
1954 for (i = 0; i < 8; i++) {
1955 if (insn & (1 << i)) {
1956 if (insn & (1 << 11)) {
1957 /* load */
1958 gen_op_ldl_T0_T1();
1959 gen_movl_reg_T0(s, i);
1960 } else {
1961 /* store */
1962 gen_movl_T0_reg(s, i);
1963 gen_op_stl_T0_T1();
1965 /* advance to the next address */
1966 gen_op_addl_T1_T2();
1969 /* Base register writeback. */
1970 gen_movl_reg_T1(s, rn);
1971 break;
1973 case 13:
1974 /* conditional branch or swi */
1975 cond = (insn >> 8) & 0xf;
1976 if (cond == 0xe)
1977 goto undef;
1979 if (cond == 0xf) {
1980 /* swi */
1981 gen_op_movl_T0_im((long)s->pc | 1);
1982 /* Don't set r15. */
1983 gen_op_movl_reg_TN[0][15]();
1984 gen_op_swi();
1985 s->is_jmp = DISAS_JUMP;
1986 break;
1988 /* generate a conditional jump to next instruction */
1989 s->condlabel = gen_new_label();
1990 gen_test_cc[cond ^ 1](s->condlabel);
1991 s->condjmp = 1;
1992 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1993 //s->is_jmp = DISAS_JUMP_NEXT;
1994 gen_movl_T1_reg(s, 15);
1996 /* jump to the offset */
1997 val = (uint32_t)s->pc + 2;
1998 offset = ((int32_t)insn << 24) >> 24;
1999 val += offset << 1;
2000 gen_jmp(s, val);
2001 break;
2003 case 14:
2004 /* unconditional branch */
2005 if (insn & (1 << 11))
2006 goto undef; /* Second half of a blx */
2007 val = (uint32_t)s->pc;
2008 offset = ((int32_t)insn << 21) >> 21;
2009 val += (offset << 1) + 2;
2010 gen_jmp(s, val);
2011 break;
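/* Thumb BL/BLX is a pair of 16-bit halfwords: the first carries the
   sign-extended upper offset bits, the second the low 11 offset bits and
   the BL vs BLX selector (bit 11).  Both halves are decoded here
   together. */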
2013 case 15:
2014 /* branch and link [and switch to arm] */
2015 offset = ((int32_t)insn << 21) >> 10;
2016 insn = lduw(s->pc);
2017 offset |= insn & 0x7ff;
2019 val = (uint32_t)s->pc + 2;
2020 gen_op_movl_T1_im(val | 1);
2021 gen_movl_reg_T1(s, 14);
2023 val += offset << 1;
2024 if (insn & (1 << 11)) {
2025 /* bl */
2026 gen_jmp(s, val);
2027 } else {
2028 /* blx */
2029 val &= ~(uint32_t)2;
2030 gen_op_movl_T0_im(val);
2031 gen_bx(s);
2034 return;
2035 undef:
2036 gen_op_movl_T0_im((long)s->pc - 2);
2037 gen_op_movl_reg_TN[0][15]();
2038 gen_op_undef_insn();
2039 s->is_jmp = DISAS_JUMP;
2042 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2043 basic block 'tb'. If search_pc is TRUE, also generate PC
2044 information for each intermediate instruction. */
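/* When search_pc is set, gen_opc_pc[] and gen_opc_instr_start[] are also
   filled in for every translated instruction so that a location inside
   the generated code can later be mapped back to the guest PC (e.g. when
   an exception is raised in the middle of a TB). */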
2045 static inline int gen_intermediate_code_internal(CPUState *env,
2046 TranslationBlock *tb,
2047 int search_pc)
2049 DisasContext dc1, *dc = &dc1;
2050 uint16_t *gen_opc_end;
2051 int j, lj;
2052 target_ulong pc_start;
2054 /* generate intermediate code */
2055 pc_start = tb->pc;
2057 dc->tb = tb;
2059 gen_opc_ptr = gen_opc_buf;
2060 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2061 gen_opparam_ptr = gen_opparam_buf;
2063 dc->is_jmp = DISAS_NEXT;
2064 dc->pc = pc_start;
2065 dc->singlestep_enabled = env->singlestep_enabled;
2066 dc->condjmp = 0;
2067 dc->thumb = env->thumb;
2068 nb_gen_labels = 0;
2069 lj = -1;
2070 do {
2071 if (env->nb_breakpoints > 0) {
2072 for(j = 0; j < env->nb_breakpoints; j++) {
2073 if (env->breakpoints[j] == dc->pc) {
2074 gen_op_movl_T0_im((long)dc->pc);
2075 gen_op_movl_reg_TN[0][15]();
2076 gen_op_debug();
2077 dc->is_jmp = DISAS_JUMP;
2078 break;
2082 if (search_pc) {
2083 j = gen_opc_ptr - gen_opc_buf;
2084 if (lj < j) {
2085 lj++;
2086 while (lj < j)
2087 gen_opc_instr_start[lj++] = 0;
2089 gen_opc_pc[lj] = dc->pc;
2090 gen_opc_instr_start[lj] = 1;
2093 if (env->thumb)
2094 disas_thumb_insn(dc);
2095 else
2096 disas_arm_insn(env, dc);
2098 if (dc->condjmp && !dc->is_jmp) {
2099 gen_set_label(dc->condlabel);
2100 dc->condjmp = 0;
2102 /* Translation stops when a conditional branch is encountered.
2103 * Otherwise the subsequent code could get translated several times.
2105 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2106 !env->singlestep_enabled &&
2107 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
2108 /* At this stage dc->condjmp will only be set when the skipped
2109 * instruction was a conditional branch, and the PC has already been
2110 * written. */
2111 if (__builtin_expect(env->singlestep_enabled, 0)) {
2112 /* Make sure the pc is updated, and raise a debug exception. */
2113 if (dc->condjmp) {
2114 gen_op_debug();
2115 gen_set_label(dc->condlabel);
2117 if (dc->condjmp || !dc->is_jmp) {
2118 gen_op_movl_T0_im((long)dc->pc);
2119 gen_op_movl_reg_TN[0][15]();
2120 dc->condjmp = 0;
2122 gen_op_debug();
2123 } else {
2124 switch(dc->is_jmp) {
2125 case DISAS_NEXT:
2126 gen_op_jmp1((long)dc->tb, (long)dc->pc);
2127 break;
2128 default:
2129 case DISAS_JUMP:
2130 case DISAS_UPDATE:
2131 /* indicate that the hash table must be used to find the next TB */
2132 gen_op_movl_T0_0();
2133 gen_op_exit_tb();
2134 break;
2135 case DISAS_TB_JUMP:
2136 /* nothing more to generate */
2137 break;
2139 if (dc->condjmp) {
2140 gen_set_label(dc->condlabel);
2141 gen_op_jmp1((long)dc->tb, (long)dc->pc);
2142 dc->condjmp = 0;
2145 *gen_opc_ptr = INDEX_op_end;
2147 #ifdef DEBUG_DISAS
2148 if (loglevel & CPU_LOG_TB_IN_ASM) {
2149 fprintf(logfile, "----------------\n");
2150 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2151 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2152 fprintf(logfile, "\n");
2153 if (loglevel & (CPU_LOG_TB_OP)) {
2154 fprintf(logfile, "OP:\n");
2155 dump_ops(gen_opc_buf, gen_opparam_buf);
2156 fprintf(logfile, "\n");
2159 #endif
2160 if (!search_pc)
2161 tb->size = dc->pc - pc_start;
2162 return 0;
2165 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2167 return gen_intermediate_code_internal(env, tb, 0);
2170 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2172 return gen_intermediate_code_internal(env, tb, 1);
2175 CPUARMState *cpu_arm_init(void)
2177 CPUARMState *env;
2179 cpu_exec_init();
2181 env = malloc(sizeof(CPUARMState));
2182 if (!env)
2183 return NULL;
2184 memset(env, 0, sizeof(CPUARMState));
2185 cpu_single_env = env;
2186 return env;
2189 void cpu_arm_close(CPUARMState *env)
2191 free(env);
2194 void cpu_dump_state(CPUState *env, FILE *f,
2195 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2196 int flags)
2198 int i;
2199 struct {
2200 uint32_t i;
2201 float s;
2202 } s0, s1;
2203 CPU_DoubleU d;
2205 for(i=0;i<16;i++) {
2206 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2207 if ((i % 4) == 3)
2208 cpu_fprintf(f, "\n");
2209 else
2210 cpu_fprintf(f, " ");
2212 cpu_fprintf(f, "PSR=%08x %c%c%c%c\n",
2213 env->cpsr,
2214 env->cpsr & (1 << 31) ? 'N' : '-',
2215 env->cpsr & (1 << 30) ? 'Z' : '-',
2216 env->cpsr & (1 << 29) ? 'C' : '-',
2217 env->cpsr & (1 << 28) ? 'V' : '-');
2219 for (i = 0; i < 16; i++) {
2220 d.d = env->vfp.regs[i];
2221 s0.i = d.l.lower;
2222 s1.i = d.l.upper;
2223 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2224 i * 2, (int)s0.i, s0.s,
2225 i * 2 + 1, (int)s1.i, s1.s,
2226 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2227 d.d);
2229 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
2232 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
2234 return addr;
2237 #if defined(CONFIG_USER_ONLY)
2239 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
2240 int is_user, int is_softmmu)
2242 env->cp15_6 = address;
2243 if (rw == 2) {
2244 env->exception_index = EXCP_PREFETCH_ABORT;
2245 } else {
2246 env->exception_index = EXCP_DATA_ABORT;
2248 return 1;
2251 #else
2253 #error not implemented
2255 #endif