Fix dumping of arm registers (Paul Brook)
[qemu/qemu_0_9_1_stable.git] / target-arm / translate.c
blob 315595e89444530cd4e0330121f3ec5c3b78e137
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
31 /* internal defines */
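/* Per-instruction translation state: 'pc' is the guest address of the next
   instruction to decode, 'is_jmp' records how the block ends (one of the
   DISAS_* codes), 'tb' is the TranslationBlock being filled in and
   'singlestep_enabled' is copied from the CPU debug state. */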
32 typedef struct DisasContext {
33 target_ulong pc;
34 int is_jmp;
35 struct TranslationBlock *tb;
36 int singlestep_enabled;
37 } DisasContext;
39 #define DISAS_JUMP_NEXT 4
41 /* XXX: move that elsewhere */
42 static uint16_t *gen_opc_ptr;
43 static uint32_t *gen_opparam_ptr;
44 extern FILE *logfile;
45 extern int loglevel;
47 enum {
48 #define DEF(s, n, copy_size) INDEX_op_ ## s,
49 #include "opc.h"
50 #undef DEF
51 NB_OPS,
54 #include "gen-op.h"
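/* gen_test_cc[cc] emits a conditional branch (operands: TB pointer and target
   PC) that is taken when ARM condition 'cc' (EQ..LE) holds.  Callers index it
   with cond ^ 1 so execution skips over an instruction whose condition
   fails. */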
56 static GenOpFunc2 *gen_test_cc[14] = {
57 gen_op_test_eq,
58 gen_op_test_ne,
59 gen_op_test_cs,
60 gen_op_test_cc,
61 gen_op_test_mi,
62 gen_op_test_pl,
63 gen_op_test_vs,
64 gen_op_test_vc,
65 gen_op_test_hi,
66 gen_op_test_ls,
67 gen_op_test_ge,
68 gen_op_test_lt,
69 gen_op_test_gt,
70 gen_op_test_le,
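/* For each of the 16 data-processing opcodes, nonzero means the operation is
   a logical one: when the S bit is set its flags are computed with the
   gen_op_logic_*_cc helpers (N and Z from the result, C from the shifter)
   rather than by a flag-setting arithmetic op. */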
73 const uint8_t table_logic_cc[16] = {
74 1, /* and */
75 1, /* xor */
76 0, /* sub */
77 0, /* rsb */
78 0, /* add */
79 0, /* adc */
80 0, /* sbc */
81 0, /* rsc */
82 1, /* andl */
83 1, /* xorl */
84 0, /* cmp */
85 0, /* cmn */
86 1, /* orr */
87 1, /* mov */
88 1, /* bic */
89 1, /* mvn */
92 static GenOpFunc1 *gen_shift_T1_im[4] = {
93 gen_op_shll_T1_im,
94 gen_op_shrl_T1_im,
95 gen_op_sarl_T1_im,
96 gen_op_rorl_T1_im,
99 static GenOpFunc *gen_shift_T1_0[4] = {
100 NULL,
101 gen_op_shrl_T1_0,
102 gen_op_sarl_T1_0,
103 gen_op_rrxl_T1,
106 static GenOpFunc1 *gen_shift_T2_im[4] = {
107 gen_op_shll_T2_im,
108 gen_op_shrl_T2_im,
109 gen_op_sarl_T2_im,
110 gen_op_rorl_T2_im,
113 static GenOpFunc *gen_shift_T2_0[4] = {
114 NULL,
115 gen_op_shrl_T2_0,
116 gen_op_sarl_T2_0,
117 gen_op_rrxl_T2,
120 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
121 gen_op_shll_T1_im_cc,
122 gen_op_shrl_T1_im_cc,
123 gen_op_sarl_T1_im_cc,
124 gen_op_rorl_T1_im_cc,
127 static GenOpFunc *gen_shift_T1_0_cc[4] = {
128 NULL,
129 gen_op_shrl_T1_0_cc,
130 gen_op_sarl_T1_0_cc,
131 gen_op_rrxl_T1_cc,
134 static GenOpFunc *gen_shift_T1_T0[4] = {
135 gen_op_shll_T1_T0,
136 gen_op_shrl_T1_T0,
137 gen_op_sarl_T1_T0,
138 gen_op_rorl_T1_T0,
141 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
142 gen_op_shll_T1_T0_cc,
143 gen_op_shrl_T1_T0_cc,
144 gen_op_sarl_T1_T0_cc,
145 gen_op_rorl_T1_T0_cc,
148 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
150 gen_op_movl_T0_r0,
151 gen_op_movl_T0_r1,
152 gen_op_movl_T0_r2,
153 gen_op_movl_T0_r3,
154 gen_op_movl_T0_r4,
155 gen_op_movl_T0_r5,
156 gen_op_movl_T0_r6,
157 gen_op_movl_T0_r7,
158 gen_op_movl_T0_r8,
159 gen_op_movl_T0_r9,
160 gen_op_movl_T0_r10,
161 gen_op_movl_T0_r11,
162 gen_op_movl_T0_r12,
163 gen_op_movl_T0_r13,
164 gen_op_movl_T0_r14,
165 gen_op_movl_T0_r15,
168 gen_op_movl_T1_r0,
169 gen_op_movl_T1_r1,
170 gen_op_movl_T1_r2,
171 gen_op_movl_T1_r3,
172 gen_op_movl_T1_r4,
173 gen_op_movl_T1_r5,
174 gen_op_movl_T1_r6,
175 gen_op_movl_T1_r7,
176 gen_op_movl_T1_r8,
177 gen_op_movl_T1_r9,
178 gen_op_movl_T1_r10,
179 gen_op_movl_T1_r11,
180 gen_op_movl_T1_r12,
181 gen_op_movl_T1_r13,
182 gen_op_movl_T1_r14,
183 gen_op_movl_T1_r15,
186 gen_op_movl_T2_r0,
187 gen_op_movl_T2_r1,
188 gen_op_movl_T2_r2,
189 gen_op_movl_T2_r3,
190 gen_op_movl_T2_r4,
191 gen_op_movl_T2_r5,
192 gen_op_movl_T2_r6,
193 gen_op_movl_T2_r7,
194 gen_op_movl_T2_r8,
195 gen_op_movl_T2_r9,
196 gen_op_movl_T2_r10,
197 gen_op_movl_T2_r11,
198 gen_op_movl_T2_r12,
199 gen_op_movl_T2_r13,
200 gen_op_movl_T2_r14,
201 gen_op_movl_T2_r15,
205 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
207 gen_op_movl_r0_T0,
208 gen_op_movl_r1_T0,
209 gen_op_movl_r2_T0,
210 gen_op_movl_r3_T0,
211 gen_op_movl_r4_T0,
212 gen_op_movl_r5_T0,
213 gen_op_movl_r6_T0,
214 gen_op_movl_r7_T0,
215 gen_op_movl_r8_T0,
216 gen_op_movl_r9_T0,
217 gen_op_movl_r10_T0,
218 gen_op_movl_r11_T0,
219 gen_op_movl_r12_T0,
220 gen_op_movl_r13_T0,
221 gen_op_movl_r14_T0,
222 gen_op_movl_r15_T0,
225 gen_op_movl_r0_T1,
226 gen_op_movl_r1_T1,
227 gen_op_movl_r2_T1,
228 gen_op_movl_r3_T1,
229 gen_op_movl_r4_T1,
230 gen_op_movl_r5_T1,
231 gen_op_movl_r6_T1,
232 gen_op_movl_r7_T1,
233 gen_op_movl_r8_T1,
234 gen_op_movl_r9_T1,
235 gen_op_movl_r10_T1,
236 gen_op_movl_r11_T1,
237 gen_op_movl_r12_T1,
238 gen_op_movl_r13_T1,
239 gen_op_movl_r14_T1,
240 gen_op_movl_r15_T1,
244 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
245 gen_op_movl_T0_im,
246 gen_op_movl_T1_im,
247 gen_op_movl_T2_im,
250 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
251 gen_op_shll_T0_im_thumb,
252 gen_op_shrl_T0_im_thumb,
253 gen_op_sarl_T0_im_thumb,
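/* gen_bx: branch-and-exchange through T0.  gen_op_bx_T0 loads the PC from T0
   and updates the Thumb bit from bit 0, so the translation block has to end
   here (DISAS_UPDATE). */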
256 static inline void gen_bx(DisasContext *s)
258 s->is_jmp = DISAS_UPDATE;
259 gen_op_bx_T0();
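/* Copy an ARM register into T0/T1/T2.  r15 reads as s->pc + 4, which for ARM
   code is the instruction address plus 8 (the architectural pipeline
   offset). */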
262 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
264 int val;
266 if (reg == 15) {
267 /* normally, since we already updated PC, we only need to add 4 */
268 val = (long)s->pc + 4;
269 gen_op_movl_TN_im[t](val);
270 } else {
271 gen_op_movl_TN_reg[t][reg]();
275 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
277 gen_movl_TN_reg(s, reg, 0);
280 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
282 gen_movl_TN_reg(s, reg, 1);
285 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
287 gen_movl_TN_reg(s, reg, 2);
290 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
292 gen_op_movl_reg_TN[t][reg]();
293 if (reg == 15) {
294 s->is_jmp = DISAS_JUMP;
298 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
300 gen_movl_reg_TN(s, reg, 0);
303 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
305 gen_movl_reg_TN(s, reg, 1);
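/* Apply the addressing-mode offset of a word/byte load or store to the base
   address in T1: either a 12-bit immediate or a shifted register (in T2),
   added or subtracted according to the U bit (bit 23). */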
308 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
310 int val, rm, shift, shiftop;
312 if (!(insn & (1 << 25))) {
313 /* immediate */
314 val = insn & 0xfff;
315 if (!(insn & (1 << 23)))
316 val = -val;
317 if (val != 0)
318 gen_op_addl_T1_im(val);
319 } else {
320 /* shift/register */
321 rm = (insn) & 0xf;
322 shift = (insn >> 7) & 0x1f;
323 gen_movl_T2_reg(s, rm);
324 shiftop = (insn >> 5) & 3;
325 if (shift != 0) {
326 gen_shift_T2_im[shiftop](shift);
327 } else if (shiftop != 0) {
328 gen_shift_T2_0[shiftop]();
330 if (!(insn & (1 << 23)))
331 gen_op_subl_T1_T2();
332 else
333 gen_op_addl_T1_T2();
337 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
339 int val, rm;
341 if (insn & (1 << 22)) {
342 /* immediate */
343 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
344 if (!(insn & (1 << 23)))
345 val = -val;
346 if (val != 0)
347 gen_op_addl_T1_im(val);
348 } else {
349 /* register */
350 rm = (insn) & 0xf;
351 gen_movl_T2_reg(s, rm);
352 if (!(insn & (1 << 23)))
353 gen_op_subl_T1_T2();
354 else
355 gen_op_addl_T1_T2();
359 #define VFP_OP(name) \
360 static inline void gen_vfp_##name(int dp) \
362 if (dp) \
363 gen_op_vfp_##name##d(); \
364 else \
365 gen_op_vfp_##name##s(); \
368 VFP_OP(add)
369 VFP_OP(sub)
370 VFP_OP(mul)
371 VFP_OP(div)
372 VFP_OP(neg)
373 VFP_OP(abs)
374 VFP_OP(sqrt)
375 VFP_OP(cmp)
376 VFP_OP(cmpe)
377 VFP_OP(F1_ld0)
378 VFP_OP(uito)
379 VFP_OP(sito)
380 VFP_OP(toui)
381 VFP_OP(touiz)
382 VFP_OP(tosi)
383 VFP_OP(tosiz)
384 VFP_OP(ld)
385 VFP_OP(st)
387 #undef VFP_OP
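/* The VFP register file is stored as an array of doubles; a single-precision
   register sN lives in the low or high word of vfp.regs[N >> 1], which is
   what the CPU_DoubleU offsets below select. */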
389 static inline long
390 vfp_reg_offset (int dp, int reg)
392 if (dp)
393 return offsetof(CPUARMState, vfp.regs[reg]);
394 else if (reg & 1) {
395 return offsetof(CPUARMState, vfp.regs[reg >> 1])
396 + offsetof(CPU_DoubleU, l.upper);
397 } else {
398 return offsetof(CPUARMState, vfp.regs[reg >> 1])
399 + offsetof(CPU_DoubleU, l.lower);
402 static inline void gen_mov_F0_vreg(int dp, int reg)
404 if (dp)
405 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
406 else
407 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
410 static inline void gen_mov_F1_vreg(int dp, int reg)
412 if (dp)
413 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
414 else
415 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
418 static inline void gen_mov_vreg_F0(int dp, int reg)
420 if (dp)
421 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
422 else
423 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
426 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
427 (i.e. an undefined instruction). */
428 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
430 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
431 int dp, veclen;
433 dp = ((insn & 0xf00) == 0xb00);
434 switch ((insn >> 24) & 0xf) {
435 case 0xe:
436 if (insn & (1 << 4)) {
437 /* single register transfer */
438 if ((insn & 0x6f) != 0x00)
439 return 1;
440 rd = (insn >> 12) & 0xf;
441 if (dp) {
442 if (insn & 0x80)
443 return 1;
444 rn = (insn >> 16) & 0xf;
445 /* Get the existing value even for arm->vfp moves because
446 we only set half the register. */
447 gen_mov_F0_vreg(1, rn);
448 gen_op_vfp_mrrd();
449 if (insn & (1 << 20)) {
450 /* vfp->arm */
451 if (insn & (1 << 21))
452 gen_movl_reg_T1(s, rd);
453 else
454 gen_movl_reg_T0(s, rd);
455 } else {
456 /* arm->vfp */
457 if (insn & (1 << 21))
458 gen_movl_T1_reg(s, rd);
459 else
460 gen_movl_T0_reg(s, rd);
461 gen_op_vfp_mdrr();
462 gen_mov_vreg_F0(dp, rn);
464 } else {
465 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
466 if (insn & (1 << 20)) {
467 /* vfp->arm */
468 if (insn & (1 << 21)) {
469 /* system register */
470 switch (rn) {
471 case 0: /* fpsid */
472 n = 0x0091A0000;
473 break;
474 case 2: /* fpscr */
475 if (rd == 15)
476 gen_op_vfp_movl_T0_fpscr_flags();
477 else
478 gen_op_vfp_movl_T0_fpscr();
479 break;
480 default:
481 return 1;
483 } else {
484 gen_mov_F0_vreg(0, rn);
485 gen_op_vfp_mrs();
487 if (rd == 15) {
488 /* This will only set the 4 flag bits */
489 gen_op_movl_psr_T0();
490 } else
491 gen_movl_reg_T0(s, rd);
492 } else {
493 /* arm->vfp */
494 gen_movl_T0_reg(s, rd);
495 if (insn & (1 << 21)) {
496 /* system register */
497 switch (rn) {
498 case 0: /* fpsid */
499 /* Writes are ignored. */
500 break;
501 case 2: /* fpscr */
502 gen_op_vfp_movl_fpscr_T0();
503 /* This could change vector settings, so jump to
504 the next instruction. */
505 gen_op_movl_T0_im(s->pc);
506 gen_movl_reg_T0(s, 15);
507 s->is_jmp = DISAS_UPDATE;
508 break;
509 default:
510 return 1;
512 } else {
513 gen_op_vfp_msr();
514 gen_mov_vreg_F0(0, rn);
518 } else {
519 /* data processing */
520 /* The opcode is in bits 23, 21, 20 and 6. */
521 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
522 if (dp) {
523 if (op == 15) {
524 /* rn is opcode */
525 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
526 } else {
527 /* rn is register number */
528 if (insn & (1 << 7))
529 return 1;
530 rn = (insn >> 16) & 0xf;
533 if (op == 15 && (rn == 15 || rn > 17)) {
534 /* Integer or single precision destination. */
535 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
536 } else {
537 if (insn & (1 << 22))
538 return 1;
539 rd = (insn >> 12) & 0xf;
542 if (op == 15 && (rn == 16 || rn == 17)) {
543 /* Integer source. */
544 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
545 } else {
546 if (insn & (1 << 5))
547 return 1;
548 rm = insn & 0xf;
550 } else {
551 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
552 if (op == 15 && rn == 15) {
553 /* Double precision destination. */
554 if (insn & (1 << 22))
555 return 1;
556 rd = (insn >> 12) & 0xf;
557 } else
558 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
559 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
562 veclen = env->vfp.vec_len;
563 if (op == 15 && rn > 3)
564 veclen = 0;
566 /* Shut up compiler warnings. */
567 delta_m = 0;
568 delta_d = 0;
569 bank_mask = 0;
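/* Short-vector handling: when FPSCR.LEN is nonzero, an operation whose
   destination lies outside bank 0 is repeated 'veclen' times, stepping the
   destination (and the second source, unless it sits in bank 0) through its
   register bank by the configured stride.  A destination in bank 0 stays
   scalar. */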
571 if (veclen > 0) {
572 if (dp)
573 bank_mask = 0xc;
574 else
575 bank_mask = 0x18;
577 /* Figure out what type of vector operation this is. */
578 if ((rd & bank_mask) == 0) {
579 /* scalar */
580 veclen = 0;
581 } else {
582 if (dp)
583 delta_d = (env->vfp.vec_stride >> 1) + 1;
584 else
585 delta_d = env->vfp.vec_stride + 1;
587 if ((rm & bank_mask) == 0) {
588 /* mixed scalar/vector */
589 delta_m = 0;
590 } else {
591 /* vector */
592 delta_m = delta_d;
597 /* Load the initial operands. */
598 if (op == 15) {
599 switch (rn) {
600 case 16:
601 case 17:
602 /* Integer source */
603 gen_mov_F0_vreg(0, rm);
604 break;
605 case 8:
606 case 9:
607 /* Compare */
608 gen_mov_F0_vreg(dp, rd);
609 gen_mov_F1_vreg(dp, rm);
610 break;
611 case 10:
612 case 11:
613 /* Compare with zero */
614 gen_mov_F0_vreg(dp, rd);
615 gen_vfp_F1_ld0(dp);
616 break;
617 default:
618 /* One source operand. */
619 gen_mov_F0_vreg(dp, rm);
621 } else {
622 /* Two source operands. */
623 gen_mov_F0_vreg(dp, rn);
624 gen_mov_F1_vreg(dp, rm);
627 for (;;) {
628 /* Perform the calculation. */
629 switch (op) {
630 case 0: /* mac: fd + (fn * fm) */
631 gen_vfp_mul(dp);
632 gen_mov_F1_vreg(dp, rd);
633 gen_vfp_add(dp);
634 break;
635 case 1: /* nmac: fd - (fn * fm) */
636 gen_vfp_mul(dp);
637 gen_vfp_neg(dp);
638 gen_mov_F1_vreg(dp, rd);
639 gen_vfp_add(dp);
640 break;
641 case 2: /* msc: -fd + (fn * fm) */
642 gen_vfp_mul(dp);
643 gen_mov_F1_vreg(dp, rd);
644 gen_vfp_sub(dp);
645 break;
646 case 3: /* nmsc: -fd - (fn * fm) */
647 gen_vfp_mul(dp);
648 gen_mov_F1_vreg(dp, rd);
649 gen_vfp_add(dp);
650 gen_vfp_neg(dp);
651 break;
652 case 4: /* mul: fn * fm */
653 gen_vfp_mul(dp);
654 break;
655 case 5: /* nmul: -(fn * fm) */
656 gen_vfp_mul(dp);
657 gen_vfp_neg(dp);
658 break;
659 case 6: /* add: fn + fm */
660 gen_vfp_add(dp);
661 break;
662 case 7: /* sub: fn - fm */
663 gen_vfp_sub(dp);
664 break;
665 case 8: /* div: fn / fm */
666 gen_vfp_div(dp);
667 break;
668 case 15: /* extension space */
669 switch (rn) {
670 case 0: /* cpy */
671 /* no-op */
672 break;
673 case 1: /* abs */
674 gen_vfp_abs(dp);
675 break;
676 case 2: /* neg */
677 gen_vfp_neg(dp);
678 break;
679 case 3: /* sqrt */
680 gen_vfp_sqrt(dp);
681 break;
682 case 8: /* cmp */
683 gen_vfp_cmp(dp);
684 break;
685 case 9: /* cmpe */
686 gen_vfp_cmpe(dp);
687 break;
688 case 10: /* cmpz */
689 gen_vfp_cmp(dp);
690 break;
691 case 11: /* cmpez */
692 gen_vfp_F1_ld0(dp);
693 gen_vfp_cmpe(dp);
694 break;
695 case 15: /* single<->double conversion */
696 if (dp)
697 gen_op_vfp_fcvtsd();
698 else
699 gen_op_vfp_fcvtds();
700 break;
701 case 16: /* fuito */
702 gen_vfp_uito(dp);
703 break;
704 case 17: /* fsito */
705 gen_vfp_sito(dp);
706 break;
707 case 24: /* ftoui */
708 gen_vfp_toui(dp);
709 break;
710 case 25: /* ftouiz */
711 gen_vfp_touiz(dp);
712 break;
713 case 26: /* ftosi */
714 gen_vfp_tosi(dp);
715 break;
716 case 27: /* ftosiz */
717 gen_vfp_tosiz(dp);
718 break;
719 default: /* undefined */
720 printf ("rn:%d\n", rn);
721 return 1;
723 break;
724 default: /* undefined */
725 printf ("op:%d\n", op);
726 return 1;
729 /* Write back the result. */
730 if (op == 15 && (rn >= 8 && rn <= 11))
731 ; /* Comparison, do nothing. */
732 else if (op == 15 && rn > 17)
733 /* Integer result. */
734 gen_mov_vreg_F0(0, rd);
735 else if (op == 15 && rn == 15)
736 /* conversion */
737 gen_mov_vreg_F0(!dp, rd);
738 else
739 gen_mov_vreg_F0(dp, rd);
741 /* break out of the loop if we have finished */
742 if (veclen == 0)
743 break;
745 if (op == 15 && delta_m == 0) {
746 /* single source one-many */
747 while (veclen--) {
748 rd = ((rd + delta_d) & (bank_mask - 1))
749 | (rd & bank_mask);
750 gen_mov_vreg_F0(dp, rd);
752 break;
754 /* Set up the next operands. */
755 veclen--;
756 rd = ((rd + delta_d) & (bank_mask - 1))
757 | (rd & bank_mask);
759 if (op == 15) {
760 /* One source operand. */
761 rm = ((rm + delta_m) & (bank_mask - 1))
762 | (rm & bank_mask);
763 gen_mov_F0_vreg(dp, rm);
764 } else {
765 /* Two source operands. */
766 rn = ((rn + delta_d) & (bank_mask - 1))
767 | (rn & bank_mask);
768 gen_mov_F0_vreg(dp, rn);
769 if (delta_m) {
770 rm = ((rm + delta_m) & (bank_mask - 1))
771 | (rm & bank_mask);
772 gen_mov_F1_vreg(dp, rm);
777 break;
778 case 0xc:
779 case 0xd:
780 if (dp && (insn & (1 << 22))) {
781 /* two-register transfer */
782 rn = (insn >> 16) & 0xf;
783 rd = (insn >> 12) & 0xf;
784 if (dp) {
785 if (insn & (1 << 5))
786 return 1;
787 rm = insn & 0xf;
788 } else
789 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
791 if (insn & (1 << 20)) {
792 /* vfp->arm */
793 if (dp) {
794 gen_mov_F0_vreg(1, rm);
795 gen_op_vfp_mrrd();
796 gen_movl_reg_T0(s, rd);
797 gen_movl_reg_T1(s, rn);
798 } else {
799 gen_mov_F0_vreg(0, rm);
800 gen_op_vfp_mrs();
801 gen_movl_reg_T0(s, rn);
802 gen_mov_F0_vreg(0, rm + 1);
803 gen_op_vfp_mrs();
804 gen_movl_reg_T0(s, rd);
806 } else {
807 /* arm->vfp */
808 if (dp) {
809 gen_movl_T0_reg(s, rd);
810 gen_movl_T1_reg(s, rn);
811 gen_op_vfp_mdrr();
812 gen_mov_vreg_F0(1, rm);
813 } else {
814 gen_movl_T0_reg(s, rn);
815 gen_op_vfp_msr();
816 gen_mov_vreg_F0(0, rm);
817 gen_movl_T0_reg(s, rd);
818 gen_op_vfp_msr();
819 gen_mov_vreg_F0(0, rm + 1);
822 } else {
823 /* Load/store */
824 rn = (insn >> 16) & 0xf;
825 if (dp)
826 rd = (insn >> 12) & 0xf;
827 else
828 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
829 gen_movl_T1_reg(s, rn);
830 if ((insn & 0x01200000) == 0x01000000) {
831 /* Single load/store */
832 offset = (insn & 0xff) << 2;
833 if ((insn & (1 << 23)) == 0)
834 offset = -offset;
835 gen_op_addl_T1_im(offset);
836 if (insn & (1 << 20)) {
837 gen_vfp_ld(dp);
838 gen_mov_vreg_F0(dp, rd);
839 } else {
840 gen_mov_F0_vreg(dp, rd);
841 gen_vfp_st(dp);
843 } else {
844 /* load/store multiple */
845 if (dp)
846 n = (insn >> 1) & 0x7f;
847 else
848 n = insn & 0xff;
850 if (insn & (1 << 24)) /* pre-decrement */
851 gen_op_addl_T1_im(-((insn & 0xff) << 2));
853 if (dp)
854 offset = 8;
855 else
856 offset = 4;
857 for (i = 0; i < n; i++) {
858 if (insn & (1 << 20)) {
859 /* load */
860 gen_vfp_ld(dp);
861 gen_mov_vreg_F0(dp, rd + i);
862 } else {
863 /* store */
864 gen_mov_F0_vreg(dp, rd + i);
865 gen_vfp_st(dp);
867 gen_op_addl_T1_im(offset);
869 if (insn & (1 << 21)) {
870 /* writeback */
871 if (insn & (1 << 24))
872 offset = -offset * n;
873 else if (dp && (insn & 1))
874 offset = 4;
875 else
876 offset = 0;
878 if (offset != 0)
879 gen_op_addl_T1_im(offset);
880 gen_movl_reg_T1(s, rn);
884 break;
885 default:
886 /* Should never happen. */
887 return 1;
889 return 0;
892 static inline void gen_jmp (DisasContext *s, uint32_t dest)
894 if (__builtin_expect(s->singlestep_enabled, 0)) {
895 /* An indirect jump so that we still trigger the debug exception. */
896 gen_op_movl_T0_im(dest);
897 gen_bx(s);
898 } else {
899 gen_op_jmp((long)s->tb, dest);
900 s->is_jmp = DISAS_TB_JUMP;
904 static void disas_arm_insn(CPUState * env, DisasContext *s)
906 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
908 insn = ldl(s->pc);
909 s->pc += 4;
911 cond = insn >> 28;
912 if (cond == 0xf){
913 /* Unconditional instructions. */
914 if ((insn & 0x0d70f000) == 0x0550f000)
915 return; /* PLD */
916 else if ((insn & 0x0e000000) == 0x0a000000) {
917 /* branch link and change to thumb (blx <offset>) */
918 int32_t offset;
920 val = (uint32_t)s->pc;
921 gen_op_movl_T0_im(val);
922 gen_movl_reg_T0(s, 14);
923 /* Sign-extend the 24-bit offset */
924 offset = (((int32_t)insn) << 8) >> 8;
925 /* offset * 4 + bit24 * 2 + (thumb bit) */
926 val += (offset << 2) | ((insn >> 23) & 2) | 1;
927 /* pipeline offset */
928 val += 4;
929 gen_op_movl_T0_im(val);
930 gen_bx(s);
931 return;
932 } else if ((insn & 0x0fe00000) == 0x0c400000) {
933 /* Coprocessor double register transfer. */
934 } else if ((insn & 0x0f000010) == 0x0e000010) {
935 /* Additional coprocessor register transfer. */
937 goto illegal_op;
939 if (cond != 0xe) {
940 /* if the condition is not "always", we generate a conditional jump to
941 the next instruction */
942 gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
943 s->is_jmp = DISAS_JUMP_NEXT;
945 if ((insn & 0x0f900000) == 0x03000000) {
946 if ((insn & 0x0ff0f000) != 0x0360f000)
947 goto illegal_op;
948 /* CPSR = immediate */
949 val = insn & 0xff;
950 shift = ((insn >> 8) & 0xf) * 2;
951 if (shift)
952 val = (val >> shift) | (val << (32 - shift));
953 gen_op_movl_T0_im(val);
954 if (insn & (1 << 19))
955 gen_op_movl_psr_T0();
956 } else if ((insn & 0x0f900000) == 0x01000000
957 && (insn & 0x00000090) != 0x00000090) {
958 /* miscellaneous instructions */
959 op1 = (insn >> 21) & 3;
960 sh = (insn >> 4) & 0xf;
961 rm = insn & 0xf;
962 switch (sh) {
963 case 0x0: /* move program status register */
964 if (op1 & 2) {
965 /* SPSR not accessible in user mode */
966 goto illegal_op;
968 if (op1 & 1) {
969 /* CPSR = reg */
970 gen_movl_T0_reg(s, rm);
971 if (insn & (1 << 19))
972 gen_op_movl_psr_T0();
973 } else {
974 /* reg = CPSR */
975 rd = (insn >> 12) & 0xf;
976 gen_op_movl_T0_psr();
977 gen_movl_reg_T0(s, rd);
979 break;
980 case 0x1:
981 if (op1 == 1) {
982 /* branch/exchange thumb (bx). */
983 gen_movl_T0_reg(s, rm);
984 gen_bx(s);
985 } else if (op1 == 3) {
986 /* clz */
987 rd = (insn >> 12) & 0xf;
988 gen_movl_T0_reg(s, rm);
989 gen_op_clz_T0();
990 gen_movl_reg_T0(s, rd);
991 } else {
992 goto illegal_op;
994 break;
995 case 0x3:
996 if (op1 != 1)
997 goto illegal_op;
999 /* branch link/exchange thumb (blx) */
1000 val = (uint32_t)s->pc;
1001 gen_op_movl_T0_im(val);
1002 gen_movl_reg_T0(s, 14);
1003 gen_movl_T0_reg(s, rm);
1004 gen_bx(s);
1005 break;
1006 case 0x5: /* saturating add/subtract */
1007 rd = (insn >> 12) & 0xf;
1008 rn = (insn >> 16) & 0xf;
1009 gen_movl_T0_reg(s, rn);
1010 if (op1 & 2) {
1011 gen_movl_T1_reg(s, rn);
1012 if (op1 & 1)
1013 gen_op_subl_T0_T1_saturate();
1014 else
1015 gen_op_addl_T0_T1_saturate();
1017 gen_movl_T1_reg(s, rm);
1018 if (op1 & 1)
1019 gen_op_subl_T0_T1_saturate();
1020 else
1021 gen_op_addl_T0_T1_saturate();
1022 gen_movl_reg_T0(s, rn);
1023 break;
1024 case 0x8: /* signed multiply */
1025 case 0xa:
1026 case 0xc:
1027 case 0xe:
1028 rs = (insn >> 8) & 0xf;
1029 rn = (insn >> 12) & 0xf;
1030 rd = (insn >> 16) & 0xf;
1031 if (op1 == 1) {
1032 /* (32 * 16) >> 16 */
1033 gen_movl_T0_reg(s, rm);
1034 gen_movl_T1_reg(s, rs);
1035 if (sh & 4)
1036 gen_op_sarl_T1_im(16);
1037 else
1038 gen_op_sxl_T1();
1039 gen_op_imulw_T0_T1();
1040 if ((sh & 2) == 0) {
1041 gen_movl_T1_reg(s, rn);
1042 gen_op_addl_T0_T1_setq();
1044 gen_movl_reg_T0(s, rd);
1045 } else {
1046 /* 16 * 16 */
1047 gen_movl_T0_reg(s, rm);
1048 if (sh & 2)
1049 gen_op_sarl_T0_im(16);
1050 else
1051 gen_op_sxl_T0();
1052 gen_movl_T1_reg(s, rs);
1053 if (sh & 4)
1054 gen_op_sarl_T1_im(16);
1055 else
1056 gen_op_sxl_T1();
1057 if (op1 == 2) {
1058 gen_op_imull_T0_T1();
1059 gen_op_addq_T0_T1(rn, rd);
1060 gen_movl_reg_T0(s, rn);
1061 gen_movl_reg_T1(s, rd);
1062 } else {
1063 gen_op_mul_T0_T1();
1064 if (op1 == 0) {
1065 gen_movl_T1_reg(s, rn);
1066 gen_op_addl_T0_T1_setq();
1068 gen_movl_reg_T0(s, rd);
1071 break;
1072 default:
1073 goto illegal_op;
1075 } else if (((insn & 0x0e000000) == 0 &&
1076 (insn & 0x00000090) != 0x90) ||
1077 ((insn & 0x0e000000) == (1 << 25))) {
1078 int set_cc, logic_cc, shiftop;
1080 op1 = (insn >> 21) & 0xf;
1081 set_cc = (insn >> 20) & 1;
1082 logic_cc = table_logic_cc[op1] & set_cc;
1084 /* data processing instruction */
1085 if (insn & (1 << 25)) {
1086 /* immediate operand */
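/* The operand is an 8-bit value rotated right by twice the 4-bit rotate
   field; e.g. an immediate of 0x3f with rotate field 0xe is rotated right
   by 28 bits, giving 0x3f0. */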
1087 val = insn & 0xff;
1088 shift = ((insn >> 8) & 0xf) * 2;
1089 if (shift)
1090 val = (val >> shift) | (val << (32 - shift));
1091 gen_op_movl_T1_im(val);
1092 if (logic_cc && shift)
1093 gen_op_mov_CF_T1();
1094 } else {
1095 /* register */
1096 rm = (insn) & 0xf;
1097 gen_movl_T1_reg(s, rm);
1098 shiftop = (insn >> 5) & 3;
1099 if (!(insn & (1 << 4))) {
1100 shift = (insn >> 7) & 0x1f;
1101 if (shift != 0) {
1102 if (logic_cc) {
1103 gen_shift_T1_im_cc[shiftop](shift);
1104 } else {
1105 gen_shift_T1_im[shiftop](shift);
1107 } else if (shiftop != 0) {
1108 if (logic_cc) {
1109 gen_shift_T1_0_cc[shiftop]();
1110 } else {
1111 gen_shift_T1_0[shiftop]();
1114 } else {
1115 rs = (insn >> 8) & 0xf;
1116 gen_movl_T0_reg(s, rs);
1117 if (logic_cc) {
1118 gen_shift_T1_T0_cc[shiftop]();
1119 } else {
1120 gen_shift_T1_T0[shiftop]();
1124 if (op1 != 0x0f && op1 != 0x0d) {
1125 rn = (insn >> 16) & 0xf;
1126 gen_movl_T0_reg(s, rn);
1128 rd = (insn >> 12) & 0xf;
1129 switch(op1) {
1130 case 0x00:
1131 gen_op_andl_T0_T1();
1132 gen_movl_reg_T0(s, rd);
1133 if (logic_cc)
1134 gen_op_logic_T0_cc();
1135 break;
1136 case 0x01:
1137 gen_op_xorl_T0_T1();
1138 gen_movl_reg_T0(s, rd);
1139 if (logic_cc)
1140 gen_op_logic_T0_cc();
1141 break;
1142 case 0x02:
1143 if (set_cc)
1144 gen_op_subl_T0_T1_cc();
1145 else
1146 gen_op_subl_T0_T1();
1147 gen_movl_reg_T0(s, rd);
1148 break;
1149 case 0x03:
1150 if (set_cc)
1151 gen_op_rsbl_T0_T1_cc();
1152 else
1153 gen_op_rsbl_T0_T1();
1154 gen_movl_reg_T0(s, rd);
1155 break;
1156 case 0x04:
1157 if (set_cc)
1158 gen_op_addl_T0_T1_cc();
1159 else
1160 gen_op_addl_T0_T1();
1161 gen_movl_reg_T0(s, rd);
1162 break;
1163 case 0x05:
1164 if (set_cc)
1165 gen_op_adcl_T0_T1_cc();
1166 else
1167 gen_op_adcl_T0_T1();
1168 gen_movl_reg_T0(s, rd);
1169 break;
1170 case 0x06:
1171 if (set_cc)
1172 gen_op_sbcl_T0_T1_cc();
1173 else
1174 gen_op_sbcl_T0_T1();
1175 gen_movl_reg_T0(s, rd);
1176 break;
1177 case 0x07:
1178 if (set_cc)
1179 gen_op_rscl_T0_T1_cc();
1180 else
1181 gen_op_rscl_T0_T1();
1182 gen_movl_reg_T0(s, rd);
1183 break;
1184 case 0x08:
1185 if (set_cc) {
1186 gen_op_andl_T0_T1();
1187 gen_op_logic_T0_cc();
1189 break;
1190 case 0x09:
1191 if (set_cc) {
1192 gen_op_xorl_T0_T1();
1193 gen_op_logic_T0_cc();
1195 break;
1196 case 0x0a:
1197 if (set_cc) {
1198 gen_op_subl_T0_T1_cc();
1200 break;
1201 case 0x0b:
1202 if (set_cc) {
1203 gen_op_addl_T0_T1_cc();
1205 break;
1206 case 0x0c:
1207 gen_op_orl_T0_T1();
1208 gen_movl_reg_T0(s, rd);
1209 if (logic_cc)
1210 gen_op_logic_T0_cc();
1211 break;
1212 case 0x0d:
1213 gen_movl_reg_T1(s, rd);
1214 if (logic_cc)
1215 gen_op_logic_T1_cc();
1216 break;
1217 case 0x0e:
1218 gen_op_bicl_T0_T1();
1219 gen_movl_reg_T0(s, rd);
1220 if (logic_cc)
1221 gen_op_logic_T0_cc();
1222 break;
1223 default:
1224 case 0x0f:
1225 gen_op_notl_T1();
1226 gen_movl_reg_T1(s, rd);
1227 if (logic_cc)
1228 gen_op_logic_T1_cc();
1229 break;
1231 } else {
1232 /* other instructions */
1233 op1 = (insn >> 24) & 0xf;
1234 switch(op1) {
1235 case 0x0:
1236 case 0x1:
1237 /* multiplies, extra load/stores */
1238 sh = (insn >> 5) & 3;
1239 if (sh == 0) {
1240 if (op1 == 0x0) {
1241 rd = (insn >> 16) & 0xf;
1242 rn = (insn >> 12) & 0xf;
1243 rs = (insn >> 8) & 0xf;
1244 rm = (insn) & 0xf;
1245 if (((insn >> 22) & 3) == 0) {
1246 /* 32 bit mul */
1247 gen_movl_T0_reg(s, rs);
1248 gen_movl_T1_reg(s, rm);
1249 gen_op_mul_T0_T1();
1250 if (insn & (1 << 21)) {
1251 gen_movl_T1_reg(s, rn);
1252 gen_op_addl_T0_T1();
1254 if (insn & (1 << 20))
1255 gen_op_logic_T0_cc();
1256 gen_movl_reg_T0(s, rd);
1257 } else {
1258 /* 64 bit mul */
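/* The 64-bit product is left split across T0 (low word) and T1 (high word);
   the accumulate forms then add in the RdLo:RdHi pair before the two halves
   are written back below. */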
1259 gen_movl_T0_reg(s, rs);
1260 gen_movl_T1_reg(s, rm);
1261 if (insn & (1 << 22))
1262 gen_op_imull_T0_T1();
1263 else
1264 gen_op_mull_T0_T1();
1265 if (insn & (1 << 21)) /* mult accumulate */
1266 gen_op_addq_T0_T1(rn, rd);
1267 if (!(insn & (1 << 23))) { /* double accumulate */
1268 gen_op_addq_lo_T0_T1(rn);
1269 gen_op_addq_lo_T0_T1(rd);
1271 if (insn & (1 << 20))
1272 gen_op_logicq_cc();
1273 gen_movl_reg_T0(s, rn);
1274 gen_movl_reg_T1(s, rd);
1276 } else {
1277 rn = (insn >> 16) & 0xf;
1278 rd = (insn >> 12) & 0xf;
1279 if (insn & (1 << 23)) {
1280 /* load/store exclusive */
1281 goto illegal_op;
1282 } else {
1283 /* SWP instruction */
1284 rm = (insn) & 0xf;
1286 gen_movl_T0_reg(s, rm);
1287 gen_movl_T1_reg(s, rn);
1288 if (insn & (1 << 22)) {
1289 gen_op_swpb_T0_T1();
1290 } else {
1291 gen_op_swpl_T0_T1();
1293 gen_movl_reg_T0(s, rd);
1296 } else {
1297 /* Misc load/store */
1298 rn = (insn >> 16) & 0xf;
1299 rd = (insn >> 12) & 0xf;
1300 gen_movl_T1_reg(s, rn);
1301 if (insn & (1 << 24))
1302 gen_add_datah_offset(s, insn);
1303 if (insn & (1 << 20)) {
1304 /* load */
1305 switch(sh) {
1306 case 1:
1307 gen_op_lduw_T0_T1();
1308 break;
1309 case 2:
1310 gen_op_ldsb_T0_T1();
1311 break;
1312 default:
1313 case 3:
1314 gen_op_ldsw_T0_T1();
1315 break;
1317 gen_movl_reg_T0(s, rd);
1318 } else if (sh & 2) {
1319 /* doubleword */
1320 if (sh & 1) {
1321 /* store */
1322 gen_movl_T0_reg(s, rd);
1323 gen_op_stl_T0_T1();
1324 gen_op_addl_T1_im(4);
1325 gen_movl_T0_reg(s, rd + 1);
1326 gen_op_stl_T0_T1();
1327 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1328 gen_op_addl_T1_im(-4);
1329 } else {
1330 /* load */
1331 gen_op_ldl_T0_T1();
1332 gen_movl_reg_T0(s, rd);
1333 gen_op_addl_T1_im(4);
1334 gen_op_ldl_T0_T1();
1335 gen_movl_reg_T0(s, rd + 1);
1336 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1337 gen_op_addl_T1_im(-4);
1339 } else {
1340 /* store */
1341 gen_movl_T0_reg(s, rd);
1342 gen_op_stw_T0_T1();
1344 if (!(insn & (1 << 24))) {
1345 gen_add_datah_offset(s, insn);
1346 gen_movl_reg_T1(s, rn);
1347 } else if (insn & (1 << 21)) {
1348 gen_movl_reg_T1(s, rn);
1351 break;
1352 case 0x4:
1353 case 0x5:
1354 case 0x6:
1355 case 0x7:
1356 /* load/store byte/word */
1357 rn = (insn >> 16) & 0xf;
1358 rd = (insn >> 12) & 0xf;
1359 gen_movl_T1_reg(s, rn);
1360 if (insn & (1 << 24))
1361 gen_add_data_offset(s, insn);
1362 if (insn & (1 << 20)) {
1363 /* load */
1364 if (insn & (1 << 22))
1365 gen_op_ldub_T0_T1();
1366 else
1367 gen_op_ldl_T0_T1();
1368 if (rd == 15)
1369 gen_bx(s);
1370 else
1371 gen_movl_reg_T0(s, rd);
1372 } else {
1373 /* store */
1374 gen_movl_T0_reg(s, rd);
1375 if (insn & (1 << 22))
1376 gen_op_stb_T0_T1();
1377 else
1378 gen_op_stl_T0_T1();
1380 if (!(insn & (1 << 24))) {
1381 gen_add_data_offset(s, insn);
1382 gen_movl_reg_T1(s, rn);
1383 } else if (insn & (1 << 21)) {
1384 gen_movl_reg_T1(s, rn);
1386 break;
1387 case 0x08:
1388 case 0x09:
1390 int j, n;
1391 /* load/store multiple words */
1392 /* XXX: store correct base if write back */
1393 if (insn & (1 << 22))
1394 goto illegal_op; /* only usable in supervisor mode */
1395 rn = (insn >> 16) & 0xf;
1396 gen_movl_T1_reg(s, rn);
1398 /* compute total size */
1399 n = 0;
1400 for(i=0;i<16;i++) {
1401 if (insn & (1 << i))
1402 n++;
1404 /* XXX: test invalid n == 0 case ? */
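/* Pre-adjust the base so the transfer loop below can always walk upwards in
   steps of 4: increment-before starts at base+4, increment-after at base,
   decrement-before at base-4*n and decrement-after at base-4*(n-1). */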
1405 if (insn & (1 << 23)) {
1406 if (insn & (1 << 24)) {
1407 /* pre increment */
1408 gen_op_addl_T1_im(4);
1409 } else {
1410 /* post increment */
1412 } else {
1413 if (insn & (1 << 24)) {
1414 /* pre decrement */
1415 gen_op_addl_T1_im(-(n * 4));
1416 } else {
1417 /* post decrement */
1418 if (n != 1)
1419 gen_op_addl_T1_im(-((n - 1) * 4));
1422 j = 0;
1423 for(i=0;i<16;i++) {
1424 if (insn & (1 << i)) {
1425 if (insn & (1 << 20)) {
1426 /* load */
1427 gen_op_ldl_T0_T1();
1428 if (i == 15)
1429 gen_bx(s);
1430 else
1431 gen_movl_reg_T0(s, i);
1432 } else {
1433 /* store */
1434 if (i == 15) {
1435 /* special case: r15 = PC + 12 */
1436 val = (long)s->pc + 8;
1437 gen_op_movl_TN_im[0](val);
1438 } else {
1439 gen_movl_T0_reg(s, i);
1441 gen_op_stl_T0_T1();
1443 j++;
1444 /* no need to add after the last transfer */
1445 if (j != n)
1446 gen_op_addl_T1_im(4);
1449 if (insn & (1 << 21)) {
1450 /* write back */
1451 if (insn & (1 << 23)) {
1452 if (insn & (1 << 24)) {
1453 /* pre increment */
1454 } else {
1455 /* post increment */
1456 gen_op_addl_T1_im(4);
1458 } else {
1459 if (insn & (1 << 24)) {
1460 /* pre decrement */
1461 if (n != 1)
1462 gen_op_addl_T1_im(-((n - 1) * 4));
1463 } else {
1464 /* post decrement */
1465 gen_op_addl_T1_im(-(n * 4));
1468 gen_movl_reg_T1(s, rn);
1471 break;
1472 case 0xa:
1473 case 0xb:
1475 int32_t offset;
1477 /* branch (and link) */
1478 val = (int32_t)s->pc;
1479 if (insn & (1 << 24)) {
1480 gen_op_movl_T0_im(val);
1481 gen_op_movl_reg_TN[0][14]();
1483 offset = (((int32_t)insn << 8) >> 8);
1484 val += (offset << 2) + 4;
1485 gen_jmp(s, val);
1487 break;
1488 case 0xc:
1489 case 0xd:
1490 case 0xe:
1491 /* Coprocessor. */
1492 op1 = (insn >> 8) & 0xf;
1493 switch (op1) {
1494 case 10:
1495 case 11:
1496 if (disas_vfp_insn (env, s, insn))
1497 goto illegal_op;
1498 break;
1499 default:
1500 /* unknown coprocessor. */
1501 goto illegal_op;
1503 break;
1504 case 0xf:
1505 /* swi */
1506 gen_op_movl_T0_im((long)s->pc);
1507 gen_op_movl_reg_TN[0][15]();
1508 gen_op_swi();
1509 s->is_jmp = DISAS_JUMP;
1510 break;
1511 default:
1512 illegal_op:
1513 gen_op_movl_T0_im((long)s->pc - 4);
1514 gen_op_movl_reg_TN[0][15]();
1515 gen_op_undef_insn();
1516 s->is_jmp = DISAS_JUMP;
1517 break;
1522 static void disas_thumb_insn(DisasContext *s)
1524 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1525 int32_t offset;
1526 int i;
1528 insn = lduw(s->pc);
1529 s->pc += 2;
1531 switch (insn >> 12) {
1532 case 0: case 1:
1533 rd = insn & 7;
1534 op = (insn >> 11) & 3;
1535 if (op == 3) {
1536 /* add/subtract */
1537 rn = (insn >> 3) & 7;
1538 gen_movl_T0_reg(s, rn);
1539 if (insn & (1 << 10)) {
1540 /* immediate */
1541 gen_op_movl_T1_im((insn >> 6) & 7);
1542 } else {
1543 /* reg */
1544 rm = (insn >> 6) & 7;
1545 gen_movl_T1_reg(s, rm);
1547 if (insn & (1 << 9))
1548 gen_op_subl_T0_T1_cc();
1549 else
1550 gen_op_addl_T0_T1_cc();
1551 gen_movl_reg_T0(s, rd);
1552 } else {
1553 /* shift immediate */
1554 rm = (insn >> 3) & 7;
1555 shift = (insn >> 6) & 0x1f;
1556 gen_movl_T0_reg(s, rm);
1557 gen_shift_T0_im_thumb[op](shift);
1558 gen_movl_reg_T0(s, rd);
1560 break;
1561 case 2: case 3:
1562 /* arithmetic large immediate */
1563 op = (insn >> 11) & 3;
1564 rd = (insn >> 8) & 0x7;
1565 if (op == 0) {
1566 gen_op_movl_T0_im(insn & 0xff);
1567 } else {
1568 gen_movl_T0_reg(s, rd);
1569 gen_op_movl_T1_im(insn & 0xff);
1571 switch (op) {
1572 case 0: /* mov */
1573 gen_op_logic_T0_cc();
1574 break;
1575 case 1: /* cmp */
1576 gen_op_subl_T0_T1_cc();
1577 break;
1578 case 2: /* add */
1579 gen_op_addl_T0_T1_cc();
1580 break;
1581 case 3: /* sub */
1582 gen_op_subl_T0_T1_cc();
1583 break;
1585 if (op != 1)
1586 gen_movl_reg_T0(s, rd);
1587 break;
1588 case 4:
1589 if (insn & (1 << 11)) {
1590 rd = (insn >> 8) & 7;
1591 /* load pc-relative */
1592 val = (insn & 0xff) * 4;
1593 gen_op_movl_T1_im(val);
1594 gen_movl_T2_reg(s, 15);
1595 gen_op_addl_T1_T2();
1596 gen_op_ldl_T0_T1();
1597 gen_movl_reg_T0(s, rd);
1598 break;
1600 if (insn & (1 << 10)) {
1601 /* data processing extended or blx */
1602 rd = (insn & 7) | ((insn >> 4) & 8);
1603 rm = (insn >> 3) & 0xf;
1604 op = (insn >> 8) & 3;
1605 switch (op) {
1606 case 0: /* add */
1607 gen_movl_T0_reg(s, rd);
1608 gen_movl_T1_reg(s, rm);
1609 gen_op_addl_T0_T1();
1610 gen_movl_reg_T0(s, rd);
1611 break;
1612 case 1: /* cmp */
1613 gen_movl_T0_reg(s, rd);
1614 gen_movl_T1_reg(s, rm);
1615 gen_op_subl_T0_T1_cc();
1616 break;
1617 case 2: /* mov/cpy */
1618 gen_movl_T0_reg(s, rm);
1619 gen_movl_reg_T0(s, rd);
1620 break;
1621 case 3: /* branch [and link] exchange thumb register */
1622 if (insn & (1 << 7)) {
1623 val = (uint32_t)s->pc | 1;
1624 gen_op_movl_T1_im(val);
1625 gen_movl_reg_T1(s, 14);
1627 gen_movl_T0_reg(s, rm);
1628 gen_bx(s);
1629 break;
1631 break;
1634 /* data processing register */
1635 rd = insn & 7;
1636 rm = (insn >> 3) & 7;
1637 op = (insn >> 6) & 0xf;
1638 if (op == 2 || op == 3 || op == 4 || op == 7) {
1639 /* the shift/rotate ops want the operands backwards */
1640 val = rm;
1641 rm = rd;
1642 rd = val;
1643 val = 1;
1644 } else {
1645 val = 0;
1648 if (op == 9) /* neg */
1649 gen_op_movl_T0_im(0);
1650 else if (op != 0xf) /* mvn doesn't read its first operand */
1651 gen_movl_T0_reg(s, rd);
1653 gen_movl_T1_reg(s, rm);
1654 switch (op) {
1655 case 0x0: /* and */
1656 gen_op_andl_T0_T1();
1657 gen_op_logic_T0_cc();
1658 break;
1659 case 0x1: /* eor */
1660 gen_op_xorl_T0_T1();
1661 gen_op_logic_T0_cc();
1662 break;
1663 case 0x2: /* lsl */
1664 gen_op_shll_T1_T0_cc();
1665 break;
1666 case 0x3: /* lsr */
1667 gen_op_shrl_T1_T0_cc();
1668 break;
1669 case 0x4: /* asr */
1670 gen_op_sarl_T1_T0_cc();
1671 break;
1672 case 0x5: /* adc */
1673 gen_op_adcl_T0_T1_cc();
1674 break;
1675 case 0x6: /* sbc */
1676 gen_op_sbcl_T0_T1_cc();
1677 break;
1678 case 0x7: /* ror */
1679 gen_op_rorl_T1_T0_cc();
1680 break;
1681 case 0x8: /* tst */
1682 gen_op_andl_T0_T1();
1683 gen_op_logic_T0_cc();
1684 rd = 16; break;
1685 case 0x9: /* neg */
1686 gen_op_rsbl_T0_T1_cc();
1687 break;
1688 case 0xa: /* cmp */
1689 gen_op_subl_T0_T1_cc();
1690 rd = 16;
1691 break;
1692 case 0xb: /* cmn */
1693 gen_op_addl_T0_T1_cc();
1694 rd = 16;
1695 break;
1696 case 0xc: /* orr */
1697 gen_op_orl_T0_T1();
1698 gen_op_logic_T0_cc();
1699 break;
1700 case 0xd: /* mul */
1701 gen_op_mull_T0_T1();
1702 gen_op_logic_T0_cc();
1703 break;
1704 case 0xe: /* bic */
1705 gen_op_bicl_T0_T1();
1706 gen_op_logic_T0_cc();
1707 break;
1708 case 0xf: /* mvn */
1709 gen_op_notl_T1();
1710 gen_op_logic_T1_cc();
1711 val = 1;
1712 break;
1714 if (rd != 16) {
1715 if (val)
1716 gen_movl_reg_T1(s, rd);
1717 else
1718 gen_movl_reg_T0(s, rd);
1720 break;
1722 case 5:
1723 /* load/store register offset. */
1724 rd = insn & 7;
1725 rn = (insn >> 3) & 7;
1726 rm = (insn >> 6) & 7;
1727 op = (insn >> 9) & 7;
1728 gen_movl_T1_reg(s, rn);
1729 gen_movl_T2_reg(s, rm);
1730 gen_op_addl_T1_T2();
1732 if (op < 3) /* store */
1733 gen_movl_T0_reg(s, rd);
1735 switch (op) {
1736 case 0: /* str */
1737 gen_op_stl_T0_T1();
1738 break;
1739 case 1: /* strh */
1740 gen_op_stw_T0_T1();
1741 break;
1742 case 2: /* strb */
1743 gen_op_stb_T0_T1();
1744 break;
1745 case 3: /* ldrsb */
1746 gen_op_ldsb_T0_T1();
1747 break;
1748 case 4: /* ldr */
1749 gen_op_ldl_T0_T1();
1750 break;
1751 case 5: /* ldrh */
1752 gen_op_lduw_T0_T1();
1753 break;
1754 case 6: /* ldrb */
1755 gen_op_ldub_T0_T1();
1756 break;
1757 case 7: /* ldrsh */
1758 gen_op_ldsw_T0_T1();
1759 break;
1761 if (op >= 3) /* load */
1762 gen_movl_reg_T0(s, rd);
1763 break;
1765 case 6:
1766 /* load/store word immediate offset */
1767 rd = insn & 7;
1768 rn = (insn >> 3) & 7;
1769 gen_movl_T1_reg(s, rn);
1770 val = (insn >> 4) & 0x7c;
1771 gen_op_movl_T2_im(val);
1772 gen_op_addl_T1_T2();
1774 if (insn & (1 << 11)) {
1775 /* load */
1776 gen_op_ldl_T0_T1();
1777 gen_movl_reg_T0(s, rd);
1778 } else {
1779 /* store */
1780 gen_movl_T0_reg(s, rd);
1781 gen_op_stl_T0_T1();
1783 break;
1785 case 7:
1786 /* load/store byte immediate offset */
1787 rd = insn & 7;
1788 rn = (insn >> 3) & 7;
1789 gen_movl_T1_reg(s, rn);
1790 val = (insn >> 6) & 0x1f;
1791 gen_op_movl_T2_im(val);
1792 gen_op_addl_T1_T2();
1794 if (insn & (1 << 11)) {
1795 /* load */
1796 gen_op_ldub_T0_T1();
1797 gen_movl_reg_T0(s, rd);
1798 } else {
1799 /* store */
1800 gen_movl_T0_reg(s, rd);
1801 gen_op_stb_T0_T1();
1803 break;
1805 case 8:
1806 /* load/store halfword immediate offset */
1807 rd = insn & 7;
1808 rn = (insn >> 3) & 7;
1809 gen_movl_T1_reg(s, rn);
1810 val = (insn >> 5) & 0x3e;
1811 gen_op_movl_T2_im(val);
1812 gen_op_addl_T1_T2();
1814 if (insn & (1 << 11)) {
1815 /* load */
1816 gen_op_lduw_T0_T1();
1817 gen_movl_reg_T0(s, rd);
1818 } else {
1819 /* store */
1820 gen_movl_T0_reg(s, rd);
1821 gen_op_stw_T0_T1();
1823 break;
1825 case 9:
1826 /* load/store from stack */
1827 rd = (insn >> 8) & 7;
1828 gen_movl_T1_reg(s, 13);
1829 val = (insn & 0xff) * 4;
1830 gen_op_movl_T2_im(val);
1831 gen_op_addl_T1_T2();
1833 if (insn & (1 << 11)) {
1834 /* load */
1835 gen_op_ldl_T0_T1();
1836 gen_movl_reg_T0(s, rd);
1837 } else {
1838 /* store */
1839 gen_movl_T0_reg(s, rd);
1840 gen_op_stl_T0_T1();
1842 break;
1844 case 10:
1845 /* add to high reg */
1846 rd = (insn >> 8) & 7;
1847 if (insn & (1 << 11))
1848 rm = 13; /* sp */
1849 else
1850 rm = 15; /* pc */
1851 gen_movl_T0_reg(s, rm);
1852 val = (insn & 0xff) * 4;
1853 gen_op_movl_T1_im(val);
1854 gen_op_addl_T0_T1();
1855 gen_movl_reg_T0(s, rd);
1856 break;
1858 case 11:
1859 /* misc */
1860 op = (insn >> 8) & 0xf;
1861 switch (op) {
1862 case 0:
1863 /* adjust stack pointer */
1864 gen_movl_T1_reg(s, 13);
1865 val = (insn & 0x7f) * 4;
1866 if (insn & (1 << 7))
1867 val = -(int32_t)val;
1868 gen_op_movl_T2_im(val);
1869 gen_op_addl_T1_T2();
1870 gen_movl_reg_T1(s, 13);
1871 break;
1873 case 4: case 5: case 0xc: case 0xd:
1874 /* push/pop */
1875 gen_movl_T1_reg(s, 13);
1876 if (insn & (1 << 11))
1877 val = 4;
1878 else
1879 val = -4;
1880 gen_op_movl_T2_im(val);
1881 for (i = 0; i < 8; i++) {
1882 if (insn & (1 << i)) {
1883 if (insn & (1 << 11)) {
1884 /* pop */
1885 gen_op_ldl_T0_T1();
1886 gen_movl_reg_T0(s, i);
1887 } else {
1888 /* push */
1889 gen_movl_T0_reg(s, i);
1890 gen_op_stl_T0_T1();
1892 /* move to the next address */
1893 gen_op_addl_T1_T2();
1896 if (insn & (1 << 8)) {
1897 if (insn & (1 << 11)) {
1898 /* pop pc */
1899 gen_op_ldl_T0_T1();
1900 /* don't set the pc until the rest of the instruction
1901 has completed */
1902 } else {
1903 /* push lr */
1904 gen_movl_T0_reg(s, 14);
1905 gen_op_stl_T0_T1();
1907 gen_op_addl_T1_T2();
1910 /* write back the new stack pointer */
1911 gen_movl_reg_T1(s, 13);
1912 /* set the new PC value */
1913 if ((insn & 0x0900) == 0x0900)
1914 gen_bx(s);
1915 break;
1917 default:
1918 goto undef;
1920 break;
1922 case 12:
1923 /* load/store multiple */
1924 rn = (insn >> 8) & 0x7;
1925 gen_movl_T1_reg(s, rn);
1926 gen_op_movl_T2_im(4);
1927 val = 0;
1928 for (i = 0; i < 8; i++) {
1929 if (insn & (1 << i)) {
1930 /* advance to the next address */
1931 if (val)
1932 gen_op_addl_T1_T2();
1933 else
1934 val = 1;
1935 if (insn & (1 << 11)) {
1936 /* load */
1937 gen_op_ldl_T0_T1();
1938 gen_movl_reg_T0(s, i);
1939 } else {
1940 /* store */
1941 gen_movl_T0_reg(s, i);
1942 gen_op_stl_T0_T1();
1946 break;
1948 case 13:
1949 /* conditional branch or swi */
1950 cond = (insn >> 8) & 0xf;
1951 if (cond == 0xe)
1952 goto undef;
1954 if (cond == 0xf) {
1955 /* swi */
1956 gen_op_movl_T0_im((long)s->pc | 1);
1957 /* Don't set r15. */
1958 gen_op_movl_reg_TN[0][15]();
1959 gen_op_swi();
1960 s->is_jmp = DISAS_JUMP;
1961 break;
1963 /* generate a conditional jump to next instruction */
1964 gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1965 s->is_jmp = DISAS_JUMP_NEXT;
1966 gen_movl_T1_reg(s, 15);
1968 /* jump to the offset */
1969 val = (uint32_t)s->pc;
1970 offset = ((int32_t)insn << 24) >> 24;
1971 val += (offset << 1) + 2;
1972 gen_jmp(s, val);
1973 break;
1975 case 14:
1976 /* unconditional branch */
1977 if (insn & (1 << 11))
1978 goto undef; /* Second half of a blx */
1979 val = (uint32_t)s->pc;
1980 offset = ((int32_t)insn << 21) >> 21;
1981 val += (offset << 1) + 2;
1982 gen_jmp(s, val);
1983 break;
1985 case 15:
1986 /* branch and link [and switch to arm] */
1987 offset = ((int32_t)insn << 21) >> 10;
1988 insn = lduw(s->pc);
1989 offset |= insn & 0x7ff;
1991 val = (uint32_t)s->pc + 2;
1992 gen_op_movl_T1_im(val | 1);
1993 gen_movl_reg_T1(s, 14);
1995 val += offset << 1;
1996 if (insn & (1 << 11)) {
1997 /* bl */
1998 gen_jmp(s, val);
1999 } else {
2000 /* blx */
2001 gen_op_movl_T0_im(val);
2002 gen_bx(s);
2005 return;
2006 undef:
2007 gen_op_movl_T0_im((long)s->pc - 4);
2008 gen_op_movl_reg_TN[0][15]();
2009 gen_op_undef_insn();
2010 s->is_jmp = DISAS_JUMP;
2013 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2014 basic block 'tb'. If search_pc is TRUE, also generate PC
2015 information for each intermediate instruction. */
2016 static inline int gen_intermediate_code_internal(CPUState *env,
2017 TranslationBlock *tb,
2018 int search_pc)
2020 DisasContext dc1, *dc = &dc1;
2021 uint16_t *gen_opc_end;
2022 int j, lj;
2023 target_ulong pc_start;
2025 /* generate intermediate code */
2026 pc_start = tb->pc;
2028 dc->tb = tb;
2030 gen_opc_ptr = gen_opc_buf;
2031 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2032 gen_opparam_ptr = gen_opparam_buf;
2034 dc->is_jmp = DISAS_NEXT;
2035 dc->pc = pc_start;
2036 dc->singlestep_enabled = env->singlestep_enabled;
2037 lj = -1;
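/* Translate one instruction at a time until the instruction itself ends the
   block (is_jmp set), the op buffer is nearly full, single-stepping is
   enabled, or the block is about to cross a page boundary. */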
2038 do {
2039 if (env->nb_breakpoints > 0) {
2040 for(j = 0; j < env->nb_breakpoints; j++) {
2041 if (env->breakpoints[j] == dc->pc) {
2042 gen_op_movl_T0_im((long)dc->pc);
2043 gen_op_movl_reg_TN[0][15]();
2044 gen_op_debug();
2045 dc->is_jmp = DISAS_JUMP;
2046 break;
2050 if (search_pc) {
2051 j = gen_opc_ptr - gen_opc_buf;
2052 if (lj < j) {
2053 lj++;
2054 while (lj < j)
2055 gen_opc_instr_start[lj++] = 0;
2057 gen_opc_pc[lj] = dc->pc;
2058 gen_opc_instr_start[lj] = 1;
2060 if (env->thumb)
2061 disas_thumb_insn(dc);
2062 else
2063 disas_arm_insn(env, dc);
2064 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2065 !env->singlestep_enabled &&
2066 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
2067 if (__builtin_expect(env->singlestep_enabled, 0)) {
2068 /* Make sure the pc is updated, and raise a debug exception. */
2069 if (dc->is_jmp == DISAS_NEXT || dc->is_jmp == DISAS_JUMP_NEXT) {
2070 gen_op_movl_T0_im((long)dc->pc);
2071 gen_op_movl_reg_TN[0][15]();
2073 gen_op_debug();
2074 } else {
2075 switch(dc->is_jmp) {
2076 case DISAS_JUMP_NEXT:
2077 case DISAS_NEXT:
2078 gen_op_jmp((long)dc->tb, (long)dc->pc);
2079 break;
2080 default:
2081 case DISAS_JUMP:
2082 case DISAS_UPDATE:
2083 /* indicate that the hash table must be used to find the next TB */
2084 gen_op_movl_T0_0();
2085 gen_op_exit_tb();
2086 break;
2087 case DISAS_TB_JUMP:
2088 /* nothing more to generate */
2089 break;
2092 *gen_opc_ptr = INDEX_op_end;
2094 #ifdef DEBUG_DISAS
2095 if (loglevel & CPU_LOG_TB_IN_ASM) {
2096 fprintf(logfile, "----------------\n");
2097 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2098 target_disas(logfile, pc_start, dc->pc - pc_start, 0);
2099 fprintf(logfile, "\n");
2100 if (loglevel & (CPU_LOG_TB_OP)) {
2101 fprintf(logfile, "OP:\n");
2102 dump_ops(gen_opc_buf, gen_opparam_buf);
2103 fprintf(logfile, "\n");
2106 #endif
2107 if (!search_pc)
2108 tb->size = dc->pc - pc_start;
2109 return 0;
2112 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2114 return gen_intermediate_code_internal(env, tb, 0);
2117 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2119 return gen_intermediate_code_internal(env, tb, 1);
2122 CPUARMState *cpu_arm_init(void)
2124 CPUARMState *env;
2126 cpu_exec_init();
2128 env = malloc(sizeof(CPUARMState));
2129 if (!env)
2130 return NULL;
2131 memset(env, 0, sizeof(CPUARMState));
2132 cpu_single_env = env;
2133 return env;
2136 void cpu_arm_close(CPUARMState *env)
2138 free(env);
2141 void cpu_dump_state(CPUState *env, FILE *f,
2142 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2143 int flags)
2145 int i;
2146 union {
2147 uint32_t i;
2148 float s;
2149 } s0, s1;
2150 CPU_DoubleU d;
2152 for(i=0;i<16;i++) {
2153 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2154 if ((i % 4) == 3)
2155 cpu_fprintf(f, "\n");
2156 else
2157 cpu_fprintf(f, " ");
2159 cpu_fprintf(f, "PSR=%08x %c%c%c%c\n",
2160 env->cpsr,
2161 env->cpsr & (1 << 31) ? 'N' : '-',
2162 env->cpsr & (1 << 30) ? 'Z' : '-',
2163 env->cpsr & (1 << 29) ? 'C' : '-',
2164 env->cpsr & (1 << 28) ? 'V' : '-');
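/* Dump the VFP registers: each double dN is also shown as its two
   single-precision halves s(2N) and s(2N+1), reinterpreting the raw words
   through the i/s union. */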
2166 for (i = 0; i < 16; i++) {
2167 d.d = env->vfp.regs[i];
2168 s0.i = d.l.lower;
2169 s1.i = d.l.upper;
2170 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2171 i * 2, (int)s0.i, s0.s,
2172 i * 2 + 1, (int)s1.i, s1.s,
2173 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2174 d.d);
2176 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
2179 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
2181 return addr;
2184 #if defined(CONFIG_USER_ONLY)
2186 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
2187 int is_user, int is_softmmu)
2189 env->cp15_6 = address;
2190 if (rw == 2) {
2191 env->exception_index = EXCP_PREFETCH_ABORT;
2192 } else {
2193 env->exception_index = EXCP_DATA_ABORT;
2195 return 1;
2198 #else
2200 #error not implemented
2202 #endif