Fix -nographic on Arm.
[qemu/mini2440.git] / target-arm / translate.c
blob d5cbc5ee1d10fc7b36c8278eff06940da2e1d54f
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
/* Architecture-feature gates: a zero value makes the corresponding ARCH()
   check treat the instruction as undefined. */
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
/* Jump to the decoder's illegal_op label when feature x is not enabled. */
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
/* Per-instruction translation state threaded through the decoders. */
38 typedef struct DisasContext {
/* Guest address of the instruction currently being translated. */
39 target_ulong pc;
/* DISAS_* code telling the translation loop why/whether to stop. */
40 int is_jmp;
41 /* Nonzero if this instruction has been conditionally skipped. */
42 int condjmp;
43 /* The label that will be jumped to when the instruction is skipped. */
44 int condlabel;
/* Translation block being filled in. */
45 struct TranslationBlock *tb;
/* Nonzero when the debugger single-steps; forces indirect jumps. */
46 int singlestep_enabled;
/* Nonzero when decoding Thumb rather than ARM encodings. */
47 int thumb;
48 #if !defined(CONFIG_USER_ONLY)
/* Nonzero when translating for an unprivileged (user-mode) context. */
49 int user;
50 #endif
51 } DisasContext;
/* User-only builds always translate as unprivileged code. */
53 #if defined(CONFIG_USER_ONLY)
54 #define IS_USER(s) 1
55 #else
56 #define IS_USER(s) (s->user)
57 #endif
/* Extra is_jmp code beyond the generic DISAS_* set. */
59 #define DISAS_JUMP_NEXT 4
/* With direct jumps the TB pointer is patched into the code stream,
   so goto_tb ops take no runtime parameter. */
61 #ifdef USE_DIRECT_JUMP
62 #define TBPARAM(x)
63 #else
64 #define TBPARAM(x) (long)(x)
65 #endif
67 /* XXX: move that elsewhere */
/* Output cursors for the micro-op stream and its parameter stream. */
68 static uint16_t *gen_opc_ptr;
69 static uint32_t *gen_opparam_ptr;
70 extern FILE *logfile;
71 extern int loglevel;
/* Micro-op index enumeration generated from opc.h; NB_OPS is the count. */
73 enum {
74 #define DEF(s, n, copy_size) INDEX_op_ ## s,
75 #include "opc.h"
76 #undef DEF
77 NB_OPS,
80 #include "gen-op.h"
/* Conditional-branch emitters indexed by the ARM condition field 0..13
   (EQ..LE); AL and NV are handled separately by the decoders.  Each op
   branches to the supplied label when its condition holds. */
82 static GenOpFunc1 *gen_test_cc[14] = {
83 gen_op_test_eq,
84 gen_op_test_ne,
85 gen_op_test_cs,
86 gen_op_test_cc,
87 gen_op_test_mi,
88 gen_op_test_pl,
89 gen_op_test_vs,
90 gen_op_test_vc,
91 gen_op_test_hi,
92 gen_op_test_ls,
93 gen_op_test_ge,
94 gen_op_test_lt,
95 gen_op_test_gt,
96 gen_op_test_le,
/* Per data-processing opcode: nonzero when the S-bit form updates flags
   via the logical (NZ + shifter-carry) path rather than full arithmetic
   flag computation. */
99 const uint8_t table_logic_cc[16] = {
100 1, /* and */
101 1, /* xor */
102 0, /* sub */
103 0, /* rsb */
104 0, /* add */
105 0, /* adc */
106 0, /* sbc */
107 0, /* rsc */
108 1, /* andl */
109 1, /* xorl */
110 0, /* cmp */
111 0, /* cmn */
112 1, /* orr */
113 1, /* mov */
114 1, /* bic */
115 1, /* mvn */
/* Shift-op dispatch tables, indexed by the instruction's shift-type
   field (0 = LSL, 1 = LSR, 2 = ASR, 3 = ROR).
   _im  : immediate shift amount (1..31)
   _0   : special encodings of amount 0 (LSR/ASR #32, RRX); LSL #0 is a
          no-op, hence the NULL slot
   _T0  : shift amount taken from T0
   _cc  : variants that also update the carry flag */
118 static GenOpFunc1 *gen_shift_T1_im[4] = {
119 gen_op_shll_T1_im,
120 gen_op_shrl_T1_im,
121 gen_op_sarl_T1_im,
122 gen_op_rorl_T1_im,
125 static GenOpFunc *gen_shift_T1_0[4] = {
126 NULL,
127 gen_op_shrl_T1_0,
128 gen_op_sarl_T1_0,
129 gen_op_rrxl_T1,
132 static GenOpFunc1 *gen_shift_T2_im[4] = {
133 gen_op_shll_T2_im,
134 gen_op_shrl_T2_im,
135 gen_op_sarl_T2_im,
136 gen_op_rorl_T2_im,
139 static GenOpFunc *gen_shift_T2_0[4] = {
140 NULL,
141 gen_op_shrl_T2_0,
142 gen_op_sarl_T2_0,
143 gen_op_rrxl_T2,
146 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
147 gen_op_shll_T1_im_cc,
148 gen_op_shrl_T1_im_cc,
149 gen_op_sarl_T1_im_cc,
150 gen_op_rorl_T1_im_cc,
153 static GenOpFunc *gen_shift_T1_0_cc[4] = {
154 NULL,
155 gen_op_shrl_T1_0_cc,
156 gen_op_sarl_T1_0_cc,
157 gen_op_rrxl_T1_cc,
160 static GenOpFunc *gen_shift_T1_T0[4] = {
161 gen_op_shll_T1_T0,
162 gen_op_shrl_T1_T0,
163 gen_op_sarl_T1_T0,
164 gen_op_rorl_T1_T0,
167 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
168 gen_op_shll_T1_T0_cc,
169 gen_op_shrl_T1_T0_cc,
170 gen_op_sarl_T1_T0_cc,
171 gen_op_rorl_T1_T0_cc,
/* Register-file access dispatch: [t][reg] emits "Tt = r<reg>".
   First index is the temporary (T0/T1/T2), second the ARM register. */
174 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
176 gen_op_movl_T0_r0,
177 gen_op_movl_T0_r1,
178 gen_op_movl_T0_r2,
179 gen_op_movl_T0_r3,
180 gen_op_movl_T0_r4,
181 gen_op_movl_T0_r5,
182 gen_op_movl_T0_r6,
183 gen_op_movl_T0_r7,
184 gen_op_movl_T0_r8,
185 gen_op_movl_T0_r9,
186 gen_op_movl_T0_r10,
187 gen_op_movl_T0_r11,
188 gen_op_movl_T0_r12,
189 gen_op_movl_T0_r13,
190 gen_op_movl_T0_r14,
191 gen_op_movl_T0_r15,
194 gen_op_movl_T1_r0,
195 gen_op_movl_T1_r1,
196 gen_op_movl_T1_r2,
197 gen_op_movl_T1_r3,
198 gen_op_movl_T1_r4,
199 gen_op_movl_T1_r5,
200 gen_op_movl_T1_r6,
201 gen_op_movl_T1_r7,
202 gen_op_movl_T1_r8,
203 gen_op_movl_T1_r9,
204 gen_op_movl_T1_r10,
205 gen_op_movl_T1_r11,
206 gen_op_movl_T1_r12,
207 gen_op_movl_T1_r13,
208 gen_op_movl_T1_r14,
209 gen_op_movl_T1_r15,
212 gen_op_movl_T2_r0,
213 gen_op_movl_T2_r1,
214 gen_op_movl_T2_r2,
215 gen_op_movl_T2_r3,
216 gen_op_movl_T2_r4,
217 gen_op_movl_T2_r5,
218 gen_op_movl_T2_r6,
219 gen_op_movl_T2_r7,
220 gen_op_movl_T2_r8,
221 gen_op_movl_T2_r9,
222 gen_op_movl_T2_r10,
223 gen_op_movl_T2_r11,
224 gen_op_movl_T2_r12,
225 gen_op_movl_T2_r13,
226 gen_op_movl_T2_r14,
227 gen_op_movl_T2_r15,
/* Reverse direction: [t][reg] emits "r<reg> = Tt" (T0/T1 only). */
231 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
233 gen_op_movl_r0_T0,
234 gen_op_movl_r1_T0,
235 gen_op_movl_r2_T0,
236 gen_op_movl_r3_T0,
237 gen_op_movl_r4_T0,
238 gen_op_movl_r5_T0,
239 gen_op_movl_r6_T0,
240 gen_op_movl_r7_T0,
241 gen_op_movl_r8_T0,
242 gen_op_movl_r9_T0,
243 gen_op_movl_r10_T0,
244 gen_op_movl_r11_T0,
245 gen_op_movl_r12_T0,
246 gen_op_movl_r13_T0,
247 gen_op_movl_r14_T0,
248 gen_op_movl_r15_T0,
251 gen_op_movl_r0_T1,
252 gen_op_movl_r1_T1,
253 gen_op_movl_r2_T1,
254 gen_op_movl_r3_T1,
255 gen_op_movl_r4_T1,
256 gen_op_movl_r5_T1,
257 gen_op_movl_r6_T1,
258 gen_op_movl_r7_T1,
259 gen_op_movl_r8_T1,
260 gen_op_movl_r9_T1,
261 gen_op_movl_r10_T1,
262 gen_op_movl_r11_T1,
263 gen_op_movl_r12_T1,
264 gen_op_movl_r13_T1,
265 gen_op_movl_r14_T1,
266 gen_op_movl_r15_T1,
/* Immediate loads into T0/T1/T2, indexed by temporary. */
270 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
271 gen_op_movl_T0_im,
272 gen_op_movl_T1_im,
273 gen_op_movl_T2_im,
/* Thumb immediate shifts of T0, indexed by shift type (no ROR form). */
276 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
277 gen_op_shll_T0_im_thumb,
278 gen_op_shrl_T0_im_thumb,
279 gen_op_sarl_T0_im_thumb,
282 static inline void gen_bx(DisasContext *s)
284 s->is_jmp = DISAS_UPDATE;
285 gen_op_bx_T0();
/* Emit a load/store micro-op, selecting the address-space variant:
   raw for user-only builds, otherwise user vs. kernel based on the
   privilege level of the code being translated. */
289 #if defined(CONFIG_USER_ONLY)
290 #define gen_ldst(name, s) gen_op_##name##_raw()
291 #else
292 #define gen_ldst(name, s) do { \
293 if (IS_USER(s)) \
294 gen_op_##name##_user(); \
295 else \
296 gen_op_##name##_kernel(); \
297 } while (0)
298 #endif
300 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
302 int val;
304 if (reg == 15) {
305 /* normaly, since we updated PC, we need only to add one insn */
306 if (s->thumb)
307 val = (long)s->pc + 2;
308 else
309 val = (long)s->pc + 4;
310 gen_op_movl_TN_im[t](val);
311 } else {
312 gen_op_movl_TN_reg[t][reg]();
316 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
318 gen_movl_TN_reg(s, reg, 0);
321 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
323 gen_movl_TN_reg(s, reg, 1);
326 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
328 gen_movl_TN_reg(s, reg, 2);
331 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
333 gen_op_movl_reg_TN[t][reg]();
334 if (reg == 15) {
335 s->is_jmp = DISAS_JUMP;
339 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
341 gen_movl_reg_TN(s, reg, 0);
344 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
346 gen_movl_reg_TN(s, reg, 1);
349 /* Force a TB lookup after an instruction that changes the CPU state. */
350 static inline void gen_lookup_tb(DisasContext *s)
352 gen_op_movl_T0_im(s->pc);
353 gen_movl_reg_T0(s, 15);
354 s->is_jmp = DISAS_UPDATE;
357 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
359 int val, rm, shift, shiftop;
361 if (!(insn & (1 << 25))) {
362 /* immediate */
363 val = insn & 0xfff;
364 if (!(insn & (1 << 23)))
365 val = -val;
366 if (val != 0)
367 gen_op_addl_T1_im(val);
368 } else {
369 /* shift/register */
370 rm = (insn) & 0xf;
371 shift = (insn >> 7) & 0x1f;
372 gen_movl_T2_reg(s, rm);
373 shiftop = (insn >> 5) & 3;
374 if (shift != 0) {
375 gen_shift_T2_im[shiftop](shift);
376 } else if (shiftop != 0) {
377 gen_shift_T2_0[shiftop]();
379 if (!(insn & (1 << 23)))
380 gen_op_subl_T1_T2();
381 else
382 gen_op_addl_T1_T2();
386 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
388 int val, rm;
390 if (insn & (1 << 22)) {
391 /* immediate */
392 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
393 if (!(insn & (1 << 23)))
394 val = -val;
395 if (val != 0)
396 gen_op_addl_T1_im(val);
397 } else {
398 /* register */
399 rm = (insn) & 0xf;
400 gen_movl_T2_reg(s, rm);
401 if (!(insn & (1 << 23)))
402 gen_op_subl_T1_T2();
403 else
404 gen_op_addl_T1_T2();
/* Define gen_vfp_<name>(dp): dispatch to the double-precision (d) or
   single-precision (s) micro-op variant based on dp. */
408 #define VFP_OP(name) \
409 static inline void gen_vfp_##name(int dp) \
411 if (dp) \
412 gen_op_vfp_##name##d(); \
413 else \
414 gen_op_vfp_##name##s(); \
417 VFP_OP(add)
418 VFP_OP(sub)
419 VFP_OP(mul)
420 VFP_OP(div)
421 VFP_OP(neg)
422 VFP_OP(abs)
423 VFP_OP(sqrt)
424 VFP_OP(cmp)
425 VFP_OP(cmpe)
426 VFP_OP(F1_ld0)
427 VFP_OP(uito)
428 VFP_OP(sito)
429 VFP_OP(toui)
430 VFP_OP(touiz)
431 VFP_OP(tosi)
432 VFP_OP(tosiz)
434 #undef VFP_OP
436 static inline void gen_vfp_ld(DisasContext *s, int dp)
438 if (dp)
439 gen_ldst(vfp_ldd, s);
440 else
441 gen_ldst(vfp_lds, s);
444 static inline void gen_vfp_st(DisasContext *s, int dp)
446 if (dp)
447 gen_ldst(vfp_std, s);
448 else
449 gen_ldst(vfp_sts, s);
452 static inline long
453 vfp_reg_offset (int dp, int reg)
455 if (dp)
456 return offsetof(CPUARMState, vfp.regs[reg]);
457 else if (reg & 1) {
458 return offsetof(CPUARMState, vfp.regs[reg >> 1])
459 + offsetof(CPU_DoubleU, l.upper);
460 } else {
461 return offsetof(CPUARMState, vfp.regs[reg >> 1])
462 + offsetof(CPU_DoubleU, l.lower);
/* Copy VFP register REG into scratch operand F0 (dp selects precision). */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp)
        gen_op_vfp_getreg_F0d(off);
    else
        gen_op_vfp_getreg_F0s(off);
}

/* Copy VFP register REG into scratch operand F1 (dp selects precision). */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp)
        gen_op_vfp_getreg_F1d(off);
    else
        gen_op_vfp_getreg_F1s(off);
}

/* Copy scratch operand F0 back into VFP register REG. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp)
        gen_op_vfp_setreg_F0d(off);
    else
        gen_op_vfp_setreg_F0s(off);
}
489 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
490 instruction is not defined. */
491 static int disas_cp15_insn(DisasContext *s, uint32_t insn)
493 uint32_t rd;
495 /* ??? Some cp15 registers are accessible from userspace. */
496 if (IS_USER(s)) {
497 return 1;
499 if ((insn & 0x0fff0fff) == 0x0e070f90
500 || (insn & 0x0fff0fff) == 0x0e070f58) {
501 /* Wait for interrupt. */
502 gen_op_movl_T0_im((long)s->pc);
503 gen_op_movl_reg_TN[0][15]();
504 gen_op_wfi();
505 s->is_jmp = DISAS_JUMP;
506 return 0;
508 rd = (insn >> 12) & 0xf;
509 if (insn & (1 << 20)) {
510 gen_op_movl_T0_cp15(insn);
511 /* If the destination register is r15 then sets condition codes. */
512 if (rd != 15)
513 gen_movl_reg_T0(s, rd);
514 } else {
515 gen_movl_T0_reg(s, rd);
516 gen_op_movl_cp15_T0(insn);
518 gen_lookup_tb(s);
519 return 0;
522 /* Disassemble a VFP instruction. Returns nonzero if an error occured
523 (ie. an undefined instruction). */
524 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
526 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
527 int dp, veclen;
/* Nonzero dp selects the double-precision (cp11) encoding space. */
529 dp = ((insn & 0xf00) == 0xb00);
530 switch ((insn >> 24) & 0xf) {
531 case 0xe:
532 if (insn & (1 << 4)) {
533 /* single register transfer */
534 if ((insn & 0x6f) != 0x00)
535 return 1;
536 rd = (insn >> 12) & 0xf;
537 if (dp) {
538 if (insn & 0x80)
539 return 1;
540 rn = (insn >> 16) & 0xf;
541 /* Get the existing value even for arm->vfp moves because
542 we only set half the register. */
543 gen_mov_F0_vreg(1, rn);
544 gen_op_vfp_mrrd();
545 if (insn & (1 << 20)) {
546 /* vfp->arm */
547 if (insn & (1 << 21))
548 gen_movl_reg_T1(s, rd);
549 else
550 gen_movl_reg_T0(s, rd);
551 } else {
552 /* arm->vfp */
553 if (insn & (1 << 21))
554 gen_movl_T1_reg(s, rd);
555 else
556 gen_movl_T0_reg(s, rd);
557 gen_op_vfp_mdrr();
558 gen_mov_vreg_F0(dp, rn);
560 } else {
561 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
562 if (insn & (1 << 20)) {
563 /* vfp->arm */
564 if (insn & (1 << 21)) {
565 /* system register */
566 switch (rn) {
567 case 0: /* fpsid */
/* NOTE(review): n is assigned but never copied into T0 below, so an
   FPSID read seems to return whatever is already in T0 — looks like a
   missing gen_op_movl_T0_im(n); confirm against a later QEMU tree. */
568 n = 0x0091A0000;
569 break;
570 case 2: /* fpscr */
571 if (rd == 15)
572 gen_op_vfp_movl_T0_fpscr_flags();
573 else
574 gen_op_vfp_movl_T0_fpscr();
575 break;
576 default:
577 return 1;
579 } else {
580 gen_mov_F0_vreg(0, rn);
581 gen_op_vfp_mrs();
583 if (rd == 15) {
584 /* Set the 4 flag bits in the CPSR. */
585 gen_op_movl_cpsr_T0(0xf0000000);
586 } else
587 gen_movl_reg_T0(s, rd);
588 } else {
589 /* arm->vfp */
590 gen_movl_T0_reg(s, rd);
591 if (insn & (1 << 21)) {
592 /* system register */
593 switch (rn) {
594 case 0: /* fpsid */
595 /* Writes are ignored. */
596 break;
597 case 2: /* fpscr */
598 gen_op_vfp_movl_fpscr_T0();
599 /* This could change vector settings, so jump to
600 the next instuction. */
601 gen_lookup_tb(s);
602 break;
603 default:
604 return 1;
606 } else {
607 gen_op_vfp_msr();
608 gen_mov_vreg_F0(0, rn);
612 } else {
613 /* data processing */
614 /* The opcode is in bits 23, 21, 20 and 6. */
615 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
616 if (dp) {
617 if (op == 15) {
618 /* rn is opcode */
619 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
620 } else {
621 /* rn is register number */
622 if (insn & (1 << 7))
623 return 1;
624 rn = (insn >> 16) & 0xf;
627 if (op == 15 && (rn == 15 || rn > 17)) {
628 /* Integer or single precision destination. */
629 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
630 } else {
631 if (insn & (1 << 22))
632 return 1;
633 rd = (insn >> 12) & 0xf;
636 if (op == 15 && (rn == 16 || rn == 17)) {
637 /* Integer source. */
638 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
639 } else {
640 if (insn & (1 << 5))
641 return 1;
642 rm = insn & 0xf;
644 } else {
645 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
646 if (op == 15 && rn == 15) {
647 /* Double precision destination. */
648 if (insn & (1 << 22))
649 return 1;
650 rd = (insn >> 12) & 0xf;
651 } else
652 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
653 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
/* Short-vector mode: the FPSCR LEN/STRIDE fields make this one
   instruction iterate over a register bank. */
656 veclen = env->vfp.vec_len;
657 if (op == 15 && rn > 3)
658 veclen = 0;
660 /* Shut up compiler warnings. */
661 delta_m = 0;
662 delta_d = 0;
663 bank_mask = 0;
665 if (veclen > 0) {
666 if (dp)
667 bank_mask = 0xc;
668 else
669 bank_mask = 0x18;
671 /* Figure out what type of vector operation this is. */
672 if ((rd & bank_mask) == 0) {
673 /* scalar */
674 veclen = 0;
675 } else {
676 if (dp)
677 delta_d = (env->vfp.vec_stride >> 1) + 1;
678 else
679 delta_d = env->vfp.vec_stride + 1;
681 if ((rm & bank_mask) == 0) {
682 /* mixed scalar/vector */
683 delta_m = 0;
684 } else {
685 /* vector */
686 delta_m = delta_d;
691 /* Load the initial operands. */
692 if (op == 15) {
693 switch (rn) {
694 case 16:
695 case 17:
696 /* Integer source */
697 gen_mov_F0_vreg(0, rm);
698 break;
699 case 8:
700 case 9:
701 /* Compare */
702 gen_mov_F0_vreg(dp, rd);
703 gen_mov_F1_vreg(dp, rm);
704 break;
705 case 10:
706 case 11:
707 /* Compare with zero */
708 gen_mov_F0_vreg(dp, rd);
709 gen_vfp_F1_ld0(dp);
710 break;
711 default:
712 /* One source operand. */
713 gen_mov_F0_vreg(dp, rm);
715 } else {
716 /* Two source operands. */
717 gen_mov_F0_vreg(dp, rn);
718 gen_mov_F1_vreg(dp, rm);
/* Main per-element loop: executes once for scalars, veclen+1 times for
   short vectors, advancing register indices within the bank each pass. */
721 for (;;) {
722 /* Perform the calculation. */
723 switch (op) {
724 case 0: /* mac: fd + (fn * fm) */
725 gen_vfp_mul(dp);
726 gen_mov_F1_vreg(dp, rd);
727 gen_vfp_add(dp);
728 break;
729 case 1: /* nmac: fd - (fn * fm) */
730 gen_vfp_mul(dp);
731 gen_vfp_neg(dp);
732 gen_mov_F1_vreg(dp, rd);
733 gen_vfp_add(dp);
734 break;
735 case 2: /* msc: -fd + (fn * fm) */
736 gen_vfp_mul(dp);
737 gen_mov_F1_vreg(dp, rd);
738 gen_vfp_sub(dp);
739 break;
740 case 3: /* nmsc: -fd - (fn * fm) */
741 gen_vfp_mul(dp);
742 gen_mov_F1_vreg(dp, rd);
743 gen_vfp_add(dp);
744 gen_vfp_neg(dp);
745 break;
746 case 4: /* mul: fn * fm */
747 gen_vfp_mul(dp);
748 break;
749 case 5: /* nmul: -(fn * fm) */
750 gen_vfp_mul(dp);
751 gen_vfp_neg(dp);
752 break;
753 case 6: /* add: fn + fm */
754 gen_vfp_add(dp);
755 break;
756 case 7: /* sub: fn - fm */
757 gen_vfp_sub(dp);
758 break;
759 case 8: /* div: fn / fm */
760 gen_vfp_div(dp);
761 break;
762 case 15: /* extension space */
763 switch (rn) {
764 case 0: /* cpy */
765 /* no-op */
766 break;
767 case 1: /* abs */
768 gen_vfp_abs(dp);
769 break;
770 case 2: /* neg */
771 gen_vfp_neg(dp);
772 break;
773 case 3: /* sqrt */
774 gen_vfp_sqrt(dp);
775 break;
776 case 8: /* cmp */
777 gen_vfp_cmp(dp);
778 break;
779 case 9: /* cmpe */
780 gen_vfp_cmpe(dp);
781 break;
782 case 10: /* cmpz */
783 gen_vfp_cmp(dp);
784 break;
785 case 11: /* cmpez */
786 gen_vfp_F1_ld0(dp);
787 gen_vfp_cmpe(dp);
788 break;
789 case 15: /* single<->double conversion */
790 if (dp)
791 gen_op_vfp_fcvtsd();
792 else
793 gen_op_vfp_fcvtds();
794 break;
795 case 16: /* fuito */
796 gen_vfp_uito(dp);
797 break;
798 case 17: /* fsito */
799 gen_vfp_sito(dp);
800 break;
801 case 24: /* ftoui */
802 gen_vfp_toui(dp);
803 break;
804 case 25: /* ftouiz */
805 gen_vfp_touiz(dp);
806 break;
807 case 26: /* ftosi */
808 gen_vfp_tosi(dp);
809 break;
810 case 27: /* ftosiz */
811 gen_vfp_tosiz(dp);
812 break;
813 default: /* undefined */
/* NOTE(review): leftover debug printf on the undefined-insn path. */
814 printf ("rn:%d\n", rn);
815 return 1;
817 break;
818 default: /* undefined */
/* NOTE(review): leftover debug printf on the undefined-insn path. */
819 printf ("op:%d\n", op);
820 return 1;
823 /* Write back the result. */
824 if (op == 15 && (rn >= 8 && rn <= 11))
825 ; /* Comparison, do nothing. */
826 else if (op == 15 && rn > 17)
827 /* Integer result. */
828 gen_mov_vreg_F0(0, rd);
829 else if (op == 15 && rn == 15)
830 /* conversion */
831 gen_mov_vreg_F0(!dp, rd);
832 else
833 gen_mov_vreg_F0(dp, rd);
835 /* break out of the loop if we have finished */
836 if (veclen == 0)
837 break;
839 if (op == 15 && delta_m == 0) {
840 /* single source one-many */
841 while (veclen--) {
842 rd = ((rd + delta_d) & (bank_mask - 1))
843 | (rd & bank_mask);
844 gen_mov_vreg_F0(dp, rd);
846 break;
848 /* Setup the next operands. */
849 veclen--;
850 rd = ((rd + delta_d) & (bank_mask - 1))
851 | (rd & bank_mask);
853 if (op == 15) {
854 /* One source operand. */
855 rm = ((rm + delta_m) & (bank_mask - 1))
856 | (rm & bank_mask);
857 gen_mov_F0_vreg(dp, rm);
858 } else {
859 /* Two source operands. */
860 rn = ((rn + delta_d) & (bank_mask - 1))
861 | (rn & bank_mask);
862 gen_mov_F0_vreg(dp, rn);
863 if (delta_m) {
864 rm = ((rm + delta_m) & (bank_mask - 1))
865 | (rm & bank_mask);
866 gen_mov_F1_vreg(dp, rm);
871 break;
872 case 0xc:
873 case 0xd:
874 if (dp && (insn & (1 << 22))) {
875 /* two-register transfer */
876 rn = (insn >> 16) & 0xf;
877 rd = (insn >> 12) & 0xf;
878 if (dp) {
879 if (insn & (1 << 5))
880 return 1;
881 rm = insn & 0xf;
882 } else
883 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
885 if (insn & (1 << 20)) {
886 /* vfp->arm */
887 if (dp) {
888 gen_mov_F0_vreg(1, rm);
889 gen_op_vfp_mrrd();
890 gen_movl_reg_T0(s, rd);
891 gen_movl_reg_T1(s, rn);
892 } else {
893 gen_mov_F0_vreg(0, rm);
894 gen_op_vfp_mrs();
895 gen_movl_reg_T0(s, rn);
896 gen_mov_F0_vreg(0, rm + 1);
897 gen_op_vfp_mrs();
898 gen_movl_reg_T0(s, rd);
900 } else {
901 /* arm->vfp */
902 if (dp) {
903 gen_movl_T0_reg(s, rd);
904 gen_movl_T1_reg(s, rn);
905 gen_op_vfp_mdrr();
906 gen_mov_vreg_F0(1, rm);
907 } else {
908 gen_movl_T0_reg(s, rn);
909 gen_op_vfp_msr();
910 gen_mov_vreg_F0(0, rm);
911 gen_movl_T0_reg(s, rd);
912 gen_op_vfp_msr();
913 gen_mov_vreg_F0(0, rm + 1);
916 } else {
917 /* Load/store */
918 rn = (insn >> 16) & 0xf;
919 if (dp)
920 rd = (insn >> 12) & 0xf;
921 else
922 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
923 gen_movl_T1_reg(s, rn);
924 if ((insn & 0x01200000) == 0x01000000) {
925 /* Single load/store */
926 offset = (insn & 0xff) << 2;
927 if ((insn & (1 << 23)) == 0)
928 offset = -offset;
929 gen_op_addl_T1_im(offset);
930 if (insn & (1 << 20)) {
931 gen_vfp_ld(s, dp);
932 gen_mov_vreg_F0(dp, rd);
933 } else {
934 gen_mov_F0_vreg(dp, rd);
935 gen_vfp_st(s, dp);
937 } else {
938 /* load/store multiple */
939 if (dp)
940 n = (insn >> 1) & 0x7f;
941 else
942 n = insn & 0xff;
944 if (insn & (1 << 24)) /* pre-decrement */
945 gen_op_addl_T1_im(-((insn & 0xff) << 2));
947 if (dp)
948 offset = 8;
949 else
950 offset = 4;
951 for (i = 0; i < n; i++) {
952 if (insn & (1 << 20)) {
953 /* load */
954 gen_vfp_ld(s, dp);
955 gen_mov_vreg_F0(dp, rd + i);
956 } else {
957 /* store */
958 gen_mov_F0_vreg(dp, rd + i);
959 gen_vfp_st(s, dp);
961 gen_op_addl_T1_im(offset);
963 if (insn & (1 << 21)) {
964 /* writeback */
965 if (insn & (1 << 24))
966 offset = -offset * n;
967 else if (dp && (insn & 1))
968 offset = 4;
969 else
970 offset = 0;
972 if (offset != 0)
973 gen_op_addl_T1_im(offset);
974 gen_movl_reg_T1(s, rn);
978 break;
979 default:
980 /* Should never happen. */
981 return 1;
983 return 0;
986 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
988 TranslationBlock *tb;
990 tb = s->tb;
991 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
992 if (n == 0)
993 gen_op_goto_tb0(TBPARAM(tb));
994 else
995 gen_op_goto_tb1(TBPARAM(tb));
996 gen_op_movl_T0_im(dest);
997 gen_op_movl_r15_T0();
998 gen_op_movl_T0_im((long)tb + n);
999 gen_op_exit_tb();
1000 } else {
1001 gen_op_movl_T0_im(dest);
1002 gen_op_movl_r15_T0();
1003 gen_op_movl_T0_0();
1004 gen_op_exit_tb();
1008 static inline void gen_jmp (DisasContext *s, uint32_t dest)
1010 if (__builtin_expect(s->singlestep_enabled, 0)) {
1011 /* An indirect jump so that we still trigger the debug exception. */
1012 if (s->thumb)
1013 dest |= 1;
1014 gen_op_movl_T0_im(dest);
1015 gen_bx(s);
1016 } else {
1017 gen_goto_tb(s, 0, dest);
1018 s->is_jmp = DISAS_TB_JUMP;
/* 16x16 signed multiply for the SMUL<x><y> family: x/y select the top
   (arithmetic shift right 16) or bottom (sign-extend) halfword of
   T0/T1 respectively, then T0 = T0 * T1. */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
1035 /* Return the mask of PSR bits set by a MSR instruction. */
1036 static uint32_t msr_mask(DisasContext *s, int flags) {
1037 uint32_t mask;
1039 mask = 0;
1040 if (flags & (1 << 0))
1041 mask |= 0xff;
1042 if (flags & (1 << 1))
1043 mask |= 0xff00;
1044 if (flags & (1 << 2))
1045 mask |= 0xff0000;
1046 if (flags & (1 << 3))
1047 mask |= 0xff000000;
1048 /* Mask out undefined bits and state bits. */
1049 mask &= 0xf89f03df;
1050 /* Mask out privileged bits. */
1051 if (IS_USER(s))
1052 mask &= 0xf80f0200;
1053 return mask;
1056 /* Returns nonzero if access to the PSR is not permitted. */
1057 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
1059 if (spsr) {
1060 /* ??? This is also undefined in system mode. */
1061 if (IS_USER(s))
1062 return 1;
1063 gen_op_movl_spsr_T0(mask);
1064 } else {
1065 gen_op_movl_cpsr_T0(mask);
1067 gen_lookup_tb(s);
1068 return 0;
1071 static void gen_exception_return(DisasContext *s)
1073 gen_op_movl_reg_TN[0][15]();
1074 gen_op_movl_T0_spsr();
1075 gen_op_movl_cpsr_T0(0xffffffff);
1076 s->is_jmp = DISAS_UPDATE;
1079 static void disas_arm_insn(CPUState * env, DisasContext *s)
1081 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1083 insn = ldl_code(s->pc);
1084 s->pc += 4;
1086 cond = insn >> 28;
1087 if (cond == 0xf){
1088 /* Unconditional instructions. */
1089 if ((insn & 0x0d70f000) == 0x0550f000)
1090 return; /* PLD */
1091 else if ((insn & 0x0e000000) == 0x0a000000) {
1092 /* branch link and change to thumb (blx <offset>) */
1093 int32_t offset;
1095 val = (uint32_t)s->pc;
1096 gen_op_movl_T0_im(val);
1097 gen_movl_reg_T0(s, 14);
1098 /* Sign-extend the 24-bit offset */
1099 offset = (((int32_t)insn) << 8) >> 8;
1100 /* offset * 4 + bit24 * 2 + (thumb bit) */
1101 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1102 /* pipeline offset */
1103 val += 4;
1104 gen_op_movl_T0_im(val);
1105 gen_bx(s);
1106 return;
1107 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1108 /* Coprocessor double register transfer. */
1109 } else if ((insn & 0x0f000010) == 0x0e000010) {
1110 /* Additional coprocessor register transfer. */
1111 } else if ((insn & 0x0ff10010) == 0x01000000) {
1112 /* cps (privileged) */
1113 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1114 /* setend */
1115 if (insn & (1 << 9)) {
1116 /* BE8 mode not implemented. */
1117 goto illegal_op;
1119 return;
1121 goto illegal_op;
1123 if (cond != 0xe) {
1124 /* if not always execute, we generate a conditional jump to
1125 next instruction */
1126 s->condlabel = gen_new_label();
1127 gen_test_cc[cond ^ 1](s->condlabel);
1128 s->condjmp = 1;
1129 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1130 //s->is_jmp = DISAS_JUMP_NEXT;
1132 if ((insn & 0x0f900000) == 0x03000000) {
1133 if ((insn & 0x0fb0f000) != 0x0320f000)
1134 goto illegal_op;
1135 /* CPSR = immediate */
1136 val = insn & 0xff;
1137 shift = ((insn >> 8) & 0xf) * 2;
1138 if (shift)
1139 val = (val >> shift) | (val << (32 - shift));
1140 gen_op_movl_T0_im(val);
1141 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
1142 (insn & (1 << 22)) != 0))
1143 goto illegal_op;
1144 } else if ((insn & 0x0f900000) == 0x01000000
1145 && (insn & 0x00000090) != 0x00000090) {
1146 /* miscellaneous instructions */
1147 op1 = (insn >> 21) & 3;
1148 sh = (insn >> 4) & 0xf;
1149 rm = insn & 0xf;
1150 switch (sh) {
1151 case 0x0: /* move program status register */
1152 if (op1 & 1) {
1153 /* PSR = reg */
1154 gen_movl_T0_reg(s, rm);
1155 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
1156 (op1 & 2) != 0))
1157 goto illegal_op;
1158 } else {
1159 /* reg = CPSR */
1160 rd = (insn >> 12) & 0xf;
1161 if (op1 & 2) {
1162 if (IS_USER(s))
1163 goto illegal_op;
1164 gen_op_movl_T0_spsr();
1165 } else {
1166 gen_op_movl_T0_cpsr();
1168 gen_movl_reg_T0(s, rd);
1170 break;
1171 case 0x1:
1172 if (op1 == 1) {
1173 /* branch/exchange thumb (bx). */
1174 gen_movl_T0_reg(s, rm);
1175 gen_bx(s);
1176 } else if (op1 == 3) {
1177 /* clz */
1178 rd = (insn >> 12) & 0xf;
1179 gen_movl_T0_reg(s, rm);
1180 gen_op_clz_T0();
1181 gen_movl_reg_T0(s, rd);
1182 } else {
1183 goto illegal_op;
1185 break;
1186 case 0x2:
1187 if (op1 == 1) {
1188 ARCH(5J); /* bxj */
1189 /* Trivial implementation equivalent to bx. */
1190 gen_movl_T0_reg(s, rm);
1191 gen_bx(s);
1192 } else {
1193 goto illegal_op;
1195 break;
1196 case 0x3:
1197 if (op1 != 1)
1198 goto illegal_op;
1200 /* branch link/exchange thumb (blx) */
1201 val = (uint32_t)s->pc;
1202 gen_op_movl_T0_im(val);
1203 gen_movl_reg_T0(s, 14);
1204 gen_movl_T0_reg(s, rm);
1205 gen_bx(s);
1206 break;
1207 case 0x5: /* saturating add/subtract */
1208 rd = (insn >> 12) & 0xf;
1209 rn = (insn >> 16) & 0xf;
1210 gen_movl_T0_reg(s, rm);
1211 gen_movl_T1_reg(s, rn);
1212 if (op1 & 2)
1213 gen_op_double_T1_saturate();
1214 if (op1 & 1)
1215 gen_op_subl_T0_T1_saturate();
1216 else
1217 gen_op_addl_T0_T1_saturate();
1218 gen_movl_reg_T0(s, rd);
1219 break;
1220 case 7: /* bkpt */
1221 gen_op_movl_T0_im((long)s->pc - 4);
1222 gen_op_movl_reg_TN[0][15]();
1223 gen_op_bkpt();
1224 s->is_jmp = DISAS_JUMP;
1225 break;
1226 case 0x8: /* signed multiply */
1227 case 0xa:
1228 case 0xc:
1229 case 0xe:
1230 rs = (insn >> 8) & 0xf;
1231 rn = (insn >> 12) & 0xf;
1232 rd = (insn >> 16) & 0xf;
1233 if (op1 == 1) {
1234 /* (32 * 16) >> 16 */
1235 gen_movl_T0_reg(s, rm);
1236 gen_movl_T1_reg(s, rs);
1237 if (sh & 4)
1238 gen_op_sarl_T1_im(16);
1239 else
1240 gen_op_sxth_T1();
1241 gen_op_imulw_T0_T1();
1242 if ((sh & 2) == 0) {
1243 gen_movl_T1_reg(s, rn);
1244 gen_op_addl_T0_T1_setq();
1246 gen_movl_reg_T0(s, rd);
1247 } else {
1248 /* 16 * 16 */
1249 gen_movl_T0_reg(s, rm);
1250 gen_movl_T1_reg(s, rs);
1251 gen_mulxy(sh & 2, sh & 4);
1252 if (op1 == 2) {
1253 gen_op_signbit_T1_T0();
1254 gen_op_addq_T0_T1(rn, rd);
1255 gen_movl_reg_T0(s, rn);
1256 gen_movl_reg_T1(s, rd);
1257 } else {
1258 if (op1 == 0) {
1259 gen_movl_T1_reg(s, rn);
1260 gen_op_addl_T0_T1_setq();
1262 gen_movl_reg_T0(s, rd);
1265 break;
1266 default:
1267 goto illegal_op;
1269 } else if (((insn & 0x0e000000) == 0 &&
1270 (insn & 0x00000090) != 0x90) ||
1271 ((insn & 0x0e000000) == (1 << 25))) {
1272 int set_cc, logic_cc, shiftop;
1274 op1 = (insn >> 21) & 0xf;
1275 set_cc = (insn >> 20) & 1;
1276 logic_cc = table_logic_cc[op1] & set_cc;
1278 /* data processing instruction */
1279 if (insn & (1 << 25)) {
1280 /* immediate operand */
1281 val = insn & 0xff;
1282 shift = ((insn >> 8) & 0xf) * 2;
1283 if (shift)
1284 val = (val >> shift) | (val << (32 - shift));
1285 gen_op_movl_T1_im(val);
1286 if (logic_cc && shift)
1287 gen_op_mov_CF_T1();
1288 } else {
1289 /* register */
1290 rm = (insn) & 0xf;
1291 gen_movl_T1_reg(s, rm);
1292 shiftop = (insn >> 5) & 3;
1293 if (!(insn & (1 << 4))) {
1294 shift = (insn >> 7) & 0x1f;
1295 if (shift != 0) {
1296 if (logic_cc) {
1297 gen_shift_T1_im_cc[shiftop](shift);
1298 } else {
1299 gen_shift_T1_im[shiftop](shift);
1301 } else if (shiftop != 0) {
1302 if (logic_cc) {
1303 gen_shift_T1_0_cc[shiftop]();
1304 } else {
1305 gen_shift_T1_0[shiftop]();
1308 } else {
1309 rs = (insn >> 8) & 0xf;
1310 gen_movl_T0_reg(s, rs);
1311 if (logic_cc) {
1312 gen_shift_T1_T0_cc[shiftop]();
1313 } else {
1314 gen_shift_T1_T0[shiftop]();
1318 if (op1 != 0x0f && op1 != 0x0d) {
1319 rn = (insn >> 16) & 0xf;
1320 gen_movl_T0_reg(s, rn);
1322 rd = (insn >> 12) & 0xf;
1323 switch(op1) {
1324 case 0x00:
1325 gen_op_andl_T0_T1();
1326 gen_movl_reg_T0(s, rd);
1327 if (logic_cc)
1328 gen_op_logic_T0_cc();
1329 break;
1330 case 0x01:
1331 gen_op_xorl_T0_T1();
1332 gen_movl_reg_T0(s, rd);
1333 if (logic_cc)
1334 gen_op_logic_T0_cc();
1335 break;
1336 case 0x02:
1337 if (set_cc && rd == 15) {
1338 /* SUBS r15, ... is used for exception return. */
1339 if (IS_USER(s))
1340 goto illegal_op;
1341 gen_op_subl_T0_T1_cc();
1342 gen_exception_return(s);
1343 } else {
1344 if (set_cc)
1345 gen_op_subl_T0_T1_cc();
1346 else
1347 gen_op_subl_T0_T1();
1348 gen_movl_reg_T0(s, rd);
1350 break;
1351 case 0x03:
1352 if (set_cc)
1353 gen_op_rsbl_T0_T1_cc();
1354 else
1355 gen_op_rsbl_T0_T1();
1356 gen_movl_reg_T0(s, rd);
1357 break;
1358 case 0x04:
1359 if (set_cc)
1360 gen_op_addl_T0_T1_cc();
1361 else
1362 gen_op_addl_T0_T1();
1363 gen_movl_reg_T0(s, rd);
1364 break;
1365 case 0x05:
1366 if (set_cc)
1367 gen_op_adcl_T0_T1_cc();
1368 else
1369 gen_op_adcl_T0_T1();
1370 gen_movl_reg_T0(s, rd);
1371 break;
1372 case 0x06:
1373 if (set_cc)
1374 gen_op_sbcl_T0_T1_cc();
1375 else
1376 gen_op_sbcl_T0_T1();
1377 gen_movl_reg_T0(s, rd);
1378 break;
1379 case 0x07:
1380 if (set_cc)
1381 gen_op_rscl_T0_T1_cc();
1382 else
1383 gen_op_rscl_T0_T1();
1384 gen_movl_reg_T0(s, rd);
1385 break;
1386 case 0x08:
1387 if (set_cc) {
1388 gen_op_andl_T0_T1();
1389 gen_op_logic_T0_cc();
1391 break;
1392 case 0x09:
1393 if (set_cc) {
1394 gen_op_xorl_T0_T1();
1395 gen_op_logic_T0_cc();
1397 break;
1398 case 0x0a:
1399 if (set_cc) {
1400 gen_op_subl_T0_T1_cc();
1402 break;
1403 case 0x0b:
1404 if (set_cc) {
1405 gen_op_addl_T0_T1_cc();
1407 break;
1408 case 0x0c:
1409 gen_op_orl_T0_T1();
1410 gen_movl_reg_T0(s, rd);
1411 if (logic_cc)
1412 gen_op_logic_T0_cc();
1413 break;
1414 case 0x0d:
1415 if (logic_cc && rd == 15) {
1416 /* MOVS r15, ... is used for exception return. */
1417 if (IS_USER(s))
1418 goto illegal_op;
1419 gen_op_movl_T0_T1();
1420 gen_exception_return(s);
1421 } else {
1422 gen_movl_reg_T1(s, rd);
1423 if (logic_cc)
1424 gen_op_logic_T1_cc();
1426 break;
1427 case 0x0e:
1428 gen_op_bicl_T0_T1();
1429 gen_movl_reg_T0(s, rd);
1430 if (logic_cc)
1431 gen_op_logic_T0_cc();
1432 break;
1433 default:
1434 case 0x0f:
1435 gen_op_notl_T1();
1436 gen_movl_reg_T1(s, rd);
1437 if (logic_cc)
1438 gen_op_logic_T1_cc();
1439 break;
1441 } else {
1442 /* other instructions */
1443 op1 = (insn >> 24) & 0xf;
1444 switch(op1) {
1445 case 0x0:
1446 case 0x1:
1447 /* multiplies, extra load/stores */
1448 sh = (insn >> 5) & 3;
1449 if (sh == 0) {
1450 if (op1 == 0x0) {
1451 rd = (insn >> 16) & 0xf;
1452 rn = (insn >> 12) & 0xf;
1453 rs = (insn >> 8) & 0xf;
1454 rm = (insn) & 0xf;
1455 if (((insn >> 22) & 3) == 0) {
1456 /* 32 bit mul */
1457 gen_movl_T0_reg(s, rs);
1458 gen_movl_T1_reg(s, rm);
1459 gen_op_mul_T0_T1();
1460 if (insn & (1 << 21)) {
1461 gen_movl_T1_reg(s, rn);
1462 gen_op_addl_T0_T1();
1464 if (insn & (1 << 20))
1465 gen_op_logic_T0_cc();
1466 gen_movl_reg_T0(s, rd);
1467 } else {
1468 /* 64 bit mul */
1469 gen_movl_T0_reg(s, rs);
1470 gen_movl_T1_reg(s, rm);
1471 if (insn & (1 << 22))
1472 gen_op_imull_T0_T1();
1473 else
1474 gen_op_mull_T0_T1();
1475 if (insn & (1 << 21)) /* mult accumulate */
1476 gen_op_addq_T0_T1(rn, rd);
1477 if (!(insn & (1 << 23))) { /* double accumulate */
1478 ARCH(6);
1479 gen_op_addq_lo_T0_T1(rn);
1480 gen_op_addq_lo_T0_T1(rd);
1482 if (insn & (1 << 20))
1483 gen_op_logicq_cc();
1484 gen_movl_reg_T0(s, rn);
1485 gen_movl_reg_T1(s, rd);
1487 } else {
1488 rn = (insn >> 16) & 0xf;
1489 rd = (insn >> 12) & 0xf;
1490 if (insn & (1 << 23)) {
1491 /* load/store exclusive */
1492 goto illegal_op;
1493 } else {
1494 /* SWP instruction */
1495 rm = (insn) & 0xf;
1497 gen_movl_T0_reg(s, rm);
1498 gen_movl_T1_reg(s, rn);
1499 if (insn & (1 << 22)) {
1500 gen_ldst(swpb, s);
1501 } else {
1502 gen_ldst(swpl, s);
1504 gen_movl_reg_T0(s, rd);
1507 } else {
1508 /* Misc load/store */
1509 rn = (insn >> 16) & 0xf;
1510 rd = (insn >> 12) & 0xf;
1511 gen_movl_T1_reg(s, rn);
1512 if (insn & (1 << 24))
1513 gen_add_datah_offset(s, insn);
1514 if (insn & (1 << 20)) {
1515 /* load */
1516 switch(sh) {
1517 case 1:
1518 gen_ldst(lduw, s);
1519 break;
1520 case 2:
1521 gen_ldst(ldsb, s);
1522 break;
1523 default:
1524 case 3:
1525 gen_ldst(ldsw, s);
1526 break;
1528 gen_movl_reg_T0(s, rd);
1529 } else if (sh & 2) {
1530 /* doubleword */
1531 if (sh & 1) {
1532 /* store */
1533 gen_movl_T0_reg(s, rd);
1534 gen_ldst(stl, s);
1535 gen_op_addl_T1_im(4);
1536 gen_movl_T0_reg(s, rd + 1);
1537 gen_ldst(stl, s);
1538 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1539 gen_op_addl_T1_im(-4);
1540 } else {
1541 /* load */
1542 gen_ldst(ldl, s);
1543 gen_movl_reg_T0(s, rd);
1544 gen_op_addl_T1_im(4);
1545 gen_ldst(ldl, s);
1546 gen_movl_reg_T0(s, rd + 1);
1547 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1548 gen_op_addl_T1_im(-4);
1550 } else {
1551 /* store */
1552 gen_movl_T0_reg(s, rd);
1553 gen_ldst(stw, s);
1555 if (!(insn & (1 << 24))) {
1556 gen_add_datah_offset(s, insn);
1557 gen_movl_reg_T1(s, rn);
1558 } else if (insn & (1 << 21)) {
1559 gen_movl_reg_T1(s, rn);
1562 break;
1563 case 0x4:
1564 case 0x5:
1565 case 0x6:
1566 case 0x7:
1567 /* load/store byte/word */
1568 rn = (insn >> 16) & 0xf;
1569 rd = (insn >> 12) & 0xf;
1570 gen_movl_T1_reg(s, rn);
1571 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1572 if (insn & (1 << 24))
1573 gen_add_data_offset(s, insn);
1574 if (insn & (1 << 20)) {
1575 /* load */
1576 #if defined(CONFIG_USER_ONLY)
1577 if (insn & (1 << 22))
1578 gen_op_ldub_raw();
1579 else
1580 gen_op_ldl_raw();
1581 #else
1582 if (insn & (1 << 22)) {
1583 if (i)
1584 gen_op_ldub_user();
1585 else
1586 gen_op_ldub_kernel();
1587 } else {
1588 if (i)
1589 gen_op_ldl_user();
1590 else
1591 gen_op_ldl_kernel();
1593 #endif
1594 if (rd == 15)
1595 gen_bx(s);
1596 else
1597 gen_movl_reg_T0(s, rd);
1598 } else {
1599 /* store */
1600 gen_movl_T0_reg(s, rd);
1601 #if defined(CONFIG_USER_ONLY)
1602 if (insn & (1 << 22))
1603 gen_op_stb_raw();
1604 else
1605 gen_op_stl_raw();
1606 #else
1607 if (insn & (1 << 22)) {
1608 if (i)
1609 gen_op_stb_user();
1610 else
1611 gen_op_stb_kernel();
1612 } else {
1613 if (i)
1614 gen_op_stl_user();
1615 else
1616 gen_op_stl_kernel();
1618 #endif
1620 if (!(insn & (1 << 24))) {
1621 gen_add_data_offset(s, insn);
1622 gen_movl_reg_T1(s, rn);
1623 } else if (insn & (1 << 21))
1624 gen_movl_reg_T1(s, rn); {
1626 break;
1627 case 0x08:
1628 case 0x09:
1630 int j, n, user, loaded_base;
1631 /* load/store multiple words */
1632 /* XXX: store correct base if write back */
1633 user = 0;
1634 if (insn & (1 << 22)) {
1635 if (IS_USER(s))
1636 goto illegal_op; /* only usable in supervisor mode */
1638 if ((insn & (1 << 15)) == 0)
1639 user = 1;
1641 rn = (insn >> 16) & 0xf;
1642 gen_movl_T1_reg(s, rn);
1644 /* compute total size */
1645 loaded_base = 0;
1646 n = 0;
1647 for(i=0;i<16;i++) {
1648 if (insn & (1 << i))
1649 n++;
1651 /* XXX: test invalid n == 0 case ? */
1652 if (insn & (1 << 23)) {
1653 if (insn & (1 << 24)) {
1654 /* pre increment */
1655 gen_op_addl_T1_im(4);
1656 } else {
1657 /* post increment */
1659 } else {
1660 if (insn & (1 << 24)) {
1661 /* pre decrement */
1662 gen_op_addl_T1_im(-(n * 4));
1663 } else {
1664 /* post decrement */
1665 if (n != 1)
1666 gen_op_addl_T1_im(-((n - 1) * 4));
1669 j = 0;
1670 for(i=0;i<16;i++) {
1671 if (insn & (1 << i)) {
1672 if (insn & (1 << 20)) {
1673 /* load */
1674 gen_ldst(ldl, s);
1675 if (i == 15) {
1676 gen_bx(s);
1677 } else if (user) {
1678 gen_op_movl_user_T0(i);
1679 } else if (i == rn) {
1680 gen_op_movl_T2_T0();
1681 loaded_base = 1;
1682 } else {
1683 gen_movl_reg_T0(s, i);
1685 } else {
1686 /* store */
1687 if (i == 15) {
1688 /* special case: r15 = PC + 12 */
1689 val = (long)s->pc + 8;
1690 gen_op_movl_TN_im[0](val);
1691 } else if (user) {
1692 gen_op_movl_T0_user(i);
1693 } else {
1694 gen_movl_T0_reg(s, i);
1696 gen_ldst(stl, s);
1698 j++;
1699 /* no need to add after the last transfer */
1700 if (j != n)
1701 gen_op_addl_T1_im(4);
1704 if (insn & (1 << 21)) {
1705 /* write back */
1706 if (insn & (1 << 23)) {
1707 if (insn & (1 << 24)) {
1708 /* pre increment */
1709 } else {
1710 /* post increment */
1711 gen_op_addl_T1_im(4);
1713 } else {
1714 if (insn & (1 << 24)) {
1715 /* pre decrement */
1716 if (n != 1)
1717 gen_op_addl_T1_im(-((n - 1) * 4));
1718 } else {
1719 /* post decrement */
1720 gen_op_addl_T1_im(-(n * 4));
1723 gen_movl_reg_T1(s, rn);
1725 if (loaded_base) {
1726 gen_op_movl_T0_T2();
1727 gen_movl_reg_T0(s, rn);
1729 if ((insn & (1 << 22)) && !user) {
1730 /* Restore CPSR from SPSR. */
1731 gen_op_movl_T0_spsr();
1732 gen_op_movl_cpsr_T0(0xffffffff);
1733 s->is_jmp = DISAS_UPDATE;
1736 break;
1737 case 0xa:
1738 case 0xb:
1740 int32_t offset;
1742 /* branch (and link) */
1743 val = (int32_t)s->pc;
1744 if (insn & (1 << 24)) {
1745 gen_op_movl_T0_im(val);
1746 gen_op_movl_reg_TN[0][14]();
1748 offset = (((int32_t)insn << 8) >> 8);
1749 val += (offset << 2) + 4;
1750 gen_jmp(s, val);
1752 break;
1753 case 0xc:
1754 case 0xd:
1755 case 0xe:
1756 /* Coprocessor. */
1757 op1 = (insn >> 8) & 0xf;
1758 switch (op1) {
1759 case 10:
1760 case 11:
1761 if (disas_vfp_insn (env, s, insn))
1762 goto illegal_op;
1763 break;
1764 case 15:
1765 if (disas_cp15_insn (s, insn))
1766 goto illegal_op;
1767 break;
1768 default:
1769 /* unknown coprocessor. */
1770 goto illegal_op;
1772 break;
1773 case 0xf:
1774 /* swi */
1775 gen_op_movl_T0_im((long)s->pc);
1776 gen_op_movl_reg_TN[0][15]();
1777 gen_op_swi();
1778 s->is_jmp = DISAS_JUMP;
1779 break;
1780 default:
1781 illegal_op:
1782 gen_op_movl_T0_im((long)s->pc - 4);
1783 gen_op_movl_reg_TN[0][15]();
1784 gen_op_undef_insn();
1785 s->is_jmp = DISAS_JUMP;
1786 break;
/* Translate one 16-bit Thumb instruction.
 *
 * Reads the halfword at s->pc, advances s->pc past it (the BL/BLX pair in
 * case 15 consumes a second halfword), and appends the corresponding
 * micro-ops to the current translation block.  Dispatch is on the top four
 * opcode bits of the instruction.  Encodings this translator does not
 * handle jump to the "undef" label, which emits an undefined-instruction
 * trap and ends the block.
 */
1791 static void disas_thumb_insn(DisasContext *s)
1793 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1794 int32_t offset;
1795 int i;
1797 insn = lduw_code(s->pc);
1798 s->pc += 2;
/* Dispatch on bits [15:12] of the instruction. */
1800 switch (insn >> 12) {
1801 case 0: case 1:
1802 rd = insn & 7;
1803 op = (insn >> 11) & 3;
1804 if (op == 3) {
1805 /* add/subtract */
1806 rn = (insn >> 3) & 7;
1807 gen_movl_T0_reg(s, rn);
1808 if (insn & (1 << 10)) {
1809 /* immediate */
1810 gen_op_movl_T1_im((insn >> 6) & 7);
1811 } else {
1812 /* reg */
1813 rm = (insn >> 6) & 7;
1814 gen_movl_T1_reg(s, rm);
1816 if (insn & (1 << 9))
1817 gen_op_subl_T0_T1_cc();
1818 else
1819 gen_op_addl_T0_T1_cc();
1820 gen_movl_reg_T0(s, rd);
1821 } else {
1822 /* shift immediate */
1823 rm = (insn >> 3) & 7;
1824 shift = (insn >> 6) & 0x1f;
1825 gen_movl_T0_reg(s, rm);
1826 gen_shift_T0_im_thumb[op](shift);
1827 gen_movl_reg_T0(s, rd);
1829 break;
1830 case 2: case 3:
1831 /* arithmetic large immediate */
1832 op = (insn >> 11) & 3;
1833 rd = (insn >> 8) & 0x7;
1834 if (op == 0) {
1835 gen_op_movl_T0_im(insn & 0xff);
1836 } else {
1837 gen_movl_T0_reg(s, rd);
1838 gen_op_movl_T1_im(insn & 0xff);
1840 switch (op) {
1841 case 0: /* mov */
1842 gen_op_logic_T0_cc();
1843 break;
1844 case 1: /* cmp */
1845 gen_op_subl_T0_T1_cc();
1846 break;
1847 case 2: /* add */
1848 gen_op_addl_T0_T1_cc();
1849 break;
1850 case 3: /* sub */
1851 gen_op_subl_T0_T1_cc();
1852 break;
/* cmp only sets flags; everything else writes a result register. */
1854 if (op != 1)
1855 gen_movl_reg_T0(s, rd);
1856 break;
1857 case 4:
1858 if (insn & (1 << 11)) {
1859 rd = (insn >> 8) & 7;
1860 /* load pc-relative. Bit 1 of PC is ignored. */
1861 val = s->pc + 2 + ((insn & 0xff) * 4);
1862 val &= ~(uint32_t)2;
1863 gen_op_movl_T1_im(val);
1864 gen_ldst(ldl, s);
1865 gen_movl_reg_T0(s, rd);
1866 break;
1868 if (insn & (1 << 10)) {
1869 /* data processing extended or blx */
/* rd here can name a high register: bit 7 of the encoding supplies bit 3. */
1870 rd = (insn & 7) | ((insn >> 4) & 8);
1871 rm = (insn >> 3) & 0xf;
1872 op = (insn >> 8) & 3;
1873 switch (op) {
1874 case 0: /* add */
1875 gen_movl_T0_reg(s, rd);
1876 gen_movl_T1_reg(s, rm);
1877 gen_op_addl_T0_T1();
1878 gen_movl_reg_T0(s, rd);
1879 break;
1880 case 1: /* cmp */
1881 gen_movl_T0_reg(s, rd);
1882 gen_movl_T1_reg(s, rm);
1883 gen_op_subl_T0_T1_cc();
1884 break;
1885 case 2: /* mov/cpy */
1886 gen_movl_T0_reg(s, rm);
1887 gen_movl_reg_T0(s, rd);
1888 break;
1889 case 3:/* branch [and link] exchange thumb register */
1890 if (insn & (1 << 7)) {
/* BLX: save the return address with the Thumb bit set. */
1891 val = (uint32_t)s->pc | 1;
1892 gen_op_movl_T1_im(val);
1893 gen_movl_reg_T1(s, 14);
1895 gen_movl_T0_reg(s, rm);
1896 gen_bx(s);
1897 break;
1899 break;
1902 /* data processing register */
1903 rd = insn & 7;
1904 rm = (insn >> 3) & 7;
1905 op = (insn >> 6) & 0xf;
1906 if (op == 2 || op == 3 || op == 4 || op == 7) {
1907 /* the shift/rotate ops want the operands backwards */
1908 val = rm;
1909 rm = rd;
1910 rd = val;
/* val == 1 means the result is produced in T1 and written to rm below. */
1911 val = 1;
1912 } else {
1913 val = 0;
1916 if (op == 9) /* neg */
1917 gen_op_movl_T0_im(0);
1918 else if (op != 0xf) /* mvn doesn't read its first operand */
1919 gen_movl_T0_reg(s, rd);
1921 gen_movl_T1_reg(s, rm);
/* rd is forced to 16 below for flag-only ops (tst/cmp/cmn): no writeback. */
1922 switch (op) {
1923 case 0x0: /* and */
1924 gen_op_andl_T0_T1();
1925 gen_op_logic_T0_cc();
1926 break;
1927 case 0x1: /* eor */
1928 gen_op_xorl_T0_T1();
1929 gen_op_logic_T0_cc();
1930 break;
1931 case 0x2: /* lsl */
1932 gen_op_shll_T1_T0_cc();
1933 break;
1934 case 0x3: /* lsr */
1935 gen_op_shrl_T1_T0_cc();
1936 break;
1937 case 0x4: /* asr */
1938 gen_op_sarl_T1_T0_cc();
1939 break;
1940 case 0x5: /* adc */
1941 gen_op_adcl_T0_T1_cc();
1942 break;
1943 case 0x6: /* sbc */
1944 gen_op_sbcl_T0_T1_cc();
1945 break;
1946 case 0x7: /* ror */
1947 gen_op_rorl_T1_T0_cc();
1948 break;
1949 case 0x8: /* tst */
1950 gen_op_andl_T0_T1();
1951 gen_op_logic_T0_cc();
1952 rd = 16;
1953 break;
1954 case 0x9: /* neg */
1955 gen_op_subl_T0_T1_cc();
1956 break;
1957 case 0xa: /* cmp */
1958 gen_op_subl_T0_T1_cc();
1959 rd = 16;
1960 break;
1961 case 0xb: /* cmn */
1962 gen_op_addl_T0_T1_cc();
1963 rd = 16;
1964 break;
1965 case 0xc: /* orr */
1966 gen_op_orl_T0_T1();
1967 gen_op_logic_T0_cc();
1968 break;
1969 case 0xd: /* mul */
1970 gen_op_mull_T0_T1();
1971 gen_op_logic_T0_cc();
1972 break;
1973 case 0xe: /* bic */
1974 gen_op_bicl_T0_T1();
1975 gen_op_logic_T0_cc();
1976 break;
1977 case 0xf: /* mvn */
1978 gen_op_notl_T1();
1979 gen_op_logic_T1_cc();
1980 val = 1;
1981 rm = rd;
1982 break;
/* Write the result back unless this was a flag-only op (rd == 16);
   val selects T1 (shift/rotate/mvn results) over T0. */
1984 if (rd != 16) {
1985 if (val)
1986 gen_movl_reg_T1(s, rm);
1987 else
1988 gen_movl_reg_T0(s, rd);
1990 break;
1992 case 5:
1993 /* load/store register offset. */
1994 rd = insn & 7;
1995 rn = (insn >> 3) & 7;
1996 rm = (insn >> 6) & 7;
1997 op = (insn >> 9) & 7;
1998 gen_movl_T1_reg(s, rn);
1999 gen_movl_T2_reg(s, rm);
2000 gen_op_addl_T1_T2();
2002 if (op < 3) /* store */
2003 gen_movl_T0_reg(s, rd);
2005 switch (op) {
2006 case 0: /* str */
2007 gen_ldst(stl, s);
2008 break;
2009 case 1: /* strh */
2010 gen_ldst(stw, s);
2011 break;
2012 case 2: /* strb */
2013 gen_ldst(stb, s);
2014 break;
2015 case 3: /* ldrsb */
2016 gen_ldst(ldsb, s);
2017 break;
2018 case 4: /* ldr */
2019 gen_ldst(ldl, s);
2020 break;
2021 case 5: /* ldrh */
2022 gen_ldst(lduw, s);
2023 break;
2024 case 6: /* ldrb */
2025 gen_ldst(ldub, s);
2026 break;
2027 case 7: /* ldrsh */
2028 gen_ldst(ldsw, s);
2029 break;
2031 if (op >= 3) /* load */
2032 gen_movl_reg_T0(s, rd);
2033 break;
2035 case 6:
2036 /* load/store word immediate offset */
2037 rd = insn & 7;
2038 rn = (insn >> 3) & 7;
2039 gen_movl_T1_reg(s, rn);
2040 val = (insn >> 4) & 0x7c;
2041 gen_op_movl_T2_im(val);
2042 gen_op_addl_T1_T2();
2044 if (insn & (1 << 11)) {
2045 /* load */
2046 gen_ldst(ldl, s);
2047 gen_movl_reg_T0(s, rd);
2048 } else {
2049 /* store */
2050 gen_movl_T0_reg(s, rd);
2051 gen_ldst(stl, s);
2053 break;
2055 case 7:
2056 /* load/store byte immediate offset */
2057 rd = insn & 7;
2058 rn = (insn >> 3) & 7;
2059 gen_movl_T1_reg(s, rn);
2060 val = (insn >> 6) & 0x1f;
2061 gen_op_movl_T2_im(val);
2062 gen_op_addl_T1_T2();
2064 if (insn & (1 << 11)) {
2065 /* load */
2066 gen_ldst(ldub, s);
2067 gen_movl_reg_T0(s, rd);
2068 } else {
2069 /* store */
2070 gen_movl_T0_reg(s, rd);
2071 gen_ldst(stb, s);
2073 break;
2075 case 8:
2076 /* load/store halfword immediate offset */
2077 rd = insn & 7;
2078 rn = (insn >> 3) & 7;
2079 gen_movl_T1_reg(s, rn);
2080 val = (insn >> 5) & 0x3e;
2081 gen_op_movl_T2_im(val);
2082 gen_op_addl_T1_T2();
2084 if (insn & (1 << 11)) {
2085 /* load */
2086 gen_ldst(lduw, s);
2087 gen_movl_reg_T0(s, rd);
2088 } else {
2089 /* store */
2090 gen_movl_T0_reg(s, rd);
2091 gen_ldst(stw, s);
2093 break;
2095 case 9:
2096 /* load/store from stack */
2097 rd = (insn >> 8) & 7;
2098 gen_movl_T1_reg(s, 13);
2099 val = (insn & 0xff) * 4;
2100 gen_op_movl_T2_im(val);
2101 gen_op_addl_T1_T2();
2103 if (insn & (1 << 11)) {
2104 /* load */
2105 gen_ldst(ldl, s);
2106 gen_movl_reg_T0(s, rd);
2107 } else {
2108 /* store */
2109 gen_movl_T0_reg(s, rd);
2110 gen_ldst(stl, s);
2112 break;
2114 case 10:
2115 /* add to high reg */
2116 rd = (insn >> 8) & 7;
2117 if (insn & (1 << 11)) {
2118 /* SP */
2119 gen_movl_T0_reg(s, 13);
2120 } else {
2121 /* PC. bit 1 is ignored. */
2122 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2124 val = (insn & 0xff) * 4;
2125 gen_op_movl_T1_im(val);
2126 gen_op_addl_T0_T1();
2127 gen_movl_reg_T0(s, rd);
2128 break;
2130 case 11:
2131 /* misc */
2132 op = (insn >> 8) & 0xf;
2133 switch (op) {
2134 case 0:
2135 /* adjust stack pointer */
2136 gen_movl_T1_reg(s, 13);
2137 val = (insn & 0x7f) * 4;
2138 if (insn & (1 << 7))
2139 val = -(int32_t)val;
2140 gen_op_movl_T2_im(val);
2141 gen_op_addl_T1_T2();
2142 gen_movl_reg_T1(s, 13);
2143 break;
2145 case 4: case 5: case 0xc: case 0xd:
2146 /* push/pop */
2147 gen_movl_T1_reg(s, 13);
/* offset = total bytes transferred (bit 8 adds lr/pc to the reg list). */
2148 if (insn & (1 << 8))
2149 offset = 4;
2150 else
2151 offset = 0;
2152 for (i = 0; i < 8; i++) {
2153 if (insn & (1 << i))
2154 offset += 4;
2156 if ((insn & (1 << 11)) == 0) {
2157 gen_op_movl_T2_im(-offset);
2158 gen_op_addl_T1_T2();
2160 gen_op_movl_T2_im(4);
2161 for (i = 0; i < 8; i++) {
2162 if (insn & (1 << i)) {
2163 if (insn & (1 << 11)) {
2164 /* pop */
2165 gen_ldst(ldl, s);
2166 gen_movl_reg_T0(s, i);
2167 } else {
2168 /* push */
2169 gen_movl_T0_reg(s, i);
2170 gen_ldst(stl, s);
2172 /* advance to the next address. */
2173 gen_op_addl_T1_T2();
2176 if (insn & (1 << 8)) {
2177 if (insn & (1 << 11)) {
2178 /* pop pc */
2179 gen_ldst(ldl, s);
2180 /* don't set the pc until the rest of the instruction
2181 has completed */
2182 } else {
2183 /* push lr */
2184 gen_movl_T0_reg(s, 14);
2185 gen_ldst(stl, s);
2187 gen_op_addl_T1_T2();
2189 if ((insn & (1 << 11)) == 0) {
2190 gen_op_movl_T2_im(-offset);
2191 gen_op_addl_T1_T2();
2193 /* write back the new stack pointer */
2194 gen_movl_reg_T1(s, 13);
2195 /* set the new PC value */
2196 if ((insn & 0x0900) == 0x0900)
2197 gen_bx(s);
2198 break;
2200 case 0xe: /* bkpt */
2201 gen_op_movl_T0_im((long)s->pc - 2);
2202 gen_op_movl_reg_TN[0][15]();
2203 gen_op_bkpt();
2204 s->is_jmp = DISAS_JUMP;
2205 break;
2207 default:
2208 goto undef;
2210 break;
2212 case 12:
2213 /* load/store multiple */
2214 rn = (insn >> 8) & 0x7;
2215 gen_movl_T1_reg(s, rn);
2216 gen_op_movl_T2_im(4);
2217 for (i = 0; i < 8; i++) {
2218 if (insn & (1 << i)) {
2219 if (insn & (1 << 11)) {
2220 /* load */
2221 gen_ldst(ldl, s);
2222 gen_movl_reg_T0(s, i);
2223 } else {
2224 /* store */
2225 gen_movl_T0_reg(s, i);
2226 gen_ldst(stl, s);
2228 /* advance to the next address */
2229 gen_op_addl_T1_T2();
2232 /* Base register writeback. */
2233 if ((insn & (1 << rn)) == 0)
2234 gen_movl_reg_T1(s, rn);
2235 break;
2237 case 13:
2238 /* conditional branch or swi */
2239 cond = (insn >> 8) & 0xf;
2240 if (cond == 0xe)
2241 goto undef;
2243 if (cond == 0xf) {
2244 /* swi */
2245 gen_op_movl_T0_im((long)s->pc | 1);
2246 /* Don't set r15. */
2247 gen_op_movl_reg_TN[0][15]();
2248 gen_op_swi();
2249 s->is_jmp = DISAS_JUMP;
2250 break;
2252 /* generate a conditional jump to next instruction */
2253 s->condlabel = gen_new_label();
2254 gen_test_cc[cond ^ 1](s->condlabel);
2255 s->condjmp = 1;
2256 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2257 //s->is_jmp = DISAS_JUMP_NEXT;
2258 gen_movl_T1_reg(s, 15);
2260 /* jump to the offset */
2261 val = (uint32_t)s->pc + 2;
2262 offset = ((int32_t)insn << 24) >> 24;
2263 val += offset << 1;
2264 gen_jmp(s, val);
2265 break;
2267 case 14:
2268 /* unconditional branch */
2269 if (insn & (1 << 11))
2270 goto undef; /* Second half of a blx */
2271 val = (uint32_t)s->pc;
2272 offset = ((int32_t)insn << 21) >> 21;
2273 val += (offset << 1) + 2;
2274 gen_jmp(s, val);
2275 break;
2277 case 15:
2278 /* branch and link [and switch to arm] */
/* First halfword holds the high offset bits; fetch the second halfword. */
2279 offset = ((int32_t)insn << 21) >> 10;
2280 insn = lduw_code(s->pc);
2281 offset |= insn & 0x7ff;
2283 val = (uint32_t)s->pc + 2;
2284 gen_op_movl_T1_im(val | 1);
2285 gen_movl_reg_T1(s, 14);
2287 val += offset << 1;
2288 if (insn & (1 << 12)) {
2289 /* bl */
2290 gen_jmp(s, val);
2291 } else {
2292 /* blx */
2293 val &= ~(uint32_t)2;
2294 gen_op_movl_T0_im(val);
2295 gen_bx(s);
2298 return;
/* Undefined encoding: write back the faulting PC and raise UNDEF. */
2299 undef:
2300 gen_op_movl_T0_im((long)s->pc - 2);
2301 gen_op_movl_reg_TN[0][15]();
2302 gen_op_undef_insn();
2303 s->is_jmp = DISAS_JUMP;
2306 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2307 basic block 'tb'. If search_pc is TRUE, also generate PC
2308 information for each intermediate instruction. */
/* Translate a basic block of guest code into micro-ops.
 *
 * Fills gen_opc_buf/gen_opparam_buf with the ops for the block starting at
 * tb->pc, stopping at the first jump/exception, at a translation-buffer or
 * page limit, or after one instruction when single-stepping.  When
 * search_pc is nonzero, also records the guest PC for each op in
 * gen_opc_pc/gen_opc_instr_start so a host PC can be mapped back to a
 * guest PC; in that mode tb->size is left 0.  Always returns 0.
 */
2309 static inline int gen_intermediate_code_internal(CPUState *env,
2310 TranslationBlock *tb,
2311 int search_pc)
2313 DisasContext dc1, *dc = &dc1;
2314 uint16_t *gen_opc_end;
2315 int j, lj;
2316 target_ulong pc_start;
2317 uint32_t next_page_start;
2319 /* generate intermediate code */
2320 pc_start = tb->pc;
2322 dc->tb = tb;
2324 gen_opc_ptr = gen_opc_buf;
2325 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2326 gen_opparam_ptr = gen_opparam_buf;
2328 dc->is_jmp = DISAS_NEXT;
2329 dc->pc = pc_start;
2330 dc->singlestep_enabled = env->singlestep_enabled;
2331 dc->condjmp = 0;
2332 dc->thumb = env->thumb;
2333 #if !defined(CONFIG_USER_ONLY)
2334 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
2335 #endif
2336 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2337 nb_gen_labels = 0;
2338 lj = -1;
2339 do {
/* Stop at debugger breakpoints: write back the PC and trap. */
2340 if (env->nb_breakpoints > 0) {
2341 for(j = 0; j < env->nb_breakpoints; j++) {
2342 if (env->breakpoints[j] == dc->pc) {
2343 gen_op_movl_T0_im((long)dc->pc);
2344 gen_op_movl_reg_TN[0][15]();
2345 gen_op_debug();
2346 dc->is_jmp = DISAS_JUMP;
2347 break;
/* Record the guest PC for the first op of this instruction. */
2351 if (search_pc) {
2352 j = gen_opc_ptr - gen_opc_buf;
2353 if (lj < j) {
2354 lj++;
2355 while (lj < j)
2356 gen_opc_instr_start[lj++] = 0;
2358 gen_opc_pc[lj] = dc->pc;
2359 gen_opc_instr_start[lj] = 1;
2362 if (env->thumb)
2363 disas_thumb_insn(dc);
2364 else
2365 disas_arm_insn(env, dc);
/* Close a conditional skip whose fall-through continues in this block. */
2367 if (dc->condjmp && !dc->is_jmp) {
2368 gen_set_label(dc->condlabel);
2369 dc->condjmp = 0;
2371 /* Translation stops when a conditional branch is encountered.
2372 * Otherwise the subsequent code could get translated several times.
2373 * Also stop translation when a page boundary is reached. This
2374 * ensures prefetch aborts occur at the right place. */
2375 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2376 !env->singlestep_enabled &&
2377 dc->pc < next_page_start);
2378 /* At this stage dc->condjmp will only be set when the skipped
2379 * instruction was a conditional branch, and the PC has already been
2380 * written. */
2381 if (__builtin_expect(env->singlestep_enabled, 0)) {
2382 /* Make sure the pc is updated, and raise a debug exception. */
2383 if (dc->condjmp) {
2384 gen_op_debug();
2385 gen_set_label(dc->condlabel);
2387 if (dc->condjmp || !dc->is_jmp) {
2388 gen_op_movl_T0_im((long)dc->pc);
2389 gen_op_movl_reg_TN[0][15]();
2390 dc->condjmp = 0;
2392 gen_op_debug();
2393 } else {
/* Normal block epilogue: chain to the next TB or exit to the main loop. */
2394 switch(dc->is_jmp) {
2395 case DISAS_NEXT:
2396 gen_goto_tb(dc, 1, dc->pc);
2397 break;
2398 default:
2399 case DISAS_JUMP:
2400 case DISAS_UPDATE:
2401 /* indicate that the hash table must be used to find the next TB */
2402 gen_op_movl_T0_0();
2403 gen_op_exit_tb();
2404 break;
2405 case DISAS_TB_JUMP:
2406 /* nothing more to generate */
2407 break;
2409 if (dc->condjmp) {
2410 gen_set_label(dc->condlabel);
2411 gen_goto_tb(dc, 1, dc->pc);
2412 dc->condjmp = 0;
2415 *gen_opc_ptr = INDEX_op_end;
2417 #ifdef DEBUG_DISAS
2418 if (loglevel & CPU_LOG_TB_IN_ASM) {
2419 fprintf(logfile, "----------------\n");
2420 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2421 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2422 fprintf(logfile, "\n");
2423 if (loglevel & (CPU_LOG_TB_OP)) {
2424 fprintf(logfile, "OP:\n");
2425 dump_ops(gen_opc_buf, gen_opparam_buf);
2426 fprintf(logfile, "\n");
2429 #endif
/* Pad the PC-mapping table out to the last generated op. */
2430 if (search_pc) {
2431 j = gen_opc_ptr - gen_opc_buf;
2432 lj++;
2433 while (lj <= j)
2434 gen_opc_instr_start[lj++] = 0;
2435 tb->size = 0;
2436 } else {
2437 tb->size = dc->pc - pc_start;
2439 return 0;
2442 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2444 return gen_intermediate_code_internal(env, tb, 0);
2447 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2449 return gen_intermediate_code_internal(env, tb, 1);
/* Reset the CPU to its power-on state: user mode for the usermode
 * emulator, otherwise SVC mode with IRQ/FIQ/imprecise aborts masked.
 * Execution restarts at address 0. */
2452 void cpu_reset(CPUARMState *env)
2454 #if defined (CONFIG_USER_ONLY)
2455 env->uncached_cpsr = ARM_CPU_MODE_USR;
2456 #else
2457 /* SVC mode with interrupts disabled. */
2458 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
2459 #endif
2460 env->regs[15] = 0;
2463 CPUARMState *cpu_arm_init(void)
2465 CPUARMState *env;
2467 env = qemu_mallocz(sizeof(CPUARMState));
2468 if (!env)
2469 return NULL;
2470 cpu_exec_init(env);
2471 cpu_reset(env);
2472 tlb_flush(env, 1);
2473 return env;
2476 void cpu_arm_close(CPUARMState *env)
2478 free(env);
/* Printable names for the 16 possible values of the CPSR mode field
 * (CPSR bits [3:0] once the mode's top bit is masked off); "???" marks
 * encodings that are not valid ARM processor modes. */
2481 static const char *cpu_mode_names[16] = {
2482 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
2483 "???", "???", "???", "und", "???", "???", "???", "sys"
2485 void cpu_dump_state(CPUState *env, FILE *f,
2486 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2487 int flags)
2489 int i;
2490 union {
2491 uint32_t i;
2492 float s;
2493 } s0, s1;
2494 CPU_DoubleU d;
2495 uint32_t psr;
2497 for(i=0;i<16;i++) {
2498 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2499 if ((i % 4) == 3)
2500 cpu_fprintf(f, "\n");
2501 else
2502 cpu_fprintf(f, " ");
2504 psr = cpsr_read(env);
2505 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2506 psr,
2507 psr & (1 << 31) ? 'N' : '-',
2508 psr & (1 << 30) ? 'Z' : '-',
2509 psr & (1 << 29) ? 'C' : '-',
2510 psr & (1 << 28) ? 'V' : '-',
2511 psr & CPSR_T ? 'T' : 'A',
2512 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2514 for (i = 0; i < 16; i++) {
2515 d.d = env->vfp.regs[i];
2516 s0.i = d.l.lower;
2517 s1.i = d.l.upper;
2518 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2519 i * 2, (int)s0.i, s0.s,
2520 i * 2 + 1, (int)s0.i, s0.s,
2521 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2522 d.d);
2524 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);