Add missing ARM syscall numbers.
[qemu/mini2440.git] / target-arm / translate.c
blobcd91bdce8deacf6137436cad0d1f7d695a10aacf
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
/* Architecture-extension gates: build-time switches selecting which
   instruction-set extensions the decoder accepts.  */
#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 1
#define ENABLE_ARCH_6T2 1

/* Bail out to the illegal-instruction path when the required extension is
   compiled out.  Only usable inside a function declaring an "illegal_op"
   label (e.g. disas_arm_insn).  */
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
/* internal defines */
/* Per-instruction translation state threaded through the decoder.  */
typedef struct DisasContext {
    target_ulong pc;        /* address of the instruction being decoded */
    int is_jmp;             /* DISAS_* code describing how translation ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled; /* emit indirect jumps so debug exceptions fire */
    int thumb;              /* nonzero when decoding Thumb code */
#if !defined(CONFIG_USER_ONLY)
    int user;               /* nonzero when translating user-mode code */
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation always runs unprivileged.  */
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* Additional is_jmp value: force a TB lookup for the next instruction.  */
#define DISAS_JUMP_NEXT 4

/* With direct jumps the TB pointer is patched into the code stream, so the
   goto_tb ops take no parameter; otherwise pass the TB address explicitly.  */
#ifdef USE_DIRECT_JUMP
#define TBPARAM(x)
#else
#define TBPARAM(x) (long)(x)
#endif
/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;     /* write cursor into the micro-op stream */
static uint32_t *gen_opparam_ptr; /* write cursor into the op-parameter stream */
extern FILE *logfile;
extern int loglevel;

/* Build the INDEX_op_* enumeration from the micro-op definition list.  */
enum {
#define DEF(s, n, copy_size) INDEX_op_ ## s,
#include "opc.h"
#undef DEF
    NB_OPS,
};

#include "gen-op.h"
/* Conditional-branch generators for the 14 testable ARM condition codes,
   indexed by the condition field (0xe "always" and 0xf need no test).
   Each emits a jump to the given label when the condition holds.  */
static GenOpFunc1 *gen_test_cc[14] = {
    gen_op_test_eq,
    gen_op_test_ne,
    gen_op_test_cs,
    gen_op_test_cc,
    gen_op_test_mi,
    gen_op_test_pl,
    gen_op_test_vs,
    gen_op_test_vc,
    gen_op_test_hi,
    gen_op_test_ls,
    gen_op_test_ge,
    gen_op_test_lt,
    gen_op_test_gt,
    gen_op_test_le,
};
/* For each of the 16 data-processing opcodes: nonzero when the S-variant
   sets flags by the logical rule (NZ from the result, C from the shifter)
   rather than from an arithmetic operation.  */
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor (eor) */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl (tst) */
    1, /* xorl (teq) */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Immediate shifts of T1 (amount 1..31), indexed by the 2-bit shift type
   field: LSL, LSR, ASR, ROR.  */
static GenOpFunc1 *gen_shift_T1_im[4] = {
    gen_op_shll_T1_im,
    gen_op_shrl_T1_im,
    gen_op_sarl_T1_im,
    gen_op_rorl_T1_im,
};

/* Shift-by-zero encodings: LSL #0 is the identity (NULL entry);
   LSR/ASR #0 encode a shift by 32, ROR #0 encodes RRX.  */
static GenOpFunc *gen_shift_T1_0[4] = {
    NULL,
    gen_op_shrl_T1_0,
    gen_op_sarl_T1_0,
    gen_op_rrxl_T1,
};

/* Same two tables, operating on T2 (used for addressing-mode offsets).  */
static GenOpFunc1 *gen_shift_T2_im[4] = {
    gen_op_shll_T2_im,
    gen_op_shrl_T2_im,
    gen_op_sarl_T2_im,
    gen_op_rorl_T2_im,
};

static GenOpFunc *gen_shift_T2_0[4] = {
    NULL,
    gen_op_shrl_T2_0,
    gen_op_sarl_T2_0,
    gen_op_rrxl_T2,
};

/* Flag-setting (_cc) variants of the T1 immediate shifts.  */
static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {
    NULL,
    gen_op_shrl_T1_0_cc,
    gen_op_sarl_T1_0_cc,
    gen_op_rrxl_T1_cc,
};

/* Register-specified shifts: T1 = T1 <op> T0.  */
static GenOpFunc *gen_shift_T1_T0[4] = {
    gen_op_shll_T1_T0,
    gen_op_shrl_T1_T0,
    gen_op_sarl_T1_T0,
    gen_op_rorl_T1_T0,
};

/* Flag-setting variants of the register-specified shifts.  */
static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
/* Micro-ops copying ARM register r0..r15 into temporary T0/T1/T2.
   First index selects the temporary, second the source register.  */
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
    {
        gen_op_movl_T0_r0,
        gen_op_movl_T0_r1,
        gen_op_movl_T0_r2,
        gen_op_movl_T0_r3,
        gen_op_movl_T0_r4,
        gen_op_movl_T0_r5,
        gen_op_movl_T0_r6,
        gen_op_movl_T0_r7,
        gen_op_movl_T0_r8,
        gen_op_movl_T0_r9,
        gen_op_movl_T0_r10,
        gen_op_movl_T0_r11,
        gen_op_movl_T0_r12,
        gen_op_movl_T0_r13,
        gen_op_movl_T0_r14,
        gen_op_movl_T0_r15,
    },
    {
        gen_op_movl_T1_r0,
        gen_op_movl_T1_r1,
        gen_op_movl_T1_r2,
        gen_op_movl_T1_r3,
        gen_op_movl_T1_r4,
        gen_op_movl_T1_r5,
        gen_op_movl_T1_r6,
        gen_op_movl_T1_r7,
        gen_op_movl_T1_r8,
        gen_op_movl_T1_r9,
        gen_op_movl_T1_r10,
        gen_op_movl_T1_r11,
        gen_op_movl_T1_r12,
        gen_op_movl_T1_r13,
        gen_op_movl_T1_r14,
        gen_op_movl_T1_r15,
    },
    {
        gen_op_movl_T2_r0,
        gen_op_movl_T2_r1,
        gen_op_movl_T2_r2,
        gen_op_movl_T2_r3,
        gen_op_movl_T2_r4,
        gen_op_movl_T2_r5,
        gen_op_movl_T2_r6,
        gen_op_movl_T2_r7,
        gen_op_movl_T2_r8,
        gen_op_movl_T2_r9,
        gen_op_movl_T2_r10,
        gen_op_movl_T2_r11,
        gen_op_movl_T2_r12,
        gen_op_movl_T2_r13,
        gen_op_movl_T2_r14,
        gen_op_movl_T2_r15,
    },
};
/* Micro-ops copying temporary T0/T1 back into ARM register r0..r15.
   Only T0 and T1 can be written back; T2 is scratch-only.  */
static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
    {
        gen_op_movl_r0_T0,
        gen_op_movl_r1_T0,
        gen_op_movl_r2_T0,
        gen_op_movl_r3_T0,
        gen_op_movl_r4_T0,
        gen_op_movl_r5_T0,
        gen_op_movl_r6_T0,
        gen_op_movl_r7_T0,
        gen_op_movl_r8_T0,
        gen_op_movl_r9_T0,
        gen_op_movl_r10_T0,
        gen_op_movl_r11_T0,
        gen_op_movl_r12_T0,
        gen_op_movl_r13_T0,
        gen_op_movl_r14_T0,
        gen_op_movl_r15_T0,
    },
    {
        gen_op_movl_r0_T1,
        gen_op_movl_r1_T1,
        gen_op_movl_r2_T1,
        gen_op_movl_r3_T1,
        gen_op_movl_r4_T1,
        gen_op_movl_r5_T1,
        gen_op_movl_r6_T1,
        gen_op_movl_r7_T1,
        gen_op_movl_r8_T1,
        gen_op_movl_r9_T1,
        gen_op_movl_r10_T1,
        gen_op_movl_r11_T1,
        gen_op_movl_r12_T1,
        gen_op_movl_r13_T1,
        gen_op_movl_r14_T1,
        gen_op_movl_r15_T1,
    },
};
/* Load a 32-bit immediate into temporary T0/T1/T2.  */
static GenOpFunc1 *gen_op_movl_TN_im[3] = {
    gen_op_movl_T0_im,
    gen_op_movl_T1_im,
    gen_op_movl_T2_im,
};

/* Thumb immediate shifts of T0 (LSL/LSR/ASR only; Thumb format 1 has no
   ROR-by-immediate encoding).  */
static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
/* Emit a branch-exchange through T0 (may switch ARM/Thumb state) and end
   the current TB.  */
static inline void gen_bx(DisasContext *s)
{
    s->is_jmp = DISAS_UPDATE;
    gen_op_bx_T0();
}

/* Dispatch a load/store micro-op to the raw (user-only build) or
   user/kernel (softmmu build) variant depending on the current
   translation privilege level.  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
/* Copy ARM register reg into temporary t (0..2).  Reading r15 yields the
   architectural PC, i.e. the current instruction address plus the pipeline
   offset (2 in Thumb state, 4 in ARM state).  */
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
    int val;

    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            val = (long)s->pc + 2;
        else
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
    } else {
        gen_op_movl_TN_reg[t][reg]();
    }
}
/* Convenience wrappers around gen_movl_TN_reg for each temporary.  */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 0);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 1);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 2);
}

/* Copy temporary t back into ARM register reg.  A write to r15 changes
   control flow, so the TB must end.  */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    gen_op_movl_reg_TN[t][reg]();
    if (reg == 15) {
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Store the next PC and end the block so changed state (mode, MMU,
       FPSCR, ...) is picked up before the next instruction executes.  */
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
/* Apply the addressing-mode-2 (word/unsigned-byte load/store) offset of
   insn to the base address held in T1.  Register offsets clobber T2.  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
    int val, rm, shift, shiftop;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))    /* U bit clear: subtract the offset */
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        gen_movl_T2_reg(s, rm);
        shiftop = (insn >> 5) & 3;
        if (shift != 0) {
            gen_shift_T2_im[shiftop](shift);
        } else if (shiftop != 0) {
            /* shift amount 0 encodes LSR/ASR #32 or RRX */
            gen_shift_T2_0[shiftop]();
        }
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
/* Apply the addressing-mode-3 (halfword/doubleword/signed load/store)
   offset of insn to the base address in T1.  "extra" is an additional
   constant added first (used for the second word of doubleword accesses).
   Register offsets clobber T2.  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra)
{
    int val, rm;

    if (insn & (1 << 22)) {
        /* immediate: split 4+4-bit field */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        val += extra;
        if (!(insn & (1 << 23)))    /* U bit clear: subtract */
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* register */
        if (extra)
            gen_op_addl_T1_im(extra);
        rm = (insn) & 0xf;
        gen_movl_T2_reg(s, rm);
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
/* Define gen_vfp_<name>(dp): emit the double-precision variant of the op
   when dp is nonzero, otherwise the single-precision one.  */
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(); \
    else \
        gen_op_vfp_##name##s(); \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)

#undef VFP_OP
/* Load a VFP working register from the address in T1.  */
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_ldd, s);
    else
        gen_ldst(vfp_lds, s);
}

/* Store a VFP working register to the address in T1.  */
static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_std, s);
    else
        gen_ldst(vfp_sts, s);
}
456 static inline long
457 vfp_reg_offset (int dp, int reg)
459 if (dp)
460 return offsetof(CPUARMState, vfp.regs[reg]);
461 else if (reg & 1) {
462 return offsetof(CPUARMState, vfp.regs[reg >> 1])
463 + offsetof(CPU_DoubleU, l.upper);
464 } else {
465 return offsetof(CPUARMState, vfp.regs[reg >> 1])
466 + offsetof(CPU_DoubleU, l.lower);
/* Copy VFP register reg into working register F0.  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

/* Copy VFP register reg into working register F1.  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

/* Copy working register F0 back into VFP register reg.  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* ??? Some cp15 registers are accessible from userspace.  */
    if (IS_USER(s)) {
        return 1;
    }
    /* Recognize the two "wait for interrupt" encodings specially.  */
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_op_wfi();
        s->is_jmp = DISAS_JUMP;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & (1 << 20)) {
        /* mrc: cp15 -> ARM register */
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        /* mcr: ARM register -> cp15 */
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
    }
    /* cp15 writes can change MMU/translation state, so end the TB.  */
    gen_lookup_tb(s);
    return 0;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occured
   (ie. an undefined instruction).
   Handles register transfers (fmrx/fmxr, fmrs/fmsr, fmrdh/fmrdl...),
   data-processing ops (including short-vector iteration), two-register
   transfers and load/store (single and multiple).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from fpexc and fpsid.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != 0 && rn != 8)
            return 1;
    }
    /* Coprocessor number 11 (0xb00) selects double precision.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            if ((insn & 0x6f) != 0x00)
                return 1;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & 0x80)
                    return 1;
                rn = (insn >> 16) & 0xf;
                /* Get the existing value even for arm->vfp moves because
                   we only set half the register.  */
                gen_mov_F0_vreg(1, rn);
                gen_op_vfp_mrrd();
                if (insn & (1 << 20)) {
                    /* vfp->arm */
                    if (insn & (1 << 21))
                        gen_movl_reg_T1(s, rd);    /* upper half */
                    else
                        gen_movl_reg_T0(s, rd);    /* lower half */
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21))
                        gen_movl_T1_reg(s, rd);
                    else
                        gen_movl_T0_reg(s, rd);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(dp, rn);
                }
            } else {
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (insn & (1 << 20)) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_FPEXC:
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            /* fmstat (rd==15) reads only the flag bits */
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    if (insn & (1 << 7))
                        return 1;
                    rn = (insn >> 16) & 0xf;
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                } else {
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    if (insn & (1 << 5))
                        return 1;
                    rm = insn & 0xf;
                }
            } else {
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                } else
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
            }

            veclen = env->vfp.vec_len;
            /* Compares/conversions (rn > 3 in the extension space) always
               operate on scalars.  */
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Iterate once per short-vector element (once for scalars).  */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion: destination has the opposite precision */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & (1 << 22))) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & (1 << 5))
                    return 1;
                rm = insn & 0xf;
            } else
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);

            if (insn & (1 << 20)) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                rd = (insn >> 12) & 0xf;
            else
                rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
            gen_movl_T1_reg(s, rn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & (1 << 20)) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
/* Jump to address dest, chaining to the current TB's jump slot n (0 or 1)
   when the destination lies on the same guest page; otherwise fall back to
   an unchained exit so the target is looked up at run time.  */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        if (n == 0)
            gen_op_goto_tb0(TBPARAM(tb));
        else
            gen_op_goto_tb1(TBPARAM(tb));
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        /* Return (tb | n) so the caller of the TB can patch the chain.  */
        gen_op_movl_T0_im((long)tb + n);
        gen_op_exit_tb();
    } else {
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        gen_op_movl_T0_0();
        gen_op_exit_tb();
    }
}
/* Emit a direct jump to dest and end the TB.  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (__builtin_expect(s->singlestep_enabled, 0)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;      /* keep the Thumb bit through gen_bx */
        gen_op_movl_T0_im(dest);
        gen_bx(s);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
/* Signed 16x16 multiply for the DSP SMUL<x><y> family: select the top
   (nonzero x/y) or sign-extended bottom halfword of T0 and T1, then
   multiply them into T0.  */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
1061 /* Return the mask of PSR bits set by a MSR instruction. */
1062 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
1063 uint32_t mask;
1065 mask = 0;
1066 if (flags & (1 << 0))
1067 mask |= 0xff;
1068 if (flags & (1 << 1))
1069 mask |= 0xff00;
1070 if (flags & (1 << 2))
1071 mask |= 0xff0000;
1072 if (flags & (1 << 3))
1073 mask |= 0xff000000;
1074 /* Mask out undefined bits. */
1075 mask &= 0xf90f03ff;
1076 /* Mask out state bits. */
1077 if (!spsr)
1078 mask &= ~0x01000020;
1079 /* Mask out privileged bits. */
1080 if (IS_USER(s))
1081 mask &= 0xf80f0200;
1082 return mask;
/* Write T0 into the CPSR or SPSR under the given bit mask, then force a
   TB lookup since the mode may have changed.  Returns nonzero if access
   to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;
        gen_op_movl_spsr_T0(mask);
    } else {
        gen_op_movl_cpsr_T0(mask);
    }
    gen_lookup_tb(s);
    return 0;
}
/* Generate an exception return: store the next PC, restore the CPSR from
   the SPSR (all bits), and end the TB so the new mode takes effect.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    s->is_jmp = DISAS_UPDATE;
}
1108 static void disas_arm_insn(CPUState * env, DisasContext *s)
1110 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1112 insn = ldl_code(s->pc);
1113 s->pc += 4;
1115 cond = insn >> 28;
1116 if (cond == 0xf){
1117 /* Unconditional instructions. */
1118 if ((insn & 0x0d70f000) == 0x0550f000)
1119 return; /* PLD */
1120 else if ((insn & 0x0e000000) == 0x0a000000) {
1121 /* branch link and change to thumb (blx <offset>) */
1122 int32_t offset;
1124 val = (uint32_t)s->pc;
1125 gen_op_movl_T0_im(val);
1126 gen_movl_reg_T0(s, 14);
1127 /* Sign-extend the 24-bit offset */
1128 offset = (((int32_t)insn) << 8) >> 8;
1129 /* offset * 4 + bit24 * 2 + (thumb bit) */
1130 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1131 /* pipeline offset */
1132 val += 4;
1133 gen_op_movl_T0_im(val);
1134 gen_bx(s);
1135 return;
1136 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1137 /* Coprocessor double register transfer. */
1138 } else if ((insn & 0x0f000010) == 0x0e000010) {
1139 /* Additional coprocessor register transfer. */
1140 } else if ((insn & 0x0ff10010) == 0x01000000) {
1141 /* cps (privileged) */
1142 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1143 /* setend */
1144 if (insn & (1 << 9)) {
1145 /* BE8 mode not implemented. */
1146 goto illegal_op;
1148 return;
1150 goto illegal_op;
1152 if (cond != 0xe) {
1153 /* if not always execute, we generate a conditional jump to
1154 next instruction */
1155 s->condlabel = gen_new_label();
1156 gen_test_cc[cond ^ 1](s->condlabel);
1157 s->condjmp = 1;
1158 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1159 //s->is_jmp = DISAS_JUMP_NEXT;
1161 if ((insn & 0x0f900000) == 0x03000000) {
1162 if ((insn & 0x0fb0f000) != 0x0320f000)
1163 goto illegal_op;
1164 /* CPSR = immediate */
1165 val = insn & 0xff;
1166 shift = ((insn >> 8) & 0xf) * 2;
1167 if (shift)
1168 val = (val >> shift) | (val << (32 - shift));
1169 gen_op_movl_T0_im(val);
1170 i = ((insn & (1 << 22)) != 0);
1171 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1172 goto illegal_op;
1173 } else if ((insn & 0x0f900000) == 0x01000000
1174 && (insn & 0x00000090) != 0x00000090) {
1175 /* miscellaneous instructions */
1176 op1 = (insn >> 21) & 3;
1177 sh = (insn >> 4) & 0xf;
1178 rm = insn & 0xf;
1179 switch (sh) {
1180 case 0x0: /* move program status register */
1181 if (op1 & 1) {
1182 /* PSR = reg */
1183 gen_movl_T0_reg(s, rm);
1184 i = ((op1 & 2) != 0);
1185 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1186 goto illegal_op;
1187 } else {
1188 /* reg = PSR */
1189 rd = (insn >> 12) & 0xf;
1190 if (op1 & 2) {
1191 if (IS_USER(s))
1192 goto illegal_op;
1193 gen_op_movl_T0_spsr();
1194 } else {
1195 gen_op_movl_T0_cpsr();
1197 gen_movl_reg_T0(s, rd);
1199 break;
1200 case 0x1:
1201 if (op1 == 1) {
1202 /* branch/exchange thumb (bx). */
1203 gen_movl_T0_reg(s, rm);
1204 gen_bx(s);
1205 } else if (op1 == 3) {
1206 /* clz */
1207 rd = (insn >> 12) & 0xf;
1208 gen_movl_T0_reg(s, rm);
1209 gen_op_clz_T0();
1210 gen_movl_reg_T0(s, rd);
1211 } else {
1212 goto illegal_op;
1214 break;
1215 case 0x2:
1216 if (op1 == 1) {
1217 ARCH(5J); /* bxj */
1218 /* Trivial implementation equivalent to bx. */
1219 gen_movl_T0_reg(s, rm);
1220 gen_bx(s);
1221 } else {
1222 goto illegal_op;
1224 break;
1225 case 0x3:
1226 if (op1 != 1)
1227 goto illegal_op;
1229 /* branch link/exchange thumb (blx) */
1230 val = (uint32_t)s->pc;
1231 gen_op_movl_T0_im(val);
1232 gen_movl_reg_T0(s, 14);
1233 gen_movl_T0_reg(s, rm);
1234 gen_bx(s);
1235 break;
1236 case 0x5: /* saturating add/subtract */
1237 rd = (insn >> 12) & 0xf;
1238 rn = (insn >> 16) & 0xf;
1239 gen_movl_T0_reg(s, rm);
1240 gen_movl_T1_reg(s, rn);
1241 if (op1 & 2)
1242 gen_op_double_T1_saturate();
1243 if (op1 & 1)
1244 gen_op_subl_T0_T1_saturate();
1245 else
1246 gen_op_addl_T0_T1_saturate();
1247 gen_movl_reg_T0(s, rd);
1248 break;
1249 case 7: /* bkpt */
1250 gen_op_movl_T0_im((long)s->pc - 4);
1251 gen_op_movl_reg_TN[0][15]();
1252 gen_op_bkpt();
1253 s->is_jmp = DISAS_JUMP;
1254 break;
1255 case 0x8: /* signed multiply */
1256 case 0xa:
1257 case 0xc:
1258 case 0xe:
1259 rs = (insn >> 8) & 0xf;
1260 rn = (insn >> 12) & 0xf;
1261 rd = (insn >> 16) & 0xf;
1262 if (op1 == 1) {
1263 /* (32 * 16) >> 16 */
1264 gen_movl_T0_reg(s, rm);
1265 gen_movl_T1_reg(s, rs);
1266 if (sh & 4)
1267 gen_op_sarl_T1_im(16);
1268 else
1269 gen_op_sxth_T1();
1270 gen_op_imulw_T0_T1();
1271 if ((sh & 2) == 0) {
1272 gen_movl_T1_reg(s, rn);
1273 gen_op_addl_T0_T1_setq();
1275 gen_movl_reg_T0(s, rd);
1276 } else {
1277 /* 16 * 16 */
1278 gen_movl_T0_reg(s, rm);
1279 gen_movl_T1_reg(s, rs);
1280 gen_mulxy(sh & 2, sh & 4);
1281 if (op1 == 2) {
1282 gen_op_signbit_T1_T0();
1283 gen_op_addq_T0_T1(rn, rd);
1284 gen_movl_reg_T0(s, rn);
1285 gen_movl_reg_T1(s, rd);
1286 } else {
1287 if (op1 == 0) {
1288 gen_movl_T1_reg(s, rn);
1289 gen_op_addl_T0_T1_setq();
1291 gen_movl_reg_T0(s, rd);
1294 break;
1295 default:
1296 goto illegal_op;
1298 } else if (((insn & 0x0e000000) == 0 &&
1299 (insn & 0x00000090) != 0x90) ||
1300 ((insn & 0x0e000000) == (1 << 25))) {
1301 int set_cc, logic_cc, shiftop;
1303 op1 = (insn >> 21) & 0xf;
1304 set_cc = (insn >> 20) & 1;
1305 logic_cc = table_logic_cc[op1] & set_cc;
1307 /* data processing instruction */
1308 if (insn & (1 << 25)) {
1309 /* immediate operand */
1310 val = insn & 0xff;
1311 shift = ((insn >> 8) & 0xf) * 2;
1312 if (shift)
1313 val = (val >> shift) | (val << (32 - shift));
1314 gen_op_movl_T1_im(val);
1315 if (logic_cc && shift)
1316 gen_op_mov_CF_T1();
1317 } else {
1318 /* register */
1319 rm = (insn) & 0xf;
1320 gen_movl_T1_reg(s, rm);
1321 shiftop = (insn >> 5) & 3;
1322 if (!(insn & (1 << 4))) {
1323 shift = (insn >> 7) & 0x1f;
1324 if (shift != 0) {
1325 if (logic_cc) {
1326 gen_shift_T1_im_cc[shiftop](shift);
1327 } else {
1328 gen_shift_T1_im[shiftop](shift);
1330 } else if (shiftop != 0) {
1331 if (logic_cc) {
1332 gen_shift_T1_0_cc[shiftop]();
1333 } else {
1334 gen_shift_T1_0[shiftop]();
1337 } else {
1338 rs = (insn >> 8) & 0xf;
1339 gen_movl_T0_reg(s, rs);
1340 if (logic_cc) {
1341 gen_shift_T1_T0_cc[shiftop]();
1342 } else {
1343 gen_shift_T1_T0[shiftop]();
1347 if (op1 != 0x0f && op1 != 0x0d) {
1348 rn = (insn >> 16) & 0xf;
1349 gen_movl_T0_reg(s, rn);
1351 rd = (insn >> 12) & 0xf;
1352 switch(op1) {
1353 case 0x00:
1354 gen_op_andl_T0_T1();
1355 gen_movl_reg_T0(s, rd);
1356 if (logic_cc)
1357 gen_op_logic_T0_cc();
1358 break;
1359 case 0x01:
1360 gen_op_xorl_T0_T1();
1361 gen_movl_reg_T0(s, rd);
1362 if (logic_cc)
1363 gen_op_logic_T0_cc();
1364 break;
1365 case 0x02:
1366 if (set_cc && rd == 15) {
1367 /* SUBS r15, ... is used for exception return. */
1368 if (IS_USER(s))
1369 goto illegal_op;
1370 gen_op_subl_T0_T1_cc();
1371 gen_exception_return(s);
1372 } else {
1373 if (set_cc)
1374 gen_op_subl_T0_T1_cc();
1375 else
1376 gen_op_subl_T0_T1();
1377 gen_movl_reg_T0(s, rd);
1379 break;
1380 case 0x03:
1381 if (set_cc)
1382 gen_op_rsbl_T0_T1_cc();
1383 else
1384 gen_op_rsbl_T0_T1();
1385 gen_movl_reg_T0(s, rd);
1386 break;
1387 case 0x04:
1388 if (set_cc)
1389 gen_op_addl_T0_T1_cc();
1390 else
1391 gen_op_addl_T0_T1();
1392 gen_movl_reg_T0(s, rd);
1393 break;
1394 case 0x05:
1395 if (set_cc)
1396 gen_op_adcl_T0_T1_cc();
1397 else
1398 gen_op_adcl_T0_T1();
1399 gen_movl_reg_T0(s, rd);
1400 break;
1401 case 0x06:
1402 if (set_cc)
1403 gen_op_sbcl_T0_T1_cc();
1404 else
1405 gen_op_sbcl_T0_T1();
1406 gen_movl_reg_T0(s, rd);
1407 break;
1408 case 0x07:
1409 if (set_cc)
1410 gen_op_rscl_T0_T1_cc();
1411 else
1412 gen_op_rscl_T0_T1();
1413 gen_movl_reg_T0(s, rd);
1414 break;
1415 case 0x08:
1416 if (set_cc) {
1417 gen_op_andl_T0_T1();
1418 gen_op_logic_T0_cc();
1420 break;
1421 case 0x09:
1422 if (set_cc) {
1423 gen_op_xorl_T0_T1();
1424 gen_op_logic_T0_cc();
1426 break;
1427 case 0x0a:
1428 if (set_cc) {
1429 gen_op_subl_T0_T1_cc();
1431 break;
1432 case 0x0b:
1433 if (set_cc) {
1434 gen_op_addl_T0_T1_cc();
1436 break;
1437 case 0x0c:
1438 gen_op_orl_T0_T1();
1439 gen_movl_reg_T0(s, rd);
1440 if (logic_cc)
1441 gen_op_logic_T0_cc();
1442 break;
1443 case 0x0d:
1444 if (logic_cc && rd == 15) {
1445 /* MOVS r15, ... is used for exception return. */
1446 if (IS_USER(s))
1447 goto illegal_op;
1448 gen_op_movl_T0_T1();
1449 gen_exception_return(s);
1450 } else {
1451 gen_movl_reg_T1(s, rd);
1452 if (logic_cc)
1453 gen_op_logic_T1_cc();
1455 break;
1456 case 0x0e:
1457 gen_op_bicl_T0_T1();
1458 gen_movl_reg_T0(s, rd);
1459 if (logic_cc)
1460 gen_op_logic_T0_cc();
1461 break;
1462 default:
1463 case 0x0f:
1464 gen_op_notl_T1();
1465 gen_movl_reg_T1(s, rd);
1466 if (logic_cc)
1467 gen_op_logic_T1_cc();
1468 break;
1470 } else {
1471 /* other instructions */
1472 op1 = (insn >> 24) & 0xf;
1473 switch(op1) {
1474 case 0x0:
1475 case 0x1:
1476 /* multiplies, extra load/stores */
1477 sh = (insn >> 5) & 3;
1478 if (sh == 0) {
1479 if (op1 == 0x0) {
1480 rd = (insn >> 16) & 0xf;
1481 rn = (insn >> 12) & 0xf;
1482 rs = (insn >> 8) & 0xf;
1483 rm = (insn) & 0xf;
1484 if (((insn >> 22) & 3) == 0) {
1485 /* 32 bit mul */
1486 gen_movl_T0_reg(s, rs);
1487 gen_movl_T1_reg(s, rm);
1488 gen_op_mul_T0_T1();
1489 if (insn & (1 << 21)) {
1490 gen_movl_T1_reg(s, rn);
1491 gen_op_addl_T0_T1();
1493 if (insn & (1 << 20))
1494 gen_op_logic_T0_cc();
1495 gen_movl_reg_T0(s, rd);
1496 } else {
1497 /* 64 bit mul */
1498 gen_movl_T0_reg(s, rs);
1499 gen_movl_T1_reg(s, rm);
1500 if (insn & (1 << 22))
1501 gen_op_imull_T0_T1();
1502 else
1503 gen_op_mull_T0_T1();
1504 if (insn & (1 << 21)) /* mult accumulate */
1505 gen_op_addq_T0_T1(rn, rd);
1506 if (!(insn & (1 << 23))) { /* double accumulate */
1507 ARCH(6);
1508 gen_op_addq_lo_T0_T1(rn);
1509 gen_op_addq_lo_T0_T1(rd);
1511 if (insn & (1 << 20))
1512 gen_op_logicq_cc();
1513 gen_movl_reg_T0(s, rn);
1514 gen_movl_reg_T1(s, rd);
1516 } else {
1517 rn = (insn >> 16) & 0xf;
1518 rd = (insn >> 12) & 0xf;
1519 if (insn & (1 << 23)) {
1520 /* load/store exclusive */
1521 goto illegal_op;
1522 } else {
1523 /* SWP instruction */
1524 rm = (insn) & 0xf;
1526 gen_movl_T0_reg(s, rm);
1527 gen_movl_T1_reg(s, rn);
1528 if (insn & (1 << 22)) {
1529 gen_ldst(swpb, s);
1530 } else {
1531 gen_ldst(swpl, s);
1533 gen_movl_reg_T0(s, rd);
1536 } else {
1537 int address_offset;
1538 /* Misc load/store */
1539 rn = (insn >> 16) & 0xf;
1540 rd = (insn >> 12) & 0xf;
1541 gen_movl_T1_reg(s, rn);
1542 if (insn & (1 << 24))
1543 gen_add_datah_offset(s, insn, 0);
1544 address_offset = 0;
1545 if (insn & (1 << 20)) {
1546 /* load */
1547 switch(sh) {
1548 case 1:
1549 gen_ldst(lduw, s);
1550 break;
1551 case 2:
1552 gen_ldst(ldsb, s);
1553 break;
1554 default:
1555 case 3:
1556 gen_ldst(ldsw, s);
1557 break;
1559 gen_movl_reg_T0(s, rd);
1560 } else if (sh & 2) {
1561 /* doubleword */
1562 if (sh & 1) {
1563 /* store */
1564 gen_movl_T0_reg(s, rd);
1565 gen_ldst(stl, s);
1566 gen_op_addl_T1_im(4);
1567 gen_movl_T0_reg(s, rd + 1);
1568 gen_ldst(stl, s);
1569 } else {
1570 /* load */
1571 gen_ldst(ldl, s);
1572 gen_movl_reg_T0(s, rd);
1573 gen_op_addl_T1_im(4);
1574 gen_ldst(ldl, s);
1575 gen_movl_reg_T0(s, rd + 1);
1577 address_offset = -4;
1578 } else {
1579 /* store */
1580 gen_movl_T0_reg(s, rd);
1581 gen_ldst(stw, s);
1583 if (!(insn & (1 << 24))) {
1584 gen_add_datah_offset(s, insn, address_offset);
1585 gen_movl_reg_T1(s, rn);
1586 } else if (insn & (1 << 21)) {
1587 if (address_offset)
1588 gen_op_addl_T1_im(address_offset);
1589 gen_movl_reg_T1(s, rn);
1592 break;
1593 case 0x4:
1594 case 0x5:
1595 case 0x6:
1596 case 0x7:
1597 /* Check for undefined extension instructions
1598 * per the ARM Bible IE:
1599 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
1601 sh = (0xf << 20) | (0xf << 4);
1602 if (op1 == 0x7 && ((insn & sh) == sh))
1604 goto illegal_op;
1606 /* load/store byte/word */
1607 rn = (insn >> 16) & 0xf;
1608 rd = (insn >> 12) & 0xf;
1609 gen_movl_T1_reg(s, rn);
1610 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1611 if (insn & (1 << 24))
1612 gen_add_data_offset(s, insn);
1613 if (insn & (1 << 20)) {
1614 /* load */
1615 #if defined(CONFIG_USER_ONLY)
1616 if (insn & (1 << 22))
1617 gen_op_ldub_raw();
1618 else
1619 gen_op_ldl_raw();
1620 #else
1621 if (insn & (1 << 22)) {
1622 if (i)
1623 gen_op_ldub_user();
1624 else
1625 gen_op_ldub_kernel();
1626 } else {
1627 if (i)
1628 gen_op_ldl_user();
1629 else
1630 gen_op_ldl_kernel();
1632 #endif
1633 if (rd == 15)
1634 gen_bx(s);
1635 else
1636 gen_movl_reg_T0(s, rd);
1637 } else {
1638 /* store */
1639 gen_movl_T0_reg(s, rd);
1640 #if defined(CONFIG_USER_ONLY)
1641 if (insn & (1 << 22))
1642 gen_op_stb_raw();
1643 else
1644 gen_op_stl_raw();
1645 #else
1646 if (insn & (1 << 22)) {
1647 if (i)
1648 gen_op_stb_user();
1649 else
1650 gen_op_stb_kernel();
1651 } else {
1652 if (i)
1653 gen_op_stl_user();
1654 else
1655 gen_op_stl_kernel();
1657 #endif
1659 if (!(insn & (1 << 24))) {
1660 gen_add_data_offset(s, insn);
1661 gen_movl_reg_T1(s, rn);
1662 } else if (insn & (1 << 21))
1663 gen_movl_reg_T1(s, rn); {
1665 break;
1666 case 0x08:
1667 case 0x09:
1669 int j, n, user, loaded_base;
1670 /* load/store multiple words */
1671 /* XXX: store correct base if write back */
1672 user = 0;
1673 if (insn & (1 << 22)) {
1674 if (IS_USER(s))
1675 goto illegal_op; /* only usable in supervisor mode */
1677 if ((insn & (1 << 15)) == 0)
1678 user = 1;
1680 rn = (insn >> 16) & 0xf;
1681 gen_movl_T1_reg(s, rn);
1683 /* compute total size */
1684 loaded_base = 0;
1685 n = 0;
1686 for(i=0;i<16;i++) {
1687 if (insn & (1 << i))
1688 n++;
1690 /* XXX: test invalid n == 0 case ? */
1691 if (insn & (1 << 23)) {
1692 if (insn & (1 << 24)) {
1693 /* pre increment */
1694 gen_op_addl_T1_im(4);
1695 } else {
1696 /* post increment */
1698 } else {
1699 if (insn & (1 << 24)) {
1700 /* pre decrement */
1701 gen_op_addl_T1_im(-(n * 4));
1702 } else {
1703 /* post decrement */
1704 if (n != 1)
1705 gen_op_addl_T1_im(-((n - 1) * 4));
1708 j = 0;
1709 for(i=0;i<16;i++) {
1710 if (insn & (1 << i)) {
1711 if (insn & (1 << 20)) {
1712 /* load */
1713 gen_ldst(ldl, s);
1714 if (i == 15) {
1715 gen_bx(s);
1716 } else if (user) {
1717 gen_op_movl_user_T0(i);
1718 } else if (i == rn) {
1719 gen_op_movl_T2_T0();
1720 loaded_base = 1;
1721 } else {
1722 gen_movl_reg_T0(s, i);
1724 } else {
1725 /* store */
1726 if (i == 15) {
1727 /* special case: r15 = PC + 12 */
1728 val = (long)s->pc + 8;
1729 gen_op_movl_TN_im[0](val);
1730 } else if (user) {
1731 gen_op_movl_T0_user(i);
1732 } else {
1733 gen_movl_T0_reg(s, i);
1735 gen_ldst(stl, s);
1737 j++;
1738 /* no need to add after the last transfer */
1739 if (j != n)
1740 gen_op_addl_T1_im(4);
1743 if (insn & (1 << 21)) {
1744 /* write back */
1745 if (insn & (1 << 23)) {
1746 if (insn & (1 << 24)) {
1747 /* pre increment */
1748 } else {
1749 /* post increment */
1750 gen_op_addl_T1_im(4);
1752 } else {
1753 if (insn & (1 << 24)) {
1754 /* pre decrement */
1755 if (n != 1)
1756 gen_op_addl_T1_im(-((n - 1) * 4));
1757 } else {
1758 /* post decrement */
1759 gen_op_addl_T1_im(-(n * 4));
1762 gen_movl_reg_T1(s, rn);
1764 if (loaded_base) {
1765 gen_op_movl_T0_T2();
1766 gen_movl_reg_T0(s, rn);
1768 if ((insn & (1 << 22)) && !user) {
1769 /* Restore CPSR from SPSR. */
1770 gen_op_movl_T0_spsr();
1771 gen_op_movl_cpsr_T0(0xffffffff);
1772 s->is_jmp = DISAS_UPDATE;
1775 break;
1776 case 0xa:
1777 case 0xb:
1779 int32_t offset;
1781 /* branch (and link) */
1782 val = (int32_t)s->pc;
1783 if (insn & (1 << 24)) {
1784 gen_op_movl_T0_im(val);
1785 gen_op_movl_reg_TN[0][14]();
1787 offset = (((int32_t)insn << 8) >> 8);
1788 val += (offset << 2) + 4;
1789 gen_jmp(s, val);
1791 break;
1792 case 0xc:
1793 case 0xd:
1794 case 0xe:
1795 /* Coprocessor. */
1796 op1 = (insn >> 8) & 0xf;
1797 switch (op1) {
1798 case 10:
1799 case 11:
1800 if (disas_vfp_insn (env, s, insn))
1801 goto illegal_op;
1802 break;
1803 case 15:
1804 if (disas_cp15_insn (s, insn))
1805 goto illegal_op;
1806 break;
1807 default:
1808 /* unknown coprocessor. */
1809 goto illegal_op;
1811 break;
1812 case 0xf:
1813 /* swi */
1814 gen_op_movl_T0_im((long)s->pc);
1815 gen_op_movl_reg_TN[0][15]();
1816 gen_op_swi();
1817 s->is_jmp = DISAS_JUMP;
1818 break;
1819 default:
1820 illegal_op:
1821 gen_op_movl_T0_im((long)s->pc - 4);
1822 gen_op_movl_reg_TN[0][15]();
1823 gen_op_undef_insn();
1824 s->is_jmp = DISAS_JUMP;
1825 break;
1830 static void disas_thumb_insn(DisasContext *s)
1832 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1833 int32_t offset;
1834 int i;
1836 insn = lduw_code(s->pc);
1837 s->pc += 2;
1839 switch (insn >> 12) {
1840 case 0: case 1:
1841 rd = insn & 7;
1842 op = (insn >> 11) & 3;
1843 if (op == 3) {
1844 /* add/subtract */
1845 rn = (insn >> 3) & 7;
1846 gen_movl_T0_reg(s, rn);
1847 if (insn & (1 << 10)) {
1848 /* immediate */
1849 gen_op_movl_T1_im((insn >> 6) & 7);
1850 } else {
1851 /* reg */
1852 rm = (insn >> 6) & 7;
1853 gen_movl_T1_reg(s, rm);
1855 if (insn & (1 << 9))
1856 gen_op_subl_T0_T1_cc();
1857 else
1858 gen_op_addl_T0_T1_cc();
1859 gen_movl_reg_T0(s, rd);
1860 } else {
1861 /* shift immediate */
1862 rm = (insn >> 3) & 7;
1863 shift = (insn >> 6) & 0x1f;
1864 gen_movl_T0_reg(s, rm);
1865 gen_shift_T0_im_thumb[op](shift);
1866 gen_movl_reg_T0(s, rd);
1868 break;
1869 case 2: case 3:
1870 /* arithmetic large immediate */
1871 op = (insn >> 11) & 3;
1872 rd = (insn >> 8) & 0x7;
1873 if (op == 0) {
1874 gen_op_movl_T0_im(insn & 0xff);
1875 } else {
1876 gen_movl_T0_reg(s, rd);
1877 gen_op_movl_T1_im(insn & 0xff);
1879 switch (op) {
1880 case 0: /* mov */
1881 gen_op_logic_T0_cc();
1882 break;
1883 case 1: /* cmp */
1884 gen_op_subl_T0_T1_cc();
1885 break;
1886 case 2: /* add */
1887 gen_op_addl_T0_T1_cc();
1888 break;
1889 case 3: /* sub */
1890 gen_op_subl_T0_T1_cc();
1891 break;
1893 if (op != 1)
1894 gen_movl_reg_T0(s, rd);
1895 break;
1896 case 4:
1897 if (insn & (1 << 11)) {
1898 rd = (insn >> 8) & 7;
1899 /* load pc-relative. Bit 1 of PC is ignored. */
1900 val = s->pc + 2 + ((insn & 0xff) * 4);
1901 val &= ~(uint32_t)2;
1902 gen_op_movl_T1_im(val);
1903 gen_ldst(ldl, s);
1904 gen_movl_reg_T0(s, rd);
1905 break;
1907 if (insn & (1 << 10)) {
1908 /* data processing extended or blx */
1909 rd = (insn & 7) | ((insn >> 4) & 8);
1910 rm = (insn >> 3) & 0xf;
1911 op = (insn >> 8) & 3;
1912 switch (op) {
1913 case 0: /* add */
1914 gen_movl_T0_reg(s, rd);
1915 gen_movl_T1_reg(s, rm);
1916 gen_op_addl_T0_T1();
1917 gen_movl_reg_T0(s, rd);
1918 break;
1919 case 1: /* cmp */
1920 gen_movl_T0_reg(s, rd);
1921 gen_movl_T1_reg(s, rm);
1922 gen_op_subl_T0_T1_cc();
1923 break;
1924 case 2: /* mov/cpy */
1925 gen_movl_T0_reg(s, rm);
1926 gen_movl_reg_T0(s, rd);
1927 break;
1928 case 3:/* branch [and link] exchange thumb register */
1929 if (insn & (1 << 7)) {
1930 val = (uint32_t)s->pc | 1;
1931 gen_op_movl_T1_im(val);
1932 gen_movl_reg_T1(s, 14);
1934 gen_movl_T0_reg(s, rm);
1935 gen_bx(s);
1936 break;
1938 break;
1941 /* data processing register */
1942 rd = insn & 7;
1943 rm = (insn >> 3) & 7;
1944 op = (insn >> 6) & 0xf;
1945 if (op == 2 || op == 3 || op == 4 || op == 7) {
1946 /* the shift/rotate ops want the operands backwards */
1947 val = rm;
1948 rm = rd;
1949 rd = val;
1950 val = 1;
1951 } else {
1952 val = 0;
1955 if (op == 9) /* neg */
1956 gen_op_movl_T0_im(0);
1957 else if (op != 0xf) /* mvn doesn't read its first operand */
1958 gen_movl_T0_reg(s, rd);
1960 gen_movl_T1_reg(s, rm);
1961 switch (op) {
1962 case 0x0: /* and */
1963 gen_op_andl_T0_T1();
1964 gen_op_logic_T0_cc();
1965 break;
1966 case 0x1: /* eor */
1967 gen_op_xorl_T0_T1();
1968 gen_op_logic_T0_cc();
1969 break;
1970 case 0x2: /* lsl */
1971 gen_op_shll_T1_T0_cc();
1972 gen_op_logic_T1_cc();
1973 break;
1974 case 0x3: /* lsr */
1975 gen_op_shrl_T1_T0_cc();
1976 gen_op_logic_T1_cc();
1977 break;
1978 case 0x4: /* asr */
1979 gen_op_sarl_T1_T0_cc();
1980 gen_op_logic_T1_cc();
1981 break;
1982 case 0x5: /* adc */
1983 gen_op_adcl_T0_T1_cc();
1984 break;
1985 case 0x6: /* sbc */
1986 gen_op_sbcl_T0_T1_cc();
1987 break;
1988 case 0x7: /* ror */
1989 gen_op_rorl_T1_T0_cc();
1990 gen_op_logic_T1_cc();
1991 break;
1992 case 0x8: /* tst */
1993 gen_op_andl_T0_T1();
1994 gen_op_logic_T0_cc();
1995 rd = 16;
1996 break;
1997 case 0x9: /* neg */
1998 gen_op_subl_T0_T1_cc();
1999 break;
2000 case 0xa: /* cmp */
2001 gen_op_subl_T0_T1_cc();
2002 rd = 16;
2003 break;
2004 case 0xb: /* cmn */
2005 gen_op_addl_T0_T1_cc();
2006 rd = 16;
2007 break;
2008 case 0xc: /* orr */
2009 gen_op_orl_T0_T1();
2010 gen_op_logic_T0_cc();
2011 break;
2012 case 0xd: /* mul */
2013 gen_op_mull_T0_T1();
2014 gen_op_logic_T0_cc();
2015 break;
2016 case 0xe: /* bic */
2017 gen_op_bicl_T0_T1();
2018 gen_op_logic_T0_cc();
2019 break;
2020 case 0xf: /* mvn */
2021 gen_op_notl_T1();
2022 gen_op_logic_T1_cc();
2023 val = 1;
2024 rm = rd;
2025 break;
2027 if (rd != 16) {
2028 if (val)
2029 gen_movl_reg_T1(s, rm);
2030 else
2031 gen_movl_reg_T0(s, rd);
2033 break;
2035 case 5:
2036 /* load/store register offset. */
2037 rd = insn & 7;
2038 rn = (insn >> 3) & 7;
2039 rm = (insn >> 6) & 7;
2040 op = (insn >> 9) & 7;
2041 gen_movl_T1_reg(s, rn);
2042 gen_movl_T2_reg(s, rm);
2043 gen_op_addl_T1_T2();
2045 if (op < 3) /* store */
2046 gen_movl_T0_reg(s, rd);
2048 switch (op) {
2049 case 0: /* str */
2050 gen_ldst(stl, s);
2051 break;
2052 case 1: /* strh */
2053 gen_ldst(stw, s);
2054 break;
2055 case 2: /* strb */
2056 gen_ldst(stb, s);
2057 break;
2058 case 3: /* ldrsb */
2059 gen_ldst(ldsb, s);
2060 break;
2061 case 4: /* ldr */
2062 gen_ldst(ldl, s);
2063 break;
2064 case 5: /* ldrh */
2065 gen_ldst(lduw, s);
2066 break;
2067 case 6: /* ldrb */
2068 gen_ldst(ldub, s);
2069 break;
2070 case 7: /* ldrsh */
2071 gen_ldst(ldsw, s);
2072 break;
2074 if (op >= 3) /* load */
2075 gen_movl_reg_T0(s, rd);
2076 break;
2078 case 6:
2079 /* load/store word immediate offset */
2080 rd = insn & 7;
2081 rn = (insn >> 3) & 7;
2082 gen_movl_T1_reg(s, rn);
2083 val = (insn >> 4) & 0x7c;
2084 gen_op_movl_T2_im(val);
2085 gen_op_addl_T1_T2();
2087 if (insn & (1 << 11)) {
2088 /* load */
2089 gen_ldst(ldl, s);
2090 gen_movl_reg_T0(s, rd);
2091 } else {
2092 /* store */
2093 gen_movl_T0_reg(s, rd);
2094 gen_ldst(stl, s);
2096 break;
2098 case 7:
2099 /* load/store byte immediate offset */
2100 rd = insn & 7;
2101 rn = (insn >> 3) & 7;
2102 gen_movl_T1_reg(s, rn);
2103 val = (insn >> 6) & 0x1f;
2104 gen_op_movl_T2_im(val);
2105 gen_op_addl_T1_T2();
2107 if (insn & (1 << 11)) {
2108 /* load */
2109 gen_ldst(ldub, s);
2110 gen_movl_reg_T0(s, rd);
2111 } else {
2112 /* store */
2113 gen_movl_T0_reg(s, rd);
2114 gen_ldst(stb, s);
2116 break;
2118 case 8:
2119 /* load/store halfword immediate offset */
2120 rd = insn & 7;
2121 rn = (insn >> 3) & 7;
2122 gen_movl_T1_reg(s, rn);
2123 val = (insn >> 5) & 0x3e;
2124 gen_op_movl_T2_im(val);
2125 gen_op_addl_T1_T2();
2127 if (insn & (1 << 11)) {
2128 /* load */
2129 gen_ldst(lduw, s);
2130 gen_movl_reg_T0(s, rd);
2131 } else {
2132 /* store */
2133 gen_movl_T0_reg(s, rd);
2134 gen_ldst(stw, s);
2136 break;
2138 case 9:
2139 /* load/store from stack */
2140 rd = (insn >> 8) & 7;
2141 gen_movl_T1_reg(s, 13);
2142 val = (insn & 0xff) * 4;
2143 gen_op_movl_T2_im(val);
2144 gen_op_addl_T1_T2();
2146 if (insn & (1 << 11)) {
2147 /* load */
2148 gen_ldst(ldl, s);
2149 gen_movl_reg_T0(s, rd);
2150 } else {
2151 /* store */
2152 gen_movl_T0_reg(s, rd);
2153 gen_ldst(stl, s);
2155 break;
2157 case 10:
2158 /* add to high reg */
2159 rd = (insn >> 8) & 7;
2160 if (insn & (1 << 11)) {
2161 /* SP */
2162 gen_movl_T0_reg(s, 13);
2163 } else {
2164 /* PC. bit 1 is ignored. */
2165 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
2167 val = (insn & 0xff) * 4;
2168 gen_op_movl_T1_im(val);
2169 gen_op_addl_T0_T1();
2170 gen_movl_reg_T0(s, rd);
2171 break;
2173 case 11:
2174 /* misc */
2175 op = (insn >> 8) & 0xf;
2176 switch (op) {
2177 case 0:
2178 /* adjust stack pointer */
2179 gen_movl_T1_reg(s, 13);
2180 val = (insn & 0x7f) * 4;
2181 if (insn & (1 << 7))
2182 val = -(int32_t)val;
2183 gen_op_movl_T2_im(val);
2184 gen_op_addl_T1_T2();
2185 gen_movl_reg_T1(s, 13);
2186 break;
2188 case 4: case 5: case 0xc: case 0xd:
2189 /* push/pop */
2190 gen_movl_T1_reg(s, 13);
2191 if (insn & (1 << 8))
2192 offset = 4;
2193 else
2194 offset = 0;
2195 for (i = 0; i < 8; i++) {
2196 if (insn & (1 << i))
2197 offset += 4;
2199 if ((insn & (1 << 11)) == 0) {
2200 gen_op_movl_T2_im(-offset);
2201 gen_op_addl_T1_T2();
2203 gen_op_movl_T2_im(4);
2204 for (i = 0; i < 8; i++) {
2205 if (insn & (1 << i)) {
2206 if (insn & (1 << 11)) {
2207 /* pop */
2208 gen_ldst(ldl, s);
2209 gen_movl_reg_T0(s, i);
2210 } else {
2211 /* push */
2212 gen_movl_T0_reg(s, i);
2213 gen_ldst(stl, s);
2215 /* advance to the next address. */
2216 gen_op_addl_T1_T2();
2219 if (insn & (1 << 8)) {
2220 if (insn & (1 << 11)) {
2221 /* pop pc */
2222 gen_ldst(ldl, s);
2223 /* don't set the pc until the rest of the instruction
2224 has completed */
2225 } else {
2226 /* push lr */
2227 gen_movl_T0_reg(s, 14);
2228 gen_ldst(stl, s);
2230 gen_op_addl_T1_T2();
2232 if ((insn & (1 << 11)) == 0) {
2233 gen_op_movl_T2_im(-offset);
2234 gen_op_addl_T1_T2();
2236 /* write back the new stack pointer */
2237 gen_movl_reg_T1(s, 13);
2238 /* set the new PC value */
2239 if ((insn & 0x0900) == 0x0900)
2240 gen_bx(s);
2241 break;
2243 case 0xe: /* bkpt */
2244 gen_op_movl_T0_im((long)s->pc - 2);
2245 gen_op_movl_reg_TN[0][15]();
2246 gen_op_bkpt();
2247 s->is_jmp = DISAS_JUMP;
2248 break;
2250 default:
2251 goto undef;
2253 break;
2255 case 12:
2256 /* load/store multiple */
2257 rn = (insn >> 8) & 0x7;
2258 gen_movl_T1_reg(s, rn);
2259 gen_op_movl_T2_im(4);
2260 for (i = 0; i < 8; i++) {
2261 if (insn & (1 << i)) {
2262 if (insn & (1 << 11)) {
2263 /* load */
2264 gen_ldst(ldl, s);
2265 gen_movl_reg_T0(s, i);
2266 } else {
2267 /* store */
2268 gen_movl_T0_reg(s, i);
2269 gen_ldst(stl, s);
2271 /* advance to the next address */
2272 gen_op_addl_T1_T2();
2275 /* Base register writeback. */
2276 if ((insn & (1 << rn)) == 0)
2277 gen_movl_reg_T1(s, rn);
2278 break;
2280 case 13:
2281 /* conditional branch or swi */
2282 cond = (insn >> 8) & 0xf;
2283 if (cond == 0xe)
2284 goto undef;
2286 if (cond == 0xf) {
2287 /* swi */
2288 gen_op_movl_T0_im((long)s->pc | 1);
2289 /* Don't set r15. */
2290 gen_op_movl_reg_TN[0][15]();
2291 gen_op_swi();
2292 s->is_jmp = DISAS_JUMP;
2293 break;
2295 /* generate a conditional jump to next instruction */
2296 s->condlabel = gen_new_label();
2297 gen_test_cc[cond ^ 1](s->condlabel);
2298 s->condjmp = 1;
2299 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2300 //s->is_jmp = DISAS_JUMP_NEXT;
2301 gen_movl_T1_reg(s, 15);
2303 /* jump to the offset */
2304 val = (uint32_t)s->pc + 2;
2305 offset = ((int32_t)insn << 24) >> 24;
2306 val += offset << 1;
2307 gen_jmp(s, val);
2308 break;
2310 case 14:
2311 /* unconditional branch */
2312 if (insn & (1 << 11)) {
2313 /* Second half of blx. */
2314 offset = ((insn & 0x7ff) << 1);
2315 gen_movl_T0_reg(s, 14);
2316 gen_op_movl_T1_im(offset);
2317 gen_op_addl_T0_T1();
2318 gen_op_movl_T1_im(0xfffffffc);
2319 gen_op_andl_T0_T1();
2321 val = (uint32_t)s->pc;
2322 gen_op_movl_T1_im(val | 1);
2323 gen_movl_reg_T1(s, 14);
2324 gen_bx(s);
2325 break;
2327 val = (uint32_t)s->pc;
2328 offset = ((int32_t)insn << 21) >> 21;
2329 val += (offset << 1) + 2;
2330 gen_jmp(s, val);
2331 break;
2333 case 15:
2334 /* branch and link [and switch to arm] */
2335 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
2336 /* Instruction spans a page boundary. Implement it as two
2337 16-bit instructions in case the second half causes an
2338 prefetch abort. */
2339 offset = ((int32_t)insn << 21) >> 9;
2340 val = s->pc + 2 + offset;
2341 gen_op_movl_T0_im(val);
2342 gen_movl_reg_T0(s, 14);
2343 break;
2345 if (insn & (1 << 11)) {
2346 /* Second half of bl. */
2347 offset = ((insn & 0x7ff) << 1) | 1;
2348 gen_movl_T0_reg(s, 14);
2349 gen_op_movl_T1_im(offset);
2350 gen_op_addl_T0_T1();
2352 val = (uint32_t)s->pc;
2353 gen_op_movl_T1_im(val | 1);
2354 gen_movl_reg_T1(s, 14);
2355 gen_bx(s);
2356 break;
2358 offset = ((int32_t)insn << 21) >> 10;
2359 insn = lduw_code(s->pc);
2360 offset |= insn & 0x7ff;
2362 val = (uint32_t)s->pc + 2;
2363 gen_op_movl_T1_im(val | 1);
2364 gen_movl_reg_T1(s, 14);
2366 val += offset << 1;
2367 if (insn & (1 << 12)) {
2368 /* bl */
2369 gen_jmp(s, val);
2370 } else {
2371 /* blx */
2372 val &= ~(uint32_t)2;
2373 gen_op_movl_T0_im(val);
2374 gen_bx(s);
2377 return;
2378 undef:
2379 gen_op_movl_T0_im((long)s->pc - 2);
2380 gen_op_movl_reg_TN[0][15]();
2381 gen_op_undef_insn();
2382 s->is_jmp = DISAS_JUMP;
2385 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2386 basic block 'tb'. If search_pc is TRUE, also generate PC
2387 information for each intermediate instruction. */
2388 static inline int gen_intermediate_code_internal(CPUState *env,
2389 TranslationBlock *tb,
2390 int search_pc)
2392 DisasContext dc1, *dc = &dc1;
2393 uint16_t *gen_opc_end;
2394 int j, lj;
2395 target_ulong pc_start;
2396 uint32_t next_page_start;
2398 /* generate intermediate code */
2399 pc_start = tb->pc;
2401 dc->tb = tb;
2403 gen_opc_ptr = gen_opc_buf;
2404 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2405 gen_opparam_ptr = gen_opparam_buf;
2407 dc->is_jmp = DISAS_NEXT;
2408 dc->pc = pc_start;
2409 dc->singlestep_enabled = env->singlestep_enabled;
2410 dc->condjmp = 0;
2411 dc->thumb = env->thumb;
2412 #if !defined(CONFIG_USER_ONLY)
2413 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
2414 #endif
2415 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2416 nb_gen_labels = 0;
2417 lj = -1;
2418 do {
2419 if (env->nb_breakpoints > 0) {
2420 for(j = 0; j < env->nb_breakpoints; j++) {
2421 if (env->breakpoints[j] == dc->pc) {
2422 gen_op_movl_T0_im((long)dc->pc);
2423 gen_op_movl_reg_TN[0][15]();
2424 gen_op_debug();
2425 dc->is_jmp = DISAS_JUMP;
2426 break;
2430 if (search_pc) {
2431 j = gen_opc_ptr - gen_opc_buf;
2432 if (lj < j) {
2433 lj++;
2434 while (lj < j)
2435 gen_opc_instr_start[lj++] = 0;
2437 gen_opc_pc[lj] = dc->pc;
2438 gen_opc_instr_start[lj] = 1;
2441 if (env->thumb)
2442 disas_thumb_insn(dc);
2443 else
2444 disas_arm_insn(env, dc);
2446 if (dc->condjmp && !dc->is_jmp) {
2447 gen_set_label(dc->condlabel);
2448 dc->condjmp = 0;
2450 /* Translation stops when a conditional branch is enoutered.
2451 * Otherwise the subsequent code could get translated several times.
2452 * Also stop translation when a page boundary is reached. This
2453 * ensures prefech aborts occur at the right place. */
2454 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2455 !env->singlestep_enabled &&
2456 dc->pc < next_page_start);
2457 /* At this stage dc->condjmp will only be set when the skipped
2458 * instruction was a conditional branch, and the PC has already been
2459 * written. */
2460 if (__builtin_expect(env->singlestep_enabled, 0)) {
2461 /* Make sure the pc is updated, and raise a debug exception. */
2462 if (dc->condjmp) {
2463 gen_op_debug();
2464 gen_set_label(dc->condlabel);
2466 if (dc->condjmp || !dc->is_jmp) {
2467 gen_op_movl_T0_im((long)dc->pc);
2468 gen_op_movl_reg_TN[0][15]();
2469 dc->condjmp = 0;
2471 gen_op_debug();
2472 } else {
2473 switch(dc->is_jmp) {
2474 case DISAS_NEXT:
2475 gen_goto_tb(dc, 1, dc->pc);
2476 break;
2477 default:
2478 case DISAS_JUMP:
2479 case DISAS_UPDATE:
2480 /* indicate that the hash table must be used to find the next TB */
2481 gen_op_movl_T0_0();
2482 gen_op_exit_tb();
2483 break;
2484 case DISAS_TB_JUMP:
2485 /* nothing more to generate */
2486 break;
2488 if (dc->condjmp) {
2489 gen_set_label(dc->condlabel);
2490 gen_goto_tb(dc, 1, dc->pc);
2491 dc->condjmp = 0;
2494 *gen_opc_ptr = INDEX_op_end;
2496 #ifdef DEBUG_DISAS
2497 if (loglevel & CPU_LOG_TB_IN_ASM) {
2498 fprintf(logfile, "----------------\n");
2499 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2500 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2501 fprintf(logfile, "\n");
2502 if (loglevel & (CPU_LOG_TB_OP)) {
2503 fprintf(logfile, "OP:\n");
2504 dump_ops(gen_opc_buf, gen_opparam_buf);
2505 fprintf(logfile, "\n");
2508 #endif
2509 if (search_pc) {
2510 j = gen_opc_ptr - gen_opc_buf;
2511 lj++;
2512 while (lj <= j)
2513 gen_opc_instr_start[lj++] = 0;
2514 tb->size = 0;
2515 } else {
2516 tb->size = dc->pc - pc_start;
2518 return 0;
2521 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2523 return gen_intermediate_code_internal(env, tb, 0);
2526 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2528 return gen_intermediate_code_internal(env, tb, 1);
/* Printable names for the CPSR mode field (low 4 bits, with bit 4
   assumed set); unrecognized/reserved encodings display as "???".  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
2535 void cpu_dump_state(CPUState *env, FILE *f,
2536 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2537 int flags)
2539 int i;
2540 union {
2541 uint32_t i;
2542 float s;
2543 } s0, s1;
2544 CPU_DoubleU d;
2545 /* ??? This assumes float64 and double have the same layout.
2546 Oh well, it's only debug dumps. */
2547 union {
2548 float64 f64;
2549 double d;
2550 } d0;
2551 uint32_t psr;
2553 for(i=0;i<16;i++) {
2554 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2555 if ((i % 4) == 3)
2556 cpu_fprintf(f, "\n");
2557 else
2558 cpu_fprintf(f, " ");
2560 psr = cpsr_read(env);
2561 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2562 psr,
2563 psr & (1 << 31) ? 'N' : '-',
2564 psr & (1 << 30) ? 'Z' : '-',
2565 psr & (1 << 29) ? 'C' : '-',
2566 psr & (1 << 28) ? 'V' : '-',
2567 psr & CPSR_T ? 'T' : 'A',
2568 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2570 for (i = 0; i < 16; i++) {
2571 d.d = env->vfp.regs[i];
2572 s0.i = d.l.lower;
2573 s1.i = d.l.upper;
2574 d0.f64 = d.d;
2575 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2576 i * 2, (int)s0.i, s0.s,
2577 i * 2 + 1, (int)s1.i, s1.s,
2578 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2579 d0.d);
2581 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);