[qemu/mini2440.git] / target-arm / translate.c
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <stdarg.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <string.h>
26 #include <inttypes.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "disas.h"
32 #define ENABLE_ARCH_5J 0
33 #define ENABLE_ARCH_6 1
34 #define ENABLE_ARCH_6T2 1
36 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
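/* e.g. ARCH(6T2) expands to "if (!ENABLE_ARCH_6T2) goto illegal_op;", so a
   decoder can reject instructions belonging to architecture revisions that
   are compiled out above. */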
38 /* internal defines */
39 typedef struct DisasContext {
40 target_ulong pc;
41 int is_jmp;
42 /* Nonzero if this instruction has been conditionally skipped. */
43 int condjmp;
44 /* The label that will be jumped to when the instruction is skipped. */
45 int condlabel;
46 struct TranslationBlock *tb;
47 int singlestep_enabled;
48 int thumb;
49 int is_mem;
50 #if !defined(CONFIG_USER_ONLY)
51 int user;
52 #endif
53 } DisasContext;
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) 1
57 #else
58 #define IS_USER(s) (s->user)
59 #endif
61 #define DISAS_JUMP_NEXT 4
63 #ifdef USE_DIRECT_JUMP
64 #define TBPARAM(x)
65 #else
66 #define TBPARAM(x) (long)(x)
67 #endif
69 /* XXX: move that elsewhere */
70 static uint16_t *gen_opc_ptr;
71 static uint32_t *gen_opparam_ptr;
72 extern FILE *logfile;
73 extern int loglevel;
75 enum {
76 #define DEF(s, n, copy_size) INDEX_op_ ## s,
77 #include "opc.h"
78 #undef DEF
79 NB_OPS,
82 #include "gen-op.h"
84 static GenOpFunc1 *gen_test_cc[14] = {
85 gen_op_test_eq,
86 gen_op_test_ne,
87 gen_op_test_cs,
88 gen_op_test_cc,
89 gen_op_test_mi,
90 gen_op_test_pl,
91 gen_op_test_vs,
92 gen_op_test_vc,
93 gen_op_test_hi,
94 gen_op_test_ls,
95 gen_op_test_ge,
96 gen_op_test_lt,
97 gen_op_test_gt,
98 gen_op_test_le,
101 const uint8_t table_logic_cc[16] = {
102 1, /* and */
103 1, /* xor */
104 0, /* sub */
105 0, /* rsb */
106 0, /* add */
107 0, /* adc */
108 0, /* sbc */
109 0, /* rsc */
110 1, /* andl */
111 1, /* xorl */
112 0, /* cmp */
113 0, /* cmn */
114 1, /* orr */
115 1, /* mov */
116 1, /* bic */
117 1, /* mvn */
120 static GenOpFunc1 *gen_shift_T1_im[4] = {
121 gen_op_shll_T1_im,
122 gen_op_shrl_T1_im,
123 gen_op_sarl_T1_im,
124 gen_op_rorl_T1_im,
127 static GenOpFunc *gen_shift_T1_0[4] = {
128 NULL,
129 gen_op_shrl_T1_0,
130 gen_op_sarl_T1_0,
131 gen_op_rrxl_T1,
134 static GenOpFunc1 *gen_shift_T2_im[4] = {
135 gen_op_shll_T2_im,
136 gen_op_shrl_T2_im,
137 gen_op_sarl_T2_im,
138 gen_op_rorl_T2_im,
141 static GenOpFunc *gen_shift_T2_0[4] = {
142 NULL,
143 gen_op_shrl_T2_0,
144 gen_op_sarl_T2_0,
145 gen_op_rrxl_T2,
148 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
149 gen_op_shll_T1_im_cc,
150 gen_op_shrl_T1_im_cc,
151 gen_op_sarl_T1_im_cc,
152 gen_op_rorl_T1_im_cc,
155 static GenOpFunc *gen_shift_T1_0_cc[4] = {
156 NULL,
157 gen_op_shrl_T1_0_cc,
158 gen_op_sarl_T1_0_cc,
159 gen_op_rrxl_T1_cc,
162 static GenOpFunc *gen_shift_T1_T0[4] = {
163 gen_op_shll_T1_T0,
164 gen_op_shrl_T1_T0,
165 gen_op_sarl_T1_T0,
166 gen_op_rorl_T1_T0,
169 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
170 gen_op_shll_T1_T0_cc,
171 gen_op_shrl_T1_T0_cc,
172 gen_op_sarl_T1_T0_cc,
173 gen_op_rorl_T1_T0_cc,
176 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
178 gen_op_movl_T0_r0,
179 gen_op_movl_T0_r1,
180 gen_op_movl_T0_r2,
181 gen_op_movl_T0_r3,
182 gen_op_movl_T0_r4,
183 gen_op_movl_T0_r5,
184 gen_op_movl_T0_r6,
185 gen_op_movl_T0_r7,
186 gen_op_movl_T0_r8,
187 gen_op_movl_T0_r9,
188 gen_op_movl_T0_r10,
189 gen_op_movl_T0_r11,
190 gen_op_movl_T0_r12,
191 gen_op_movl_T0_r13,
192 gen_op_movl_T0_r14,
193 gen_op_movl_T0_r15,
196 gen_op_movl_T1_r0,
197 gen_op_movl_T1_r1,
198 gen_op_movl_T1_r2,
199 gen_op_movl_T1_r3,
200 gen_op_movl_T1_r4,
201 gen_op_movl_T1_r5,
202 gen_op_movl_T1_r6,
203 gen_op_movl_T1_r7,
204 gen_op_movl_T1_r8,
205 gen_op_movl_T1_r9,
206 gen_op_movl_T1_r10,
207 gen_op_movl_T1_r11,
208 gen_op_movl_T1_r12,
209 gen_op_movl_T1_r13,
210 gen_op_movl_T1_r14,
211 gen_op_movl_T1_r15,
214 gen_op_movl_T2_r0,
215 gen_op_movl_T2_r1,
216 gen_op_movl_T2_r2,
217 gen_op_movl_T2_r3,
218 gen_op_movl_T2_r4,
219 gen_op_movl_T2_r5,
220 gen_op_movl_T2_r6,
221 gen_op_movl_T2_r7,
222 gen_op_movl_T2_r8,
223 gen_op_movl_T2_r9,
224 gen_op_movl_T2_r10,
225 gen_op_movl_T2_r11,
226 gen_op_movl_T2_r12,
227 gen_op_movl_T2_r13,
228 gen_op_movl_T2_r14,
229 gen_op_movl_T2_r15,
233 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
235 gen_op_movl_r0_T0,
236 gen_op_movl_r1_T0,
237 gen_op_movl_r2_T0,
238 gen_op_movl_r3_T0,
239 gen_op_movl_r4_T0,
240 gen_op_movl_r5_T0,
241 gen_op_movl_r6_T0,
242 gen_op_movl_r7_T0,
243 gen_op_movl_r8_T0,
244 gen_op_movl_r9_T0,
245 gen_op_movl_r10_T0,
246 gen_op_movl_r11_T0,
247 gen_op_movl_r12_T0,
248 gen_op_movl_r13_T0,
249 gen_op_movl_r14_T0,
250 gen_op_movl_r15_T0,
253 gen_op_movl_r0_T1,
254 gen_op_movl_r1_T1,
255 gen_op_movl_r2_T1,
256 gen_op_movl_r3_T1,
257 gen_op_movl_r4_T1,
258 gen_op_movl_r5_T1,
259 gen_op_movl_r6_T1,
260 gen_op_movl_r7_T1,
261 gen_op_movl_r8_T1,
262 gen_op_movl_r9_T1,
263 gen_op_movl_r10_T1,
264 gen_op_movl_r11_T1,
265 gen_op_movl_r12_T1,
266 gen_op_movl_r13_T1,
267 gen_op_movl_r14_T1,
268 gen_op_movl_r15_T1,
272 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
273 gen_op_movl_T0_im,
274 gen_op_movl_T1_im,
275 gen_op_movl_T2_im,
278 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
279 gen_op_shll_T0_im_thumb,
280 gen_op_shrl_T0_im_thumb,
281 gen_op_sarl_T0_im_thumb,
284 static inline void gen_bx(DisasContext *s)
286 s->is_jmp = DISAS_UPDATE;
287 gen_op_bx_T0();
291 #if defined(CONFIG_USER_ONLY)
292 #define gen_ldst(name, s) gen_op_##name##_raw()
293 #else
294 #define gen_ldst(name, s) do { \
295 s->is_mem = 1; \
296 if (IS_USER(s)) \
297 gen_op_##name##_user(); \
298 else \
299 gen_op_##name##_kernel(); \
300 } while (0)
301 #endif
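/* e.g. gen_ldst(ldl, s) emits gen_op_ldl_raw() in user-only builds; otherwise
   it flags the TB as containing a memory access (s->is_mem = 1) and emits
   gen_op_ldl_user() or gen_op_ldl_kernel() according to IS_USER(s). */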
303 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
305 int val;
307 if (reg == 15) {
308 /* normally, since we have already updated PC past this insn, we only need to add one more insn's length */
309 if (s->thumb)
310 val = (long)s->pc + 2;
311 else
312 val = (long)s->pc + 4;
313 gen_op_movl_TN_im[t](val);
314 } else {
315 gen_op_movl_TN_reg[t][reg]();
319 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
321 gen_movl_TN_reg(s, reg, 0);
324 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
326 gen_movl_TN_reg(s, reg, 1);
329 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
331 gen_movl_TN_reg(s, reg, 2);
334 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
336 gen_op_movl_reg_TN[t][reg]();
337 if (reg == 15) {
338 s->is_jmp = DISAS_JUMP;
342 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
344 gen_movl_reg_TN(s, reg, 0);
347 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
349 gen_movl_reg_TN(s, reg, 1);
352 /* Force a TB lookup after an instruction that changes the CPU state. */
353 static inline void gen_lookup_tb(DisasContext *s)
355 gen_op_movl_T0_im(s->pc);
356 gen_movl_reg_T0(s, 15);
357 s->is_jmp = DISAS_UPDATE;
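/* Add the offset of an ARM load/store addressing mode to T1: bit 25 selects
   a (possibly shifted) register offset in rm, otherwise a 12-bit immediate;
   bit 23 (U) selects add or subtract.  e.g. "ldr r0, [r1, r2, lsl #2]" takes
   the register path with shiftop 0 (LSL) and shift amount 2. */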
360 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
362 int val, rm, shift, shiftop;
364 if (!(insn & (1 << 25))) {
365 /* immediate */
366 val = insn & 0xfff;
367 if (!(insn & (1 << 23)))
368 val = -val;
369 if (val != 0)
370 gen_op_addl_T1_im(val);
371 } else {
372 /* shift/register */
373 rm = (insn) & 0xf;
374 shift = (insn >> 7) & 0x1f;
375 gen_movl_T2_reg(s, rm);
376 shiftop = (insn >> 5) & 3;
377 if (shift != 0) {
378 gen_shift_T2_im[shiftop](shift);
379 } else if (shiftop != 0) {
380 gen_shift_T2_0[shiftop]();
382 if (!(insn & (1 << 23)))
383 gen_op_subl_T1_T2();
384 else
385 gen_op_addl_T1_T2();
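/* Add the offset of a halfword/doubleword addressing mode to T1: bit 22
   selects an 8-bit immediate split across insn[11:8] and insn[3:0]
   (e.g. offset 0x34 is encoded as imm4H = 3, imm4L = 4), otherwise a plain
   register offset in rm; bit 23 (U) selects add or subtract. */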
389 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
390 int extra)
392 int val, rm;
394 if (insn & (1 << 22)) {
395 /* immediate */
396 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
397 if (!(insn & (1 << 23)))
398 val = -val;
399 val += extra;
400 if (val != 0)
401 gen_op_addl_T1_im(val);
402 } else {
403 /* register */
404 if (extra)
405 gen_op_addl_T1_im(extra);
406 rm = (insn) & 0xf;
407 gen_movl_T2_reg(s, rm);
408 if (!(insn & (1 << 23)))
409 gen_op_subl_T1_T2();
410 else
411 gen_op_addl_T1_T2();
415 #define VFP_OP(name) \
416 static inline void gen_vfp_##name(int dp) \
418 if (dp) \
419 gen_op_vfp_##name##d(); \
420 else \
421 gen_op_vfp_##name##s(); \
424 VFP_OP(add)
425 VFP_OP(sub)
426 VFP_OP(mul)
427 VFP_OP(div)
428 VFP_OP(neg)
429 VFP_OP(abs)
430 VFP_OP(sqrt)
431 VFP_OP(cmp)
432 VFP_OP(cmpe)
433 VFP_OP(F1_ld0)
434 VFP_OP(uito)
435 VFP_OP(sito)
436 VFP_OP(toui)
437 VFP_OP(touiz)
438 VFP_OP(tosi)
439 VFP_OP(tosiz)
441 #undef VFP_OP
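/* Each VFP_OP(x) above defines gen_vfp_x(dp), which emits the
   double-precision op gen_op_vfp_xd() when dp is set and the single-precision
   op gen_op_vfp_xs() otherwise, e.g. gen_vfp_add(1) -> gen_op_vfp_addd(). */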
443 static inline void gen_vfp_ld(DisasContext *s, int dp)
445 if (dp)
446 gen_ldst(vfp_ldd, s);
447 else
448 gen_ldst(vfp_lds, s);
451 static inline void gen_vfp_st(DisasContext *s, int dp)
453 if (dp)
454 gen_ldst(vfp_std, s);
455 else
456 gen_ldst(vfp_sts, s);
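/* Return the byte offset of a VFP register within CPUARMState: dN is
   vfp.regs[N]; sN aliases half of vfp.regs[N >> 1], odd register numbers in
   the upper word and even numbers in the lower word. */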
459 static inline long
460 vfp_reg_offset (int dp, int reg)
462 if (dp)
463 return offsetof(CPUARMState, vfp.regs[reg]);
464 else if (reg & 1) {
465 return offsetof(CPUARMState, vfp.regs[reg >> 1])
466 + offsetof(CPU_DoubleU, l.upper);
467 } else {
468 return offsetof(CPUARMState, vfp.regs[reg >> 1])
469 + offsetof(CPU_DoubleU, l.lower);
472 static inline void gen_mov_F0_vreg(int dp, int reg)
474 if (dp)
475 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
476 else
477 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
480 static inline void gen_mov_F1_vreg(int dp, int reg)
482 if (dp)
483 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
484 else
485 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
488 static inline void gen_mov_vreg_F0(int dp, int reg)
490 if (dp)
491 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
492 else
493 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
496 #define ARM_CP_RW_BIT (1 << 20)
498 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
500 int rd;
501 uint32_t offset;
503 rd = (insn >> 16) & 0xf;
504 gen_movl_T1_reg(s, rd);
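/* 8-bit immediate offset; (insn >> 7) & 2 gives a shift of 0 or 2, i.e. the
   offset is scaled by 4 when bit 8 is set (the word/doubleword wRd forms
   decoded below). */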
506 offset = (insn & 0xff) << ((insn >> 7) & 2);
507 if (insn & (1 << 24)) {
508 /* Pre indexed */
509 if (insn & (1 << 23))
510 gen_op_addl_T1_im(offset);
511 else
512 gen_op_addl_T1_im(-offset);
514 if (insn & (1 << 21))
515 gen_movl_reg_T1(s, rd);
516 } else if (insn & (1 << 21)) {
517 /* Post indexed */
518 if (insn & (1 << 23))
519 gen_op_movl_T0_im(offset);
520 else
521 gen_op_movl_T0_im(- offset);
522 gen_op_addl_T0_T1();
523 gen_movl_reg_T0(s, rd);
524 } else if (!(insn & (1 << 23)))
525 return 1;
526 return 0;
529 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
531 int rd = (insn >> 0) & 0xf;
533 if (insn & (1 << 8))
534 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
535 return 1;
536 else
537 gen_op_iwmmxt_movl_T0_wCx(rd);
538 else
539 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
541 gen_op_movl_T1_im(mask);
542 gen_op_andl_T0_T1();
543 return 0;
546 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
547 (i.e. an undefined instruction). */
548 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
550 int rd, wrd;
551 int rdhi, rdlo, rd0, rd1, i;
553 if ((insn & 0x0e000e00) == 0x0c000000) {
554 if ((insn & 0x0fe00ff0) == 0x0c400000) {
555 wrd = insn & 0xf;
556 rdlo = (insn >> 12) & 0xf;
557 rdhi = (insn >> 16) & 0xf;
558 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
559 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
560 gen_movl_reg_T0(s, rdlo);
561 gen_movl_reg_T1(s, rdhi);
562 } else { /* TMCRR */
563 gen_movl_T0_reg(s, rdlo);
564 gen_movl_T1_reg(s, rdhi);
565 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
566 gen_op_iwmmxt_set_mup();
568 return 0;
571 wrd = (insn >> 12) & 0xf;
572 if (gen_iwmmxt_address(s, insn))
573 return 1;
574 if (insn & ARM_CP_RW_BIT) {
575 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
576 gen_ldst(ldl, s);
577 gen_op_iwmmxt_movl_wCx_T0(wrd);
578 } else {
579 if (insn & (1 << 8))
580 if (insn & (1 << 22)) /* WLDRD */
581 gen_ldst(iwmmxt_ldq, s);
582 else /* WLDRW wRd */
583 gen_ldst(iwmmxt_ldl, s);
584 else
585 if (insn & (1 << 22)) /* WLDRH */
586 gen_ldst(iwmmxt_ldw, s);
587 else /* WLDRB */
588 gen_ldst(iwmmxt_ldb, s);
589 gen_op_iwmmxt_movq_wRn_M0(wrd);
591 } else {
592 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
593 gen_op_iwmmxt_movl_T0_wCx(wrd);
594 gen_ldst(stl, s);
595 } else {
596 gen_op_iwmmxt_movq_M0_wRn(wrd);
597 if (insn & (1 << 8))
598 if (insn & (1 << 22)) /* WSTRD */
599 gen_ldst(iwmmxt_stq, s);
600 else /* WSTRW wRd */
601 gen_ldst(iwmmxt_stl, s);
602 else
603 if (insn & (1 << 22)) /* WSTRH */
604 gen_ldst(iwmmxt_stw, s);
605 else /* WSTRB */
606 gen_ldst(iwmmxt_stb, s);
609 return 0;
612 if ((insn & 0x0f000000) != 0x0e000000)
613 return 1;
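/* The case values below combine insn[23:20] (key bits 11:8) with
   insn[11:4] (key bits 7:0). */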
615 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
616 case 0x000: /* WOR */
617 wrd = (insn >> 12) & 0xf;
618 rd0 = (insn >> 0) & 0xf;
619 rd1 = (insn >> 16) & 0xf;
620 gen_op_iwmmxt_movq_M0_wRn(rd0);
621 gen_op_iwmmxt_orq_M0_wRn(rd1);
622 gen_op_iwmmxt_setpsr_nz();
623 gen_op_iwmmxt_movq_wRn_M0(wrd);
624 gen_op_iwmmxt_set_mup();
625 gen_op_iwmmxt_set_cup();
626 break;
627 case 0x011: /* TMCR */
628 if (insn & 0xf)
629 return 1;
630 rd = (insn >> 12) & 0xf;
631 wrd = (insn >> 16) & 0xf;
632 switch (wrd) {
633 case ARM_IWMMXT_wCID:
634 case ARM_IWMMXT_wCASF:
635 break;
636 case ARM_IWMMXT_wCon:
637 gen_op_iwmmxt_set_cup();
638 /* Fall through. */
639 case ARM_IWMMXT_wCSSF:
640 gen_op_iwmmxt_movl_T0_wCx(wrd);
641 gen_movl_T1_reg(s, rd);
642 gen_op_bicl_T0_T1();
643 gen_op_iwmmxt_movl_wCx_T0(wrd);
644 break;
645 case ARM_IWMMXT_wCGR0:
646 case ARM_IWMMXT_wCGR1:
647 case ARM_IWMMXT_wCGR2:
648 case ARM_IWMMXT_wCGR3:
649 gen_op_iwmmxt_set_cup();
650 gen_movl_reg_T0(s, rd);
651 gen_op_iwmmxt_movl_wCx_T0(wrd);
652 break;
653 default:
654 return 1;
656 break;
657 case 0x100: /* WXOR */
658 wrd = (insn >> 12) & 0xf;
659 rd0 = (insn >> 0) & 0xf;
660 rd1 = (insn >> 16) & 0xf;
661 gen_op_iwmmxt_movq_M0_wRn(rd0);
662 gen_op_iwmmxt_xorq_M0_wRn(rd1);
663 gen_op_iwmmxt_setpsr_nz();
664 gen_op_iwmmxt_movq_wRn_M0(wrd);
665 gen_op_iwmmxt_set_mup();
666 gen_op_iwmmxt_set_cup();
667 break;
668 case 0x111: /* TMRC */
669 if (insn & 0xf)
670 return 1;
671 rd = (insn >> 12) & 0xf;
672 wrd = (insn >> 16) & 0xf;
673 gen_op_iwmmxt_movl_T0_wCx(wrd);
674 gen_movl_reg_T0(s, rd);
675 break;
676 case 0x300: /* WANDN */
677 wrd = (insn >> 12) & 0xf;
678 rd0 = (insn >> 0) & 0xf;
679 rd1 = (insn >> 16) & 0xf;
680 gen_op_iwmmxt_movq_M0_wRn(rd0);
681 gen_op_iwmmxt_negq_M0();
682 gen_op_iwmmxt_andq_M0_wRn(rd1);
683 gen_op_iwmmxt_setpsr_nz();
684 gen_op_iwmmxt_movq_wRn_M0(wrd);
685 gen_op_iwmmxt_set_mup();
686 gen_op_iwmmxt_set_cup();
687 break;
688 case 0x200: /* WAND */
689 wrd = (insn >> 12) & 0xf;
690 rd0 = (insn >> 0) & 0xf;
691 rd1 = (insn >> 16) & 0xf;
692 gen_op_iwmmxt_movq_M0_wRn(rd0);
693 gen_op_iwmmxt_andq_M0_wRn(rd1);
694 gen_op_iwmmxt_setpsr_nz();
695 gen_op_iwmmxt_movq_wRn_M0(wrd);
696 gen_op_iwmmxt_set_mup();
697 gen_op_iwmmxt_set_cup();
698 break;
699 case 0x810: case 0xa10: /* WMADD */
700 wrd = (insn >> 12) & 0xf;
701 rd0 = (insn >> 0) & 0xf;
702 rd1 = (insn >> 16) & 0xf;
703 gen_op_iwmmxt_movq_M0_wRn(rd0);
704 if (insn & (1 << 21))
705 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
706 else
707 gen_op_iwmmxt_madduq_M0_wRn(rd1);
708 gen_op_iwmmxt_movq_wRn_M0(wrd);
709 gen_op_iwmmxt_set_mup();
710 break;
711 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
712 wrd = (insn >> 12) & 0xf;
713 rd0 = (insn >> 16) & 0xf;
714 rd1 = (insn >> 0) & 0xf;
715 gen_op_iwmmxt_movq_M0_wRn(rd0);
716 switch ((insn >> 22) & 3) {
717 case 0:
718 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
719 break;
720 case 1:
721 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
722 break;
723 case 2:
724 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
725 break;
726 case 3:
727 return 1;
729 gen_op_iwmmxt_movq_wRn_M0(wrd);
730 gen_op_iwmmxt_set_mup();
731 gen_op_iwmmxt_set_cup();
732 break;
733 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
734 wrd = (insn >> 12) & 0xf;
735 rd0 = (insn >> 16) & 0xf;
736 rd1 = (insn >> 0) & 0xf;
737 gen_op_iwmmxt_movq_M0_wRn(rd0);
738 switch ((insn >> 22) & 3) {
739 case 0:
740 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
741 break;
742 case 1:
743 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
744 break;
745 case 2:
746 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
747 break;
748 case 3:
749 return 1;
751 gen_op_iwmmxt_movq_wRn_M0(wrd);
752 gen_op_iwmmxt_set_mup();
753 gen_op_iwmmxt_set_cup();
754 break;
755 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
756 wrd = (insn >> 12) & 0xf;
757 rd0 = (insn >> 16) & 0xf;
758 rd1 = (insn >> 0) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0);
760 if (insn & (1 << 22))
761 gen_op_iwmmxt_sadw_M0_wRn(rd1);
762 else
763 gen_op_iwmmxt_sadb_M0_wRn(rd1);
764 if (!(insn & (1 << 20)))
765 gen_op_iwmmxt_addl_M0_wRn(wrd);
766 gen_op_iwmmxt_movq_wRn_M0(wrd);
767 gen_op_iwmmxt_set_mup();
768 break;
769 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
770 wrd = (insn >> 12) & 0xf;
771 rd0 = (insn >> 16) & 0xf;
772 rd1 = (insn >> 0) & 0xf;
773 gen_op_iwmmxt_movq_M0_wRn(rd0);
774 if (insn & (1 << 21))
775 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
776 else
777 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
778 gen_op_iwmmxt_movq_wRn_M0(wrd);
779 gen_op_iwmmxt_set_mup();
780 break;
781 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
782 wrd = (insn >> 12) & 0xf;
783 rd0 = (insn >> 16) & 0xf;
784 rd1 = (insn >> 0) & 0xf;
785 gen_op_iwmmxt_movq_M0_wRn(rd0);
786 if (insn & (1 << 21))
787 gen_op_iwmmxt_macsw_M0_wRn(rd1);
788 else
789 gen_op_iwmmxt_macuw_M0_wRn(rd1);
790 if (!(insn & (1 << 20))) {
791 if (insn & (1 << 21))
792 gen_op_iwmmxt_addsq_M0_wRn(wrd);
793 else
794 gen_op_iwmmxt_adduq_M0_wRn(wrd);
796 gen_op_iwmmxt_movq_wRn_M0(wrd);
797 gen_op_iwmmxt_set_mup();
798 break;
799 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
800 wrd = (insn >> 12) & 0xf;
801 rd0 = (insn >> 16) & 0xf;
802 rd1 = (insn >> 0) & 0xf;
803 gen_op_iwmmxt_movq_M0_wRn(rd0);
804 switch ((insn >> 22) & 3) {
805 case 0:
806 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
807 break;
808 case 1:
809 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
810 break;
811 case 2:
812 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
813 break;
814 case 3:
815 return 1;
817 gen_op_iwmmxt_movq_wRn_M0(wrd);
818 gen_op_iwmmxt_set_mup();
819 gen_op_iwmmxt_set_cup();
820 break;
821 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
822 wrd = (insn >> 12) & 0xf;
823 rd0 = (insn >> 16) & 0xf;
824 rd1 = (insn >> 0) & 0xf;
825 gen_op_iwmmxt_movq_M0_wRn(rd0);
826 if (insn & (1 << 22))
827 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
828 else
829 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
830 gen_op_iwmmxt_movq_wRn_M0(wrd);
831 gen_op_iwmmxt_set_mup();
832 gen_op_iwmmxt_set_cup();
833 break;
834 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
835 wrd = (insn >> 12) & 0xf;
836 rd0 = (insn >> 16) & 0xf;
837 rd1 = (insn >> 0) & 0xf;
838 gen_op_iwmmxt_movq_M0_wRn(rd0);
839 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
840 gen_op_movl_T1_im(7);
841 gen_op_andl_T0_T1();
842 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
843 gen_op_iwmmxt_movq_wRn_M0(wrd);
844 gen_op_iwmmxt_set_mup();
845 break;
846 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
847 rd = (insn >> 12) & 0xf;
848 wrd = (insn >> 16) & 0xf;
849 gen_movl_T0_reg(s, rd);
850 gen_op_iwmmxt_movq_M0_wRn(wrd);
851 switch ((insn >> 6) & 3) {
852 case 0:
853 gen_op_movl_T1_im(0xff);
854 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
855 break;
856 case 1:
857 gen_op_movl_T1_im(0xffff);
858 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
859 break;
860 case 2:
861 gen_op_movl_T1_im(0xffffffff);
862 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
863 break;
864 case 3:
865 return 1;
867 gen_op_iwmmxt_movq_wRn_M0(wrd);
868 gen_op_iwmmxt_set_mup();
869 break;
870 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
871 rd = (insn >> 12) & 0xf;
872 wrd = (insn >> 16) & 0xf;
873 if (rd == 15)
874 return 1;
875 gen_op_iwmmxt_movq_M0_wRn(wrd);
876 switch ((insn >> 22) & 3) {
877 case 0:
878 if (insn & 8)
879 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
880 else {
881 gen_op_movl_T1_im(0xff);
882 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
884 break;
885 case 1:
886 if (insn & 8)
887 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
888 else {
889 gen_op_movl_T1_im(0xffff);
890 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
892 break;
893 case 2:
894 gen_op_movl_T1_im(0xffffffff);
895 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
896 break;
897 case 3:
898 return 1;
900 gen_op_movl_reg_TN[0][rd]();
901 break;
902 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
903 if ((insn & 0x000ff008) != 0x0003f000)
904 return 1;
905 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
906 switch ((insn >> 22) & 3) {
907 case 0:
908 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
909 break;
910 case 1:
911 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
912 break;
913 case 2:
914 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
915 break;
916 case 3:
917 return 1;
919 gen_op_shll_T1_im(28);
920 gen_op_movl_T0_T1();
921 gen_op_movl_cpsr_T0(0xf0000000);
922 break;
923 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
924 rd = (insn >> 12) & 0xf;
925 wrd = (insn >> 16) & 0xf;
926 gen_movl_T0_reg(s, rd);
927 switch ((insn >> 6) & 3) {
928 case 0:
929 gen_op_iwmmxt_bcstb_M0_T0();
930 break;
931 case 1:
932 gen_op_iwmmxt_bcstw_M0_T0();
933 break;
934 case 2:
935 gen_op_iwmmxt_bcstl_M0_T0();
936 break;
937 case 3:
938 return 1;
940 gen_op_iwmmxt_movq_wRn_M0(wrd);
941 gen_op_iwmmxt_set_mup();
942 break;
943 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
944 if ((insn & 0x000ff00f) != 0x0003f000)
945 return 1;
946 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
947 switch ((insn >> 22) & 3) {
948 case 0:
949 for (i = 0; i < 7; i ++) {
950 gen_op_shll_T1_im(4);
951 gen_op_andl_T0_T1();
953 break;
954 case 1:
955 for (i = 0; i < 3; i ++) {
956 gen_op_shll_T1_im(8);
957 gen_op_andl_T0_T1();
959 break;
960 case 2:
961 gen_op_shll_T1_im(16);
962 gen_op_andl_T0_T1();
963 break;
964 case 3:
965 return 1;
967 gen_op_movl_cpsr_T0(0xf0000000);
968 break;
969 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
970 wrd = (insn >> 12) & 0xf;
971 rd0 = (insn >> 16) & 0xf;
972 gen_op_iwmmxt_movq_M0_wRn(rd0);
973 switch ((insn >> 22) & 3) {
974 case 0:
975 gen_op_iwmmxt_addcb_M0();
976 break;
977 case 1:
978 gen_op_iwmmxt_addcw_M0();
979 break;
980 case 2:
981 gen_op_iwmmxt_addcl_M0();
982 break;
983 case 3:
984 return 1;
986 gen_op_iwmmxt_movq_wRn_M0(wrd);
987 gen_op_iwmmxt_set_mup();
988 break;
989 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
990 if ((insn & 0x000ff00f) != 0x0003f000)
991 return 1;
992 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
993 switch ((insn >> 22) & 3) {
994 case 0:
995 for (i = 0; i < 7; i ++) {
996 gen_op_shll_T1_im(4);
997 gen_op_orl_T0_T1();
999 break;
1000 case 1:
1001 for (i = 0; i < 3; i ++) {
1002 gen_op_shll_T1_im(8);
1003 gen_op_orl_T0_T1();
1005 break;
1006 case 2:
1007 gen_op_shll_T1_im(16);
1008 gen_op_orl_T0_T1();
1009 break;
1010 case 3:
1011 return 1;
1013 gen_op_movl_T1_im(0xf0000000);
1014 gen_op_andl_T0_T1();
1015 gen_op_movl_cpsr_T0(0xf0000000);
1016 break;
1017 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1018 rd = (insn >> 12) & 0xf;
1019 rd0 = (insn >> 16) & 0xf;
1020 if ((insn & 0xf) != 0)
1021 return 1;
1022 gen_op_iwmmxt_movq_M0_wRn(rd0);
1023 switch ((insn >> 22) & 3) {
1024 case 0:
1025 gen_op_iwmmxt_msbb_T0_M0();
1026 break;
1027 case 1:
1028 gen_op_iwmmxt_msbw_T0_M0();
1029 break;
1030 case 2:
1031 gen_op_iwmmxt_msbl_T0_M0();
1032 break;
1033 case 3:
1034 return 1;
1036 gen_movl_reg_T0(s, rd);
1037 break;
1038 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1039 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1040 wrd = (insn >> 12) & 0xf;
1041 rd0 = (insn >> 16) & 0xf;
1042 rd1 = (insn >> 0) & 0xf;
1043 gen_op_iwmmxt_movq_M0_wRn(rd0);
1044 switch ((insn >> 22) & 3) {
1045 case 0:
1046 if (insn & (1 << 21))
1047 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1048 else
1049 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1050 break;
1051 case 1:
1052 if (insn & (1 << 21))
1053 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1054 else
1055 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1056 break;
1057 case 2:
1058 if (insn & (1 << 21))
1059 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1060 else
1061 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1062 break;
1063 case 3:
1064 return 1;
1066 gen_op_iwmmxt_movq_wRn_M0(wrd);
1067 gen_op_iwmmxt_set_mup();
1068 gen_op_iwmmxt_set_cup();
1069 break;
1070 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1071 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1072 wrd = (insn >> 12) & 0xf;
1073 rd0 = (insn >> 16) & 0xf;
1074 gen_op_iwmmxt_movq_M0_wRn(rd0);
1075 switch ((insn >> 22) & 3) {
1076 case 0:
1077 if (insn & (1 << 21))
1078 gen_op_iwmmxt_unpacklsb_M0();
1079 else
1080 gen_op_iwmmxt_unpacklub_M0();
1081 break;
1082 case 1:
1083 if (insn & (1 << 21))
1084 gen_op_iwmmxt_unpacklsw_M0();
1085 else
1086 gen_op_iwmmxt_unpackluw_M0();
1087 break;
1088 case 2:
1089 if (insn & (1 << 21))
1090 gen_op_iwmmxt_unpacklsl_M0();
1091 else
1092 gen_op_iwmmxt_unpacklul_M0();
1093 break;
1094 case 3:
1095 return 1;
1097 gen_op_iwmmxt_movq_wRn_M0(wrd);
1098 gen_op_iwmmxt_set_mup();
1099 gen_op_iwmmxt_set_cup();
1100 break;
1101 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1102 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1103 wrd = (insn >> 12) & 0xf;
1104 rd0 = (insn >> 16) & 0xf;
1105 gen_op_iwmmxt_movq_M0_wRn(rd0);
1106 switch ((insn >> 22) & 3) {
1107 case 0:
1108 if (insn & (1 << 21))
1109 gen_op_iwmmxt_unpackhsb_M0();
1110 else
1111 gen_op_iwmmxt_unpackhub_M0();
1112 break;
1113 case 1:
1114 if (insn & (1 << 21))
1115 gen_op_iwmmxt_unpackhsw_M0();
1116 else
1117 gen_op_iwmmxt_unpackhuw_M0();
1118 break;
1119 case 2:
1120 if (insn & (1 << 21))
1121 gen_op_iwmmxt_unpackhsl_M0();
1122 else
1123 gen_op_iwmmxt_unpackhul_M0();
1124 break;
1125 case 3:
1126 return 1;
1128 gen_op_iwmmxt_movq_wRn_M0(wrd);
1129 gen_op_iwmmxt_set_mup();
1130 gen_op_iwmmxt_set_cup();
1131 break;
1132 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1133 case 0x214: case 0x614: case 0xa14: case 0xe14:
1134 wrd = (insn >> 12) & 0xf;
1135 rd0 = (insn >> 16) & 0xf;
1136 gen_op_iwmmxt_movq_M0_wRn(rd0);
1137 if (gen_iwmmxt_shift(insn, 0xff))
1138 return 1;
1139 switch ((insn >> 22) & 3) {
1140 case 0:
1141 return 1;
1142 case 1:
1143 gen_op_iwmmxt_srlw_M0_T0();
1144 break;
1145 case 2:
1146 gen_op_iwmmxt_srll_M0_T0();
1147 break;
1148 case 3:
1149 gen_op_iwmmxt_srlq_M0_T0();
1150 break;
1152 gen_op_iwmmxt_movq_wRn_M0(wrd);
1153 gen_op_iwmmxt_set_mup();
1154 gen_op_iwmmxt_set_cup();
1155 break;
1156 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1157 case 0x014: case 0x414: case 0x814: case 0xc14:
1158 wrd = (insn >> 12) & 0xf;
1159 rd0 = (insn >> 16) & 0xf;
1160 gen_op_iwmmxt_movq_M0_wRn(rd0);
1161 if (gen_iwmmxt_shift(insn, 0xff))
1162 return 1;
1163 switch ((insn >> 22) & 3) {
1164 case 0:
1165 return 1;
1166 case 1:
1167 gen_op_iwmmxt_sraw_M0_T0();
1168 break;
1169 case 2:
1170 gen_op_iwmmxt_sral_M0_T0();
1171 break;
1172 case 3:
1173 gen_op_iwmmxt_sraq_M0_T0();
1174 break;
1176 gen_op_iwmmxt_movq_wRn_M0(wrd);
1177 gen_op_iwmmxt_set_mup();
1178 gen_op_iwmmxt_set_cup();
1179 break;
1180 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1181 case 0x114: case 0x514: case 0x914: case 0xd14:
1182 wrd = (insn >> 12) & 0xf;
1183 rd0 = (insn >> 16) & 0xf;
1184 gen_op_iwmmxt_movq_M0_wRn(rd0);
1185 if (gen_iwmmxt_shift(insn, 0xff))
1186 return 1;
1187 switch ((insn >> 22) & 3) {
1188 case 0:
1189 return 1;
1190 case 1:
1191 gen_op_iwmmxt_sllw_M0_T0();
1192 break;
1193 case 2:
1194 gen_op_iwmmxt_slll_M0_T0();
1195 break;
1196 case 3:
1197 gen_op_iwmmxt_sllq_M0_T0();
1198 break;
1200 gen_op_iwmmxt_movq_wRn_M0(wrd);
1201 gen_op_iwmmxt_set_mup();
1202 gen_op_iwmmxt_set_cup();
1203 break;
1204 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1205 case 0x314: case 0x714: case 0xb14: case 0xf14:
1206 wrd = (insn >> 12) & 0xf;
1207 rd0 = (insn >> 16) & 0xf;
1208 gen_op_iwmmxt_movq_M0_wRn(rd0);
1209 switch ((insn >> 22) & 3) {
1210 case 0:
1211 return 1;
1212 case 1:
1213 if (gen_iwmmxt_shift(insn, 0xf))
1214 return 1;
1215 gen_op_iwmmxt_rorw_M0_T0();
1216 break;
1217 case 2:
1218 if (gen_iwmmxt_shift(insn, 0x1f))
1219 return 1;
1220 gen_op_iwmmxt_rorl_M0_T0();
1221 break;
1222 case 3:
1223 if (gen_iwmmxt_shift(insn, 0x3f))
1224 return 1;
1225 gen_op_iwmmxt_rorq_M0_T0();
1226 break;
1228 gen_op_iwmmxt_movq_wRn_M0(wrd);
1229 gen_op_iwmmxt_set_mup();
1230 gen_op_iwmmxt_set_cup();
1231 break;
1232 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1233 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1234 wrd = (insn >> 12) & 0xf;
1235 rd0 = (insn >> 16) & 0xf;
1236 rd1 = (insn >> 0) & 0xf;
1237 gen_op_iwmmxt_movq_M0_wRn(rd0);
1238 switch ((insn >> 22) & 3) {
1239 case 0:
1240 if (insn & (1 << 21))
1241 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1242 else
1243 gen_op_iwmmxt_minub_M0_wRn(rd1);
1244 break;
1245 case 1:
1246 if (insn & (1 << 21))
1247 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1248 else
1249 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1250 break;
1251 case 2:
1252 if (insn & (1 << 21))
1253 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1254 else
1255 gen_op_iwmmxt_minul_M0_wRn(rd1);
1256 break;
1257 case 3:
1258 return 1;
1260 gen_op_iwmmxt_movq_wRn_M0(wrd);
1261 gen_op_iwmmxt_set_mup();
1262 break;
1263 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1264 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1265 wrd = (insn >> 12) & 0xf;
1266 rd0 = (insn >> 16) & 0xf;
1267 rd1 = (insn >> 0) & 0xf;
1268 gen_op_iwmmxt_movq_M0_wRn(rd0);
1269 switch ((insn >> 22) & 3) {
1270 case 0:
1271 if (insn & (1 << 21))
1272 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1273 else
1274 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1275 break;
1276 case 1:
1277 if (insn & (1 << 21))
1278 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1279 else
1280 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1281 break;
1282 case 2:
1283 if (insn & (1 << 21))
1284 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1285 else
1286 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1287 break;
1288 case 3:
1289 return 1;
1291 gen_op_iwmmxt_movq_wRn_M0(wrd);
1292 gen_op_iwmmxt_set_mup();
1293 break;
1294 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1295 case 0x402: case 0x502: case 0x602: case 0x702:
1296 wrd = (insn >> 12) & 0xf;
1297 rd0 = (insn >> 16) & 0xf;
1298 rd1 = (insn >> 0) & 0xf;
1299 gen_op_iwmmxt_movq_M0_wRn(rd0);
1300 gen_op_movl_T0_im((insn >> 20) & 3);
1301 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1302 gen_op_iwmmxt_movq_wRn_M0(wrd);
1303 gen_op_iwmmxt_set_mup();
1304 break;
1305 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1306 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1307 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1308 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1309 wrd = (insn >> 12) & 0xf;
1310 rd0 = (insn >> 16) & 0xf;
1311 rd1 = (insn >> 0) & 0xf;
1312 gen_op_iwmmxt_movq_M0_wRn(rd0);
1313 switch ((insn >> 20) & 0xf) {
1314 case 0x0:
1315 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1316 break;
1317 case 0x1:
1318 gen_op_iwmmxt_subub_M0_wRn(rd1);
1319 break;
1320 case 0x3:
1321 gen_op_iwmmxt_subsb_M0_wRn(rd1);
1322 break;
1323 case 0x4:
1324 gen_op_iwmmxt_subnw_M0_wRn(rd1);
1325 break;
1326 case 0x5:
1327 gen_op_iwmmxt_subuw_M0_wRn(rd1);
1328 break;
1329 case 0x7:
1330 gen_op_iwmmxt_subsw_M0_wRn(rd1);
1331 break;
1332 case 0x8:
1333 gen_op_iwmmxt_subnl_M0_wRn(rd1);
1334 break;
1335 case 0x9:
1336 gen_op_iwmmxt_subul_M0_wRn(rd1);
1337 break;
1338 case 0xb:
1339 gen_op_iwmmxt_subsl_M0_wRn(rd1);
1340 break;
1341 default:
1342 return 1;
1344 gen_op_iwmmxt_movq_wRn_M0(wrd);
1345 gen_op_iwmmxt_set_mup();
1346 gen_op_iwmmxt_set_cup();
1347 break;
1348 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1349 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1350 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1351 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1352 wrd = (insn >> 12) & 0xf;
1353 rd0 = (insn >> 16) & 0xf;
1354 gen_op_iwmmxt_movq_M0_wRn(rd0);
1355 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1356 gen_op_iwmmxt_shufh_M0_T0();
1357 gen_op_iwmmxt_movq_wRn_M0(wrd);
1358 gen_op_iwmmxt_set_mup();
1359 gen_op_iwmmxt_set_cup();
1360 break;
1361 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1362 case 0x418: case 0x518: case 0x618: case 0x718:
1363 case 0x818: case 0x918: case 0xa18: case 0xb18:
1364 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1365 wrd = (insn >> 12) & 0xf;
1366 rd0 = (insn >> 16) & 0xf;
1367 rd1 = (insn >> 0) & 0xf;
1368 gen_op_iwmmxt_movq_M0_wRn(rd0);
1369 switch ((insn >> 20) & 0xf) {
1370 case 0x0:
1371 gen_op_iwmmxt_addnb_M0_wRn(rd1);
1372 break;
1373 case 0x1:
1374 gen_op_iwmmxt_addub_M0_wRn(rd1);
1375 break;
1376 case 0x3:
1377 gen_op_iwmmxt_addsb_M0_wRn(rd1);
1378 break;
1379 case 0x4:
1380 gen_op_iwmmxt_addnw_M0_wRn(rd1);
1381 break;
1382 case 0x5:
1383 gen_op_iwmmxt_adduw_M0_wRn(rd1);
1384 break;
1385 case 0x7:
1386 gen_op_iwmmxt_addsw_M0_wRn(rd1);
1387 break;
1388 case 0x8:
1389 gen_op_iwmmxt_addnl_M0_wRn(rd1);
1390 break;
1391 case 0x9:
1392 gen_op_iwmmxt_addul_M0_wRn(rd1);
1393 break;
1394 case 0xb:
1395 gen_op_iwmmxt_addsl_M0_wRn(rd1);
1396 break;
1397 default:
1398 return 1;
1400 gen_op_iwmmxt_movq_wRn_M0(wrd);
1401 gen_op_iwmmxt_set_mup();
1402 gen_op_iwmmxt_set_cup();
1403 break;
1404 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1405 case 0x408: case 0x508: case 0x608: case 0x708:
1406 case 0x808: case 0x908: case 0xa08: case 0xb08:
1407 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1408 wrd = (insn >> 12) & 0xf;
1409 rd0 = (insn >> 16) & 0xf;
1410 rd1 = (insn >> 0) & 0xf;
1411 gen_op_iwmmxt_movq_M0_wRn(rd0);
1412 if (!(insn & (1 << 20)))
1413 return 1;
1414 switch ((insn >> 22) & 3) {
1415 case 0:
1416 return 1;
1417 case 1:
1418 if (insn & (1 << 21))
1419 gen_op_iwmmxt_packsw_M0_wRn(rd1);
1420 else
1421 gen_op_iwmmxt_packuw_M0_wRn(rd1);
1422 break;
1423 case 2:
1424 if (insn & (1 << 21))
1425 gen_op_iwmmxt_packsl_M0_wRn(rd1);
1426 else
1427 gen_op_iwmmxt_packul_M0_wRn(rd1);
1428 break;
1429 case 3:
1430 if (insn & (1 << 21))
1431 gen_op_iwmmxt_packsq_M0_wRn(rd1);
1432 else
1433 gen_op_iwmmxt_packuq_M0_wRn(rd1);
1434 break;
1436 gen_op_iwmmxt_movq_wRn_M0(wrd);
1437 gen_op_iwmmxt_set_mup();
1438 gen_op_iwmmxt_set_cup();
1439 break;
1440 case 0x201: case 0x203: case 0x205: case 0x207:
1441 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1442 case 0x211: case 0x213: case 0x215: case 0x217:
1443 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1444 wrd = (insn >> 5) & 0xf;
1445 rd0 = (insn >> 12) & 0xf;
1446 rd1 = (insn >> 0) & 0xf;
1447 if (rd0 == 0xf || rd1 == 0xf)
1448 return 1;
1449 gen_op_iwmmxt_movq_M0_wRn(wrd);
1450 switch ((insn >> 16) & 0xf) {
1451 case 0x0: /* TMIA */
1452 gen_op_movl_TN_reg[0][rd0]();
1453 gen_op_movl_TN_reg[1][rd1]();
1454 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1455 break;
1456 case 0x8: /* TMIAPH */
1457 gen_op_movl_TN_reg[0][rd0]();
1458 gen_op_movl_TN_reg[1][rd1]();
1459 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1460 break;
1461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1462 gen_op_movl_TN_reg[1][rd0]();
1463 if (insn & (1 << 16))
1464 gen_op_shrl_T1_im(16);
1465 gen_op_movl_T0_T1();
1466 gen_op_movl_TN_reg[1][rd1]();
1467 if (insn & (1 << 17))
1468 gen_op_shrl_T1_im(16);
1469 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1470 break;
1471 default:
1472 return 1;
1474 gen_op_iwmmxt_movq_wRn_M0(wrd);
1475 gen_op_iwmmxt_set_mup();
1476 break;
1477 default:
1478 return 1;
1481 return 0;
1484 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
1485 (i.e. an undefined instruction). */
1486 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1488 int acc, rd0, rd1, rdhi, rdlo;
1490 if ((insn & 0x0ff00f10) == 0x0e200010) {
1491 /* Multiply with Internal Accumulate Format */
1492 rd0 = (insn >> 12) & 0xf;
1493 rd1 = insn & 0xf;
1494 acc = (insn >> 5) & 7;
1496 if (acc != 0)
1497 return 1;
1499 switch ((insn >> 16) & 0xf) {
1500 case 0x0: /* MIA */
1501 gen_op_movl_TN_reg[0][rd0]();
1502 gen_op_movl_TN_reg[1][rd1]();
1503 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1504 break;
1505 case 0x8: /* MIAPH */
1506 gen_op_movl_TN_reg[0][rd0]();
1507 gen_op_movl_TN_reg[1][rd1]();
1508 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1509 break;
1510 case 0xc: /* MIABB */
1511 case 0xd: /* MIABT */
1512 case 0xe: /* MIATB */
1513 case 0xf: /* MIATT */
1514 gen_op_movl_TN_reg[1][rd0]();
1515 if (insn & (1 << 16))
1516 gen_op_shrl_T1_im(16);
1517 gen_op_movl_T0_T1();
1518 gen_op_movl_TN_reg[1][rd1]();
1519 if (insn & (1 << 17))
1520 gen_op_shrl_T1_im(16);
1521 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1522 break;
1523 default:
1524 return 1;
1527 gen_op_iwmmxt_movq_wRn_M0(acc);
1528 return 0;
1531 if ((insn & 0x0fe00ff8) == 0x0c400000) {
1532 /* Internal Accumulator Access Format */
1533 rdhi = (insn >> 16) & 0xf;
1534 rdlo = (insn >> 12) & 0xf;
1535 acc = insn & 7;
1537 if (acc != 0)
1538 return 1;
1540 if (insn & ARM_CP_RW_BIT) { /* MRA */
1541 gen_op_iwmmxt_movl_T0_T1_wRn(acc);
1542 gen_op_movl_reg_TN[0][rdlo]();
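/* Only bits 39:32 of the 40-bit accumulator are returned in rdhi. */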
1543 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1544 gen_op_andl_T0_T1();
1545 gen_op_movl_reg_TN[0][rdhi]();
1546 } else { /* MAR */
1547 gen_op_movl_TN_reg[0][rdlo]();
1548 gen_op_movl_TN_reg[1][rdhi]();
1549 gen_op_iwmmxt_movl_wRn_T0_T1(acc);
1551 return 0;
1554 return 1;
1557 /* Disassemble a system coprocessor instruction.  Return nonzero if the
1558 instruction is not defined. */
1559 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1561 uint32_t rd = (insn >> 12) & 0xf;
1562 uint32_t cp = (insn >> 8) & 0xf;
1563 if (IS_USER(s)) {
1564 return 1;
1567 if (insn & ARM_CP_RW_BIT) {
1568 if (!env->cp[cp].cp_read)
1569 return 1;
1570 gen_op_movl_T0_im((uint32_t) s->pc);
1571 gen_op_movl_reg_TN[0][15]();
1572 gen_op_movl_T0_cp(insn);
1573 gen_movl_reg_T0(s, rd);
1574 } else {
1575 if (!env->cp[cp].cp_write)
1576 return 1;
1577 gen_op_movl_T0_im((uint32_t) s->pc);
1578 gen_op_movl_reg_TN[0][15]();
1579 gen_movl_T0_reg(s, rd);
1580 gen_op_movl_cp_T0(insn);
1582 return 0;
1585 /* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if
1586 the instruction is not defined. */
1587 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
1589 uint32_t rd;
1591 /* ??? Some cp15 registers are accessible from userspace. */
1592 if (IS_USER(s)) {
1593 return 1;
1595 if ((insn & 0x0fff0fff) == 0x0e070f90
1596 || (insn & 0x0fff0fff) == 0x0e070f58) {
1597 /* Wait for interrupt. */
1598 gen_op_movl_T0_im((long)s->pc);
1599 gen_op_movl_reg_TN[0][15]();
1600 gen_op_wfi();
1601 s->is_jmp = DISAS_JUMP;
1602 return 0;
1604 rd = (insn >> 12) & 0xf;
1605 if (insn & ARM_CP_RW_BIT) {
1606 gen_op_movl_T0_cp15(insn);
1607 /* If the destination register is r15 then the condition codes are set. */
1608 if (rd != 15)
1609 gen_movl_reg_T0(s, rd);
1610 } else {
1611 gen_movl_T0_reg(s, rd);
1612 gen_op_movl_cp15_T0(insn);
1613 /* Normally we would always end the TB here, but Linux
1614 * arch/arm/mach-pxa/sleep.S expects two instructions following
1615 * an MMU enable to execute from cache. Imitate this behaviour. */
1616 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
1617 (insn & 0x0fff0fff) != 0x0e010f10)
1618 gen_lookup_tb(s);
1620 return 0;
1623 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
1624 (i.e. an undefined instruction). */
1625 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
1627 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
1628 int dp, veclen;
1630 if (!arm_feature(env, ARM_FEATURE_VFP))
1631 return 1;
1633 if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
1634 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
1635 if ((insn & 0x0fe00fff) != 0x0ee00a10)
1636 return 1;
1637 rn = (insn >> 16) & 0xf;
1638 if (rn != 0 && rn != 8)
1639 return 1;
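/* Coprocessor 10 (0xa00) encodes single precision, cp11 (0xb00) double. */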
1641 dp = ((insn & 0xf00) == 0xb00);
1642 switch ((insn >> 24) & 0xf) {
1643 case 0xe:
1644 if (insn & (1 << 4)) {
1645 /* single register transfer */
1646 if ((insn & 0x6f) != 0x00)
1647 return 1;
1648 rd = (insn >> 12) & 0xf;
1649 if (dp) {
1650 if (insn & 0x80)
1651 return 1;
1652 rn = (insn >> 16) & 0xf;
1653 /* Get the existing value even for arm->vfp moves because
1654 we only set half the register. */
1655 gen_mov_F0_vreg(1, rn);
1656 gen_op_vfp_mrrd();
1657 if (insn & ARM_CP_RW_BIT) {
1658 /* vfp->arm */
1659 if (insn & (1 << 21))
1660 gen_movl_reg_T1(s, rd);
1661 else
1662 gen_movl_reg_T0(s, rd);
1663 } else {
1664 /* arm->vfp */
1665 if (insn & (1 << 21))
1666 gen_movl_T1_reg(s, rd);
1667 else
1668 gen_movl_T0_reg(s, rd);
1669 gen_op_vfp_mdrr();
1670 gen_mov_vreg_F0(dp, rn);
1672 } else {
1673 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
1674 if (insn & ARM_CP_RW_BIT) {
1675 /* vfp->arm */
1676 if (insn & (1 << 21)) {
1677 /* system register */
1678 rn >>= 1;
1679 switch (rn) {
1680 case ARM_VFP_FPSID:
1681 case ARM_VFP_FPEXC:
1682 case ARM_VFP_FPINST:
1683 case ARM_VFP_FPINST2:
1684 gen_op_vfp_movl_T0_xreg(rn);
1685 break;
1686 case ARM_VFP_FPSCR:
1687 if (rd == 15)
1688 gen_op_vfp_movl_T0_fpscr_flags();
1689 else
1690 gen_op_vfp_movl_T0_fpscr();
1691 break;
1692 default:
1693 return 1;
1695 } else {
1696 gen_mov_F0_vreg(0, rn);
1697 gen_op_vfp_mrs();
1699 if (rd == 15) {
1700 /* Set the 4 flag bits in the CPSR. */
1701 gen_op_movl_cpsr_T0(0xf0000000);
1702 } else
1703 gen_movl_reg_T0(s, rd);
1704 } else {
1705 /* arm->vfp */
1706 gen_movl_T0_reg(s, rd);
1707 if (insn & (1 << 21)) {
1708 rn >>= 1;
1709 /* system register */
1710 switch (rn) {
1711 case ARM_VFP_FPSID:
1712 /* Writes are ignored. */
1713 break;
1714 case ARM_VFP_FPSCR:
1715 gen_op_vfp_movl_fpscr_T0();
1716 gen_lookup_tb(s);
1717 break;
1718 case ARM_VFP_FPEXC:
1719 gen_op_vfp_movl_xreg_T0(rn);
1720 gen_lookup_tb(s);
1721 break;
1722 case ARM_VFP_FPINST:
1723 case ARM_VFP_FPINST2:
1724 gen_op_vfp_movl_xreg_T0(rn);
1725 break;
1726 default:
1727 return 1;
1729 } else {
1730 gen_op_vfp_msr();
1731 gen_mov_vreg_F0(0, rn);
1735 } else {
1736 /* data processing */
1737 /* The opcode is in bits 23, 21, 20 and 6. */
1738 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
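/* i.e. op[3] = insn[23], op[2:1] = insn[21:20], op[0] = insn[6]. */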
1739 if (dp) {
1740 if (op == 15) {
1741 /* rn is opcode */
1742 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
1743 } else {
1744 /* rn is register number */
1745 if (insn & (1 << 7))
1746 return 1;
1747 rn = (insn >> 16) & 0xf;
1750 if (op == 15 && (rn == 15 || rn > 17)) {
1751 /* Integer or single precision destination. */
1752 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
1753 } else {
1754 if (insn & (1 << 22))
1755 return 1;
1756 rd = (insn >> 12) & 0xf;
1759 if (op == 15 && (rn == 16 || rn == 17)) {
1760 /* Integer source. */
1761 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
1762 } else {
1763 if (insn & (1 << 5))
1764 return 1;
1765 rm = insn & 0xf;
1767 } else {
1768 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
1769 if (op == 15 && rn == 15) {
1770 /* Double precision destination. */
1771 if (insn & (1 << 22))
1772 return 1;
1773 rd = (insn >> 12) & 0xf;
1774 } else
1775 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
1776 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
1779 veclen = env->vfp.vec_len;
1780 if (op == 15 && rn > 3)
1781 veclen = 0;
1783 /* Shut up compiler warnings. */
1784 delta_m = 0;
1785 delta_d = 0;
1786 bank_mask = 0;
1788 if (veclen > 0) {
1789 if (dp)
1790 bank_mask = 0xc;
1791 else
1792 bank_mask = 0x18;
1794 /* Figure out what type of vector operation this is. */
1795 if ((rd & bank_mask) == 0) {
1796 /* scalar */
1797 veclen = 0;
1798 } else {
1799 if (dp)
1800 delta_d = (env->vfp.vec_stride >> 1) + 1;
1801 else
1802 delta_d = env->vfp.vec_stride + 1;
1804 if ((rm & bank_mask) == 0) {
1805 /* mixed scalar/vector */
1806 delta_m = 0;
1807 } else {
1808 /* vector */
1809 delta_m = delta_d;
1814 /* Load the initial operands. */
1815 if (op == 15) {
1816 switch (rn) {
1817 case 16:
1818 case 17:
1819 /* Integer source */
1820 gen_mov_F0_vreg(0, rm);
1821 break;
1822 case 8:
1823 case 9:
1824 /* Compare */
1825 gen_mov_F0_vreg(dp, rd);
1826 gen_mov_F1_vreg(dp, rm);
1827 break;
1828 case 10:
1829 case 11:
1830 /* Compare with zero */
1831 gen_mov_F0_vreg(dp, rd);
1832 gen_vfp_F1_ld0(dp);
1833 break;
1834 default:
1835 /* One source operand. */
1836 gen_mov_F0_vreg(dp, rm);
1838 } else {
1839 /* Two source operands. */
1840 gen_mov_F0_vreg(dp, rn);
1841 gen_mov_F1_vreg(dp, rm);
1844 for (;;) {
1845 /* Perform the calculation. */
1846 switch (op) {
1847 case 0: /* mac: fd + (fn * fm) */
1848 gen_vfp_mul(dp);
1849 gen_mov_F1_vreg(dp, rd);
1850 gen_vfp_add(dp);
1851 break;
1852 case 1: /* nmac: fd - (fn * fm) */
1853 gen_vfp_mul(dp);
1854 gen_vfp_neg(dp);
1855 gen_mov_F1_vreg(dp, rd);
1856 gen_vfp_add(dp);
1857 break;
1858 case 2: /* msc: -fd + (fn * fm) */
1859 gen_vfp_mul(dp);
1860 gen_mov_F1_vreg(dp, rd);
1861 gen_vfp_sub(dp);
1862 break;
1863 case 3: /* nmsc: -fd - (fn * fm) */
1864 gen_vfp_mul(dp);
1865 gen_mov_F1_vreg(dp, rd);
1866 gen_vfp_add(dp);
1867 gen_vfp_neg(dp);
1868 break;
1869 case 4: /* mul: fn * fm */
1870 gen_vfp_mul(dp);
1871 break;
1872 case 5: /* nmul: -(fn * fm) */
1873 gen_vfp_mul(dp);
1874 gen_vfp_neg(dp);
1875 break;
1876 case 6: /* add: fn + fm */
1877 gen_vfp_add(dp);
1878 break;
1879 case 7: /* sub: fn - fm */
1880 gen_vfp_sub(dp);
1881 break;
1882 case 8: /* div: fn / fm */
1883 gen_vfp_div(dp);
1884 break;
1885 case 15: /* extension space */
1886 switch (rn) {
1887 case 0: /* cpy */
1888 /* no-op */
1889 break;
1890 case 1: /* abs */
1891 gen_vfp_abs(dp);
1892 break;
1893 case 2: /* neg */
1894 gen_vfp_neg(dp);
1895 break;
1896 case 3: /* sqrt */
1897 gen_vfp_sqrt(dp);
1898 break;
1899 case 8: /* cmp */
1900 gen_vfp_cmp(dp);
1901 break;
1902 case 9: /* cmpe */
1903 gen_vfp_cmpe(dp);
1904 break;
1905 case 10: /* cmpz */
1906 gen_vfp_cmp(dp);
1907 break;
1908 case 11: /* cmpez */
1909 gen_vfp_F1_ld0(dp);
1910 gen_vfp_cmpe(dp);
1911 break;
1912 case 15: /* single<->double conversion */
1913 if (dp)
1914 gen_op_vfp_fcvtsd();
1915 else
1916 gen_op_vfp_fcvtds();
1917 break;
1918 case 16: /* fuito */
1919 gen_vfp_uito(dp);
1920 break;
1921 case 17: /* fsito */
1922 gen_vfp_sito(dp);
1923 break;
1924 case 24: /* ftoui */
1925 gen_vfp_toui(dp);
1926 break;
1927 case 25: /* ftouiz */
1928 gen_vfp_touiz(dp);
1929 break;
1930 case 26: /* ftosi */
1931 gen_vfp_tosi(dp);
1932 break;
1933 case 27: /* ftosiz */
1934 gen_vfp_tosiz(dp);
1935 break;
1936 default: /* undefined */
1937 printf ("rn:%d\n", rn);
1938 return 1;
1940 break;
1941 default: /* undefined */
1942 printf ("op:%d\n", op);
1943 return 1;
1946 /* Write back the result. */
1947 if (op == 15 && (rn >= 8 && rn <= 11))
1948 ; /* Comparison, do nothing. */
1949 else if (op == 15 && rn > 17)
1950 /* Integer result. */
1951 gen_mov_vreg_F0(0, rd);
1952 else if (op == 15 && rn == 15)
1953 /* conversion */
1954 gen_mov_vreg_F0(!dp, rd);
1955 else
1956 gen_mov_vreg_F0(dp, rd);
1958 /* break out of the loop if we have finished */
1959 if (veclen == 0)
1960 break;
1962 if (op == 15 && delta_m == 0) {
1963 /* single source one-many */
1964 while (veclen--) {
1965 rd = ((rd + delta_d) & (bank_mask - 1))
1966 | (rd & bank_mask);
1967 gen_mov_vreg_F0(dp, rd);
1969 break;
1971 /* Set up the next operands. */
1972 veclen--;
1973 rd = ((rd + delta_d) & (bank_mask - 1))
1974 | (rd & bank_mask);
1976 if (op == 15) {
1977 /* One source operand. */
1978 rm = ((rm + delta_m) & (bank_mask - 1))
1979 | (rm & bank_mask);
1980 gen_mov_F0_vreg(dp, rm);
1981 } else {
1982 /* Two source operands. */
1983 rn = ((rn + delta_d) & (bank_mask - 1))
1984 | (rn & bank_mask);
1985 gen_mov_F0_vreg(dp, rn);
1986 if (delta_m) {
1987 rm = ((rm + delta_m) & (bank_mask - 1))
1988 | (rm & bank_mask);
1989 gen_mov_F1_vreg(dp, rm);
1994 break;
1995 case 0xc:
1996 case 0xd:
1997 if (dp && (insn & (1 << 22))) {
1998 /* two-register transfer */
1999 rn = (insn >> 16) & 0xf;
2000 rd = (insn >> 12) & 0xf;
2001 if (dp) {
2002 if (insn & (1 << 5))
2003 return 1;
2004 rm = insn & 0xf;
2005 } else
2006 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2008 if (insn & ARM_CP_RW_BIT) {
2009 /* vfp->arm */
2010 if (dp) {
2011 gen_mov_F0_vreg(1, rm);
2012 gen_op_vfp_mrrd();
2013 gen_movl_reg_T0(s, rd);
2014 gen_movl_reg_T1(s, rn);
2015 } else {
2016 gen_mov_F0_vreg(0, rm);
2017 gen_op_vfp_mrs();
2018 gen_movl_reg_T0(s, rn);
2019 gen_mov_F0_vreg(0, rm + 1);
2020 gen_op_vfp_mrs();
2021 gen_movl_reg_T0(s, rd);
2023 } else {
2024 /* arm->vfp */
2025 if (dp) {
2026 gen_movl_T0_reg(s, rd);
2027 gen_movl_T1_reg(s, rn);
2028 gen_op_vfp_mdrr();
2029 gen_mov_vreg_F0(1, rm);
2030 } else {
2031 gen_movl_T0_reg(s, rn);
2032 gen_op_vfp_msr();
2033 gen_mov_vreg_F0(0, rm);
2034 gen_movl_T0_reg(s, rd);
2035 gen_op_vfp_msr();
2036 gen_mov_vreg_F0(0, rm + 1);
2039 } else {
2040 /* Load/store */
2041 rn = (insn >> 16) & 0xf;
2042 if (dp)
2043 rd = (insn >> 12) & 0xf;
2044 else
2045 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
2046 gen_movl_T1_reg(s, rn);
2047 if ((insn & 0x01200000) == 0x01000000) {
2048 /* Single load/store */
2049 offset = (insn & 0xff) << 2;
2050 if ((insn & (1 << 23)) == 0)
2051 offset = -offset;
2052 gen_op_addl_T1_im(offset);
2053 if (insn & (1 << 20)) {
2054 gen_vfp_ld(s, dp);
2055 gen_mov_vreg_F0(dp, rd);
2056 } else {
2057 gen_mov_F0_vreg(dp, rd);
2058 gen_vfp_st(s, dp);
2060 } else {
2061 /* load/store multiple */
2062 if (dp)
2063 n = (insn >> 1) & 0x7f;
2064 else
2065 n = insn & 0xff;
2067 if (insn & (1 << 24)) /* pre-decrement */
2068 gen_op_addl_T1_im(-((insn & 0xff) << 2));
2070 if (dp)
2071 offset = 8;
2072 else
2073 offset = 4;
2074 for (i = 0; i < n; i++) {
2075 if (insn & ARM_CP_RW_BIT) {
2076 /* load */
2077 gen_vfp_ld(s, dp);
2078 gen_mov_vreg_F0(dp, rd + i);
2079 } else {
2080 /* store */
2081 gen_mov_F0_vreg(dp, rd + i);
2082 gen_vfp_st(s, dp);
2084 gen_op_addl_T1_im(offset);
2086 if (insn & (1 << 21)) {
2087 /* writeback */
2088 if (insn & (1 << 24))
2089 offset = -offset * n;
2090 else if (dp && (insn & 1))
2091 offset = 4;
2092 else
2093 offset = 0;
2095 if (offset != 0)
2096 gen_op_addl_T1_im(offset);
2097 gen_movl_reg_T1(s, rn);
2101 break;
2102 default:
2103 /* Should never happen. */
2104 return 1;
2106 return 0;
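/* Emit a jump to "dest".  Direct block chaining (goto_tb) is only used when
   the destination lies in the same guest page as this TB; otherwise the TB
   exits with T0 = 0 and the new PC is looked up at run time. */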
2109 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
2111 TranslationBlock *tb;
2113 tb = s->tb;
2114 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2115 if (n == 0)
2116 gen_op_goto_tb0(TBPARAM(tb));
2117 else
2118 gen_op_goto_tb1(TBPARAM(tb));
2119 gen_op_movl_T0_im(dest);
2120 gen_op_movl_r15_T0();
2121 gen_op_movl_T0_im((long)tb + n);
2122 gen_op_exit_tb();
2123 } else {
2124 gen_op_movl_T0_im(dest);
2125 gen_op_movl_r15_T0();
2126 gen_op_movl_T0_0();
2127 gen_op_exit_tb();
2131 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2133 if (__builtin_expect(s->singlestep_enabled, 0)) {
2134 /* An indirect jump so that we still trigger the debug exception. */
2135 if (s->thumb)
2136 dest |= 1;
2137 gen_op_movl_T0_im(dest);
2138 gen_bx(s);
2139 } else {
2140 gen_goto_tb(s, 0, dest);
2141 s->is_jmp = DISAS_TB_JUMP;
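/* 16x16 signed multiply helper: x and y select the top (non-zero) or bottom
   (zero) halfword of T0 and T1 respectively before the 32-bit multiply,
   e.g. x = 1, y = 0 multiplies the high half of T0 by the sign-extended low
   half of T1. */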
2145 static inline void gen_mulxy(int x, int y)
2147 if (x)
2148 gen_op_sarl_T0_im(16);
2149 else
2150 gen_op_sxth_T0();
2151 if (y)
2152 gen_op_sarl_T1_im(16);
2153 else
2154 gen_op_sxth_T1();
2155 gen_op_mul_T0_T1();
2158 /* Return the mask of PSR bits set by a MSR instruction. */
2159 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
2160 uint32_t mask;
2162 mask = 0;
2163 if (flags & (1 << 0))
2164 mask |= 0xff;
2165 if (flags & (1 << 1))
2166 mask |= 0xff00;
2167 if (flags & (1 << 2))
2168 mask |= 0xff0000;
2169 if (flags & (1 << 3))
2170 mask |= 0xff000000;
2171 /* Mask out undefined bits. */
2172 mask &= 0xf90f03ff;
2173 /* Mask out state bits. */
2174 if (!spsr)
2175 mask &= ~0x01000020;
2176 /* Mask out privileged bits. */
2177 if (IS_USER(s))
2178 mask &= 0xf80f0200;
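/* e.g. for "msr cpsr_f, rN" (flags field only) the resulting mask is
   0xf8000000, i.e. only N, Z, C, V and Q may be written. */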
2179 return mask;
2182 /* Returns nonzero if access to the PSR is not permitted. */
2183 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
2185 if (spsr) {
2186 /* ??? This is also undefined in system mode. */
2187 if (IS_USER(s))
2188 return 1;
2189 gen_op_movl_spsr_T0(mask);
2190 } else {
2191 gen_op_movl_cpsr_T0(mask);
2193 gen_lookup_tb(s);
2194 return 0;
2197 static void gen_exception_return(DisasContext *s)
2199 gen_op_movl_reg_TN[0][15]();
2200 gen_op_movl_T0_spsr();
2201 gen_op_movl_cpsr_T0(0xffffffff);
2202 s->is_jmp = DISAS_UPDATE;
2205 static void disas_arm_insn(CPUState * env, DisasContext *s)
2207 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
2209 insn = ldl_code(s->pc);
2210 s->pc += 4;
2212 cond = insn >> 28;
2213 if (cond == 0xf){
2214 /* Unconditional instructions. */
2215 if ((insn & 0x0d70f000) == 0x0550f000)
2216 return; /* PLD */
2217 else if ((insn & 0x0e000000) == 0x0a000000) {
2218 /* branch link and change to thumb (blx <offset>) */
2219 int32_t offset;
2221 val = (uint32_t)s->pc;
2222 gen_op_movl_T0_im(val);
2223 gen_movl_reg_T0(s, 14);
2224 /* Sign-extend the 24-bit offset */
2225 offset = (((int32_t)insn) << 8) >> 8;
2226 /* offset * 4 + bit24 * 2 + (thumb bit) */
2227 val += (offset << 2) | ((insn >> 23) & 2) | 1;
2228 /* pipeline offset */
2229 val += 4;
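/* Net effect: s->pc is already past this insn, so lr = blx_addr + 4 and the
   branch target is blx_addr + 8 + (simm24 << 2) + (H << 1), with bit 0 set
   so that gen_bx switches to Thumb state. */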
2230 gen_op_movl_T0_im(val);
2231 gen_bx(s);
2232 return;
2233 } else if ((insn & 0x0fe00000) == 0x0c400000) {
2234 /* Coprocessor double register transfer. */
2235 } else if ((insn & 0x0f000010) == 0x0e000010) {
2236 /* Additional coprocessor register transfer. */
2237 } else if ((insn & 0x0ff10010) == 0x01000000) {
2238 /* cps (privileged) */
2239 } else if ((insn & 0x0ffffdff) == 0x01010000) {
2240 /* setend */
2241 if (insn & (1 << 9)) {
2242 /* BE8 mode not implemented. */
2243 goto illegal_op;
2245 return;
2247 goto illegal_op;
2249 if (cond != 0xe) {
2250 /* if the condition is not 'always', generate a conditional jump to the
2251 next instruction */
2252 s->condlabel = gen_new_label();
2253 gen_test_cc[cond ^ 1](s->condlabel);
2254 s->condjmp = 1;
2255 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2256 //s->is_jmp = DISAS_JUMP_NEXT;
2258 if ((insn & 0x0f900000) == 0x03000000) {
2259 if ((insn & 0x0fb0f000) != 0x0320f000)
2260 goto illegal_op;
2261 /* CPSR = immediate */
2262 val = insn & 0xff;
2263 shift = ((insn >> 8) & 0xf) * 2;
2264 if (shift)
2265 val = (val >> shift) | (val << (32 - shift));
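/* ARM rotated immediate: imm8 rotated right by 2 * rot,
   e.g. imm8 = 0xff with rot = 4 gives 0xff000000. */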
2266 gen_op_movl_T0_im(val);
2267 i = ((insn & (1 << 22)) != 0);
2268 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
2269 goto illegal_op;
2270 } else if ((insn & 0x0f900000) == 0x01000000
2271 && (insn & 0x00000090) != 0x00000090) {
2272 /* miscellaneous instructions */
2273 op1 = (insn >> 21) & 3;
2274 sh = (insn >> 4) & 0xf;
2275 rm = insn & 0xf;
2276 switch (sh) {
2277 case 0x0: /* move program status register */
2278 if (op1 & 1) {
2279 /* PSR = reg */
2280 gen_movl_T0_reg(s, rm);
2281 i = ((op1 & 2) != 0);
2282 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
2283 goto illegal_op;
2284 } else {
2285 /* reg = PSR */
2286 rd = (insn >> 12) & 0xf;
2287 if (op1 & 2) {
2288 if (IS_USER(s))
2289 goto illegal_op;
2290 gen_op_movl_T0_spsr();
2291 } else {
2292 gen_op_movl_T0_cpsr();
2294 gen_movl_reg_T0(s, rd);
2296 break;
2297 case 0x1:
2298 if (op1 == 1) {
2299 /* branch/exchange thumb (bx). */
2300 gen_movl_T0_reg(s, rm);
2301 gen_bx(s);
2302 } else if (op1 == 3) {
2303 /* clz */
2304 rd = (insn >> 12) & 0xf;
2305 gen_movl_T0_reg(s, rm);
2306 gen_op_clz_T0();
2307 gen_movl_reg_T0(s, rd);
2308 } else {
2309 goto illegal_op;
2311 break;
2312 case 0x2:
2313 if (op1 == 1) {
2314 ARCH(5J); /* bxj */
2315 /* Trivial implementation equivalent to bx. */
2316 gen_movl_T0_reg(s, rm);
2317 gen_bx(s);
2318 } else {
2319 goto illegal_op;
2321 break;
2322 case 0x3:
2323 if (op1 != 1)
2324 goto illegal_op;
2326 /* branch link/exchange thumb (blx) */
2327 val = (uint32_t)s->pc;
2328 gen_op_movl_T0_im(val);
2329 gen_movl_reg_T0(s, 14);
2330 gen_movl_T0_reg(s, rm);
2331 gen_bx(s);
2332 break;
2333 case 0x5: /* saturating add/subtract */
2334 rd = (insn >> 12) & 0xf;
2335 rn = (insn >> 16) & 0xf;
2336 gen_movl_T0_reg(s, rm);
2337 gen_movl_T1_reg(s, rn);
2338 if (op1 & 2)
2339 gen_op_double_T1_saturate();
2340 if (op1 & 1)
2341 gen_op_subl_T0_T1_saturate();
2342 else
2343 gen_op_addl_T0_T1_saturate();
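 /* Note (added for clarity): op1 bit 1 selects the doubled forms,
  * which saturate 2*Rn first, and op1 bit 0 selects subtract vs add,
  * giving QADD, QSUB, QDADD and QDSUB respectively. */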
2344 gen_movl_reg_T0(s, rd);
2345 break;
2346 case 7: /* bkpt */
2347 gen_op_movl_T0_im((long)s->pc - 4);
2348 gen_op_movl_reg_TN[0][15]();
2349 gen_op_bkpt();
2350 s->is_jmp = DISAS_JUMP;
2351 break;
2352 case 0x8: /* signed multiply */
2353 case 0xa:
2354 case 0xc:
2355 case 0xe:
2356 rs = (insn >> 8) & 0xf;
2357 rn = (insn >> 12) & 0xf;
2358 rd = (insn >> 16) & 0xf;
2359 if (op1 == 1) {
2360 /* (32 * 16) >> 16 */
2361 gen_movl_T0_reg(s, rm);
2362 gen_movl_T1_reg(s, rs);
2363 if (sh & 4)
2364 gen_op_sarl_T1_im(16);
2365 else
2366 gen_op_sxth_T1();
2367 gen_op_imulw_T0_T1();
2368 if ((sh & 2) == 0) {
2369 gen_movl_T1_reg(s, rn);
2370 gen_op_addl_T0_T1_setq();
2372 gen_movl_reg_T0(s, rd);
2373 } else {
2374 /* 16 * 16 */
2375 gen_movl_T0_reg(s, rm);
2376 gen_movl_T1_reg(s, rs);
2377 gen_mulxy(sh & 2, sh & 4);
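 /* Note (added for clarity): sh & 2 is insn bit 5 (x, selecting the
  * top or bottom half of rm) and sh & 4 is insn bit 6 (y, selecting
  * the half of rs) for the SMULxy/SMLAxy/SMLALxy family. */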
2378 if (op1 == 2) {
2379 gen_op_signbit_T1_T0();
2380 gen_op_addq_T0_T1(rn, rd);
2381 gen_movl_reg_T0(s, rn);
2382 gen_movl_reg_T1(s, rd);
2383 } else {
2384 if (op1 == 0) {
2385 gen_movl_T1_reg(s, rn);
2386 gen_op_addl_T0_T1_setq();
2388 gen_movl_reg_T0(s, rd);
2391 break;
2392 default:
2393 goto illegal_op;
2395 } else if (((insn & 0x0e000000) == 0 &&
2396 (insn & 0x00000090) != 0x90) ||
2397 ((insn & 0x0e000000) == (1 << 25))) {
2398 int set_cc, logic_cc, shiftop;
2400 op1 = (insn >> 21) & 0xf;
2401 set_cc = (insn >> 20) & 1;
2402 logic_cc = table_logic_cc[op1] & set_cc;
2404 /* data processing instruction */
2405 if (insn & (1 << 25)) {
2406 /* immediate operand */
2407 val = insn & 0xff;
2408 shift = ((insn >> 8) & 0xf) * 2;
2409 if (shift)
2410 val = (val >> shift) | (val << (32 - shift));
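 /* Worked example (illustrative): imm8 = 0xff with rotate field 0xc
  * gives shift = 24, so val = (0xff >> 24) | (0xff << 8) = 0x0000ff00,
  * the usual ARM rotated-immediate decoding. */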
2411 gen_op_movl_T1_im(val);
2412 if (logic_cc && shift)
2413 gen_op_mov_CF_T1();
2414 } else {
2415 /* register */
2416 rm = (insn) & 0xf;
2417 gen_movl_T1_reg(s, rm);
2418 shiftop = (insn >> 5) & 3;
2419 if (!(insn & (1 << 4))) {
2420 shift = (insn >> 7) & 0x1f;
2421 if (shift != 0) {
2422 if (logic_cc) {
2423 gen_shift_T1_im_cc[shiftop](shift);
2424 } else {
2425 gen_shift_T1_im[shiftop](shift);
2427 } else if (shiftop != 0) {
2428 if (logic_cc) {
2429 gen_shift_T1_0_cc[shiftop]();
2430 } else {
2431 gen_shift_T1_0[shiftop]();
2434 } else {
2435 rs = (insn >> 8) & 0xf;
2436 gen_movl_T0_reg(s, rs);
2437 if (logic_cc) {
2438 gen_shift_T1_T0_cc[shiftop]();
2439 } else {
2440 gen_shift_T1_T0[shiftop]();
2444 if (op1 != 0x0f && op1 != 0x0d) {
2445 rn = (insn >> 16) & 0xf;
2446 gen_movl_T0_reg(s, rn);
2448 rd = (insn >> 12) & 0xf;
2449 switch(op1) {
2450 case 0x00:
2451 gen_op_andl_T0_T1();
2452 gen_movl_reg_T0(s, rd);
2453 if (logic_cc)
2454 gen_op_logic_T0_cc();
2455 break;
2456 case 0x01:
2457 gen_op_xorl_T0_T1();
2458 gen_movl_reg_T0(s, rd);
2459 if (logic_cc)
2460 gen_op_logic_T0_cc();
2461 break;
2462 case 0x02:
2463 if (set_cc && rd == 15) {
2464 /* SUBS r15, ... is used for exception return. */
2465 if (IS_USER(s))
2466 goto illegal_op;
2467 gen_op_subl_T0_T1_cc();
2468 gen_exception_return(s);
2469 } else {
2470 if (set_cc)
2471 gen_op_subl_T0_T1_cc();
2472 else
2473 gen_op_subl_T0_T1();
2474 gen_movl_reg_T0(s, rd);
2476 break;
2477 case 0x03:
2478 if (set_cc)
2479 gen_op_rsbl_T0_T1_cc();
2480 else
2481 gen_op_rsbl_T0_T1();
2482 gen_movl_reg_T0(s, rd);
2483 break;
2484 case 0x04:
2485 if (set_cc)
2486 gen_op_addl_T0_T1_cc();
2487 else
2488 gen_op_addl_T0_T1();
2489 gen_movl_reg_T0(s, rd);
2490 break;
2491 case 0x05:
2492 if (set_cc)
2493 gen_op_adcl_T0_T1_cc();
2494 else
2495 gen_op_adcl_T0_T1();
2496 gen_movl_reg_T0(s, rd);
2497 break;
2498 case 0x06:
2499 if (set_cc)
2500 gen_op_sbcl_T0_T1_cc();
2501 else
2502 gen_op_sbcl_T0_T1();
2503 gen_movl_reg_T0(s, rd);
2504 break;
2505 case 0x07:
2506 if (set_cc)
2507 gen_op_rscl_T0_T1_cc();
2508 else
2509 gen_op_rscl_T0_T1();
2510 gen_movl_reg_T0(s, rd);
2511 break;
2512 case 0x08:
2513 if (set_cc) {
2514 gen_op_andl_T0_T1();
2515 gen_op_logic_T0_cc();
2517 break;
2518 case 0x09:
2519 if (set_cc) {
2520 gen_op_xorl_T0_T1();
2521 gen_op_logic_T0_cc();
2523 break;
2524 case 0x0a:
2525 if (set_cc) {
2526 gen_op_subl_T0_T1_cc();
2528 break;
2529 case 0x0b:
2530 if (set_cc) {
2531 gen_op_addl_T0_T1_cc();
2533 break;
2534 case 0x0c:
2535 gen_op_orl_T0_T1();
2536 gen_movl_reg_T0(s, rd);
2537 if (logic_cc)
2538 gen_op_logic_T0_cc();
2539 break;
2540 case 0x0d:
2541 if (logic_cc && rd == 15) {
2542 /* MOVS r15, ... is used for exception return. */
2543 if (IS_USER(s))
2544 goto illegal_op;
2545 gen_op_movl_T0_T1();
2546 gen_exception_return(s);
2547 } else {
2548 gen_movl_reg_T1(s, rd);
2549 if (logic_cc)
2550 gen_op_logic_T1_cc();
2552 break;
2553 case 0x0e:
2554 gen_op_bicl_T0_T1();
2555 gen_movl_reg_T0(s, rd);
2556 if (logic_cc)
2557 gen_op_logic_T0_cc();
2558 break;
2559 default:
2560 case 0x0f:
2561 gen_op_notl_T1();
2562 gen_movl_reg_T1(s, rd);
2563 if (logic_cc)
2564 gen_op_logic_T1_cc();
2565 break;
2567 } else {
2568 /* other instructions */
2569 op1 = (insn >> 24) & 0xf;
2570 switch(op1) {
2571 case 0x0:
2572 case 0x1:
2573 /* multiplies, extra load/stores */
2574 sh = (insn >> 5) & 3;
2575 if (sh == 0) {
2576 if (op1 == 0x0) {
2577 rd = (insn >> 16) & 0xf;
2578 rn = (insn >> 12) & 0xf;
2579 rs = (insn >> 8) & 0xf;
2580 rm = (insn) & 0xf;
2581 if (((insn >> 22) & 3) == 0) {
2582 /* 32 bit mul */
2583 gen_movl_T0_reg(s, rs);
2584 gen_movl_T1_reg(s, rm);
2585 gen_op_mul_T0_T1();
2586 if (insn & (1 << 21)) {
2587 gen_movl_T1_reg(s, rn);
2588 gen_op_addl_T0_T1();
2590 if (insn & (1 << 20))
2591 gen_op_logic_T0_cc();
2592 gen_movl_reg_T0(s, rd);
2593 } else {
2594 /* 64 bit mul */
2595 gen_movl_T0_reg(s, rs);
2596 gen_movl_T1_reg(s, rm);
2597 if (insn & (1 << 22))
2598 gen_op_imull_T0_T1();
2599 else
2600 gen_op_mull_T0_T1();
2601 if (insn & (1 << 21)) /* mult accumulate */
2602 gen_op_addq_T0_T1(rn, rd);
2603 if (!(insn & (1 << 23))) { /* double accumulate */
2604 ARCH(6);
2605 gen_op_addq_lo_T0_T1(rn);
2606 gen_op_addq_lo_T0_T1(rd);
2608 if (insn & (1 << 20))
2609 gen_op_logicq_cc();
2610 gen_movl_reg_T0(s, rn);
2611 gen_movl_reg_T1(s, rd);
2613 } else {
2614 rn = (insn >> 16) & 0xf;
2615 rd = (insn >> 12) & 0xf;
2616 if (insn & (1 << 23)) {
2617 /* load/store exclusive */
2618 goto illegal_op;
2619 } else {
2620 /* SWP instruction */
2621 rm = (insn) & 0xf;
2623 gen_movl_T0_reg(s, rm);
2624 gen_movl_T1_reg(s, rn);
2625 if (insn & (1 << 22)) {
2626 gen_ldst(swpb, s);
2627 } else {
2628 gen_ldst(swpl, s);
2630 gen_movl_reg_T0(s, rd);
2633 } else {
2634 int address_offset;
2635 int load;
2636 /* Misc load/store */
2637 rn = (insn >> 16) & 0xf;
2638 rd = (insn >> 12) & 0xf;
2639 gen_movl_T1_reg(s, rn);
2640 if (insn & (1 << 24))
2641 gen_add_datah_offset(s, insn, 0);
2642 address_offset = 0;
2643 if (insn & (1 << 20)) {
2644 /* load */
2645 switch(sh) {
2646 case 1:
2647 gen_ldst(lduw, s);
2648 break;
2649 case 2:
2650 gen_ldst(ldsb, s);
2651 break;
2652 default:
2653 case 3:
2654 gen_ldst(ldsw, s);
2655 break;
2657 load = 1;
2658 } else if (sh & 2) {
2659 /* doubleword */
2660 if (sh & 1) {
2661 /* store */
2662 gen_movl_T0_reg(s, rd);
2663 gen_ldst(stl, s);
2664 gen_op_addl_T1_im(4);
2665 gen_movl_T0_reg(s, rd + 1);
2666 gen_ldst(stl, s);
2667 load = 0;
2668 } else {
2669 /* load */
2670 gen_ldst(ldl, s);
2671 gen_movl_reg_T0(s, rd);
2672 gen_op_addl_T1_im(4);
2673 gen_ldst(ldl, s);
2674 rd++;
2675 load = 1;
2677 address_offset = -4;
2678 } else {
2679 /* store */
2680 gen_movl_T0_reg(s, rd);
2681 gen_ldst(stw, s);
2682 load = 0;
2684 /* Perform base writeback before the loaded value to
2685 ensure correct behavior with overlapping index registers.
2686 ldrd with base writeback is undefined if the
2687 destination and index registers overlap. */
2688 if (!(insn & (1 << 24))) {
2689 gen_add_datah_offset(s, insn, address_offset);
2690 gen_movl_reg_T1(s, rn);
2691 } else if (insn & (1 << 21)) {
2692 if (address_offset)
2693 gen_op_addl_T1_im(address_offset);
2694 gen_movl_reg_T1(s, rn);
2696 if (load) {
2697 /* Complete the load. */
2698 gen_movl_reg_T0(s, rd);
2701 break;
2702 case 0x4:
2703 case 0x5:
2704 case 0x6:
2705 case 0x7:
2706 /* Check for undefined extension instructions
2707 * per the ARM Bible, i.e.:
2708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
2710 sh = (0xf << 20) | (0xf << 4);
2711 if (op1 == 0x7 && ((insn & sh) == sh))
2713 goto illegal_op;
2715 /* load/store byte/word */
2716 rn = (insn >> 16) & 0xf;
2717 rd = (insn >> 12) & 0xf;
2718 gen_movl_T1_reg(s, rn);
2719 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
2720 if (insn & (1 << 24))
2721 gen_add_data_offset(s, insn);
2722 if (insn & (1 << 20)) {
2723 /* load */
2724 s->is_mem = 1;
2725 #if defined(CONFIG_USER_ONLY)
2726 if (insn & (1 << 22))
2727 gen_op_ldub_raw();
2728 else
2729 gen_op_ldl_raw();
2730 #else
2731 if (insn & (1 << 22)) {
2732 if (i)
2733 gen_op_ldub_user();
2734 else
2735 gen_op_ldub_kernel();
2736 } else {
2737 if (i)
2738 gen_op_ldl_user();
2739 else
2740 gen_op_ldl_kernel();
2742 #endif
2743 } else {
2744 /* store */
2745 gen_movl_T0_reg(s, rd);
2746 #if defined(CONFIG_USER_ONLY)
2747 if (insn & (1 << 22))
2748 gen_op_stb_raw();
2749 else
2750 gen_op_stl_raw();
2751 #else
2752 if (insn & (1 << 22)) {
2753 if (i)
2754 gen_op_stb_user();
2755 else
2756 gen_op_stb_kernel();
2757 } else {
2758 if (i)
2759 gen_op_stl_user();
2760 else
2761 gen_op_stl_kernel();
2763 #endif
2765 if (!(insn & (1 << 24))) {
2766 gen_add_data_offset(s, insn);
2767 gen_movl_reg_T1(s, rn);
2768 } else if (insn & (1 << 21))
2769 gen_movl_reg_T1(s, rn);
2771 if (insn & (1 << 20)) {
2772 /* Complete the load. */
2773 if (rd == 15)
2774 gen_bx(s);
2775 else
2776 gen_movl_reg_T0(s, rd);
2778 break;
2779 case 0x08:
2780 case 0x09:
2782 int j, n, user, loaded_base;
2783 /* load/store multiple words */
2784 /* XXX: store correct base if write back */
2785 user = 0;
2786 if (insn & (1 << 22)) {
2787 if (IS_USER(s))
2788 goto illegal_op; /* only usable in supervisor mode */
2790 if ((insn & (1 << 15)) == 0)
2791 user = 1;
2793 rn = (insn >> 16) & 0xf;
2794 gen_movl_T1_reg(s, rn);
2796 /* compute total size */
2797 loaded_base = 0;
2798 n = 0;
2799 for(i=0;i<16;i++) {
2800 if (insn & (1 << i))
2801 n++;
2803 /* XXX: test invalid n == 0 case ? */
2804 if (insn & (1 << 23)) {
2805 if (insn & (1 << 24)) {
2806 /* pre increment */
2807 gen_op_addl_T1_im(4);
2808 } else {
2809 /* post increment */
2811 } else {
2812 if (insn & (1 << 24)) {
2813 /* pre decrement */
2814 gen_op_addl_T1_im(-(n * 4));
2815 } else {
2816 /* post decrement */
2817 if (n != 1)
2818 gen_op_addl_T1_im(-((n - 1) * 4));
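 /* Note (added for clarity): after this adjustment T1 holds the lowest
  * transfer address.  E.g. with n = 4 registers: IA -> Rn, IB -> Rn+4,
  * DA -> Rn-12, DB -> Rn-16; the loop below then steps up by 4. */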
2821 j = 0;
2822 for(i=0;i<16;i++) {
2823 if (insn & (1 << i)) {
2824 if (insn & (1 << 20)) {
2825 /* load */
2826 gen_ldst(ldl, s);
2827 if (i == 15) {
2828 gen_bx(s);
2829 } else if (user) {
2830 gen_op_movl_user_T0(i);
2831 } else if (i == rn) {
2832 gen_op_movl_T2_T0();
2833 loaded_base = 1;
2834 } else {
2835 gen_movl_reg_T0(s, i);
2837 } else {
2838 /* store */
2839 if (i == 15) {
2840 /* special case: r15 = PC + 12 */
2841 val = (long)s->pc + 8;
2842 gen_op_movl_TN_im[0](val);
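 /* Note (added for clarity): s->pc is insn address + 4, so insn + 12
  * is stored here, matching the "PC + 12" comment above; the value
  * stored for r15 by STM is implementation defined (PC+8 or PC+12). */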
2843 } else if (user) {
2844 gen_op_movl_T0_user(i);
2845 } else {
2846 gen_movl_T0_reg(s, i);
2848 gen_ldst(stl, s);
2850 j++;
2851 /* no need to add after the last transfer */
2852 if (j != n)
2853 gen_op_addl_T1_im(4);
2856 if (insn & (1 << 21)) {
2857 /* write back */
2858 if (insn & (1 << 23)) {
2859 if (insn & (1 << 24)) {
2860 /* pre increment */
2861 } else {
2862 /* post increment */
2863 gen_op_addl_T1_im(4);
2865 } else {
2866 if (insn & (1 << 24)) {
2867 /* pre decrement */
2868 if (n != 1)
2869 gen_op_addl_T1_im(-((n - 1) * 4));
2870 } else {
2871 /* post decrement */
2872 gen_op_addl_T1_im(-(n * 4));
2875 gen_movl_reg_T1(s, rn);
2877 if (loaded_base) {
2878 gen_op_movl_T0_T2();
2879 gen_movl_reg_T0(s, rn);
2881 if ((insn & (1 << 22)) && !user) {
2882 /* Restore CPSR from SPSR. */
2883 gen_op_movl_T0_spsr();
2884 gen_op_movl_cpsr_T0(0xffffffff);
2885 s->is_jmp = DISAS_UPDATE;
2888 break;
2889 case 0xa:
2890 case 0xb:
2892 int32_t offset;
2894 /* branch (and link) */
2895 val = (int32_t)s->pc;
2896 if (insn & (1 << 24)) {
2897 gen_op_movl_T0_im(val);
2898 gen_op_movl_reg_TN[0][14]();
2900 offset = (((int32_t)insn << 8) >> 8);
2901 val += (offset << 2) + 4;
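 /* Illustrative example (not in the original source): s->pc is already
  * insn address + 4, so the target is insn + 8 + (signed imm24 * 4);
  * imm24 = -2, for instance, branches back to the instruction itself. */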
2902 gen_jmp(s, val);
2904 break;
2905 case 0xc:
2906 case 0xd:
2907 case 0xe:
2908 /* Coprocessor. */
2909 op1 = (insn >> 8) & 0xf;
2910 if (arm_feature(env, ARM_FEATURE_XSCALE) &&
2911 ((env->cp15.c15_cpar ^ 0x3fff) & (1 << op1)))
2912 goto illegal_op;
2913 switch (op1) {
2914 case 0 ... 1:
2915 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2916 if (disas_iwmmxt_insn(env, s, insn))
2917 goto illegal_op;
2918 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2919 if (disas_dsp_insn(env, s, insn))
2920 goto illegal_op;
2921 } else
2922 goto illegal_op;
2923 break;
2924 case 2 ... 9:
2925 case 12 ... 14:
2926 if (disas_cp_insn (env, s, insn))
2927 goto illegal_op;
2928 break;
2929 case 10:
2930 case 11:
2931 if (disas_vfp_insn (env, s, insn))
2932 goto illegal_op;
2933 break;
2934 case 15:
2935 if (disas_cp15_insn (env, s, insn))
2936 goto illegal_op;
2937 break;
2938 default:
2939 /* unknown coprocessor. */
2940 goto illegal_op;
2942 break;
2943 case 0xf:
2944 /* swi */
2945 gen_op_movl_T0_im((long)s->pc);
2946 gen_op_movl_reg_TN[0][15]();
2947 gen_op_swi();
2948 s->is_jmp = DISAS_JUMP;
2949 break;
2950 default:
2951 illegal_op:
2952 gen_op_movl_T0_im((long)s->pc - 4);
2953 gen_op_movl_reg_TN[0][15]();
2954 gen_op_undef_insn();
2955 s->is_jmp = DISAS_JUMP;
2956 break;
2961 static void disas_thumb_insn(DisasContext *s)
2963 uint32_t val, insn, op, rm, rn, rd, shift, cond;
2964 int32_t offset;
2965 int i;
2967 insn = lduw_code(s->pc);
2968 s->pc += 2;
2970 switch (insn >> 12) {
2971 case 0: case 1:
2972 rd = insn & 7;
2973 op = (insn >> 11) & 3;
2974 if (op == 3) {
2975 /* add/subtract */
2976 rn = (insn >> 3) & 7;
2977 gen_movl_T0_reg(s, rn);
2978 if (insn & (1 << 10)) {
2979 /* immediate */
2980 gen_op_movl_T1_im((insn >> 6) & 7);
2981 } else {
2982 /* reg */
2983 rm = (insn >> 6) & 7;
2984 gen_movl_T1_reg(s, rm);
2986 if (insn & (1 << 9))
2987 gen_op_subl_T0_T1_cc();
2988 else
2989 gen_op_addl_T0_T1_cc();
2990 gen_movl_reg_T0(s, rd);
2991 } else {
2992 /* shift immediate */
2993 rm = (insn >> 3) & 7;
2994 shift = (insn >> 6) & 0x1f;
2995 gen_movl_T0_reg(s, rm);
2996 gen_shift_T0_im_thumb[op](shift);
2997 gen_movl_reg_T0(s, rd);
2999 break;
3000 case 2: case 3:
3001 /* arithmetic large immediate */
3002 op = (insn >> 11) & 3;
3003 rd = (insn >> 8) & 0x7;
3004 if (op == 0) {
3005 gen_op_movl_T0_im(insn & 0xff);
3006 } else {
3007 gen_movl_T0_reg(s, rd);
3008 gen_op_movl_T1_im(insn & 0xff);
3010 switch (op) {
3011 case 0: /* mov */
3012 gen_op_logic_T0_cc();
3013 break;
3014 case 1: /* cmp */
3015 gen_op_subl_T0_T1_cc();
3016 break;
3017 case 2: /* add */
3018 gen_op_addl_T0_T1_cc();
3019 break;
3020 case 3: /* sub */
3021 gen_op_subl_T0_T1_cc();
3022 break;
3024 if (op != 1)
3025 gen_movl_reg_T0(s, rd);
3026 break;
3027 case 4:
3028 if (insn & (1 << 11)) {
3029 rd = (insn >> 8) & 7;
3030 /* load pc-relative. Bit 1 of PC is ignored. */
3031 val = s->pc + 2 + ((insn & 0xff) * 4);
3032 val &= ~(uint32_t)2;
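 /* Worked example (illustrative): for LDR r0, [pc, #8] at 0x0202,
  * s->pc is 0x0204, so val = 0x0204 + 2 + 8 = 0x020e and the & ~2
  * word-aligns it to 0x020c, i.e. Align(PC, 4) + 8. */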
3033 gen_op_movl_T1_im(val);
3034 gen_ldst(ldl, s);
3035 gen_movl_reg_T0(s, rd);
3036 break;
3038 if (insn & (1 << 10)) {
3039 /* data processing extended or blx */
3040 rd = (insn & 7) | ((insn >> 4) & 8);
3041 rm = (insn >> 3) & 0xf;
3042 op = (insn >> 8) & 3;
3043 switch (op) {
3044 case 0: /* add */
3045 gen_movl_T0_reg(s, rd);
3046 gen_movl_T1_reg(s, rm);
3047 gen_op_addl_T0_T1();
3048 gen_movl_reg_T0(s, rd);
3049 break;
3050 case 1: /* cmp */
3051 gen_movl_T0_reg(s, rd);
3052 gen_movl_T1_reg(s, rm);
3053 gen_op_subl_T0_T1_cc();
3054 break;
3055 case 2: /* mov/cpy */
3056 gen_movl_T0_reg(s, rm);
3057 gen_movl_reg_T0(s, rd);
3058 break;
3059 case 3: /* branch [and link] exchange thumb register */
3060 if (insn & (1 << 7)) {
3061 val = (uint32_t)s->pc | 1;
3062 gen_op_movl_T1_im(val);
3063 gen_movl_reg_T1(s, 14);
3065 gen_movl_T0_reg(s, rm);
3066 gen_bx(s);
3067 break;
3069 break;
3072 /* data processing register */
3073 rd = insn & 7;
3074 rm = (insn >> 3) & 7;
3075 op = (insn >> 6) & 0xf;
3076 if (op == 2 || op == 3 || op == 4 || op == 7) {
3077 /* the shift/rotate ops want the operands backwards */
3078 val = rm;
3079 rm = rd;
3080 rd = val;
3081 val = 1;
3082 } else {
3083 val = 0;
3086 if (op == 9) /* neg */
3087 gen_op_movl_T0_im(0);
3088 else if (op != 0xf) /* mvn doesn't read its first operand */
3089 gen_movl_T0_reg(s, rd);
3091 gen_movl_T1_reg(s, rm);
3092 switch (op) {
3093 case 0x0: /* and */
3094 gen_op_andl_T0_T1();
3095 gen_op_logic_T0_cc();
3096 break;
3097 case 0x1: /* eor */
3098 gen_op_xorl_T0_T1();
3099 gen_op_logic_T0_cc();
3100 break;
3101 case 0x2: /* lsl */
3102 gen_op_shll_T1_T0_cc();
3103 gen_op_logic_T1_cc();
3104 break;
3105 case 0x3: /* lsr */
3106 gen_op_shrl_T1_T0_cc();
3107 gen_op_logic_T1_cc();
3108 break;
3109 case 0x4: /* asr */
3110 gen_op_sarl_T1_T0_cc();
3111 gen_op_logic_T1_cc();
3112 break;
3113 case 0x5: /* adc */
3114 gen_op_adcl_T0_T1_cc();
3115 break;
3116 case 0x6: /* sbc */
3117 gen_op_sbcl_T0_T1_cc();
3118 break;
3119 case 0x7: /* ror */
3120 gen_op_rorl_T1_T0_cc();
3121 gen_op_logic_T1_cc();
3122 break;
3123 case 0x8: /* tst */
3124 gen_op_andl_T0_T1();
3125 gen_op_logic_T0_cc();
3126 rd = 16;
3127 break;
3128 case 0x9: /* neg */
3129 gen_op_subl_T0_T1_cc();
3130 break;
3131 case 0xa: /* cmp */
3132 gen_op_subl_T0_T1_cc();
3133 rd = 16;
3134 break;
3135 case 0xb: /* cmn */
3136 gen_op_addl_T0_T1_cc();
3137 rd = 16;
3138 break;
3139 case 0xc: /* orr */
3140 gen_op_orl_T0_T1();
3141 gen_op_logic_T0_cc();
3142 break;
3143 case 0xd: /* mul */
3144 gen_op_mull_T0_T1();
3145 gen_op_logic_T0_cc();
3146 break;
3147 case 0xe: /* bic */
3148 gen_op_bicl_T0_T1();
3149 gen_op_logic_T0_cc();
3150 break;
3151 case 0xf: /* mvn */
3152 gen_op_notl_T1();
3153 gen_op_logic_T1_cc();
3154 val = 1;
3155 rm = rd;
3156 break;
3158 if (rd != 16) {
3159 if (val)
3160 gen_movl_reg_T1(s, rm);
3161 else
3162 gen_movl_reg_T0(s, rd);
3164 break;
3166 case 5:
3167 /* load/store register offset. */
3168 rd = insn & 7;
3169 rn = (insn >> 3) & 7;
3170 rm = (insn >> 6) & 7;
3171 op = (insn >> 9) & 7;
3172 gen_movl_T1_reg(s, rn);
3173 gen_movl_T2_reg(s, rm);
3174 gen_op_addl_T1_T2();
3176 if (op < 3) /* store */
3177 gen_movl_T0_reg(s, rd);
3179 switch (op) {
3180 case 0: /* str */
3181 gen_ldst(stl, s);
3182 break;
3183 case 1: /* strh */
3184 gen_ldst(stw, s);
3185 break;
3186 case 2: /* strb */
3187 gen_ldst(stb, s);
3188 break;
3189 case 3: /* ldrsb */
3190 gen_ldst(ldsb, s);
3191 break;
3192 case 4: /* ldr */
3193 gen_ldst(ldl, s);
3194 break;
3195 case 5: /* ldrh */
3196 gen_ldst(lduw, s);
3197 break;
3198 case 6: /* ldrb */
3199 gen_ldst(ldub, s);
3200 break;
3201 case 7: /* ldrsh */
3202 gen_ldst(ldsw, s);
3203 break;
3205 if (op >= 3) /* load */
3206 gen_movl_reg_T0(s, rd);
3207 break;
3209 case 6:
3210 /* load/store word immediate offset */
3211 rd = insn & 7;
3212 rn = (insn >> 3) & 7;
3213 gen_movl_T1_reg(s, rn);
3214 val = (insn >> 4) & 0x7c;
3215 gen_op_movl_T2_im(val);
3216 gen_op_addl_T1_T2();
3218 if (insn & (1 << 11)) {
3219 /* load */
3220 gen_ldst(ldl, s);
3221 gen_movl_reg_T0(s, rd);
3222 } else {
3223 /* store */
3224 gen_movl_T0_reg(s, rd);
3225 gen_ldst(stl, s);
3227 break;
3229 case 7:
3230 /* load/store byte immediate offset */
3231 rd = insn & 7;
3232 rn = (insn >> 3) & 7;
3233 gen_movl_T1_reg(s, rn);
3234 val = (insn >> 6) & 0x1f;
3235 gen_op_movl_T2_im(val);
3236 gen_op_addl_T1_T2();
3238 if (insn & (1 << 11)) {
3239 /* load */
3240 gen_ldst(ldub, s);
3241 gen_movl_reg_T0(s, rd);
3242 } else {
3243 /* store */
3244 gen_movl_T0_reg(s, rd);
3245 gen_ldst(stb, s);
3247 break;
3249 case 8:
3250 /* load/store halfword immediate offset */
3251 rd = insn & 7;
3252 rn = (insn >> 3) & 7;
3253 gen_movl_T1_reg(s, rn);
3254 val = (insn >> 5) & 0x3e;
3255 gen_op_movl_T2_im(val);
3256 gen_op_addl_T1_T2();
3258 if (insn & (1 << 11)) {
3259 /* load */
3260 gen_ldst(lduw, s);
3261 gen_movl_reg_T0(s, rd);
3262 } else {
3263 /* store */
3264 gen_movl_T0_reg(s, rd);
3265 gen_ldst(stw, s);
3267 break;
3269 case 9:
3270 /* load/store from stack */
3271 rd = (insn >> 8) & 7;
3272 gen_movl_T1_reg(s, 13);
3273 val = (insn & 0xff) * 4;
3274 gen_op_movl_T2_im(val);
3275 gen_op_addl_T1_T2();
3277 if (insn & (1 << 11)) {
3278 /* load */
3279 gen_ldst(ldl, s);
3280 gen_movl_reg_T0(s, rd);
3281 } else {
3282 /* store */
3283 gen_movl_T0_reg(s, rd);
3284 gen_ldst(stl, s);
3286 break;
3288 case 10:
3289 /* add to high reg */
3290 rd = (insn >> 8) & 7;
3291 if (insn & (1 << 11)) {
3292 /* SP */
3293 gen_movl_T0_reg(s, 13);
3294 } else {
3295 /* PC. bit 1 is ignored. */
3296 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
3298 val = (insn & 0xff) * 4;
3299 gen_op_movl_T1_im(val);
3300 gen_op_addl_T0_T1();
3301 gen_movl_reg_T0(s, rd);
3302 break;
3304 case 11:
3305 /* misc */
3306 op = (insn >> 8) & 0xf;
3307 switch (op) {
3308 case 0:
3309 /* adjust stack pointer */
3310 gen_movl_T1_reg(s, 13);
3311 val = (insn & 0x7f) * 4;
3312 if (insn & (1 << 7))
3313 val = -(int32_t)val;
3314 gen_op_movl_T2_im(val);
3315 gen_op_addl_T1_T2();
3316 gen_movl_reg_T1(s, 13);
3317 break;
3319 case 4: case 5: case 0xc: case 0xd:
3320 /* push/pop */
3321 gen_movl_T1_reg(s, 13);
3322 if (insn & (1 << 8))
3323 offset = 4;
3324 else
3325 offset = 0;
3326 for (i = 0; i < 8; i++) {
3327 if (insn & (1 << i))
3328 offset += 4;
3330 if ((insn & (1 << 11)) == 0) {
3331 gen_op_movl_T2_im(-offset);
3332 gen_op_addl_T1_T2();
3334 gen_op_movl_T2_im(4);
3335 for (i = 0; i < 8; i++) {
3336 if (insn & (1 << i)) {
3337 if (insn & (1 << 11)) {
3338 /* pop */
3339 gen_ldst(ldl, s);
3340 gen_movl_reg_T0(s, i);
3341 } else {
3342 /* push */
3343 gen_movl_T0_reg(s, i);
3344 gen_ldst(stl, s);
3346 /* advance to the next address. */
3347 gen_op_addl_T1_T2();
3350 if (insn & (1 << 8)) {
3351 if (insn & (1 << 11)) {
3352 /* pop pc */
3353 gen_ldst(ldl, s);
3354 /* don't set the pc until the rest of the instruction
3355 has completed */
3356 } else {
3357 /* push lr */
3358 gen_movl_T0_reg(s, 14);
3359 gen_ldst(stl, s);
3361 gen_op_addl_T1_T2();
3363 if ((insn & (1 << 11)) == 0) {
3364 gen_op_movl_T2_im(-offset);
3365 gen_op_addl_T1_T2();
3367 /* write back the new stack pointer */
3368 gen_movl_reg_T1(s, 13);
3369 /* set the new PC value */
3370 if ((insn & 0x0900) == 0x0900)
3371 gen_bx(s);
3372 break;
3374 case 0xe: /* bkpt */
3375 gen_op_movl_T0_im((long)s->pc - 2);
3376 gen_op_movl_reg_TN[0][15]();
3377 gen_op_bkpt();
3378 s->is_jmp = DISAS_JUMP;
3379 break;
3381 default:
3382 goto undef;
3384 break;
3386 case 12:
3387 /* load/store multiple */
3388 rn = (insn >> 8) & 0x7;
3389 gen_movl_T1_reg(s, rn);
3390 gen_op_movl_T2_im(4);
3391 for (i = 0; i < 8; i++) {
3392 if (insn & (1 << i)) {
3393 if (insn & (1 << 11)) {
3394 /* load */
3395 gen_ldst(ldl, s);
3396 gen_movl_reg_T0(s, i);
3397 } else {
3398 /* store */
3399 gen_movl_T0_reg(s, i);
3400 gen_ldst(stl, s);
3402 /* advance to the next address */
3403 gen_op_addl_T1_T2();
3406 /* Base register writeback. */
3407 if ((insn & (1 << rn)) == 0)
3408 gen_movl_reg_T1(s, rn);
3409 break;
3411 case 13:
3412 /* conditional branch or swi */
3413 cond = (insn >> 8) & 0xf;
3414 if (cond == 0xe)
3415 goto undef;
3417 if (cond == 0xf) {
3418 /* swi */
3419 gen_op_movl_T0_im((long)s->pc | 1);
3420 /* Don't set r15. */
3421 gen_op_movl_reg_TN[0][15]();
3422 gen_op_swi();
3423 s->is_jmp = DISAS_JUMP;
3424 break;
3426 /* generate a conditional jump to next instruction */
3427 s->condlabel = gen_new_label();
3428 gen_test_cc[cond ^ 1](s->condlabel);
3429 s->condjmp = 1;
3430 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
3431 //s->is_jmp = DISAS_JUMP_NEXT;
3432 gen_movl_T1_reg(s, 15);
3434 /* jump to the offset */
3435 val = (uint32_t)s->pc + 2;
3436 offset = ((int32_t)insn << 24) >> 24;
3437 val += offset << 1;
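 /* Note (added for clarity): s->pc + 2 is the architectural PC
  * (insn address + 4), so the target is insn + 4 + (signed imm8 << 1);
  * imm8 = -2 (0xfe) branches back to the instruction itself. */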
3438 gen_jmp(s, val);
3439 break;
3441 case 14:
3442 /* unconditional branch */
3443 if (insn & (1 << 11)) {
3444 /* Second half of blx. */
3445 offset = ((insn & 0x7ff) << 1);
3446 gen_movl_T0_reg(s, 14);
3447 gen_op_movl_T1_im(offset);
3448 gen_op_addl_T0_T1();
3449 gen_op_movl_T1_im(0xfffffffc);
3450 gen_op_andl_T0_T1();
3452 val = (uint32_t)s->pc;
3453 gen_op_movl_T1_im(val | 1);
3454 gen_movl_reg_T1(s, 14);
3455 gen_bx(s);
3456 break;
3458 val = (uint32_t)s->pc;
3459 offset = ((int32_t)insn << 21) >> 21;
3460 val += (offset << 1) + 2;
3461 gen_jmp(s, val);
3462 break;
3464 case 15:
3465 /* branch and link [and switch to arm] */
3466 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
3467 /* Instruction spans a page boundary. Implement it as two
3468 16-bit instructions in case the second half causes a
3469 prefetch abort. */
3470 offset = ((int32_t)insn << 21) >> 9;
3471 val = s->pc + 2 + offset;
3472 gen_op_movl_T0_im(val);
3473 gen_movl_reg_T0(s, 14);
3474 break;
3476 if (insn & (1 << 11)) {
3477 /* Second half of bl. */
3478 offset = ((insn & 0x7ff) << 1) | 1;
3479 gen_movl_T0_reg(s, 14);
3480 gen_op_movl_T1_im(offset);
3481 gen_op_addl_T0_T1();
3483 val = (uint32_t)s->pc;
3484 gen_op_movl_T1_im(val | 1);
3485 gen_movl_reg_T1(s, 14);
3486 gen_bx(s);
3487 break;
3489 offset = ((int32_t)insn << 21) >> 10;
3490 insn = lduw_code(s->pc);
3491 offset |= insn & 0x7ff;
3493 val = (uint32_t)s->pc + 2;
3494 gen_op_movl_T1_im(val | 1);
3495 gen_movl_reg_T1(s, 14);
3497 val += offset << 1;
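 /* Note (added for clarity): the 22-bit offset is imm11 from the first
  * halfword (sign-extended and shifted left 11) OR'd with imm11 from
  * the second halfword; val started as the address just past the
  * 4-byte pair, which is also the return address written to r14 above. */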
3498 if (insn & (1 << 12)) {
3499 /* bl */
3500 gen_jmp(s, val);
3501 } else {
3502 /* blx */
3503 val &= ~(uint32_t)2;
3504 gen_op_movl_T0_im(val);
3505 gen_bx(s);
3508 return;
3509 undef:
3510 gen_op_movl_T0_im((long)s->pc - 2);
3511 gen_op_movl_reg_TN[0][15]();
3512 gen_op_undef_insn();
3513 s->is_jmp = DISAS_JUMP;
3516 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
3517 basic block 'tb'. If search_pc is TRUE, also generate PC
3518 information for each intermediate instruction. */
3519 static inline int gen_intermediate_code_internal(CPUState *env,
3520 TranslationBlock *tb,
3521 int search_pc)
3523 DisasContext dc1, *dc = &dc1;
3524 uint16_t *gen_opc_end;
3525 int j, lj;
3526 target_ulong pc_start;
3527 uint32_t next_page_start;
3529 /* generate intermediate code */
3530 pc_start = tb->pc;
3532 dc->tb = tb;
3534 gen_opc_ptr = gen_opc_buf;
3535 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3536 gen_opparam_ptr = gen_opparam_buf;
3538 dc->is_jmp = DISAS_NEXT;
3539 dc->pc = pc_start;
3540 dc->singlestep_enabled = env->singlestep_enabled;
3541 dc->condjmp = 0;
3542 dc->thumb = env->thumb;
3543 dc->is_mem = 0;
3544 #if !defined(CONFIG_USER_ONLY)
3545 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
3546 #endif
3547 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3548 nb_gen_labels = 0;
3549 lj = -1;
3550 do {
3551 if (env->nb_breakpoints > 0) {
3552 for(j = 0; j < env->nb_breakpoints; j++) {
3553 if (env->breakpoints[j] == dc->pc) {
3554 gen_op_movl_T0_im((long)dc->pc);
3555 gen_op_movl_reg_TN[0][15]();
3556 gen_op_debug();
3557 dc->is_jmp = DISAS_JUMP;
3558 break;
3562 if (search_pc) {
3563 j = gen_opc_ptr - gen_opc_buf;
3564 if (lj < j) {
3565 lj++;
3566 while (lj < j)
3567 gen_opc_instr_start[lj++] = 0;
3569 gen_opc_pc[lj] = dc->pc;
3570 gen_opc_instr_start[lj] = 1;
3573 if (env->thumb)
3574 disas_thumb_insn(dc);
3575 else
3576 disas_arm_insn(env, dc);
3578 if (dc->condjmp && !dc->is_jmp) {
3579 gen_set_label(dc->condlabel);
3580 dc->condjmp = 0;
3582 /* Terminate the TB on memory ops if watchpoints are present. */
3583 /* FIXME: This should be replaced by the deterministic execution
3584 * IRQ raising bits. */
3585 if (dc->is_mem && env->nb_watchpoints)
3586 break;
3588 /* Translation stops when a conditional branch is encountered.
3589 * Otherwise the subsequent code could get translated several times.
3590 * Also stop translation when a page boundary is reached. This
3591 * ensures prefetch aborts occur at the right place. */
3592 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
3593 !env->singlestep_enabled &&
3594 dc->pc < next_page_start);
3595 /* At this stage dc->condjmp will only be set when the skipped
3596 * instruction was a conditional branch, and the PC has already been
3597 * written. */
3598 if (__builtin_expect(env->singlestep_enabled, 0)) {
3599 /* Make sure the pc is updated, and raise a debug exception. */
3600 if (dc->condjmp) {
3601 gen_op_debug();
3602 gen_set_label(dc->condlabel);
3604 if (dc->condjmp || !dc->is_jmp) {
3605 gen_op_movl_T0_im((long)dc->pc);
3606 gen_op_movl_reg_TN[0][15]();
3607 dc->condjmp = 0;
3609 gen_op_debug();
3610 } else {
3611 switch(dc->is_jmp) {
3612 case DISAS_NEXT:
3613 gen_goto_tb(dc, 1, dc->pc);
3614 break;
3615 default:
3616 case DISAS_JUMP:
3617 case DISAS_UPDATE:
3618 /* indicate that the hash table must be used to find the next TB */
3619 gen_op_movl_T0_0();
3620 gen_op_exit_tb();
3621 break;
3622 case DISAS_TB_JUMP:
3623 /* nothing more to generate */
3624 break;
3626 if (dc->condjmp) {
3627 gen_set_label(dc->condlabel);
3628 gen_goto_tb(dc, 1, dc->pc);
3629 dc->condjmp = 0;
3632 *gen_opc_ptr = INDEX_op_end;
3634 #ifdef DEBUG_DISAS
3635 if (loglevel & CPU_LOG_TB_IN_ASM) {
3636 fprintf(logfile, "----------------\n");
3637 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
3638 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
3639 fprintf(logfile, "\n");
3640 if (loglevel & (CPU_LOG_TB_OP)) {
3641 fprintf(logfile, "OP:\n");
3642 dump_ops(gen_opc_buf, gen_opparam_buf);
3643 fprintf(logfile, "\n");
3646 #endif
3647 if (search_pc) {
3648 j = gen_opc_ptr - gen_opc_buf;
3649 lj++;
3650 while (lj <= j)
3651 gen_opc_instr_start[lj++] = 0;
3652 tb->size = 0;
3653 } else {
3654 tb->size = dc->pc - pc_start;
3656 return 0;
3659 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
3661 return gen_intermediate_code_internal(env, tb, 0);
3664 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
3666 return gen_intermediate_code_internal(env, tb, 1);
3669 static const char *cpu_mode_names[16] = {
3670 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
3671 "???", "???", "???", "und", "???", "???", "???", "sys"
3673 void cpu_dump_state(CPUState *env, FILE *f,
3674 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3675 int flags)
3677 int i;
3678 union {
3679 uint32_t i;
3680 float s;
3681 } s0, s1;
3682 CPU_DoubleU d;
3683 /* ??? This assumes float64 and double have the same layout.
3684 Oh well, it's only debug dumps. */
3685 union {
3686 float64 f64;
3687 double d;
3688 } d0;
3689 uint32_t psr;
3691 for(i=0;i<16;i++) {
3692 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
3693 if ((i % 4) == 3)
3694 cpu_fprintf(f, "\n");
3695 else
3696 cpu_fprintf(f, " ");
3698 psr = cpsr_read(env);
3699 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
3700 psr,
3701 psr & (1 << 31) ? 'N' : '-',
3702 psr & (1 << 30) ? 'Z' : '-',
3703 psr & (1 << 29) ? 'C' : '-',
3704 psr & (1 << 28) ? 'V' : '-',
3705 psr & CPSR_T ? 'T' : 'A',
3706 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
3708 for (i = 0; i < 16; i++) {
3709 d.d = env->vfp.regs[i];
3710 s0.i = d.l.lower;
3711 s1.i = d.l.upper;
3712 d0.f64 = d.d;
3713 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
3714 i * 2, (int)s0.i, s0.s,
3715 i * 2 + 1, (int)s1.i, s1.s,
3716 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
3717 d0.d);
3719 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);