Fix typo in comment, by Andreas Faerber.
[qemu/dscho.git] / target-arm / translate.c
blob799aef2f15b1d118b9b5e25a02caeac8ff66a690
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <stdarg.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <string.h>
26 #include <inttypes.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "disas.h"
/* Architecture-feature gates: a decoder uses ARCH(x) to reject
   instructions belonging to a feature that is compiled out.  */
#define ENABLE_ARCH_5J  0
#define ENABLE_ARCH_6   1
#define ENABLE_ARCH_6T2 1

#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
38 /* internal defines */
39 typedef struct DisasContext {
40 target_ulong pc;
41 int is_jmp;
42 /* Nonzero if this instruction has been conditionally skipped. */
43 int condjmp;
44 /* The label that will be jumped to when the instruction is skipped. */
45 int condlabel;
46 struct TranslationBlock *tb;
47 int singlestep_enabled;
48 int thumb;
49 int is_mem;
50 #if !defined(CONFIG_USER_ONLY)
51 int user;
52 #endif
53 } DisasContext;
/* In user-only emulation everything runs unprivileged; otherwise the
   privilege level is tracked per-TB in the DisasContext.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

#define DISAS_JUMP_NEXT 4

#ifdef USE_DIRECT_JUMP
#define TBPARAM(x)
#else
#define TBPARAM(x) (long)(x)
#endif
69 /* XXX: move that elsewhere */
70 static uint16_t *gen_opc_ptr;
71 static uint32_t *gen_opparam_ptr;
72 extern FILE *logfile;
73 extern int loglevel;
75 enum {
76 #define DEF(s, n, copy_size) INDEX_op_ ## s,
77 #include "opc.h"
78 #undef DEF
79 NB_OPS,
82 #include "gen-op.h"
84 static GenOpFunc1 *gen_test_cc[14] = {
85 gen_op_test_eq,
86 gen_op_test_ne,
87 gen_op_test_cs,
88 gen_op_test_cc,
89 gen_op_test_mi,
90 gen_op_test_pl,
91 gen_op_test_vs,
92 gen_op_test_vc,
93 gen_op_test_hi,
94 gen_op_test_ls,
95 gen_op_test_ge,
96 gen_op_test_lt,
97 gen_op_test_gt,
98 gen_op_test_le,
/* For each data-processing opcode: 1 if the S bit updates flags via the
   logical (NZ-from-result) rule rather than the arithmetic rule.  */
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
120 static GenOpFunc1 *gen_shift_T1_im[4] = {
121 gen_op_shll_T1_im,
122 gen_op_shrl_T1_im,
123 gen_op_sarl_T1_im,
124 gen_op_rorl_T1_im,
127 static GenOpFunc *gen_shift_T1_0[4] = {
128 NULL,
129 gen_op_shrl_T1_0,
130 gen_op_sarl_T1_0,
131 gen_op_rrxl_T1,
134 static GenOpFunc1 *gen_shift_T2_im[4] = {
135 gen_op_shll_T2_im,
136 gen_op_shrl_T2_im,
137 gen_op_sarl_T2_im,
138 gen_op_rorl_T2_im,
141 static GenOpFunc *gen_shift_T2_0[4] = {
142 NULL,
143 gen_op_shrl_T2_0,
144 gen_op_sarl_T2_0,
145 gen_op_rrxl_T2,
148 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
149 gen_op_shll_T1_im_cc,
150 gen_op_shrl_T1_im_cc,
151 gen_op_sarl_T1_im_cc,
152 gen_op_rorl_T1_im_cc,
155 static GenOpFunc *gen_shift_T1_0_cc[4] = {
156 NULL,
157 gen_op_shrl_T1_0_cc,
158 gen_op_sarl_T1_0_cc,
159 gen_op_rrxl_T1_cc,
162 static GenOpFunc *gen_shift_T1_T0[4] = {
163 gen_op_shll_T1_T0,
164 gen_op_shrl_T1_T0,
165 gen_op_sarl_T1_T0,
166 gen_op_rorl_T1_T0,
169 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
170 gen_op_shll_T1_T0_cc,
171 gen_op_shrl_T1_T0_cc,
172 gen_op_sarl_T1_T0_cc,
173 gen_op_rorl_T1_T0_cc,
176 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
178 gen_op_movl_T0_r0,
179 gen_op_movl_T0_r1,
180 gen_op_movl_T0_r2,
181 gen_op_movl_T0_r3,
182 gen_op_movl_T0_r4,
183 gen_op_movl_T0_r5,
184 gen_op_movl_T0_r6,
185 gen_op_movl_T0_r7,
186 gen_op_movl_T0_r8,
187 gen_op_movl_T0_r9,
188 gen_op_movl_T0_r10,
189 gen_op_movl_T0_r11,
190 gen_op_movl_T0_r12,
191 gen_op_movl_T0_r13,
192 gen_op_movl_T0_r14,
193 gen_op_movl_T0_r15,
196 gen_op_movl_T1_r0,
197 gen_op_movl_T1_r1,
198 gen_op_movl_T1_r2,
199 gen_op_movl_T1_r3,
200 gen_op_movl_T1_r4,
201 gen_op_movl_T1_r5,
202 gen_op_movl_T1_r6,
203 gen_op_movl_T1_r7,
204 gen_op_movl_T1_r8,
205 gen_op_movl_T1_r9,
206 gen_op_movl_T1_r10,
207 gen_op_movl_T1_r11,
208 gen_op_movl_T1_r12,
209 gen_op_movl_T1_r13,
210 gen_op_movl_T1_r14,
211 gen_op_movl_T1_r15,
214 gen_op_movl_T2_r0,
215 gen_op_movl_T2_r1,
216 gen_op_movl_T2_r2,
217 gen_op_movl_T2_r3,
218 gen_op_movl_T2_r4,
219 gen_op_movl_T2_r5,
220 gen_op_movl_T2_r6,
221 gen_op_movl_T2_r7,
222 gen_op_movl_T2_r8,
223 gen_op_movl_T2_r9,
224 gen_op_movl_T2_r10,
225 gen_op_movl_T2_r11,
226 gen_op_movl_T2_r12,
227 gen_op_movl_T2_r13,
228 gen_op_movl_T2_r14,
229 gen_op_movl_T2_r15,
233 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
235 gen_op_movl_r0_T0,
236 gen_op_movl_r1_T0,
237 gen_op_movl_r2_T0,
238 gen_op_movl_r3_T0,
239 gen_op_movl_r4_T0,
240 gen_op_movl_r5_T0,
241 gen_op_movl_r6_T0,
242 gen_op_movl_r7_T0,
243 gen_op_movl_r8_T0,
244 gen_op_movl_r9_T0,
245 gen_op_movl_r10_T0,
246 gen_op_movl_r11_T0,
247 gen_op_movl_r12_T0,
248 gen_op_movl_r13_T0,
249 gen_op_movl_r14_T0,
250 gen_op_movl_r15_T0,
253 gen_op_movl_r0_T1,
254 gen_op_movl_r1_T1,
255 gen_op_movl_r2_T1,
256 gen_op_movl_r3_T1,
257 gen_op_movl_r4_T1,
258 gen_op_movl_r5_T1,
259 gen_op_movl_r6_T1,
260 gen_op_movl_r7_T1,
261 gen_op_movl_r8_T1,
262 gen_op_movl_r9_T1,
263 gen_op_movl_r10_T1,
264 gen_op_movl_r11_T1,
265 gen_op_movl_r12_T1,
266 gen_op_movl_r13_T1,
267 gen_op_movl_r14_T1,
268 gen_op_movl_r15_T1,
272 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
273 gen_op_movl_T0_im,
274 gen_op_movl_T1_im,
275 gen_op_movl_T2_im,
278 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
279 gen_op_shll_T0_im_thumb,
280 gen_op_shrl_T0_im_thumb,
281 gen_op_sarl_T0_im_thumb,
284 static inline void gen_bx(DisasContext *s)
286 s->is_jmp = DISAS_UPDATE;
287 gen_op_bx_T0();
/* Emit a load/store micro-op.  User-only builds have a single address
   space ("raw"); system builds select the user or kernel variant from
   the current privilege level and record that this TB touches memory.  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
303 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
305 int val;
307 if (reg == 15) {
308 /* normaly, since we updated PC, we need only to add one insn */
309 if (s->thumb)
310 val = (long)s->pc + 2;
311 else
312 val = (long)s->pc + 4;
313 gen_op_movl_TN_im[t](val);
314 } else {
315 gen_op_movl_TN_reg[t][reg]();
319 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
321 gen_movl_TN_reg(s, reg, 0);
324 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
326 gen_movl_TN_reg(s, reg, 1);
329 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
331 gen_movl_TN_reg(s, reg, 2);
334 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
336 gen_op_movl_reg_TN[t][reg]();
337 if (reg == 15) {
338 s->is_jmp = DISAS_JUMP;
342 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
344 gen_movl_reg_TN(s, reg, 0);
347 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
349 gen_movl_reg_TN(s, reg, 1);
352 /* Force a TB lookup after an instruction that changes the CPU state. */
353 static inline void gen_lookup_tb(DisasContext *s)
355 gen_op_movl_T0_im(s->pc);
356 gen_movl_reg_T0(s, 15);
357 s->is_jmp = DISAS_UPDATE;
360 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
362 int val, rm, shift, shiftop;
364 if (!(insn & (1 << 25))) {
365 /* immediate */
366 val = insn & 0xfff;
367 if (!(insn & (1 << 23)))
368 val = -val;
369 if (val != 0)
370 gen_op_addl_T1_im(val);
371 } else {
372 /* shift/register */
373 rm = (insn) & 0xf;
374 shift = (insn >> 7) & 0x1f;
375 gen_movl_T2_reg(s, rm);
376 shiftop = (insn >> 5) & 3;
377 if (shift != 0) {
378 gen_shift_T2_im[shiftop](shift);
379 } else if (shiftop != 0) {
380 gen_shift_T2_0[shiftop]();
382 if (!(insn & (1 << 23)))
383 gen_op_subl_T1_T2();
384 else
385 gen_op_addl_T1_T2();
389 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
390 int extra)
392 int val, rm;
394 if (insn & (1 << 22)) {
395 /* immediate */
396 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
397 if (!(insn & (1 << 23)))
398 val = -val;
399 val += extra;
400 if (val != 0)
401 gen_op_addl_T1_im(val);
402 } else {
403 /* register */
404 if (extra)
405 gen_op_addl_T1_im(extra);
406 rm = (insn) & 0xf;
407 gen_movl_T2_reg(s, rm);
408 if (!(insn & (1 << 23)))
409 gen_op_subl_T1_T2();
410 else
411 gen_op_addl_T1_T2();
/* Generate gen_vfp_<name>(dp) helpers that dispatch to the double- or
   single-precision micro-op depending on DP.  */
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(); \
    else \
        gen_op_vfp_##name##s(); \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)

#undef VFP_OP
443 static inline void gen_vfp_ld(DisasContext *s, int dp)
445 if (dp)
446 gen_ldst(vfp_ldd, s);
447 else
448 gen_ldst(vfp_lds, s);
451 static inline void gen_vfp_st(DisasContext *s, int dp)
453 if (dp)
454 gen_ldst(vfp_std, s);
455 else
456 gen_ldst(vfp_sts, s);
459 static inline long
460 vfp_reg_offset (int dp, int reg)
462 if (dp)
463 return offsetof(CPUARMState, vfp.regs[reg]);
464 else if (reg & 1) {
465 return offsetof(CPUARMState, vfp.regs[reg >> 1])
466 + offsetof(CPU_DoubleU, l.upper);
467 } else {
468 return offsetof(CPUARMState, vfp.regs[reg >> 1])
469 + offsetof(CPU_DoubleU, l.lower);
/* Move a VFP register into/out of the F0/F1 working values, using the
   double- or single-precision accessor as appropriate.  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
496 #define ARM_CP_RW_BIT (1 << 20)
498 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
500 int rd;
501 uint32_t offset;
503 rd = (insn >> 16) & 0xf;
504 gen_movl_T1_reg(s, rd);
506 offset = (insn & 0xff) << ((insn >> 7) & 2);
507 if (insn & (1 << 24)) {
508 /* Pre indexed */
509 if (insn & (1 << 23))
510 gen_op_addl_T1_im(offset);
511 else
512 gen_op_addl_T1_im(-offset);
514 if (insn & (1 << 21))
515 gen_movl_reg_T1(s, rd);
516 } else if (insn & (1 << 21)) {
517 /* Post indexed */
518 if (insn & (1 << 23))
519 gen_op_movl_T0_im(offset);
520 else
521 gen_op_movl_T0_im(- offset);
522 gen_op_addl_T0_T1();
523 gen_movl_reg_T0(s, rd);
524 } else if (!(insn & (1 << 23)))
525 return 1;
526 return 0;
529 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
531 int rd = (insn >> 0) & 0xf;
533 if (insn & (1 << 8))
534 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
535 return 1;
536 else
537 gen_op_iwmmxt_movl_T0_wCx(rd);
538 else
539 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
541 gen_op_movl_T1_im(mask);
542 gen_op_andl_T0_T1();
543 return 0;
546 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
547 (ie. an undefined instruction). */
548 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
550 int rd, wrd;
551 int rdhi, rdlo, rd0, rd1, i;
553 if ((insn & 0x0e000e00) == 0x0c000000) {
554 if ((insn & 0x0fe00ff0) == 0x0c400000) {
555 wrd = insn & 0xf;
556 rdlo = (insn >> 12) & 0xf;
557 rdhi = (insn >> 16) & 0xf;
558 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
559 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
560 gen_movl_reg_T0(s, rdlo);
561 gen_movl_reg_T1(s, rdhi);
562 } else { /* TMCRR */
563 gen_movl_T0_reg(s, rdlo);
564 gen_movl_T1_reg(s, rdhi);
565 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
566 gen_op_iwmmxt_set_mup();
568 return 0;
571 wrd = (insn >> 12) & 0xf;
572 if (gen_iwmmxt_address(s, insn))
573 return 1;
574 if (insn & ARM_CP_RW_BIT) {
575 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
576 gen_ldst(ldl, s);
577 gen_op_iwmmxt_movl_wCx_T0(wrd);
578 } else {
579 if (insn & (1 << 8))
580 if (insn & (1 << 22)) /* WLDRD */
581 gen_ldst(iwmmxt_ldq, s);
582 else /* WLDRW wRd */
583 gen_ldst(iwmmxt_ldl, s);
584 else
585 if (insn & (1 << 22)) /* WLDRH */
586 gen_ldst(iwmmxt_ldw, s);
587 else /* WLDRB */
588 gen_ldst(iwmmxt_ldb, s);
589 gen_op_iwmmxt_movq_wRn_M0(wrd);
591 } else {
592 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
593 gen_op_iwmmxt_movl_T0_wCx(wrd);
594 gen_ldst(stl, s);
595 } else {
596 gen_op_iwmmxt_movq_M0_wRn(wrd);
597 if (insn & (1 << 8))
598 if (insn & (1 << 22)) /* WSTRD */
599 gen_ldst(iwmmxt_stq, s);
600 else /* WSTRW wRd */
601 gen_ldst(iwmmxt_stl, s);
602 else
603 if (insn & (1 << 22)) /* WSTRH */
604 gen_ldst(iwmmxt_ldw, s);
605 else /* WSTRB */
606 gen_ldst(iwmmxt_stb, s);
609 return 0;
612 if ((insn & 0x0f000000) != 0x0e000000)
613 return 1;
615 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
616 case 0x000: /* WOR */
617 wrd = (insn >> 12) & 0xf;
618 rd0 = (insn >> 0) & 0xf;
619 rd1 = (insn >> 16) & 0xf;
620 gen_op_iwmmxt_movq_M0_wRn(rd0);
621 gen_op_iwmmxt_orq_M0_wRn(rd1);
622 gen_op_iwmmxt_setpsr_nz();
623 gen_op_iwmmxt_movq_wRn_M0(wrd);
624 gen_op_iwmmxt_set_mup();
625 gen_op_iwmmxt_set_cup();
626 break;
627 case 0x011: /* TMCR */
628 if (insn & 0xf)
629 return 1;
630 rd = (insn >> 12) & 0xf;
631 wrd = (insn >> 16) & 0xf;
632 switch (wrd) {
633 case ARM_IWMMXT_wCID:
634 case ARM_IWMMXT_wCASF:
635 break;
636 case ARM_IWMMXT_wCon:
637 gen_op_iwmmxt_set_cup();
638 /* Fall through. */
639 case ARM_IWMMXT_wCSSF:
640 gen_op_iwmmxt_movl_T0_wCx(wrd);
641 gen_movl_T1_reg(s, rd);
642 gen_op_bicl_T0_T1();
643 gen_op_iwmmxt_movl_wCx_T0(wrd);
644 break;
645 case ARM_IWMMXT_wCGR0:
646 case ARM_IWMMXT_wCGR1:
647 case ARM_IWMMXT_wCGR2:
648 case ARM_IWMMXT_wCGR3:
649 gen_op_iwmmxt_set_cup();
650 gen_movl_reg_T0(s, rd);
651 gen_op_iwmmxt_movl_wCx_T0(wrd);
652 break;
653 default:
654 return 1;
656 break;
657 case 0x100: /* WXOR */
658 wrd = (insn >> 12) & 0xf;
659 rd0 = (insn >> 0) & 0xf;
660 rd1 = (insn >> 16) & 0xf;
661 gen_op_iwmmxt_movq_M0_wRn(rd0);
662 gen_op_iwmmxt_xorq_M0_wRn(rd1);
663 gen_op_iwmmxt_setpsr_nz();
664 gen_op_iwmmxt_movq_wRn_M0(wrd);
665 gen_op_iwmmxt_set_mup();
666 gen_op_iwmmxt_set_cup();
667 break;
668 case 0x111: /* TMRC */
669 if (insn & 0xf)
670 return 1;
671 rd = (insn >> 12) & 0xf;
672 wrd = (insn >> 16) & 0xf;
673 gen_op_iwmmxt_movl_T0_wCx(wrd);
674 gen_movl_reg_T0(s, rd);
675 break;
676 case 0x300: /* WANDN */
677 wrd = (insn >> 12) & 0xf;
678 rd0 = (insn >> 0) & 0xf;
679 rd1 = (insn >> 16) & 0xf;
680 gen_op_iwmmxt_movq_M0_wRn(rd0);
681 gen_op_iwmmxt_negq_M0();
682 gen_op_iwmmxt_andq_M0_wRn(rd1);
683 gen_op_iwmmxt_setpsr_nz();
684 gen_op_iwmmxt_movq_wRn_M0(wrd);
685 gen_op_iwmmxt_set_mup();
686 gen_op_iwmmxt_set_cup();
687 break;
688 case 0x200: /* WAND */
689 wrd = (insn >> 12) & 0xf;
690 rd0 = (insn >> 0) & 0xf;
691 rd1 = (insn >> 16) & 0xf;
692 gen_op_iwmmxt_movq_M0_wRn(rd0);
693 gen_op_iwmmxt_andq_M0_wRn(rd1);
694 gen_op_iwmmxt_setpsr_nz();
695 gen_op_iwmmxt_movq_wRn_M0(wrd);
696 gen_op_iwmmxt_set_mup();
697 gen_op_iwmmxt_set_cup();
698 break;
699 case 0x810: case 0xa10: /* WMADD */
700 wrd = (insn >> 12) & 0xf;
701 rd0 = (insn >> 0) & 0xf;
702 rd1 = (insn >> 16) & 0xf;
703 gen_op_iwmmxt_movq_M0_wRn(rd0);
704 if (insn & (1 << 21))
705 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
706 else
707 gen_op_iwmmxt_madduq_M0_wRn(rd1);
708 gen_op_iwmmxt_movq_wRn_M0(wrd);
709 gen_op_iwmmxt_set_mup();
710 break;
711 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
712 wrd = (insn >> 12) & 0xf;
713 rd0 = (insn >> 16) & 0xf;
714 rd1 = (insn >> 0) & 0xf;
715 gen_op_iwmmxt_movq_M0_wRn(rd0);
716 switch ((insn >> 22) & 3) {
717 case 0:
718 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
719 break;
720 case 1:
721 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
722 break;
723 case 2:
724 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
725 break;
726 case 3:
727 return 1;
729 gen_op_iwmmxt_movq_wRn_M0(wrd);
730 gen_op_iwmmxt_set_mup();
731 gen_op_iwmmxt_set_cup();
732 break;
733 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
734 wrd = (insn >> 12) & 0xf;
735 rd0 = (insn >> 16) & 0xf;
736 rd1 = (insn >> 0) & 0xf;
737 gen_op_iwmmxt_movq_M0_wRn(rd0);
738 switch ((insn >> 22) & 3) {
739 case 0:
740 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
741 break;
742 case 1:
743 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
744 break;
745 case 2:
746 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
747 break;
748 case 3:
749 return 1;
751 gen_op_iwmmxt_movq_wRn_M0(wrd);
752 gen_op_iwmmxt_set_mup();
753 gen_op_iwmmxt_set_cup();
754 break;
755 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
756 wrd = (insn >> 12) & 0xf;
757 rd0 = (insn >> 16) & 0xf;
758 rd1 = (insn >> 0) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0);
760 if (insn & (1 << 22))
761 gen_op_iwmmxt_sadw_M0_wRn(rd1);
762 else
763 gen_op_iwmmxt_sadb_M0_wRn(rd1);
764 if (!(insn & (1 << 20)))
765 gen_op_iwmmxt_addl_M0_wRn(wrd);
766 gen_op_iwmmxt_movq_wRn_M0(wrd);
767 gen_op_iwmmxt_set_mup();
768 break;
769 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
770 wrd = (insn >> 12) & 0xf;
771 rd0 = (insn >> 16) & 0xf;
772 rd1 = (insn >> 0) & 0xf;
773 gen_op_iwmmxt_movq_M0_wRn(rd0);
774 if (insn & (1 << 21))
775 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
776 else
777 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
778 gen_op_iwmmxt_movq_wRn_M0(wrd);
779 gen_op_iwmmxt_set_mup();
780 break;
781 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
782 wrd = (insn >> 12) & 0xf;
783 rd0 = (insn >> 16) & 0xf;
784 rd1 = (insn >> 0) & 0xf;
785 gen_op_iwmmxt_movq_M0_wRn(rd0);
786 if (insn & (1 << 21))
787 gen_op_iwmmxt_macsw_M0_wRn(rd1);
788 else
789 gen_op_iwmmxt_macuw_M0_wRn(rd1);
790 if (!(insn & (1 << 20))) {
791 if (insn & (1 << 21))
792 gen_op_iwmmxt_addsq_M0_wRn(wrd);
793 else
794 gen_op_iwmmxt_adduq_M0_wRn(wrd);
796 gen_op_iwmmxt_movq_wRn_M0(wrd);
797 gen_op_iwmmxt_set_mup();
798 break;
799 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
800 wrd = (insn >> 12) & 0xf;
801 rd0 = (insn >> 16) & 0xf;
802 rd1 = (insn >> 0) & 0xf;
803 gen_op_iwmmxt_movq_M0_wRn(rd0);
804 switch ((insn >> 22) & 3) {
805 case 0:
806 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
807 break;
808 case 1:
809 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
810 break;
811 case 2:
812 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
813 break;
814 case 3:
815 return 1;
817 gen_op_iwmmxt_movq_wRn_M0(wrd);
818 gen_op_iwmmxt_set_mup();
819 gen_op_iwmmxt_set_cup();
820 break;
821 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
822 wrd = (insn >> 12) & 0xf;
823 rd0 = (insn >> 16) & 0xf;
824 rd1 = (insn >> 0) & 0xf;
825 gen_op_iwmmxt_movq_M0_wRn(rd0);
826 if (insn & (1 << 22))
827 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
828 else
829 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
830 gen_op_iwmmxt_movq_wRn_M0(wrd);
831 gen_op_iwmmxt_set_mup();
832 gen_op_iwmmxt_set_cup();
833 break;
834 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
835 wrd = (insn >> 12) & 0xf;
836 rd0 = (insn >> 16) & 0xf;
837 rd1 = (insn >> 0) & 0xf;
838 gen_op_iwmmxt_movq_M0_wRn(rd0);
839 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
840 gen_op_movl_T1_im(7);
841 gen_op_andl_T0_T1();
842 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
843 gen_op_iwmmxt_movq_wRn_M0(wrd);
844 gen_op_iwmmxt_set_mup();
845 break;
846 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
847 rd = (insn >> 12) & 0xf;
848 wrd = (insn >> 16) & 0xf;
849 gen_movl_T0_reg(s, rd);
850 gen_op_iwmmxt_movq_M0_wRn(wrd);
851 switch ((insn >> 6) & 3) {
852 case 0:
853 gen_op_movl_T1_im(0xff);
854 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
855 break;
856 case 1:
857 gen_op_movl_T1_im(0xffff);
858 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
859 break;
860 case 2:
861 gen_op_movl_T1_im(0xffffffff);
862 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
863 break;
864 case 3:
865 return 1;
867 gen_op_iwmmxt_movq_wRn_M0(wrd);
868 gen_op_iwmmxt_set_mup();
869 break;
870 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
871 rd = (insn >> 12) & 0xf;
872 wrd = (insn >> 16) & 0xf;
873 if (rd == 15)
874 return 1;
875 gen_op_iwmmxt_movq_M0_wRn(wrd);
876 switch ((insn >> 22) & 3) {
877 case 0:
878 if (insn & 8)
879 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
880 else {
881 gen_op_movl_T1_im(0xff);
882 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
884 break;
885 case 1:
886 if (insn & 8)
887 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
888 else {
889 gen_op_movl_T1_im(0xffff);
890 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
892 break;
893 case 2:
894 gen_op_movl_T1_im(0xffffffff);
895 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
896 break;
897 case 3:
898 return 1;
900 gen_op_movl_reg_TN[0][rd]();
901 break;
902 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
903 if ((insn & 0x000ff008) != 0x0003f000)
904 return 1;
905 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
906 switch ((insn >> 22) & 3) {
907 case 0:
908 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
909 break;
910 case 1:
911 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
912 break;
913 case 2:
914 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
915 break;
916 case 3:
917 return 1;
919 gen_op_shll_T1_im(28);
920 gen_op_movl_T0_T1();
921 gen_op_movl_cpsr_T0(0xf0000000);
922 break;
923 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
924 rd = (insn >> 12) & 0xf;
925 wrd = (insn >> 16) & 0xf;
926 gen_movl_T0_reg(s, rd);
927 switch ((insn >> 6) & 3) {
928 case 0:
929 gen_op_iwmmxt_bcstb_M0_T0();
930 break;
931 case 1:
932 gen_op_iwmmxt_bcstw_M0_T0();
933 break;
934 case 2:
935 gen_op_iwmmxt_bcstl_M0_T0();
936 break;
937 case 3:
938 return 1;
940 gen_op_iwmmxt_movq_wRn_M0(wrd);
941 gen_op_iwmmxt_set_mup();
942 break;
943 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
944 if ((insn & 0x000ff00f) != 0x0003f000)
945 return 1;
946 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
947 switch ((insn >> 22) & 3) {
948 case 0:
949 for (i = 0; i < 7; i ++) {
950 gen_op_shll_T1_im(4);
951 gen_op_andl_T0_T1();
953 break;
954 case 1:
955 for (i = 0; i < 3; i ++) {
956 gen_op_shll_T1_im(8);
957 gen_op_andl_T0_T1();
959 break;
960 case 2:
961 gen_op_shll_T1_im(16);
962 gen_op_andl_T0_T1();
963 break;
964 case 3:
965 return 1;
967 gen_op_movl_cpsr_T0(0xf0000000);
968 break;
969 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
970 wrd = (insn >> 12) & 0xf;
971 rd0 = (insn >> 16) & 0xf;
972 gen_op_iwmmxt_movq_M0_wRn(rd0);
973 switch ((insn >> 22) & 3) {
974 case 0:
975 gen_op_iwmmxt_addcb_M0();
976 break;
977 case 1:
978 gen_op_iwmmxt_addcw_M0();
979 break;
980 case 2:
981 gen_op_iwmmxt_addcl_M0();
982 break;
983 case 3:
984 return 1;
986 gen_op_iwmmxt_movq_wRn_M0(wrd);
987 gen_op_iwmmxt_set_mup();
988 break;
989 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
990 if ((insn & 0x000ff00f) != 0x0003f000)
991 return 1;
992 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
993 switch ((insn >> 22) & 3) {
994 case 0:
995 for (i = 0; i < 7; i ++) {
996 gen_op_shll_T1_im(4);
997 gen_op_orl_T0_T1();
999 break;
1000 case 1:
1001 for (i = 0; i < 3; i ++) {
1002 gen_op_shll_T1_im(8);
1003 gen_op_orl_T0_T1();
1005 break;
1006 case 2:
1007 gen_op_shll_T1_im(16);
1008 gen_op_orl_T0_T1();
1009 break;
1010 case 3:
1011 return 1;
1013 gen_op_movl_T1_im(0xf0000000);
1014 gen_op_andl_T0_T1();
1015 gen_op_movl_cpsr_T0(0xf0000000);
1016 break;
1017 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1018 rd = (insn >> 12) & 0xf;
1019 rd0 = (insn >> 16) & 0xf;
1020 if ((insn & 0xf) != 0)
1021 return 1;
1022 gen_op_iwmmxt_movq_M0_wRn(rd0);
1023 switch ((insn >> 22) & 3) {
1024 case 0:
1025 gen_op_iwmmxt_msbb_T0_M0();
1026 break;
1027 case 1:
1028 gen_op_iwmmxt_msbw_T0_M0();
1029 break;
1030 case 2:
1031 gen_op_iwmmxt_msbl_T0_M0();
1032 break;
1033 case 3:
1034 return 1;
1036 gen_movl_reg_T0(s, rd);
1037 break;
1038 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1039 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1040 wrd = (insn >> 12) & 0xf;
1041 rd0 = (insn >> 16) & 0xf;
1042 rd1 = (insn >> 0) & 0xf;
1043 gen_op_iwmmxt_movq_M0_wRn(rd0);
1044 switch ((insn >> 22) & 3) {
1045 case 0:
1046 if (insn & (1 << 21))
1047 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1048 else
1049 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1050 break;
1051 case 1:
1052 if (insn & (1 << 21))
1053 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1054 else
1055 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1056 break;
1057 case 2:
1058 if (insn & (1 << 21))
1059 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1060 else
1061 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1062 break;
1063 case 3:
1064 return 1;
1066 gen_op_iwmmxt_movq_wRn_M0(wrd);
1067 gen_op_iwmmxt_set_mup();
1068 gen_op_iwmmxt_set_cup();
1069 break;
1070 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1071 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1072 wrd = (insn >> 12) & 0xf;
1073 rd0 = (insn >> 16) & 0xf;
1074 gen_op_iwmmxt_movq_M0_wRn(rd0);
1075 switch ((insn >> 22) & 3) {
1076 case 0:
1077 if (insn & (1 << 21))
1078 gen_op_iwmmxt_unpacklsb_M0();
1079 else
1080 gen_op_iwmmxt_unpacklub_M0();
1081 break;
1082 case 1:
1083 if (insn & (1 << 21))
1084 gen_op_iwmmxt_unpacklsw_M0();
1085 else
1086 gen_op_iwmmxt_unpackluw_M0();
1087 break;
1088 case 2:
1089 if (insn & (1 << 21))
1090 gen_op_iwmmxt_unpacklsl_M0();
1091 else
1092 gen_op_iwmmxt_unpacklul_M0();
1093 break;
1094 case 3:
1095 return 1;
1097 gen_op_iwmmxt_movq_wRn_M0(wrd);
1098 gen_op_iwmmxt_set_mup();
1099 gen_op_iwmmxt_set_cup();
1100 break;
1101 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1102 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1103 wrd = (insn >> 12) & 0xf;
1104 rd0 = (insn >> 16) & 0xf;
1105 gen_op_iwmmxt_movq_M0_wRn(rd0);
1106 switch ((insn >> 22) & 3) {
1107 case 0:
1108 if (insn & (1 << 21))
1109 gen_op_iwmmxt_unpackhsb_M0();
1110 else
1111 gen_op_iwmmxt_unpackhub_M0();
1112 break;
1113 case 1:
1114 if (insn & (1 << 21))
1115 gen_op_iwmmxt_unpackhsw_M0();
1116 else
1117 gen_op_iwmmxt_unpackhuw_M0();
1118 break;
1119 case 2:
1120 if (insn & (1 << 21))
1121 gen_op_iwmmxt_unpackhsl_M0();
1122 else
1123 gen_op_iwmmxt_unpackhul_M0();
1124 break;
1125 case 3:
1126 return 1;
1128 gen_op_iwmmxt_movq_wRn_M0(wrd);
1129 gen_op_iwmmxt_set_mup();
1130 gen_op_iwmmxt_set_cup();
1131 break;
1132 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1133 case 0x214: case 0x614: case 0xa14: case 0xe14:
1134 wrd = (insn >> 12) & 0xf;
1135 rd0 = (insn >> 16) & 0xf;
1136 gen_op_iwmmxt_movq_M0_wRn(rd0);
1137 if (gen_iwmmxt_shift(insn, 0xff))
1138 return 1;
1139 switch ((insn >> 22) & 3) {
1140 case 0:
1141 return 1;
1142 case 1:
1143 gen_op_iwmmxt_srlw_M0_T0();
1144 break;
1145 case 2:
1146 gen_op_iwmmxt_srll_M0_T0();
1147 break;
1148 case 3:
1149 gen_op_iwmmxt_srlq_M0_T0();
1150 break;
1152 gen_op_iwmmxt_movq_wRn_M0(wrd);
1153 gen_op_iwmmxt_set_mup();
1154 gen_op_iwmmxt_set_cup();
1155 break;
1156 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1157 case 0x014: case 0x414: case 0x814: case 0xc14:
1158 wrd = (insn >> 12) & 0xf;
1159 rd0 = (insn >> 16) & 0xf;
1160 gen_op_iwmmxt_movq_M0_wRn(rd0);
1161 if (gen_iwmmxt_shift(insn, 0xff))
1162 return 1;
1163 switch ((insn >> 22) & 3) {
1164 case 0:
1165 return 1;
1166 case 1:
1167 gen_op_iwmmxt_sraw_M0_T0();
1168 break;
1169 case 2:
1170 gen_op_iwmmxt_sral_M0_T0();
1171 break;
1172 case 3:
1173 gen_op_iwmmxt_sraq_M0_T0();
1174 break;
1176 gen_op_iwmmxt_movq_wRn_M0(wrd);
1177 gen_op_iwmmxt_set_mup();
1178 gen_op_iwmmxt_set_cup();
1179 break;
1180 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1181 case 0x114: case 0x514: case 0x914: case 0xd14:
1182 wrd = (insn >> 12) & 0xf;
1183 rd0 = (insn >> 16) & 0xf;
1184 gen_op_iwmmxt_movq_M0_wRn(rd0);
1185 if (gen_iwmmxt_shift(insn, 0xff))
1186 return 1;
1187 switch ((insn >> 22) & 3) {
1188 case 0:
1189 return 1;
1190 case 1:
1191 gen_op_iwmmxt_sllw_M0_T0();
1192 break;
1193 case 2:
1194 gen_op_iwmmxt_slll_M0_T0();
1195 break;
1196 case 3:
1197 gen_op_iwmmxt_sllq_M0_T0();
1198 break;
1200 gen_op_iwmmxt_movq_wRn_M0(wrd);
1201 gen_op_iwmmxt_set_mup();
1202 gen_op_iwmmxt_set_cup();
1203 break;
1204 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1205 case 0x314: case 0x714: case 0xb14: case 0xf14:
1206 wrd = (insn >> 12) & 0xf;
1207 rd0 = (insn >> 16) & 0xf;
1208 gen_op_iwmmxt_movq_M0_wRn(rd0);
1209 switch ((insn >> 22) & 3) {
1210 case 0:
1211 return 1;
1212 case 1:
1213 if (gen_iwmmxt_shift(insn, 0xf))
1214 return 1;
1215 gen_op_iwmmxt_rorw_M0_T0();
1216 break;
1217 case 2:
1218 if (gen_iwmmxt_shift(insn, 0x1f))
1219 return 1;
1220 gen_op_iwmmxt_rorl_M0_T0();
1221 break;
1222 case 3:
1223 if (gen_iwmmxt_shift(insn, 0x3f))
1224 return 1;
1225 gen_op_iwmmxt_rorq_M0_T0();
1226 break;
1228 gen_op_iwmmxt_movq_wRn_M0(wrd);
1229 gen_op_iwmmxt_set_mup();
1230 gen_op_iwmmxt_set_cup();
1231 break;
1232 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1233 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1234 wrd = (insn >> 12) & 0xf;
1235 rd0 = (insn >> 16) & 0xf;
1236 rd1 = (insn >> 0) & 0xf;
1237 gen_op_iwmmxt_movq_M0_wRn(rd0);
1238 switch ((insn >> 22) & 3) {
1239 case 0:
1240 if (insn & (1 << 21))
1241 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1242 else
1243 gen_op_iwmmxt_minub_M0_wRn(rd1);
1244 break;
1245 case 1:
1246 if (insn & (1 << 21))
1247 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1248 else
1249 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1250 break;
1251 case 2:
1252 if (insn & (1 << 21))
1253 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1254 else
1255 gen_op_iwmmxt_minul_M0_wRn(rd1);
1256 break;
1257 case 3:
1258 return 1;
1260 gen_op_iwmmxt_movq_wRn_M0(wrd);
1261 gen_op_iwmmxt_set_mup();
1262 break;
1263 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1264 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1265 wrd = (insn >> 12) & 0xf;
1266 rd0 = (insn >> 16) & 0xf;
1267 rd1 = (insn >> 0) & 0xf;
1268 gen_op_iwmmxt_movq_M0_wRn(rd0);
1269 switch ((insn >> 22) & 3) {
1270 case 0:
1271 if (insn & (1 << 21))
1272 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1273 else
1274 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1275 break;
1276 case 1:
1277 if (insn & (1 << 21))
1278 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1279 else
1280 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1281 break;
1282 case 2:
1283 if (insn & (1 << 21))
1284 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1285 else
1286 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1287 break;
1288 case 3:
1289 return 1;
1291 gen_op_iwmmxt_movq_wRn_M0(wrd);
1292 gen_op_iwmmxt_set_mup();
1293 break;
1294 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1295 case 0x402: case 0x502: case 0x602: case 0x702:
1296 wrd = (insn >> 12) & 0xf;
1297 rd0 = (insn >> 16) & 0xf;
1298 rd1 = (insn >> 0) & 0xf;
1299 gen_op_iwmmxt_movq_M0_wRn(rd0);
1300 gen_op_movl_T0_im((insn >> 20) & 3);
1301 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1302 gen_op_iwmmxt_movq_wRn_M0(wrd);
1303 gen_op_iwmmxt_set_mup();
1304 break;
1305 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1306 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1307 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1308 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1309 wrd = (insn >> 12) & 0xf;
1310 rd0 = (insn >> 16) & 0xf;
1311 rd1 = (insn >> 0) & 0xf;
1312 gen_op_iwmmxt_movq_M0_wRn(rd0);
1313 switch ((insn >> 20) & 0xf) {
1314 case 0x0:
1315 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1316 break;
1317 case 0x1:
1318 gen_op_iwmmxt_subub_M0_wRn(rd1);
1319 break;
1320 case 0x3:
1321 gen_op_iwmmxt_subsb_M0_wRn(rd1);
1322 break;
1323 case 0x4:
1324 gen_op_iwmmxt_subnw_M0_wRn(rd1);
1325 break;
1326 case 0x5:
1327 gen_op_iwmmxt_subuw_M0_wRn(rd1);
1328 break;
1329 case 0x7:
1330 gen_op_iwmmxt_subsw_M0_wRn(rd1);
1331 break;
1332 case 0x8:
1333 gen_op_iwmmxt_subnl_M0_wRn(rd1);
1334 break;
1335 case 0x9:
1336 gen_op_iwmmxt_subul_M0_wRn(rd1);
1337 break;
1338 case 0xb:
1339 gen_op_iwmmxt_subsl_M0_wRn(rd1);
1340 break;
1341 default:
1342 return 1;
1344 gen_op_iwmmxt_movq_wRn_M0(wrd);
1345 gen_op_iwmmxt_set_mup();
1346 gen_op_iwmmxt_set_cup();
1347 break;
1348 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1349 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1350 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1351 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1352 wrd = (insn >> 12) & 0xf;
1353 rd0 = (insn >> 16) & 0xf;
1354 gen_op_iwmmxt_movq_M0_wRn(rd0);
1355 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1356 gen_op_iwmmxt_shufh_M0_T0();
1357 gen_op_iwmmxt_movq_wRn_M0(wrd);
1358 gen_op_iwmmxt_set_mup();
1359 gen_op_iwmmxt_set_cup();
1360 break;
1361 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1362 case 0x418: case 0x518: case 0x618: case 0x718:
1363 case 0x818: case 0x918: case 0xa18: case 0xb18:
1364 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1365 wrd = (insn >> 12) & 0xf;
1366 rd0 = (insn >> 16) & 0xf;
1367 rd1 = (insn >> 0) & 0xf;
1368 gen_op_iwmmxt_movq_M0_wRn(rd0);
1369 switch ((insn >> 20) & 0xf) {
1370 case 0x0:
1371 gen_op_iwmmxt_addnb_M0_wRn(rd1);
1372 break;
1373 case 0x1:
1374 gen_op_iwmmxt_addub_M0_wRn(rd1);
1375 break;
1376 case 0x3:
1377 gen_op_iwmmxt_addsb_M0_wRn(rd1);
1378 break;
1379 case 0x4:
1380 gen_op_iwmmxt_addnw_M0_wRn(rd1);
1381 break;
1382 case 0x5:
1383 gen_op_iwmmxt_adduw_M0_wRn(rd1);
1384 break;
1385 case 0x7:
1386 gen_op_iwmmxt_addsw_M0_wRn(rd1);
1387 break;
1388 case 0x8:
1389 gen_op_iwmmxt_addnl_M0_wRn(rd1);
1390 break;
1391 case 0x9:
1392 gen_op_iwmmxt_addul_M0_wRn(rd1);
1393 break;
1394 case 0xb:
1395 gen_op_iwmmxt_addsl_M0_wRn(rd1);
1396 break;
1397 default:
1398 return 1;
1400 gen_op_iwmmxt_movq_wRn_M0(wrd);
1401 gen_op_iwmmxt_set_mup();
1402 gen_op_iwmmxt_set_cup();
1403 break;
1404 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1405 case 0x408: case 0x508: case 0x608: case 0x708:
1406 case 0x808: case 0x908: case 0xa08: case 0xb08:
1407 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1408 wrd = (insn >> 12) & 0xf;
1409 rd0 = (insn >> 16) & 0xf;
1410 rd1 = (insn >> 0) & 0xf;
1411 gen_op_iwmmxt_movq_M0_wRn(rd0);
1412 if (!(insn & (1 << 20)))
1413 return 1;
1414 switch ((insn >> 22) & 3) {
1415 case 0:
1416 return 1;
1417 case 1:
1418 if (insn & (1 << 21))
1419 gen_op_iwmmxt_packsw_M0_wRn(rd1);
1420 else
1421 gen_op_iwmmxt_packuw_M0_wRn(rd1);
1422 break;
1423 case 2:
1424 if (insn & (1 << 21))
1425 gen_op_iwmmxt_packsl_M0_wRn(rd1);
1426 else
1427 gen_op_iwmmxt_packul_M0_wRn(rd1);
1428 break;
1429 case 3:
1430 if (insn & (1 << 21))
1431 gen_op_iwmmxt_packsq_M0_wRn(rd1);
1432 else
1433 gen_op_iwmmxt_packuq_M0_wRn(rd1);
1434 break;
1436 gen_op_iwmmxt_movq_wRn_M0(wrd);
1437 gen_op_iwmmxt_set_mup();
1438 gen_op_iwmmxt_set_cup();
1439 break;
1440 case 0x201: case 0x203: case 0x205: case 0x207:
1441 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1442 case 0x211: case 0x213: case 0x215: case 0x217:
1443 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1444 wrd = (insn >> 5) & 0xf;
1445 rd0 = (insn >> 12) & 0xf;
1446 rd1 = (insn >> 0) & 0xf;
1447 if (rd0 == 0xf || rd1 == 0xf)
1448 return 1;
1449 gen_op_iwmmxt_movq_M0_wRn(wrd);
1450 switch ((insn >> 16) & 0xf) {
1451 case 0x0: /* TMIA */
1452 gen_op_movl_TN_reg[0][rd0]();
1453 gen_op_movl_TN_reg[1][rd1]();
1454 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1455 break;
1456 case 0x8: /* TMIAPH */
1457 gen_op_movl_TN_reg[0][rd0]();
1458 gen_op_movl_TN_reg[1][rd1]();
1459 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1460 break;
1461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1462 gen_op_movl_TN_reg[1][rd0]();
1463 if (insn & (1 << 16))
1464 gen_op_shrl_T1_im(16);
1465 gen_op_movl_T0_T1();
1466 gen_op_movl_TN_reg[1][rd1]();
1467 if (insn & (1 << 17))
1468 gen_op_shrl_T1_im(16);
1469 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1470 break;
1471 default:
1472 return 1;
1474 gen_op_iwmmxt_movq_wRn_M0(wrd);
1475 gen_op_iwmmxt_set_mup();
1476 break;
1477 default:
1478 return 1;
1481 return 0;
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 is handled; other values are undefined here.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* MIA */
            /* 32x32 signed multiply-accumulate into the accumulator.  */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:					/* MIAPH */
            /* Packed-halfword signed multiply-accumulate.  */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:					/* MIABB */
        case 0xd:					/* MIABT */
        case 0xe:					/* MIATB */
        case 0xf:					/* MIATT */
            /* 16x16 multiply-accumulate: bits 16 and 17 select the top
               (shift right by 16) or bottom halfword of each operand.  */
            gen_op_movl_TN_reg[1][rd0]();
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_op_movl_TN_reg[1][rd1]();
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only accumulator 0 is handled.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {			/* MRA */
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
            gen_op_movl_reg_TN[0][rdlo]();
            /* Keep only bits [39:32] of the 40-bit accumulator in the
               high word before writing it back.  */
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_op_movl_reg_TN[0][rdhi]();
        } else {					/* MAR */
            gen_op_movl_TN_reg[0][rdlo]();
            gen_op_movl_TN_reg[1][rdhi]();
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
1557 /* Disassemble system coprocessor instruction. Return nonzero if
1558 instruction is not defined. */
1559 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1561 uint32_t rd = (insn >> 12) & 0xf;
1562 uint32_t cp = (insn >> 8) & 0xf;
1563 if (IS_USER(s)) {
1564 return 1;
1567 if (insn & ARM_CP_RW_BIT) {
1568 if (!env->cp[cp].cp_read)
1569 return 1;
1570 gen_op_movl_T0_im((uint32_t) s->pc);
1571 gen_op_movl_reg_TN[0][15]();
1572 gen_op_movl_T0_cp(insn);
1573 gen_movl_reg_T0(s, rd);
1574 } else {
1575 if (!env->cp[cp].cp_write)
1576 return 1;
1577 gen_op_movl_T0_im((uint32_t) s->pc);
1578 gen_op_movl_reg_TN[0][15]();
1579 gen_movl_T0_reg(s, rd);
1580 gen_op_movl_cp_T0(insn);
1582 return 0;
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* ??? Some cp15 registers are accessible from userspace.  */
    if (IS_USER(s)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  Record the resume PC in r15, emit the
           wfi helper, and stop translating this block.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_op_wfi();
        s->is_jmp = DISAS_JUMP;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        /* MRC: read a cp15 register into T0.  */
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        /* MCR: write rd out to a cp15 register.  */
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from fpexc and fpsid.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != 0 && rn != 8)
            return 1;
    }
    /* Double precision when the coprocessor number field is 0xb.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            if ((insn & 0x6f) != 0x00)
                return 1;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & 0x80)
                    return 1;
                rn = (insn >> 16) & 0xf;
                /* Get the existing value even for arm->vfp moves because
                   we only set half the register.  */
                gen_mov_F0_vreg(1, rn);
                gen_op_vfp_mrrd();
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    /* Bit 21 selects the high half of the double.  */
                    if (insn & (1 << 21))
                        gen_movl_reg_T1(s, rd);
                    else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21))
                        gen_movl_T1_reg(s, rd);
                    else
                        gen_movl_T0_reg(s, rd);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(dp, rn);
                }
            } else {
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_FPEXC:
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            /* fmstat (rd == 15) reads only the flag bits.  */
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_op_vfp_movl_fpscr_T0();
                            /* FPSCR changes (eg. vector length) affect
                               decoding, so end the TB.  */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    if (insn & (1 << 7))
                        return 1;
                    rn = (insn >> 16) & 0xf;
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                } else {
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    if (insn & (1 << 5))
                        return 1;
                    rm = insn & 0xf;
                }
            } else {
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                } else
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
            }

            veclen = env->vfp.vec_len;
            /* Extension-space ops other than cpy/abs/neg/sqrt are never
               vectorized.  */
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & (1 << 22))) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & (1 << 5))
                    return 1;
                rm = insn & 0xf;
            } else
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                rd = (insn >> 12) & 0xf;
            else
                rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
            gen_movl_T1_reg(s, rn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                /* Element stride: 8 bytes per double, 4 per single.  */
                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
2109 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
2111 TranslationBlock *tb;
2113 tb = s->tb;
2114 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2115 if (n == 0)
2116 gen_op_goto_tb0(TBPARAM(tb));
2117 else
2118 gen_op_goto_tb1(TBPARAM(tb));
2119 gen_op_movl_T0_im(dest);
2120 gen_op_movl_r15_T0();
2121 gen_op_movl_T0_im((long)tb + n);
2122 gen_op_exit_tb();
2123 } else {
2124 gen_op_movl_T0_im(dest);
2125 gen_op_movl_r15_T0();
2126 gen_op_movl_T0_0();
2127 gen_op_exit_tb();
2131 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2133 if (__builtin_expect(s->singlestep_enabled, 0)) {
2134 /* An indirect jump so that we still trigger the debug exception. */
2135 if (s->thumb)
2136 dest |= 1;
2137 gen_op_movl_T0_im(dest);
2138 gen_bx(s);
2139 } else {
2140 gen_goto_tb(s, 0, dest);
2141 s->is_jmp = DISAS_TB_JUMP;
/* Emit a signed 16x16 multiply of T0 and T1 into T0.  x and y select
   the top (nonzero: arithmetic shift right by 16) or bottom (zero:
   sign-extend halfword) half of each operand.  */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
2158 /* Return the mask of PSR bits set by a MSR instruction. */
2159 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
2160 uint32_t mask;
2162 mask = 0;
2163 if (flags & (1 << 0))
2164 mask |= 0xff;
2165 if (flags & (1 << 1))
2166 mask |= 0xff00;
2167 if (flags & (1 << 2))
2168 mask |= 0xff0000;
2169 if (flags & (1 << 3))
2170 mask |= 0xff000000;
2171 /* Mask out undefined bits. */
2172 mask &= 0xf90f03ff;
2173 /* Mask out state bits. */
2174 if (!spsr)
2175 mask &= ~0x01000020;
2176 /* Mask out privileged bits. */
2177 if (IS_USER(s))
2178 mask &= 0xf80f0200;
2179 return mask;
2182 /* Returns nonzero if access to the PSR is not permitted. */
2183 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
2185 if (spsr) {
2186 /* ??? This is also undefined in system mode. */
2187 if (IS_USER(s))
2188 return 1;
2189 gen_op_movl_spsr_T0(mask);
2190 } else {
2191 gen_op_movl_cpsr_T0(mask);
2193 gen_lookup_tb(s);
2194 return 0;
/* Emit an exception return: the caller has placed the new program
   counter in T0 (it is stored to r15 here), then CPSR is restored
   from SPSR and translation of this block stops.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    s->is_jmp = DISAS_UPDATE;
}
2205 static void disas_arm_insn(CPUState * env, DisasContext *s)
2207 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
2209 insn = ldl_code(s->pc);
2210 s->pc += 4;
2212 cond = insn >> 28;
2213 if (cond == 0xf){
2214 /* Unconditional instructions. */
2215 if ((insn & 0x0d70f000) == 0x0550f000)
2216 return; /* PLD */
2217 else if ((insn & 0x0e000000) == 0x0a000000) {
2218 /* branch link and change to thumb (blx <offset>) */
2219 int32_t offset;
2221 val = (uint32_t)s->pc;
2222 gen_op_movl_T0_im(val);
2223 gen_movl_reg_T0(s, 14);
2224 /* Sign-extend the 24-bit offset */
2225 offset = (((int32_t)insn) << 8) >> 8;
2226 /* offset * 4 + bit24 * 2 + (thumb bit) */
2227 val += (offset << 2) | ((insn >> 23) & 2) | 1;
2228 /* pipeline offset */
2229 val += 4;
2230 gen_op_movl_T0_im(val);
2231 gen_bx(s);
2232 return;
2233 } else if ((insn & 0x0e000f00) == 0x0c000100) {
2234 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2235 /* iWMMXt register transfer. */
2236 if (env->cp15.c15_cpar & (1 << 1))
2237 if (!disas_iwmmxt_insn(env, s, insn))
2238 return;
2240 } else if ((insn & 0x0fe00000) == 0x0c400000) {
2241 /* Coprocessor double register transfer. */
2242 } else if ((insn & 0x0f000010) == 0x0e000010) {
2243 /* Additional coprocessor register transfer. */
2244 } else if ((insn & 0x0ff10010) == 0x01000000) {
2245 /* cps (privileged) */
2246 } else if ((insn & 0x0ffffdff) == 0x01010000) {
2247 /* setend */
2248 if (insn & (1 << 9)) {
2249 /* BE8 mode not implemented. */
2250 goto illegal_op;
2252 return;
2254 goto illegal_op;
2256 if (cond != 0xe) {
2257 /* if not always execute, we generate a conditional jump to
2258 next instruction */
2259 s->condlabel = gen_new_label();
2260 gen_test_cc[cond ^ 1](s->condlabel);
2261 s->condjmp = 1;
2262 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2263 //s->is_jmp = DISAS_JUMP_NEXT;
2265 if ((insn & 0x0f900000) == 0x03000000) {
2266 if ((insn & 0x0fb0f000) != 0x0320f000)
2267 goto illegal_op;
2268 /* CPSR = immediate */
2269 val = insn & 0xff;
2270 shift = ((insn >> 8) & 0xf) * 2;
2271 if (shift)
2272 val = (val >> shift) | (val << (32 - shift));
2273 gen_op_movl_T0_im(val);
2274 i = ((insn & (1 << 22)) != 0);
2275 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
2276 goto illegal_op;
2277 } else if ((insn & 0x0f900000) == 0x01000000
2278 && (insn & 0x00000090) != 0x00000090) {
2279 /* miscellaneous instructions */
2280 op1 = (insn >> 21) & 3;
2281 sh = (insn >> 4) & 0xf;
2282 rm = insn & 0xf;
2283 switch (sh) {
2284 case 0x0: /* move program status register */
2285 if (op1 & 1) {
2286 /* PSR = reg */
2287 gen_movl_T0_reg(s, rm);
2288 i = ((op1 & 2) != 0);
2289 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
2290 goto illegal_op;
2291 } else {
2292 /* reg = PSR */
2293 rd = (insn >> 12) & 0xf;
2294 if (op1 & 2) {
2295 if (IS_USER(s))
2296 goto illegal_op;
2297 gen_op_movl_T0_spsr();
2298 } else {
2299 gen_op_movl_T0_cpsr();
2301 gen_movl_reg_T0(s, rd);
2303 break;
2304 case 0x1:
2305 if (op1 == 1) {
2306 /* branch/exchange thumb (bx). */
2307 gen_movl_T0_reg(s, rm);
2308 gen_bx(s);
2309 } else if (op1 == 3) {
2310 /* clz */
2311 rd = (insn >> 12) & 0xf;
2312 gen_movl_T0_reg(s, rm);
2313 gen_op_clz_T0();
2314 gen_movl_reg_T0(s, rd);
2315 } else {
2316 goto illegal_op;
2318 break;
2319 case 0x2:
2320 if (op1 == 1) {
2321 ARCH(5J); /* bxj */
2322 /* Trivial implementation equivalent to bx. */
2323 gen_movl_T0_reg(s, rm);
2324 gen_bx(s);
2325 } else {
2326 goto illegal_op;
2328 break;
2329 case 0x3:
2330 if (op1 != 1)
2331 goto illegal_op;
2333 /* branch link/exchange thumb (blx) */
2334 val = (uint32_t)s->pc;
2335 gen_op_movl_T1_im(val);
2336 gen_movl_T0_reg(s, rm);
2337 gen_movl_reg_T1(s, 14);
2338 gen_bx(s);
2339 break;
2340 case 0x5: /* saturating add/subtract */
2341 rd = (insn >> 12) & 0xf;
2342 rn = (insn >> 16) & 0xf;
2343 gen_movl_T0_reg(s, rm);
2344 gen_movl_T1_reg(s, rn);
2345 if (op1 & 2)
2346 gen_op_double_T1_saturate();
2347 if (op1 & 1)
2348 gen_op_subl_T0_T1_saturate();
2349 else
2350 gen_op_addl_T0_T1_saturate();
2351 gen_movl_reg_T0(s, rd);
2352 break;
2353 case 7: /* bkpt */
2354 gen_op_movl_T0_im((long)s->pc - 4);
2355 gen_op_movl_reg_TN[0][15]();
2356 gen_op_bkpt();
2357 s->is_jmp = DISAS_JUMP;
2358 break;
2359 case 0x8: /* signed multiply */
2360 case 0xa:
2361 case 0xc:
2362 case 0xe:
2363 rs = (insn >> 8) & 0xf;
2364 rn = (insn >> 12) & 0xf;
2365 rd = (insn >> 16) & 0xf;
2366 if (op1 == 1) {
2367 /* (32 * 16) >> 16 */
2368 gen_movl_T0_reg(s, rm);
2369 gen_movl_T1_reg(s, rs);
2370 if (sh & 4)
2371 gen_op_sarl_T1_im(16);
2372 else
2373 gen_op_sxth_T1();
2374 gen_op_imulw_T0_T1();
2375 if ((sh & 2) == 0) {
2376 gen_movl_T1_reg(s, rn);
2377 gen_op_addl_T0_T1_setq();
2379 gen_movl_reg_T0(s, rd);
2380 } else {
2381 /* 16 * 16 */
2382 gen_movl_T0_reg(s, rm);
2383 gen_movl_T1_reg(s, rs);
2384 gen_mulxy(sh & 2, sh & 4);
2385 if (op1 == 2) {
2386 gen_op_signbit_T1_T0();
2387 gen_op_addq_T0_T1(rn, rd);
2388 gen_movl_reg_T0(s, rn);
2389 gen_movl_reg_T1(s, rd);
2390 } else {
2391 if (op1 == 0) {
2392 gen_movl_T1_reg(s, rn);
2393 gen_op_addl_T0_T1_setq();
2395 gen_movl_reg_T0(s, rd);
2398 break;
2399 default:
2400 goto illegal_op;
2402 } else if (((insn & 0x0e000000) == 0 &&
2403 (insn & 0x00000090) != 0x90) ||
2404 ((insn & 0x0e000000) == (1 << 25))) {
2405 int set_cc, logic_cc, shiftop;
2407 op1 = (insn >> 21) & 0xf;
2408 set_cc = (insn >> 20) & 1;
2409 logic_cc = table_logic_cc[op1] & set_cc;
2411 /* data processing instruction */
2412 if (insn & (1 << 25)) {
2413 /* immediate operand */
2414 val = insn & 0xff;
2415 shift = ((insn >> 8) & 0xf) * 2;
2416 if (shift)
2417 val = (val >> shift) | (val << (32 - shift));
2418 gen_op_movl_T1_im(val);
2419 if (logic_cc && shift)
2420 gen_op_mov_CF_T1();
2421 } else {
2422 /* register */
2423 rm = (insn) & 0xf;
2424 gen_movl_T1_reg(s, rm);
2425 shiftop = (insn >> 5) & 3;
2426 if (!(insn & (1 << 4))) {
2427 shift = (insn >> 7) & 0x1f;
2428 if (shift != 0) {
2429 if (logic_cc) {
2430 gen_shift_T1_im_cc[shiftop](shift);
2431 } else {
2432 gen_shift_T1_im[shiftop](shift);
2434 } else if (shiftop != 0) {
2435 if (logic_cc) {
2436 gen_shift_T1_0_cc[shiftop]();
2437 } else {
2438 gen_shift_T1_0[shiftop]();
2441 } else {
2442 rs = (insn >> 8) & 0xf;
2443 gen_movl_T0_reg(s, rs);
2444 if (logic_cc) {
2445 gen_shift_T1_T0_cc[shiftop]();
2446 } else {
2447 gen_shift_T1_T0[shiftop]();
2451 if (op1 != 0x0f && op1 != 0x0d) {
2452 rn = (insn >> 16) & 0xf;
2453 gen_movl_T0_reg(s, rn);
2455 rd = (insn >> 12) & 0xf;
2456 switch(op1) {
2457 case 0x00:
2458 gen_op_andl_T0_T1();
2459 gen_movl_reg_T0(s, rd);
2460 if (logic_cc)
2461 gen_op_logic_T0_cc();
2462 break;
2463 case 0x01:
2464 gen_op_xorl_T0_T1();
2465 gen_movl_reg_T0(s, rd);
2466 if (logic_cc)
2467 gen_op_logic_T0_cc();
2468 break;
2469 case 0x02:
2470 if (set_cc && rd == 15) {
2471 /* SUBS r15, ... is used for exception return. */
2472 if (IS_USER(s))
2473 goto illegal_op;
2474 gen_op_subl_T0_T1_cc();
2475 gen_exception_return(s);
2476 } else {
2477 if (set_cc)
2478 gen_op_subl_T0_T1_cc();
2479 else
2480 gen_op_subl_T0_T1();
2481 gen_movl_reg_T0(s, rd);
2483 break;
2484 case 0x03:
2485 if (set_cc)
2486 gen_op_rsbl_T0_T1_cc();
2487 else
2488 gen_op_rsbl_T0_T1();
2489 gen_movl_reg_T0(s, rd);
2490 break;
2491 case 0x04:
2492 if (set_cc)
2493 gen_op_addl_T0_T1_cc();
2494 else
2495 gen_op_addl_T0_T1();
2496 gen_movl_reg_T0(s, rd);
2497 break;
2498 case 0x05:
2499 if (set_cc)
2500 gen_op_adcl_T0_T1_cc();
2501 else
2502 gen_op_adcl_T0_T1();
2503 gen_movl_reg_T0(s, rd);
2504 break;
2505 case 0x06:
2506 if (set_cc)
2507 gen_op_sbcl_T0_T1_cc();
2508 else
2509 gen_op_sbcl_T0_T1();
2510 gen_movl_reg_T0(s, rd);
2511 break;
2512 case 0x07:
2513 if (set_cc)
2514 gen_op_rscl_T0_T1_cc();
2515 else
2516 gen_op_rscl_T0_T1();
2517 gen_movl_reg_T0(s, rd);
2518 break;
2519 case 0x08:
2520 if (set_cc) {
2521 gen_op_andl_T0_T1();
2522 gen_op_logic_T0_cc();
2524 break;
2525 case 0x09:
2526 if (set_cc) {
2527 gen_op_xorl_T0_T1();
2528 gen_op_logic_T0_cc();
2530 break;
2531 case 0x0a:
2532 if (set_cc) {
2533 gen_op_subl_T0_T1_cc();
2535 break;
2536 case 0x0b:
2537 if (set_cc) {
2538 gen_op_addl_T0_T1_cc();
2540 break;
2541 case 0x0c:
2542 gen_op_orl_T0_T1();
2543 gen_movl_reg_T0(s, rd);
2544 if (logic_cc)
2545 gen_op_logic_T0_cc();
2546 break;
2547 case 0x0d:
2548 if (logic_cc && rd == 15) {
2549 /* MOVS r15, ... is used for exception return. */
2550 if (IS_USER(s))
2551 goto illegal_op;
2552 gen_op_movl_T0_T1();
2553 gen_exception_return(s);
2554 } else {
2555 gen_movl_reg_T1(s, rd);
2556 if (logic_cc)
2557 gen_op_logic_T1_cc();
2559 break;
2560 case 0x0e:
2561 gen_op_bicl_T0_T1();
2562 gen_movl_reg_T0(s, rd);
2563 if (logic_cc)
2564 gen_op_logic_T0_cc();
2565 break;
2566 default:
2567 case 0x0f:
2568 gen_op_notl_T1();
2569 gen_movl_reg_T1(s, rd);
2570 if (logic_cc)
2571 gen_op_logic_T1_cc();
2572 break;
2574 } else {
2575 /* other instructions */
2576 op1 = (insn >> 24) & 0xf;
2577 switch(op1) {
2578 case 0x0:
2579 case 0x1:
2580 /* multiplies, extra load/stores */
2581 sh = (insn >> 5) & 3;
2582 if (sh == 0) {
2583 if (op1 == 0x0) {
2584 rd = (insn >> 16) & 0xf;
2585 rn = (insn >> 12) & 0xf;
2586 rs = (insn >> 8) & 0xf;
2587 rm = (insn) & 0xf;
2588 if (((insn >> 22) & 3) == 0) {
2589 /* 32 bit mul */
2590 gen_movl_T0_reg(s, rs);
2591 gen_movl_T1_reg(s, rm);
2592 gen_op_mul_T0_T1();
2593 if (insn & (1 << 21)) {
2594 gen_movl_T1_reg(s, rn);
2595 gen_op_addl_T0_T1();
2597 if (insn & (1 << 20))
2598 gen_op_logic_T0_cc();
2599 gen_movl_reg_T0(s, rd);
2600 } else {
2601 /* 64 bit mul */
2602 gen_movl_T0_reg(s, rs);
2603 gen_movl_T1_reg(s, rm);
2604 if (insn & (1 << 22))
2605 gen_op_imull_T0_T1();
2606 else
2607 gen_op_mull_T0_T1();
2608 if (insn & (1 << 21)) /* mult accumulate */
2609 gen_op_addq_T0_T1(rn, rd);
2610 if (!(insn & (1 << 23))) { /* double accumulate */
2611 ARCH(6);
2612 gen_op_addq_lo_T0_T1(rn);
2613 gen_op_addq_lo_T0_T1(rd);
2615 if (insn & (1 << 20))
2616 gen_op_logicq_cc();
2617 gen_movl_reg_T0(s, rn);
2618 gen_movl_reg_T1(s, rd);
2620 } else {
2621 rn = (insn >> 16) & 0xf;
2622 rd = (insn >> 12) & 0xf;
2623 if (insn & (1 << 23)) {
2624 /* load/store exclusive */
2625 goto illegal_op;
2626 } else {
2627 /* SWP instruction */
2628 rm = (insn) & 0xf;
2630 gen_movl_T0_reg(s, rm);
2631 gen_movl_T1_reg(s, rn);
2632 if (insn & (1 << 22)) {
2633 gen_ldst(swpb, s);
2634 } else {
2635 gen_ldst(swpl, s);
2637 gen_movl_reg_T0(s, rd);
2640 } else {
2641 int address_offset;
2642 int load;
2643 /* Misc load/store */
2644 rn = (insn >> 16) & 0xf;
2645 rd = (insn >> 12) & 0xf;
2646 gen_movl_T1_reg(s, rn);
2647 if (insn & (1 << 24))
2648 gen_add_datah_offset(s, insn, 0);
2649 address_offset = 0;
2650 if (insn & (1 << 20)) {
2651 /* load */
2652 switch(sh) {
2653 case 1:
2654 gen_ldst(lduw, s);
2655 break;
2656 case 2:
2657 gen_ldst(ldsb, s);
2658 break;
2659 default:
2660 case 3:
2661 gen_ldst(ldsw, s);
2662 break;
2664 load = 1;
2665 } else if (sh & 2) {
2666 /* doubleword */
2667 if (sh & 1) {
2668 /* store */
2669 gen_movl_T0_reg(s, rd);
2670 gen_ldst(stl, s);
2671 gen_op_addl_T1_im(4);
2672 gen_movl_T0_reg(s, rd + 1);
2673 gen_ldst(stl, s);
2674 load = 0;
2675 } else {
2676 /* load */
2677 gen_ldst(ldl, s);
2678 gen_movl_reg_T0(s, rd);
2679 gen_op_addl_T1_im(4);
2680 gen_ldst(ldl, s);
2681 rd++;
2682 load = 1;
2684 address_offset = -4;
2685 } else {
2686 /* store */
2687 gen_movl_T0_reg(s, rd);
2688 gen_ldst(stw, s);
2689 load = 0;
2691 /* Perform base writeback before the loaded value to
2692 ensure correct behavior with overlapping index registers.
2693 ldrd with base writeback is undefined if the
2694 destination and index registers overlap. */
2695 if (!(insn & (1 << 24))) {
2696 gen_add_datah_offset(s, insn, address_offset);
2697 gen_movl_reg_T1(s, rn);
2698 } else if (insn & (1 << 21)) {
2699 if (address_offset)
2700 gen_op_addl_T1_im(address_offset);
2701 gen_movl_reg_T1(s, rn);
2703 if (load) {
2704 /* Complete the load. */
2705 gen_movl_reg_T0(s, rd);
2708 break;
2709 case 0x4:
2710 case 0x5:
2711 case 0x6:
2712 case 0x7:
2713 /* Check for undefined extension instructions
2714 * per the ARM Bible IE:
2715 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
2717 sh = (0xf << 20) | (0xf << 4);
2718 if (op1 == 0x7 && ((insn & sh) == sh))
2720 goto illegal_op;
2722 /* load/store byte/word */
2723 rn = (insn >> 16) & 0xf;
2724 rd = (insn >> 12) & 0xf;
2725 gen_movl_T1_reg(s, rn);
2726 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
2727 if (insn & (1 << 24))
2728 gen_add_data_offset(s, insn);
2729 if (insn & (1 << 20)) {
2730 /* load */
2731 s->is_mem = 1;
2732 #if defined(CONFIG_USER_ONLY)
2733 if (insn & (1 << 22))
2734 gen_op_ldub_raw();
2735 else
2736 gen_op_ldl_raw();
2737 #else
2738 if (insn & (1 << 22)) {
2739 if (i)
2740 gen_op_ldub_user();
2741 else
2742 gen_op_ldub_kernel();
2743 } else {
2744 if (i)
2745 gen_op_ldl_user();
2746 else
2747 gen_op_ldl_kernel();
2749 #endif
2750 } else {
2751 /* store */
2752 gen_movl_T0_reg(s, rd);
2753 #if defined(CONFIG_USER_ONLY)
2754 if (insn & (1 << 22))
2755 gen_op_stb_raw();
2756 else
2757 gen_op_stl_raw();
2758 #else
2759 if (insn & (1 << 22)) {
2760 if (i)
2761 gen_op_stb_user();
2762 else
2763 gen_op_stb_kernel();
2764 } else {
2765 if (i)
2766 gen_op_stl_user();
2767 else
2768 gen_op_stl_kernel();
2770 #endif
2772 if (!(insn & (1 << 24))) {
2773 gen_add_data_offset(s, insn);
2774 gen_movl_reg_T1(s, rn);
2775 } else if (insn & (1 << 21))
2776 gen_movl_reg_T1(s, rn); {
2778 if (insn & (1 << 20)) {
2779 /* Complete the load. */
2780 if (rd == 15)
2781 gen_bx(s);
2782 else
2783 gen_movl_reg_T0(s, rd);
2785 break;
2786 case 0x08:
2787 case 0x09:
2789 int j, n, user, loaded_base;
2790 /* load/store multiple words */
2791 /* XXX: store correct base if write back */
2792 user = 0;
2793 if (insn & (1 << 22)) {
2794 if (IS_USER(s))
2795 goto illegal_op; /* only usable in supervisor mode */
2797 if ((insn & (1 << 15)) == 0)
2798 user = 1;
2800 rn = (insn >> 16) & 0xf;
2801 gen_movl_T1_reg(s, rn);
2803 /* compute total size */
2804 loaded_base = 0;
2805 n = 0;
2806 for(i=0;i<16;i++) {
2807 if (insn & (1 << i))
2808 n++;
2810 /* XXX: test invalid n == 0 case ? */
2811 if (insn & (1 << 23)) {
2812 if (insn & (1 << 24)) {
2813 /* pre increment */
2814 gen_op_addl_T1_im(4);
2815 } else {
2816 /* post increment */
2818 } else {
2819 if (insn & (1 << 24)) {
2820 /* pre decrement */
2821 gen_op_addl_T1_im(-(n * 4));
2822 } else {
2823 /* post decrement */
2824 if (n != 1)
2825 gen_op_addl_T1_im(-((n - 1) * 4));
2828 j = 0;
2829 for(i=0;i<16;i++) {
2830 if (insn & (1 << i)) {
2831 if (insn & (1 << 20)) {
2832 /* load */
2833 gen_ldst(ldl, s);
2834 if (i == 15) {
2835 gen_bx(s);
2836 } else if (user) {
2837 gen_op_movl_user_T0(i);
2838 } else if (i == rn) {
2839 gen_op_movl_T2_T0();
2840 loaded_base = 1;
2841 } else {
2842 gen_movl_reg_T0(s, i);
2844 } else {
2845 /* store */
2846 if (i == 15) {
2847 /* special case: r15 = PC + 8 */
2848 val = (long)s->pc + 4;
2849 gen_op_movl_TN_im[0](val);
2850 } else if (user) {
2851 gen_op_movl_T0_user(i);
2852 } else {
2853 gen_movl_T0_reg(s, i);
2855 gen_ldst(stl, s);
2857 j++;
2858 /* no need to add after the last transfer */
2859 if (j != n)
2860 gen_op_addl_T1_im(4);
2863 if (insn & (1 << 21)) {
2864 /* write back */
2865 if (insn & (1 << 23)) {
2866 if (insn & (1 << 24)) {
2867 /* pre increment */
2868 } else {
2869 /* post increment */
2870 gen_op_addl_T1_im(4);
2872 } else {
2873 if (insn & (1 << 24)) {
2874 /* pre decrement */
2875 if (n != 1)
2876 gen_op_addl_T1_im(-((n - 1) * 4));
2877 } else {
2878 /* post decrement */
2879 gen_op_addl_T1_im(-(n * 4));
2882 gen_movl_reg_T1(s, rn);
2884 if (loaded_base) {
2885 gen_op_movl_T0_T2();
2886 gen_movl_reg_T0(s, rn);
2888 if ((insn & (1 << 22)) && !user) {
2889 /* Restore CPSR from SPSR. */
2890 gen_op_movl_T0_spsr();
2891 gen_op_movl_cpsr_T0(0xffffffff);
2892 s->is_jmp = DISAS_UPDATE;
2895 break;
2896 case 0xa:
2897 case 0xb:
2899 int32_t offset;
2901 /* branch (and link) */
2902 val = (int32_t)s->pc;
2903 if (insn & (1 << 24)) {
2904 gen_op_movl_T0_im(val);
2905 gen_op_movl_reg_TN[0][14]();
2907 offset = (((int32_t)insn << 8) >> 8);
2908 val += (offset << 2) + 4;
2909 gen_jmp(s, val);
2911 break;
2912 case 0xc:
2913 case 0xd:
2914 case 0xe:
2915 /* Coprocessor. */
2916 op1 = (insn >> 8) & 0xf;
2917 if (arm_feature(env, ARM_FEATURE_XSCALE) &&
2918 ((env->cp15.c15_cpar ^ 0x3fff) & (1 << op1)))
2919 goto illegal_op;
2920 switch (op1) {
2921 case 0 ... 1:
2922 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2923 if (disas_iwmmxt_insn(env, s, insn))
2924 goto illegal_op;
2925 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2926 if (disas_dsp_insn(env, s, insn))
2927 goto illegal_op;
2928 } else
2929 goto illegal_op;
2930 break;
2931 case 2 ... 9:
2932 case 12 ... 14:
2933 if (disas_cp_insn (env, s, insn))
2934 goto illegal_op;
2935 break;
2936 case 10:
2937 case 11:
2938 if (disas_vfp_insn (env, s, insn))
2939 goto illegal_op;
2940 break;
2941 case 15:
2942 if (disas_cp15_insn (env, s, insn))
2943 goto illegal_op;
2944 break;
2945 default:
2946 /* unknown coprocessor. */
2947 goto illegal_op;
2949 break;
2950 case 0xf:
2951 /* swi */
2952 gen_op_movl_T0_im((long)s->pc);
2953 gen_op_movl_reg_TN[0][15]();
2954 gen_op_swi();
2955 s->is_jmp = DISAS_JUMP;
2956 break;
2957 default:
2958 illegal_op:
2959 gen_op_movl_T0_im((long)s->pc - 4);
2960 gen_op_movl_reg_TN[0][15]();
2961 gen_op_undef_insn();
2962 s->is_jmp = DISAS_JUMP;
2963 break;
/* Decode one 16-bit Thumb instruction at s->pc and emit the corresponding
 * micro-ops via the gen_op_* / gen_ldst helpers, advancing s->pc past the
 * instruction (the second halfword of a bl/blx pair is consumed here too).
 * Encodings recognized as undefined jump to the `undef' label, which emits
 * an undefined-instruction trap and ends the translation block. */
2968 static void disas_thumb_insn(DisasContext *s)
2970 uint32_t val, insn, op, rm, rn, rd, shift, cond;
2971 int32_t offset;
2972 int i;
2974 insn = lduw_code(s->pc);
2975 s->pc += 2;
/* Dispatch on the top 4 opcode bits of the halfword. */
2977 switch (insn >> 12) {
2978 case 0: case 1:
2979 rd = insn & 7;
2980 op = (insn >> 11) & 3;
2981 if (op == 3) {
2982 /* add/subtract */
2983 rn = (insn >> 3) & 7;
2984 gen_movl_T0_reg(s, rn);
2985 if (insn & (1 << 10)) {
2986 /* immediate */
2987 gen_op_movl_T1_im((insn >> 6) & 7);
2988 } else {
2989 /* reg */
2990 rm = (insn >> 6) & 7;
2991 gen_movl_T1_reg(s, rm);
2993 if (insn & (1 << 9))
2994 gen_op_subl_T0_T1_cc();
2995 else
2996 gen_op_addl_T0_T1_cc();
2997 gen_movl_reg_T0(s, rd);
2998 } else {
2999 /* shift immediate */
3000 rm = (insn >> 3) & 7;
3001 shift = (insn >> 6) & 0x1f;
3002 gen_movl_T0_reg(s, rm);
3003 gen_shift_T0_im_thumb[op](shift);
3004 gen_movl_reg_T0(s, rd);
3006 break;
3007 case 2: case 3:
3008 /* arithmetic large immediate */
3009 op = (insn >> 11) & 3;
3010 rd = (insn >> 8) & 0x7;
3011 if (op == 0) {
3012 gen_op_movl_T0_im(insn & 0xff);
3013 } else {
3014 gen_movl_T0_reg(s, rd);
3015 gen_op_movl_T1_im(insn & 0xff);
3017 switch (op) {
3018 case 0: /* mov */
3019 gen_op_logic_T0_cc();
3020 break;
3021 case 1: /* cmp */
3022 gen_op_subl_T0_T1_cc();
3023 break;
3024 case 2: /* add */
3025 gen_op_addl_T0_T1_cc();
3026 break;
3027 case 3: /* sub */
3028 gen_op_subl_T0_T1_cc();
3029 break;
/* cmp (op == 1) only sets flags; everything else writes rd back. */
3031 if (op != 1)
3032 gen_movl_reg_T0(s, rd);
3033 break;
3034 case 4:
3035 if (insn & (1 << 11)) {
3036 rd = (insn >> 8) & 7;
3037 /* load pc-relative. Bit 1 of PC is ignored. */
3038 val = s->pc + 2 + ((insn & 0xff) * 4);
3039 val &= ~(uint32_t)2;
3040 gen_op_movl_T1_im(val);
3041 gen_ldst(ldl, s);
3042 gen_movl_reg_T0(s, rd);
3043 break;
3045 if (insn & (1 << 10)) {
3046 /* data processing extended or blx */
3047 rd = (insn & 7) | ((insn >> 4) & 8);
3048 rm = (insn >> 3) & 0xf;
3049 op = (insn >> 8) & 3;
3050 switch (op) {
3051 case 0: /* add */
3052 gen_movl_T0_reg(s, rd);
3053 gen_movl_T1_reg(s, rm);
3054 gen_op_addl_T0_T1();
3055 gen_movl_reg_T0(s, rd);
3056 break;
3057 case 1: /* cmp */
3058 gen_movl_T0_reg(s, rd);
3059 gen_movl_T1_reg(s, rm);
3060 gen_op_subl_T0_T1_cc();
3061 break;
3062 case 2: /* mov/cpy */
3063 gen_movl_T0_reg(s, rm);
3064 gen_movl_reg_T0(s, rd);
3065 break;
3066 case 3:/* branch [and link] exchange thumb register */
3067 if (insn & (1 << 7)) {
/* blx: save return address (with Thumb bit set) in lr. */
3068 val = (uint32_t)s->pc | 1;
3069 gen_op_movl_T1_im(val);
3070 gen_movl_reg_T1(s, 14);
3072 gen_movl_T0_reg(s, rm);
3073 gen_bx(s);
3074 break;
3076 break;
3079 /* data processing register */
3080 rd = insn & 7;
3081 rm = (insn >> 3) & 7;
3082 op = (insn >> 6) & 0xf;
3083 if (op == 2 || op == 3 || op == 4 || op == 7) {
3084 /* the shift/rotate ops want the operands backwards */
3085 val = rm;
3086 rm = rd;
3087 rd = val;
3088 val = 1;
3089 } else {
3090 val = 0;
3093 if (op == 9) /* neg */
3094 gen_op_movl_T0_im(0);
3095 else if (op != 0xf) /* mvn doesn't read its first operand */
3096 gen_movl_T0_reg(s, rd);
3098 gen_movl_T1_reg(s, rm);
3099 switch (op) {
3100 case 0x0: /* and */
3101 gen_op_andl_T0_T1();
3102 gen_op_logic_T0_cc();
3103 break;
3104 case 0x1: /* eor */
3105 gen_op_xorl_T0_T1();
3106 gen_op_logic_T0_cc();
3107 break;
3108 case 0x2: /* lsl */
3109 gen_op_shll_T1_T0_cc();
3110 gen_op_logic_T1_cc();
3111 break;
3112 case 0x3: /* lsr */
3113 gen_op_shrl_T1_T0_cc();
3114 gen_op_logic_T1_cc();
3115 break;
3116 case 0x4: /* asr */
3117 gen_op_sarl_T1_T0_cc();
3118 gen_op_logic_T1_cc();
3119 break;
3120 case 0x5: /* adc */
3121 gen_op_adcl_T0_T1_cc();
3122 break;
3123 case 0x6: /* sbc */
3124 gen_op_sbcl_T0_T1_cc();
3125 break;
3126 case 0x7: /* ror */
3127 gen_op_rorl_T1_T0_cc();
3128 gen_op_logic_T1_cc();
3129 break;
3130 case 0x8: /* tst */
3131 gen_op_andl_T0_T1();
3132 gen_op_logic_T0_cc();
/* rd = 16 is a sentinel: the writeback test below (rd != 16)
 * suppresses the register write for flag-only ops. */
3133 rd = 16;
3134 break;
3135 case 0x9: /* neg */
3136 gen_op_subl_T0_T1_cc();
3137 break;
3138 case 0xa: /* cmp */
3139 gen_op_subl_T0_T1_cc();
3140 rd = 16;
3141 break;
3142 case 0xb: /* cmn */
3143 gen_op_addl_T0_T1_cc();
3144 rd = 16;
3145 break;
3146 case 0xc: /* orr */
3147 gen_op_orl_T0_T1();
3148 gen_op_logic_T0_cc();
3149 break;
3150 case 0xd: /* mul */
3151 gen_op_mull_T0_T1();
3152 gen_op_logic_T0_cc();
3153 break;
3154 case 0xe: /* bic */
3155 gen_op_bicl_T0_T1();
3156 gen_op_logic_T0_cc();
3157 break;
3158 case 0xf: /* mvn */
3159 gen_op_notl_T1();
3160 gen_op_logic_T1_cc();
3161 val = 1;
3162 rm = rd;
3163 break;
/* val selects whether the result lives in T1 (shift/rotate, mvn)
 * or in T0 (everything else). */
3165 if (rd != 16) {
3166 if (val)
3167 gen_movl_reg_T1(s, rm);
3168 else
3169 gen_movl_reg_T0(s, rd);
3171 break;
3173 case 5:
3174 /* load/store register offset. */
3175 rd = insn & 7;
3176 rn = (insn >> 3) & 7;
3177 rm = (insn >> 6) & 7;
3178 op = (insn >> 9) & 7;
3179 gen_movl_T1_reg(s, rn);
3180 gen_movl_T2_reg(s, rm);
3181 gen_op_addl_T1_T2();
3183 if (op < 3) /* store */
3184 gen_movl_T0_reg(s, rd);
3186 switch (op) {
3187 case 0: /* str */
3188 gen_ldst(stl, s);
3189 break;
3190 case 1: /* strh */
3191 gen_ldst(stw, s);
3192 break;
3193 case 2: /* strb */
3194 gen_ldst(stb, s);
3195 break;
3196 case 3: /* ldrsb */
3197 gen_ldst(ldsb, s);
3198 break;
3199 case 4: /* ldr */
3200 gen_ldst(ldl, s);
3201 break;
3202 case 5: /* ldrh */
3203 gen_ldst(lduw, s);
3204 break;
3205 case 6: /* ldrb */
3206 gen_ldst(ldub, s);
3207 break;
3208 case 7: /* ldrsh */
3209 gen_ldst(ldsw, s);
3210 break;
3212 if (op >= 3) /* load */
3213 gen_movl_reg_T0(s, rd);
3214 break;
3216 case 6:
3217 /* load/store word immediate offset */
3218 rd = insn & 7;
3219 rn = (insn >> 3) & 7;
3220 gen_movl_T1_reg(s, rn);
3221 val = (insn >> 4) & 0x7c;
3222 gen_op_movl_T2_im(val);
3223 gen_op_addl_T1_T2();
3225 if (insn & (1 << 11)) {
3226 /* load */
3227 gen_ldst(ldl, s);
3228 gen_movl_reg_T0(s, rd);
3229 } else {
3230 /* store */
3231 gen_movl_T0_reg(s, rd);
3232 gen_ldst(stl, s);
3234 break;
3236 case 7:
3237 /* load/store byte immediate offset */
3238 rd = insn & 7;
3239 rn = (insn >> 3) & 7;
3240 gen_movl_T1_reg(s, rn);
3241 val = (insn >> 6) & 0x1f;
3242 gen_op_movl_T2_im(val);
3243 gen_op_addl_T1_T2();
3245 if (insn & (1 << 11)) {
3246 /* load */
3247 gen_ldst(ldub, s);
3248 gen_movl_reg_T0(s, rd);
3249 } else {
3250 /* store */
3251 gen_movl_T0_reg(s, rd);
3252 gen_ldst(stb, s);
3254 break;
3256 case 8:
3257 /* load/store halfword immediate offset */
3258 rd = insn & 7;
3259 rn = (insn >> 3) & 7;
3260 gen_movl_T1_reg(s, rn);
3261 val = (insn >> 5) & 0x3e;
3262 gen_op_movl_T2_im(val);
3263 gen_op_addl_T1_T2();
3265 if (insn & (1 << 11)) {
3266 /* load */
3267 gen_ldst(lduw, s);
3268 gen_movl_reg_T0(s, rd);
3269 } else {
3270 /* store */
3271 gen_movl_T0_reg(s, rd);
3272 gen_ldst(stw, s);
3274 break;
3276 case 9:
3277 /* load/store from stack */
3278 rd = (insn >> 8) & 7;
3279 gen_movl_T1_reg(s, 13);
3280 val = (insn & 0xff) * 4;
3281 gen_op_movl_T2_im(val);
3282 gen_op_addl_T1_T2();
3284 if (insn & (1 << 11)) {
3285 /* load */
3286 gen_ldst(ldl, s);
3287 gen_movl_reg_T0(s, rd);
3288 } else {
3289 /* store */
3290 gen_movl_T0_reg(s, rd);
3291 gen_ldst(stl, s);
3293 break;
3295 case 10:
3296 /* add to high reg */
3297 rd = (insn >> 8) & 7;
3298 if (insn & (1 << 11)) {
3299 /* SP */
3300 gen_movl_T0_reg(s, 13);
3301 } else {
3302 /* PC. bit 1 is ignored. */
3303 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
3305 val = (insn & 0xff) * 4;
3306 gen_op_movl_T1_im(val);
3307 gen_op_addl_T0_T1();
3308 gen_movl_reg_T0(s, rd);
3309 break;
3311 case 11:
3312 /* misc */
3313 op = (insn >> 8) & 0xf;
3314 switch (op) {
3315 case 0:
3316 /* adjust stack pointer */
3317 gen_movl_T1_reg(s, 13);
3318 val = (insn & 0x7f) * 4;
3319 if (insn & (1 << 7))
3320 val = -(int32_t)val;
3321 gen_op_movl_T2_im(val);
3322 gen_op_addl_T1_T2();
3323 gen_movl_reg_T1(s, 13);
3324 break;
3326 case 4: case 5: case 0xc: case 0xd:
3327 /* push/pop */
3328 gen_movl_T1_reg(s, 13);
/* offset = total transfer size; bit 8 adds lr (push) / pc (pop). */
3329 if (insn & (1 << 8))
3330 offset = 4;
3331 else
3332 offset = 0;
3333 for (i = 0; i < 8; i++) {
3334 if (insn & (1 << i))
3335 offset += 4;
3337 if ((insn & (1 << 11)) == 0) {
3338 gen_op_movl_T2_im(-offset);
3339 gen_op_addl_T1_T2();
3341 gen_op_movl_T2_im(4);
3342 for (i = 0; i < 8; i++) {
3343 if (insn & (1 << i)) {
3344 if (insn & (1 << 11)) {
3345 /* pop */
3346 gen_ldst(ldl, s);
3347 gen_movl_reg_T0(s, i);
3348 } else {
3349 /* push */
3350 gen_movl_T0_reg(s, i);
3351 gen_ldst(stl, s);
3353 /* advance to the next address. */
3354 gen_op_addl_T1_T2();
3357 if (insn & (1 << 8)) {
3358 if (insn & (1 << 11)) {
3359 /* pop pc */
3360 gen_ldst(ldl, s);
3361 /* don't set the pc until the rest of the instruction
3362 has completed */
3363 } else {
3364 /* push lr */
3365 gen_movl_T0_reg(s, 14);
3366 gen_ldst(stl, s);
3368 gen_op_addl_T1_T2();
3370 if ((insn & (1 << 11)) == 0) {
3371 gen_op_movl_T2_im(-offset);
3372 gen_op_addl_T1_T2();
3374 /* write back the new stack pointer */
3375 gen_movl_reg_T1(s, 13);
3376 /* set the new PC value */
3377 if ((insn & 0x0900) == 0x0900)
3378 gen_bx(s);
3379 break;
3381 case 0xe: /* bkpt */
3382 gen_op_movl_T0_im((long)s->pc - 2);
3383 gen_op_movl_reg_TN[0][15]();
3384 gen_op_bkpt();
3385 s->is_jmp = DISAS_JUMP;
3386 break;
3388 default:
3389 goto undef;
3391 break;
3393 case 12:
3394 /* load/store multiple */
3395 rn = (insn >> 8) & 0x7;
3396 gen_movl_T1_reg(s, rn);
3397 gen_op_movl_T2_im(4);
3398 for (i = 0; i < 8; i++) {
3399 if (insn & (1 << i)) {
3400 if (insn & (1 << 11)) {
3401 /* load */
3402 gen_ldst(ldl, s);
3403 gen_movl_reg_T0(s, i);
3404 } else {
3405 /* store */
3406 gen_movl_T0_reg(s, i);
3407 gen_ldst(stl, s);
3409 /* advance to the next address */
3410 gen_op_addl_T1_T2();
3413 /* Base register writeback. */
3414 if ((insn & (1 << rn)) == 0)
3415 gen_movl_reg_T1(s, rn);
3416 break;
3418 case 13:
3419 /* conditional branch or swi */
3420 cond = (insn >> 8) & 0xf;
3421 if (cond == 0xe)
3422 goto undef;
3424 if (cond == 0xf) {
3425 /* swi */
3426 gen_op_movl_T0_im((long)s->pc | 1);
3427 /* Don't set r15. */
3428 gen_op_movl_reg_TN[0][15]();
3429 gen_op_swi();
3430 s->is_jmp = DISAS_JUMP;
3431 break;
3433 /* generate a conditional jump to next instruction */
3434 s->condlabel = gen_new_label();
3435 gen_test_cc[cond ^ 1](s->condlabel);
3436 s->condjmp = 1;
3437 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
3438 //s->is_jmp = DISAS_JUMP_NEXT;
3439 gen_movl_T1_reg(s, 15);
3441 /* jump to the offset */
3442 val = (uint32_t)s->pc + 2;
3443 offset = ((int32_t)insn << 24) >> 24;
3444 val += offset << 1;
3445 gen_jmp(s, val);
3446 break;
3448 case 14:
3449 /* unconditional branch */
3450 if (insn & (1 << 11)) {
3451 /* Second half of blx. */
3452 offset = ((insn & 0x7ff) << 1);
3453 gen_movl_T0_reg(s, 14);
3454 gen_op_movl_T1_im(offset);
3455 gen_op_addl_T0_T1();
3456 gen_op_movl_T1_im(0xfffffffc);
3457 gen_op_andl_T0_T1();
/* Save return address in lr with the Thumb bit set. */
3459 val = (uint32_t)s->pc;
3460 gen_op_movl_T1_im(val | 1);
3461 gen_movl_reg_T1(s, 14);
3462 gen_bx(s);
3463 break;
3465 val = (uint32_t)s->pc;
3466 offset = ((int32_t)insn << 21) >> 21;
3467 val += (offset << 1) + 2;
3468 gen_jmp(s, val);
3469 break;
3471 case 15:
3472 /* branch and link [and switch to arm] */
3473 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
3474 /* Instruction spans a page boundary. Implement it as two
3475 16-bit instructions in case the second half causes a
3476 prefetch abort. */
3477 offset = ((int32_t)insn << 21) >> 9;
3478 val = s->pc + 2 + offset;
3479 gen_op_movl_T0_im(val);
3480 gen_movl_reg_T0(s, 14);
3481 break;
3483 if (insn & (1 << 11)) {
3484 /* Second half of bl. */
3485 offset = ((insn & 0x7ff) << 1) | 1;
3486 gen_movl_T0_reg(s, 14);
3487 gen_op_movl_T1_im(offset);
3488 gen_op_addl_T0_T1();
3490 val = (uint32_t)s->pc;
3491 gen_op_movl_T1_im(val | 1);
3492 gen_movl_reg_T1(s, 14);
3493 gen_bx(s);
3494 break;
/* First half of bl/blx: read the second halfword here as well. */
3496 offset = ((int32_t)insn << 21) >> 10;
3497 insn = lduw_code(s->pc);
3498 offset |= insn & 0x7ff;
3500 val = (uint32_t)s->pc + 2;
3501 gen_op_movl_T1_im(val | 1);
3502 gen_movl_reg_T1(s, 14);
3504 val += offset << 1;
3505 if (insn & (1 << 12)) {
3506 /* bl */
3507 gen_jmp(s, val);
3508 } else {
3509 /* blx */
3510 val &= ~(uint32_t)2;
3511 gen_op_movl_T0_im(val);
3512 gen_bx(s);
3515 return;
3516 undef:
3517 gen_op_movl_T0_im((long)s->pc - 2);
3518 gen_op_movl_reg_TN[0][15]();
3519 gen_op_undef_insn();
3520 s->is_jmp = DISAS_JUMP;
3523 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
3524 basic block 'tb'. If search_pc is TRUE, also generate PC
3525 information for each intermediate instruction. */
3526 static inline int gen_intermediate_code_internal(CPUState *env,
3527 TranslationBlock *tb,
3528 int search_pc)
3530 DisasContext dc1, *dc = &dc1;
3531 uint16_t *gen_opc_end;
3532 int j, lj;
3533 target_ulong pc_start;
3534 uint32_t next_page_start;
3536 /* generate intermediate code */
3537 pc_start = tb->pc;
3539 dc->tb = tb;
3541 gen_opc_ptr = gen_opc_buf;
3542 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3543 gen_opparam_ptr = gen_opparam_buf;
/* Initialize the per-TB disassembly context from the CPU state. */
3545 dc->is_jmp = DISAS_NEXT;
3546 dc->pc = pc_start;
3547 dc->singlestep_enabled = env->singlestep_enabled;
3548 dc->condjmp = 0;
3549 dc->thumb = env->thumb;
3550 dc->is_mem = 0;
3551 #if !defined(CONFIG_USER_ONLY)
3552 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
3553 #endif
3554 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3555 nb_gen_labels = 0;
3556 lj = -1;
3557 do {
/* Emit a debug trap in place of any instruction with a breakpoint. */
3558 if (env->nb_breakpoints > 0) {
3559 for(j = 0; j < env->nb_breakpoints; j++) {
3560 if (env->breakpoints[j] == dc->pc) {
3561 gen_op_movl_T0_im((long)dc->pc);
3562 gen_op_movl_reg_TN[0][15]();
3563 gen_op_debug();
3564 dc->is_jmp = DISAS_JUMP;
3565 break;
/* When search_pc is set, record which micro-op index starts each
 * guest instruction so the PC can be recovered later. */
3569 if (search_pc) {
3570 j = gen_opc_ptr - gen_opc_buf;
3571 if (lj < j) {
3572 lj++;
3573 while (lj < j)
3574 gen_opc_instr_start[lj++] = 0;
3576 gen_opc_pc[lj] = dc->pc;
3577 gen_opc_instr_start[lj] = 1;
3580 if (env->thumb)
3581 disas_thumb_insn(dc);
3582 else
3583 disas_arm_insn(env, dc);
3585 if (dc->condjmp && !dc->is_jmp) {
3586 gen_set_label(dc->condlabel);
3587 dc->condjmp = 0;
3589 /* Terminate the TB on memory ops if watchpoints are present. */
3590 /* FIXME: This should be replaced by the deterministic execution
3591 * IRQ raising bits. */
3592 if (dc->is_mem && env->nb_watchpoints)
3593 break;
3595 /* Translation stops when a conditional branch is encountered.
3596 * Otherwise the subsequent code could get translated several times.
3597 * Also stop translation when a page boundary is reached. This
3598 * ensures prefetch aborts occur at the right place. */
3599 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
3600 !env->singlestep_enabled &&
3601 dc->pc < next_page_start);
3602 /* At this stage dc->condjmp will only be set when the skipped
3603 * instruction was a conditional branch, and the PC has already been
3604 * written. */
3605 if (__builtin_expect(env->singlestep_enabled, 0)) {
3606 /* Make sure the pc is updated, and raise a debug exception. */
3607 if (dc->condjmp) {
3608 gen_op_debug();
3609 gen_set_label(dc->condlabel);
3611 if (dc->condjmp || !dc->is_jmp) {
3612 gen_op_movl_T0_im((long)dc->pc);
3613 gen_op_movl_reg_TN[0][15]();
3614 dc->condjmp = 0;
3616 gen_op_debug();
3617 } else {
3618 switch(dc->is_jmp) {
3619 case DISAS_NEXT:
3620 gen_goto_tb(dc, 1, dc->pc);
3621 break;
3622 default:
3623 case DISAS_JUMP:
3624 case DISAS_UPDATE:
3625 /* indicate that the hash table must be used to find the next TB */
3626 gen_op_movl_T0_0();
3627 gen_op_exit_tb();
3628 break;
3629 case DISAS_TB_JUMP:
3630 /* nothing more to generate */
3631 break;
3633 if (dc->condjmp) {
3634 gen_set_label(dc->condlabel);
3635 gen_goto_tb(dc, 1, dc->pc);
3636 dc->condjmp = 0;
3639 *gen_opc_ptr = INDEX_op_end;
3641 #ifdef DEBUG_DISAS
3642 if (loglevel & CPU_LOG_TB_IN_ASM) {
3643 fprintf(logfile, "----------------\n");
3644 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
3645 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
3646 fprintf(logfile, "\n");
3647 if (loglevel & (CPU_LOG_TB_OP)) {
3648 fprintf(logfile, "OP:\n");
3649 dump_ops(gen_opc_buf, gen_opparam_buf);
3650 fprintf(logfile, "\n");
3653 #endif
/* In search_pc mode the caller is rebuilding PC info for an existing
 * TB, so leave tb->size alone (zero); otherwise record the size of
 * the guest code just translated. */
3654 if (search_pc) {
3655 j = gen_opc_ptr - gen_opc_buf;
3656 lj++;
3657 while (lj <= j)
3658 gen_opc_instr_start[lj++] = 0;
3659 tb->size = 0;
3660 } else {
3661 tb->size = dc->pc - pc_start;
3663 return 0;
/* Translate the basic block 'tb' without recording per-op PC
 * information (search_pc == 0).  Returns 0. */
3666 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
3668 return gen_intermediate_code_internal(env, tb, 0);
/* Translate the basic block 'tb', additionally recording PC
 * information for each intermediate op (search_pc == 1).  Returns 0. */
3671 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
3673 return gen_intermediate_code_internal(env, tb, 1);
/* CPU mode names indexed by the low 4 bits of the PSR mode field
 * (see cpu_dump_state); "???" marks reserved encodings. */
3676 static const char *cpu_mode_names[16] = {
3677 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
3678 "???", "???", "???", "und", "???", "???", "???", "sys"
3680 void cpu_dump_state(CPUState *env, FILE *f,
3681 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
3682 int flags)
3684 int i;
3685 union {
3686 uint32_t i;
3687 float s;
3688 } s0, s1;
3689 CPU_DoubleU d;
3690 /* ??? This assumes float64 and double have the same layout.
3691 Oh well, it's only debug dumps. */
3692 union {
3693 float64 f64;
3694 double d;
3695 } d0;
3696 uint32_t psr;
3698 for(i=0;i<16;i++) {
3699 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
3700 if ((i % 4) == 3)
3701 cpu_fprintf(f, "\n");
3702 else
3703 cpu_fprintf(f, " ");
3705 psr = cpsr_read(env);
3706 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
3707 psr,
3708 psr & (1 << 31) ? 'N' : '-',
3709 psr & (1 << 30) ? 'Z' : '-',
3710 psr & (1 << 29) ? 'C' : '-',
3711 psr & (1 << 28) ? 'V' : '-',
3712 psr & CPSR_T ? 'T' : 'A',
3713 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
3715 for (i = 0; i < 16; i++) {
3716 d.d = env->vfp.regs[i];
3717 s0.i = d.l.lower;
3718 s1.i = d.l.upper;
3719 d0.f64 = d.d;
3720 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
3721 i * 2, (int)s0.i, s0.s,
3722 i * 2 + 1, (int)s1.i, s1.s,
3723 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
3724 d0.d);
3726 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);