/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
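/* ARCH(x) is used inside the instruction decoders: if the CPU being
   translated lacks the named feature, control transfers to the decoder's
   illegal_op label and the insn is treated as undefined.  */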
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int is_mem;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
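/* These continue the generic DISAS_* completion codes (DISAS_NEXT,
   DISAS_JUMP, ...) from exec-all.h; WFI and SWI get their own codes so the
   exception is only raised once the conditional execution state has been
   written back.  */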
/* XXX: move that elsewhere */
extern FILE *logfile;
extern int loglevel;
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## add8_T0_T1, \
    NULL, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1 }

static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
    {},
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
};
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add8_T0_T1, \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    NULL }

static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
    {},
};
#undef PAS_OP
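/* Both tables are indexed [op1][op2]: op1 picks the variant (signed,
   saturating, signed-halving, unsigned, ...), op2 the operation
   (add16, addsubx, subaddx, sub16, add8, sub8); empty rows and NULL
   entries correspond to encodings that do not exist.  */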
static GenOpFunc1 *gen_test_cc[14] = {
    gen_op_test_eq,
    gen_op_test_ne,
    gen_op_test_cs,
    gen_op_test_cc,
    gen_op_test_mi,
    gen_op_test_pl,
    gen_op_test_vs,
    gen_op_test_vc,
    gen_op_test_hi,
    gen_op_test_ls,
    gen_op_test_ge,
    gen_op_test_lt,
    gen_op_test_gt,
    gen_op_test_le,
};
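/* Indexed by the 4-bit data-processing opcode: nonzero entries mark the
   logical operations (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN), whose C
   flag comes from the shifter carry-out rather than an ALU carry.  */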
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
static GenOpFunc1 *gen_shift_T1_im[4] = {
    gen_op_shll_T1_im,
    gen_op_shrl_T1_im,
    gen_op_sarl_T1_im,
    gen_op_rorl_T1_im,
};

static GenOpFunc *gen_shift_T1_0[4] = {
    NULL,
    gen_op_shrl_T1_0,
    gen_op_sarl_T1_0,
    gen_op_rrxl_T1,
};

static GenOpFunc1 *gen_shift_T2_im[4] = {
    gen_op_shll_T2_im,
    gen_op_shrl_T2_im,
    gen_op_sarl_T2_im,
    gen_op_rorl_T2_im,
};

static GenOpFunc *gen_shift_T2_0[4] = {
    NULL,
    gen_op_shrl_T2_0,
    gen_op_sarl_T2_0,
    gen_op_rrxl_T2,
};

static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {
    NULL,
    gen_op_shrl_T1_0_cc,
    gen_op_sarl_T1_0_cc,
    gen_op_rrxl_T1_cc,
};

static GenOpFunc *gen_shift_T1_T0[4] = {
    gen_op_shll_T1_T0,
    gen_op_shrl_T1_T0,
    gen_op_sarl_T1_T0,
    gen_op_rorl_T1_T0,
};

static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
    {
        gen_op_movl_T0_r0,
        gen_op_movl_T0_r1,
        gen_op_movl_T0_r2,
        gen_op_movl_T0_r3,
        gen_op_movl_T0_r4,
        gen_op_movl_T0_r5,
        gen_op_movl_T0_r6,
        gen_op_movl_T0_r7,
        gen_op_movl_T0_r8,
        gen_op_movl_T0_r9,
        gen_op_movl_T0_r10,
        gen_op_movl_T0_r11,
        gen_op_movl_T0_r12,
        gen_op_movl_T0_r13,
        gen_op_movl_T0_r14,
        gen_op_movl_T0_r15,
    },
    {
        gen_op_movl_T1_r0,
        gen_op_movl_T1_r1,
        gen_op_movl_T1_r2,
        gen_op_movl_T1_r3,
        gen_op_movl_T1_r4,
        gen_op_movl_T1_r5,
        gen_op_movl_T1_r6,
        gen_op_movl_T1_r7,
        gen_op_movl_T1_r8,
        gen_op_movl_T1_r9,
        gen_op_movl_T1_r10,
        gen_op_movl_T1_r11,
        gen_op_movl_T1_r12,
        gen_op_movl_T1_r13,
        gen_op_movl_T1_r14,
        gen_op_movl_T1_r15,
    },
    {
        gen_op_movl_T2_r0,
        gen_op_movl_T2_r1,
        gen_op_movl_T2_r2,
        gen_op_movl_T2_r3,
        gen_op_movl_T2_r4,
        gen_op_movl_T2_r5,
        gen_op_movl_T2_r6,
        gen_op_movl_T2_r7,
        gen_op_movl_T2_r8,
        gen_op_movl_T2_r9,
        gen_op_movl_T2_r10,
        gen_op_movl_T2_r11,
        gen_op_movl_T2_r12,
        gen_op_movl_T2_r13,
        gen_op_movl_T2_r14,
        gen_op_movl_T2_r15,
    },
};

static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
    {
        gen_op_movl_r0_T0,
        gen_op_movl_r1_T0,
        gen_op_movl_r2_T0,
        gen_op_movl_r3_T0,
        gen_op_movl_r4_T0,
        gen_op_movl_r5_T0,
        gen_op_movl_r6_T0,
        gen_op_movl_r7_T0,
        gen_op_movl_r8_T0,
        gen_op_movl_r9_T0,
        gen_op_movl_r10_T0,
        gen_op_movl_r11_T0,
        gen_op_movl_r12_T0,
        gen_op_movl_r13_T0,
        gen_op_movl_r14_T0,
        gen_op_movl_r15_T0,
    },
    {
        gen_op_movl_r0_T1,
        gen_op_movl_r1_T1,
        gen_op_movl_r2_T1,
        gen_op_movl_r3_T1,
        gen_op_movl_r4_T1,
        gen_op_movl_r5_T1,
        gen_op_movl_r6_T1,
        gen_op_movl_r7_T1,
        gen_op_movl_r8_T1,
        gen_op_movl_r9_T1,
        gen_op_movl_r10_T1,
        gen_op_movl_r11_T1,
        gen_op_movl_r12_T1,
        gen_op_movl_r13_T1,
        gen_op_movl_r14_T1,
        gen_op_movl_r15_T1,
    },
};

static GenOpFunc1 *gen_op_movl_TN_im[3] = {
    gen_op_movl_T0_im,
    gen_op_movl_T1_im,
    gen_op_movl_T2_im,
};

static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
    gen_op_shll_T0_im_thumb_cc,
    gen_op_shrl_T0_im_thumb_cc,
    gen_op_sarl_T0_im_thumb_cc,
};

static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
static inline void gen_bx(DisasContext *s)
{
    s->is_jmp = DISAS_UPDATE;
    gen_op_bx_T0();
}

#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
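/* In system emulation every load/store op is generated in a _user or
   _kernel variant according to the current privilege level, and s->is_mem
   records that the instruction accessed memory.  */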
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
    int val;

    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            val = (long)s->pc + 2;
        else
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
    } else {
        gen_op_movl_TN_reg[t][reg]();
    }
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 0);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 1);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 2);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    gen_op_movl_reg_TN[t][reg]();
    if (reg == 15) {
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
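/* The two helpers below add the addressing-mode offset of a load/store to
   the base address in T1: a 12-bit immediate or shifted index register for
   the word/byte forms, a split 8-bit immediate or plain register for the
   halfword forms.  Bit 23 (the U bit) selects add versus subtract.  */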
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
    int val, rm, shift, shiftop;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        gen_movl_T2_reg(s, rm);
        shiftop = (insn >> 5) & 3;
        if (shift != 0) {
            gen_shift_T2_im[shiftop](shift);
        } else if (shiftop != 0) {
            gen_shift_T2_0[shiftop]();
        }
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra)
{
    int val, rm;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* register */
        if (extra)
            gen_op_addl_T1_im(extra);
        rm = (insn) & 0xf;
        gen_movl_T2_reg(s, rm);
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(); \
    else \
        gen_op_vfp_##name##s(); \
}

#define VFP_OP1(name) \
static inline void gen_vfp_##name(int dp, int arg) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(arg); \
    else \
        gen_op_vfp_##name##s(arg); \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)
VFP_OP1(tosh)
VFP_OP1(tosl)
VFP_OP1(touh)
VFP_OP1(toul)
VFP_OP1(shto)
VFP_OP1(slto)
VFP_OP1(uhto)
VFP_OP1(ulto)

#undef VFP_OP

static inline void gen_vfp_fconst(int dp, uint32_t val)
{
    if (dp)
        gen_op_vfp_fconstd(val);
    else
        gen_op_vfp_fconsts(val);
}
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_ldd, s);
    else
        gen_ldst(vfp_lds, s);
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_std, s);
    else
        gen_ldst(vfp_sts, s);
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)
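/* Bit 20 of a coprocessor instruction distinguishes moves out of the
   coprocessor (reads, e.g. MRC) from moves into it (writes, e.g. MCR).  */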
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8))
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    else
        gen_op_iwmmxt_movl_T0_T1_wRn(rd);

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else { /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                gen_ldst(ldl, s);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                if (insn & (1 << 8))
                    if (insn & (1 << 22)) /* WLDRD */
                        gen_ldst(iwmmxt_ldq, s);
                    else /* WLDRW wRd */
                        gen_ldst(iwmmxt_ldl, s);
                else
                    if (insn & (1 << 22)) /* WLDRH */
                        gen_ldst(iwmmxt_ldw, s);
                    else /* WLDRB */
                        gen_ldst(iwmmxt_ldb, s);
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                gen_ldst(stl, s);
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                if (insn & (1 << 8))
                    if (insn & (1 << 22)) /* WSTRD */
                        gen_ldst(iwmmxt_stq, s);
                    else /* WSTRW wRd */
                        gen_ldst(iwmmxt_stl, s);
                else
                    if (insn & (1 << 22)) /* WSTRH */
                        gen_ldst(iwmmxt_stw, s);
                    else /* WSTRB */
                        gen_ldst(iwmmxt_stb, s);
            }
        }
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_T0_reg(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_negq_M0();
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
        else
            gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            if (insn & (1 << 21))
                gen_op_iwmmxt_addsq_M0_wRn(wrd);
            else
                gen_op_iwmmxt_adduq_M0_wRn(wrd);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
        else
            gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_movl_T1_im(0xff);
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_movl_T1_im(0xffff);
                gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
            }
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_movl_reg_TN[0][rd]();
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_op_movl_T0_T1();
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_iwmmxt_bcstb_M0_T0();
            break;
        case 1:
            gen_op_iwmmxt_bcstw_M0_T0();
            break;
        case 2:
            gen_op_iwmmxt_bcstl_M0_T0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_addcb_M0();
            break;
        case 1:
            gen_op_iwmmxt_addcw_M0();
            break;
        case 2:
            gen_op_iwmmxt_addcl_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_op_movl_T1_im(0xf0000000);
        gen_op_andl_T0_T1();
        gen_op_movl_cpsr_T0(0xf0000000);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_msbb_T0_M0();
            break;
        case 1:
            gen_op_iwmmxt_msbw_T0_M0();
            break;
        case 2:
            gen_op_iwmmxt_msbl_T0_M0();
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_op_iwmmxt_srlw_M0_T0();
            break;
        case 2:
            gen_op_iwmmxt_srll_M0_T0();
            break;
        case 3:
            gen_op_iwmmxt_srlq_M0_T0();
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_op_iwmmxt_sraw_M0_T0();
            break;
        case 2:
            gen_op_iwmmxt_sral_M0_T0();
            break;
        case 3:
            gen_op_iwmmxt_sraq_M0_T0();
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_op_iwmmxt_sllw_M0_T0();
            break;
        case 2:
            gen_op_iwmmxt_slll_M0_T0();
            break;
        case 3:
            gen_op_iwmmxt_sllq_M0_T0();
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
            gen_op_iwmmxt_rorw_M0_T0();
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f))
                return 1;
            gen_op_iwmmxt_rorl_M0_T0();
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f))
                return 1;
            gen_op_iwmmxt_rorq_M0_T0();
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im((insn >> 20) & 3);
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_op_iwmmxt_shufh_M0_T0();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (!(insn & (1 << 20)))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 16) & 0xf) {
        case 0x0: /* TMIA */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8: /* TMIAPH */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
            gen_op_movl_TN_reg[1][rd0]();
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_op_movl_TN_reg[1][rd1]();
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8: /* MIAPH */
            gen_op_movl_TN_reg[0][rd0]();
            gen_op_movl_TN_reg[1][rd1]();
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            gen_op_movl_TN_reg[1][rd0]();
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_op_movl_TN_reg[1][rd1]();
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) { /* MRA */
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
            gen_op_movl_reg_TN[0][rdlo]();
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_op_movl_reg_TN[0][rdhi]();
        } else { /* MAR */
            gen_op_movl_TN_reg[0][rdlo]();
            gen_op_movl_TN_reg[1][rdhi]();
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_op_movl_T0_cp(insn);
        gen_movl_reg_T0(s, rd);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp_T0(insn);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
            || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then the condition codes are
           set instead.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
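/* VFP data-processing instructions encode each register as four bits plus
   one extra bit.  For single precision the extra bit is the low bit
   (S0..S31); for double precision it must be zero before VFP3, while with
   VFP3 it becomes the high bit, extending the file to D0..D31.  */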
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
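/* FPEXC bit 30 is the EN bit; while it is clear, only a few FMXR/FMRX
   accesses to the VFP system registers are accepted (see below).  */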
static inline int
vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    switch (size) {
                    case 0:
                        NEON_GET_REG(T1, rn, pass);
                        if (offset)
                            gen_op_shrl_T1_im(offset);
                        if (insn & (1 << 23))
                            gen_op_uxtb_T1();
                        else
                            gen_op_sxtb_T1();
                        break;
                    case 1:
                        NEON_GET_REG(T1, rn, pass);
                        if (insn & (1 << 23)) {
                            if (offset) {
                                gen_op_shrl_T1_im(16);
                            } else {
                                gen_op_uxth_T1();
                            }
                        } else {
                            if (offset) {
                                gen_op_sarl_T1_im(16);
                            } else {
                                gen_op_sxth_T1();
                            }
                        }
                        break;
                    case 2:
                        NEON_GET_REG(T1, rn, pass);
                        break;
                    }
                    gen_movl_reg_T1(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_op_neon_dup_u8(0);
                        } else if (size == 1) {
                            gen_op_neon_dup_low16();
                        }
                        NEON_SET_REG(T0, rn, 0);
                        NEON_SET_REG(T0, rn, 1);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xff);
                            gen_op_andl_T0_T1();
                            gen_op_neon_insert_elt(offset, ~(0xff << offset));
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 1:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xffff);
                            gen_op_andl_T0_T1();
                            bank_mask = offset ? 0xffff : 0xffff0000;
                            gen_op_neon_insert_elt(offset, bank_mask);
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 2:
                            NEON_SET_REG(T0, rn, pass);
                            break;
                        }
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FPSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                    }
                    gen_vfp_fconst(dp, n);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }
                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
2413 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
2415 TranslationBlock *tb;
2417 tb = s->tb;
2418 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2419 tcg_gen_goto_tb(n);
2420 gen_op_movl_T0_im(dest);
2421 gen_op_movl_r15_T0();
2422 tcg_gen_exit_tb((long)tb + n);
2423 } else {
2424 gen_op_movl_T0_im(dest);
2425 gen_op_movl_r15_T0();
2426 tcg_gen_exit_tb(0);
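/* Note: these two branches implement TB chaining. A destination inside the
   current guest page exits with (tb + n), letting the translator patch in a
   direct jump to the next TB; a cross-page destination exits with 0 and
   takes the slow lookup path. A sketch of typical use, with one exit slot
   per branch outcome: */
#if 0
gen_goto_tb(s, 0, dest);     /* branch taken */
gen_goto_tb(s, 1, s->pc);    /* fall through to the next instruction */
#endif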
2430 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2432 if (__builtin_expect(s->singlestep_enabled, 0)) {
2433 /* An indirect jump so that we still trigger the debug exception. */
2434 if (s->thumb)
2435 dest |= 1;
2436 gen_op_movl_T0_im(dest);
2437 gen_bx(s);
2438 } else {
2439 gen_goto_tb(s, 0, dest);
2440 s->is_jmp = DISAS_TB_JUMP;
2444 static inline void gen_mulxy(int x, int y)
2446 if (x)
2447 gen_op_sarl_T0_im(16);
2448 else
2449 gen_op_sxth_T0();
2450 if (y)
2451 gen_op_sarl_T1_im(16);
2452 else
2453 gen_op_sxth_T1();
2454 gen_op_mul_T0_T1();
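/* Example: with x = 0 and y = 1 the product uses the sign-extended bottom
   halfword of T0 and the top halfword of T1, i.e. the BT variant of the
   SMULxy/SMLAxy family. */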
2457 /* Return the mask of PSR bits set by a MSR instruction. */
2458 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
2459 uint32_t mask;
2461 mask = 0;
2462 if (flags & (1 << 0))
2463 mask |= 0xff;
2464 if (flags & (1 << 1))
2465 mask |= 0xff00;
2466 if (flags & (1 << 2))
2467 mask |= 0xff0000;
2468 if (flags & (1 << 3))
2469 mask |= 0xff000000;
2471 /* Mask out undefined bits. */
2472 mask &= ~CPSR_RESERVED;
2473 if (!arm_feature(env, ARM_FEATURE_V6))
2474 mask &= ~(CPSR_E | CPSR_GE);
2475 if (!arm_feature(env, ARM_FEATURE_THUMB2))
2476 mask &= ~CPSR_IT;
2477 /* Mask out execution state bits. */
2478 if (!spsr)
2479 mask &= ~CPSR_EXEC;
2480 /* Mask out privileged bits. */
2481 if (IS_USER(s))
2482 mask &= CPSR_USER;
2483 return mask;
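/* Worked example: MSR CPSR_cf (flags = 0b1001) starts from a mask of
   0xff0000ff; a pre-v6 core then drops the E/GE bits, a core without
   Thumb-2 drops the IT bits, and user mode is finally limited to the
   CPSR_USER bits. */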
2486 /* Returns nonzero if access to the PSR is not permitted. */
2487 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
2489 if (spsr) {
2490 /* ??? This is also undefined in system mode. */
2491 if (IS_USER(s))
2492 return 1;
2493 gen_op_movl_spsr_T0(mask);
2494 } else {
2495 gen_op_movl_cpsr_T0(mask);
2497 gen_lookup_tb(s);
2498 return 0;
2501 /* Generate an old-style exception return. */
2502 static void gen_exception_return(DisasContext *s)
2504 gen_op_movl_reg_TN[0][15]();
2505 gen_op_movl_T0_spsr();
2506 gen_op_movl_cpsr_T0(0xffffffff);
2507 s->is_jmp = DISAS_UPDATE;
2510 /* Generate a v6 exception return. */
2511 static void gen_rfe(DisasContext *s)
2513 gen_op_movl_cpsr_T0(0xffffffff);
2514 gen_op_movl_T0_T2();
2515 gen_op_movl_reg_TN[0][15]();
2516 s->is_jmp = DISAS_UPDATE;
2519 static inline void
2520 gen_set_condexec (DisasContext *s)
2522 if (s->condexec_mask) {
2523 gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1));
2527 static void gen_nop_hint(DisasContext *s, int val)
2529 switch (val) {
2530 case 3: /* wfi */
2531 gen_op_movl_T0_im((long)s->pc);
2532 gen_op_movl_reg_TN[0][15]();
2533 s->is_jmp = DISAS_WFI;
2534 break;
2535 case 2: /* wfe */
2536 case 4: /* sev */
2537 /* TODO: Implement SEV and WFE. May help SMP performance. */
2538 default: /* nop */
2539 break;
2543 /* Neon shift by constant. The actual ops are the same as used for variable
2544 shifts. [OP][U][SIZE] */
2545 static GenOpFunc *gen_neon_shift_im[8][2][4] = {
2546 { /* 0 */ /* VSHR */
2548 gen_op_neon_shl_u8,
2549 gen_op_neon_shl_u16,
2550 gen_op_neon_shl_u32,
2551 gen_op_neon_shl_u64
2552 }, {
2553 gen_op_neon_shl_s8,
2554 gen_op_neon_shl_s16,
2555 gen_op_neon_shl_s32,
2556 gen_op_neon_shl_s64
2558 }, { /* 1 */ /* VSRA */
2560 gen_op_neon_shl_u8,
2561 gen_op_neon_shl_u16,
2562 gen_op_neon_shl_u32,
2563 gen_op_neon_shl_u64
2564 }, {
2565 gen_op_neon_shl_s8,
2566 gen_op_neon_shl_s16,
2567 gen_op_neon_shl_s32,
2568 gen_op_neon_shl_s64
2570 }, { /* 2 */ /* VRSHR */
2572 gen_op_neon_rshl_u8,
2573 gen_op_neon_rshl_u16,
2574 gen_op_neon_rshl_u32,
2575 gen_op_neon_rshl_u64
2576 }, {
2577 gen_op_neon_rshl_s8,
2578 gen_op_neon_rshl_s16,
2579 gen_op_neon_rshl_s32,
2580 gen_op_neon_rshl_s64
2582 }, { /* 3 */ /* VRSRA */
2584 gen_op_neon_rshl_u8,
2585 gen_op_neon_rshl_u16,
2586 gen_op_neon_rshl_u32,
2587 gen_op_neon_rshl_u64
2588 }, {
2589 gen_op_neon_rshl_s8,
2590 gen_op_neon_rshl_s16,
2591 gen_op_neon_rshl_s32,
2592 gen_op_neon_rshl_s64
2594 }, { /* 4 */
2596 NULL, NULL, NULL, NULL
2597 }, { /* VSRI */
2598 gen_op_neon_shl_u8,
2599 gen_op_neon_shl_u16,
2600 gen_op_neon_shl_u32,
2601 gen_op_neon_shl_u64,
2603 }, { /* 5 */
2604 { /* VSHL */
2605 gen_op_neon_shl_u8,
2606 gen_op_neon_shl_u16,
2607 gen_op_neon_shl_u32,
2608 gen_op_neon_shl_u64,
2609 }, { /* VSLI */
2610 gen_op_neon_shl_u8,
2611 gen_op_neon_shl_u16,
2612 gen_op_neon_shl_u32,
2613 gen_op_neon_shl_u64,
2615 }, { /* 6 */ /* VQSHL */
2617 gen_op_neon_qshl_u8,
2618 gen_op_neon_qshl_u16,
2619 gen_op_neon_qshl_u32,
2620 gen_op_neon_qshl_u64
2621 }, {
2622 gen_op_neon_qshl_s8,
2623 gen_op_neon_qshl_s16,
2624 gen_op_neon_qshl_s32,
2625 gen_op_neon_qshl_s64
2627 }, { /* 7 */ /* VQSHLU */
2629 gen_op_neon_qshl_u8,
2630 gen_op_neon_qshl_u16,
2631 gen_op_neon_qshl_u32,
2632 gen_op_neon_qshl_u64
2633 }, {
2634 gen_op_neon_qshl_u8,
2635 gen_op_neon_qshl_u16,
2636 gen_op_neon_qshl_u32,
2637 gen_op_neon_qshl_u64
2642 /* [R][U][size - 1] */
2643 static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
2646 gen_op_neon_shl_u16,
2647 gen_op_neon_shl_u32,
2648 gen_op_neon_shl_u64
2649 }, {
2650 gen_op_neon_shl_s16,
2651 gen_op_neon_shl_s32,
2652 gen_op_neon_shl_s64
2654 }, {
2656 gen_op_neon_rshl_u16,
2657 gen_op_neon_rshl_u32,
2658 gen_op_neon_rshl_u64
2659 }, {
2660 gen_op_neon_rshl_s16,
2661 gen_op_neon_rshl_s32,
2662 gen_op_neon_rshl_s64
2667 static inline void
2668 gen_op_neon_narrow_u32 (void)
2670 /* No-op. */
2673 static GenOpFunc *gen_neon_narrow[3] = {
2674 gen_op_neon_narrow_u8,
2675 gen_op_neon_narrow_u16,
2676 gen_op_neon_narrow_u32
2679 static GenOpFunc *gen_neon_narrow_satu[3] = {
2680 gen_op_neon_narrow_sat_u8,
2681 gen_op_neon_narrow_sat_u16,
2682 gen_op_neon_narrow_sat_u32
2685 static GenOpFunc *gen_neon_narrow_sats[3] = {
2686 gen_op_neon_narrow_sat_s8,
2687 gen_op_neon_narrow_sat_s16,
2688 gen_op_neon_narrow_sat_s32
2691 static inline int gen_neon_add(int size)
2693 switch (size) {
2694 case 0: gen_op_neon_add_u8(); break;
2695 case 1: gen_op_neon_add_u16(); break;
2696 case 2: gen_op_addl_T0_T1(); break;
2697 default: return 1;
2699 return 0;
2702 /* 32-bit pairwise ops end up the same as the elementwise versions. */
2703 #define gen_op_neon_pmax_s32 gen_op_neon_max_s32
2704 #define gen_op_neon_pmax_u32 gen_op_neon_max_u32
2705 #define gen_op_neon_pmin_s32 gen_op_neon_min_s32
2706 #define gen_op_neon_pmin_u32 gen_op_neon_min_u32
2708 #define GEN_NEON_INTEGER_OP(name) do { \
2709 switch ((size << 1) | u) { \
2710 case 0: gen_op_neon_##name##_s8(); break; \
2711 case 1: gen_op_neon_##name##_u8(); break; \
2712 case 2: gen_op_neon_##name##_s16(); break; \
2713 case 3: gen_op_neon_##name##_u16(); break; \
2714 case 4: gen_op_neon_##name##_s32(); break; \
2715 case 5: gen_op_neon_##name##_u32(); break; \
2716 default: return 1; \
2717 }} while (0)
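/* For reference, GEN_NEON_INTEGER_OP(hadd) expands to the dispatch below;
   it is shown disabled since the real expansion comes from the
   preprocessor: */
#if 0
switch ((size << 1) | u) {
case 0: gen_op_neon_hadd_s8(); break;
case 1: gen_op_neon_hadd_u8(); break;
case 2: gen_op_neon_hadd_s16(); break;
case 3: gen_op_neon_hadd_u16(); break;
case 4: gen_op_neon_hadd_s32(); break;
case 5: gen_op_neon_hadd_u32(); break;
default: return 1;
}
#endif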
2719 static inline void
2720 gen_neon_movl_scratch_T0(int scratch)
2722 uint32_t offset;
2724 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2725 gen_op_neon_setreg_T0(offset);
2728 static inline void
2729 gen_neon_movl_scratch_T1(int scratch)
2731 uint32_t offset;
2733 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2734 gen_op_neon_setreg_T1(offset);
2737 static inline void
2738 gen_neon_movl_T0_scratch(int scratch)
2740 uint32_t offset;
2742 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2743 gen_op_neon_getreg_T0(offset);
2746 static inline void
2747 gen_neon_movl_T1_scratch(int scratch)
2749 uint32_t offset;
2751 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2752 gen_op_neon_getreg_T1(offset);
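/* The vfp.scratch slots serve as extra 32-bit temporaries beyond T0-T2:
   the code below uses them to park pairwise results until both source
   registers have been read and to pass the halves of 64-bit operands to
   the long Neon ops. */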
2755 static inline void gen_op_neon_widen_u32(void)
2757 gen_op_movl_T1_im(0);
2760 static inline void gen_neon_get_scalar(int size, int reg)
2762 if (size == 1) {
2763 NEON_GET_REG(T0, reg >> 1, reg & 1);
2764 } else {
2765 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
2766 if (reg & 1)
2767 gen_op_neon_dup_low16();
2768 else
2769 gen_op_neon_dup_high16();
2773 static void gen_neon_unzip(int reg, int q, int tmp, int size)
2775 int n;
2777 for (n = 0; n < q + 1; n += 2) {
2778 NEON_GET_REG(T0, reg, n);
2779 NEON_GET_REG(T1, reg, n + 1);
2780 switch (size) {
2781 case 0: gen_op_neon_unzip_u8(); break;
2782 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
2783 case 2: /* no-op */; break;
2784 default: abort();
2786 gen_neon_movl_scratch_T0(tmp + n);
2787 gen_neon_movl_scratch_T1(tmp + n + 1);
2791 static struct {
2792 int nregs;
2793 int interleave;
2794 int spacing;
2795 } neon_ls_element_type[11] = {
2796 {4, 4, 1},
2797 {4, 4, 2},
2798 {4, 1, 1},
2799 {4, 2, 1},
2800 {3, 3, 1},
2801 {3, 3, 2},
2802 {3, 1, 1},
2803 {1, 1, 1},
2804 {2, 2, 1},
2805 {2, 2, 2},
2806 {2, 1, 1}
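/* Example rows: {4, 4, 1} corresponds to VLD4/VST4 with four registers and
   fully interleaved elements, and {1, 1, 1} to a plain single-register
   VLD1/VST1. "spacing" is the distance between the registers of one
   structure: 1 means consecutive D registers, 2 every other register. */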
2809 /* Translate a NEON load/store element instruction. Return nonzero if the
2810 instruction is invalid. */
2811 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
2813 int rd, rn, rm;
2814 int op;
2815 int nregs;
2816 int interleave;
2817 int stride;
2818 int size;
2819 int reg;
2820 int pass;
2821 int load;
2822 int shift;
2823 uint32_t mask;
2824 int n;
2826 if (!vfp_enabled(env))
2827 return 1;
2828 VFP_DREG_D(rd, insn);
2829 rn = (insn >> 16) & 0xf;
2830 rm = insn & 0xf;
2831 load = (insn & (1 << 21)) != 0;
2832 if ((insn & (1 << 23)) == 0) {
2833 /* Load/store all elements. */
2834 op = (insn >> 8) & 0xf;
2835 size = (insn >> 6) & 3;
2836 if (op > 10 || size == 3)
2837 return 1;
2838 nregs = neon_ls_element_type[op].nregs;
2839 interleave = neon_ls_element_type[op].interleave;
2840 gen_movl_T1_reg(s, rn);
2841 stride = (1 << size) * interleave;
2842 for (reg = 0; reg < nregs; reg++) {
2843 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
2844 gen_movl_T1_reg(s, rn);
2845 gen_op_addl_T1_im((1 << size) * reg);
2846 } else if (interleave == 2 && nregs == 4 && reg == 2) {
2847 gen_movl_T1_reg(s, rn);
2848 gen_op_addl_T1_im(1 << size);
2850 for (pass = 0; pass < 2; pass++) {
2851 if (size == 2) {
2852 if (load) {
2853 gen_ldst(ldl, s);
2854 NEON_SET_REG(T0, rd, pass);
2855 } else {
2856 NEON_GET_REG(T0, rd, pass);
2857 gen_ldst(stl, s);
2859 gen_op_addl_T1_im(stride);
2860 } else if (size == 1) {
2861 if (load) {
2862 gen_ldst(lduw, s);
2863 gen_op_addl_T1_im(stride);
2864 gen_op_movl_T2_T0();
2865 gen_ldst(lduw, s);
2866 gen_op_addl_T1_im(stride);
2867 gen_op_neon_insert_elt(16, 0xffff);
2868 NEON_SET_REG(T2, rd, pass);
2869 } else {
2870 NEON_GET_REG(T2, rd, pass);
2871 gen_op_movl_T0_T2();
2872 gen_ldst(stw, s);
2873 gen_op_addl_T1_im(stride);
2874 gen_op_neon_extract_elt(16, 0xffff0000);
2875 gen_ldst(stw, s);
2876 gen_op_addl_T1_im(stride);
2878 } else /* size == 0 */ {
2879 if (load) {
2880 mask = 0xff;
2881 for (n = 0; n < 4; n++) {
2882 gen_ldst(ldub, s);
2883 gen_op_addl_T1_im(stride);
2884 if (n == 0) {
2885 gen_op_movl_T2_T0();
2886 } else {
2887 gen_op_neon_insert_elt(n * 8, ~mask);
2889 mask <<= 8;
2891 NEON_SET_REG(T2, rd, pass);
2892 } else {
2893 NEON_GET_REG(T2, rd, pass);
2894 mask = 0xff;
2895 for (n = 0; n < 4; n++) {
2896 if (n == 0) {
2897 gen_op_movl_T0_T2();
2898 } else {
2899 gen_op_neon_extract_elt(n * 8, mask);
2901 gen_ldst(stb, s);
2902 gen_op_addl_T1_im(stride);
2903 mask <<= 8;
2908 rd += neon_ls_element_type[op].spacing;
2910 stride = nregs * 8;
2911 } else {
2912 size = (insn >> 10) & 3;
2913 if (size == 3) {
2914 /* Load single element to all lanes. */
2915 if (!load)
2916 return 1;
2917 size = (insn >> 6) & 3;
2918 nregs = ((insn >> 8) & 3) + 1;
2919 stride = (insn & (1 << 5)) ? 2 : 1;
2920 gen_movl_T1_reg(s, rn);
2921 for (reg = 0; reg < nregs; reg++) {
2922 switch (size) {
2923 case 0:
2924 gen_ldst(ldub, s);
2925 gen_op_neon_dup_u8(0);
2926 break;
2927 case 1:
2928 gen_ldst(lduw, s);
2929 gen_op_neon_dup_low16();
2930 break;
2931 case 2:
2932 gen_ldst(ldl, s);
2933 break;
2934 case 3:
2935 return 1;
2937 gen_op_addl_T1_im(1 << size);
2938 NEON_SET_REG(T0, rd, 0);
2939 NEON_SET_REG(T0, rd, 1);
2940 rd += stride;
2942 stride = (1 << size) * nregs;
2943 } else {
2944 /* Single element. */
2945 pass = (insn >> 7) & 1;
2946 switch (size) {
2947 case 0:
2948 shift = ((insn >> 5) & 3) * 8;
2949 mask = 0xff << shift;
2950 stride = 1;
2951 break;
2952 case 1:
2953 shift = ((insn >> 6) & 1) * 16;
2954 mask = shift ? 0xffff0000 : 0xffff;
2955 stride = (insn & (1 << 5)) ? 2 : 1;
2956 break;
2957 case 2:
2958 shift = 0;
2959 mask = 0xffffffff;
2960 stride = (insn & (1 << 6)) ? 2 : 1;
2961 break;
2962 default:
2963 abort();
2965 nregs = ((insn >> 8) & 3) + 1;
2966 gen_movl_T1_reg(s, rn);
2967 for (reg = 0; reg < nregs; reg++) {
2968 if (load) {
2969 if (size != 2) {
2970 NEON_GET_REG(T2, rd, pass);
2972 switch (size) {
2973 case 0:
2974 gen_ldst(ldub, s);
2975 break;
2976 case 1:
2977 gen_ldst(lduw, s);
2978 break;
2979 case 2:
2980 gen_ldst(ldl, s);
2981 NEON_SET_REG(T0, rd, pass);
2982 break;
2984 if (size != 2) {
2985 gen_op_neon_insert_elt(shift, ~mask);
2986 NEON_SET_REG(T0, rd, pass);
2988 } else { /* Store */
2989 if (size == 2) {
2990 NEON_GET_REG(T0, rd, pass);
2991 } else {
2992 NEON_GET_REG(T2, rd, pass);
2993 gen_op_neon_extract_elt(shift, mask);
2995 switch (size) {
2996 case 0:
2997 gen_ldst(stb, s);
2998 break;
2999 case 1:
3000 gen_ldst(stw, s);
3001 break;
3002 case 2:
3003 gen_ldst(stl, s);
3004 break;
3007 rd += stride;
3008 gen_op_addl_T1_im(1 << size);
3010 stride = nregs * (1 << size);
3013 if (rm != 15) {
3014 gen_movl_T1_reg(s, rn);
3015 if (rm == 13) {
3016 gen_op_addl_T1_im(stride);
3017 } else {
3018 gen_movl_T2_reg(s, rm);
3019 gen_op_addl_T1_T2();
3021 gen_movl_reg_T1(s, rn);
3023 return 0;
3026 /* Translate a NEON data processing instruction. Return nonzero if the
3027 instruction is invalid.
3028 In general we process vectors in 32-bit chunks. This means we can reuse
3029 some of the scalar ops, and hopefully the code generated for 32-bit
3030 hosts won't be too awful. The downside is that the few 64-bit operations
3031 (mainly shifts) get complicated. */
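/* Concretely, a D register is handled as two 32-bit passes and a Q
   register as four, so the body of most cases below follows this sketch: */
#if 0
for (pass = 0; pass < (q ? 4 : 2); pass++) {
    NEON_GET_REG(T0, rn, pass);     /* one 32-bit chunk of each source */
    NEON_GET_REG(T1, rm, pass);
    /* ... 32-bit op on T0/T1 ... */
    NEON_SET_REG(T0, rd, pass);     /* matching chunk of the destination */
}
#endif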
3033 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3035 int op;
3036 int q;
3037 int rd, rn, rm;
3038 int size;
3039 int shift;
3040 int pass;
3041 int count;
3042 int pairwise;
3043 int u;
3044 int n;
3045 uint32_t imm;
3047 if (!vfp_enabled(env))
3048 return 1;
3049 q = (insn & (1 << 6)) != 0;
3050 u = (insn >> 24) & 1;
3051 VFP_DREG_D(rd, insn);
3052 VFP_DREG_N(rn, insn);
3053 VFP_DREG_M(rm, insn);
3054 size = (insn >> 20) & 3;
3055 if ((insn & (1 << 23)) == 0) {
3056 /* Three register same length. */
3057 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3058 if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3059 for (pass = 0; pass < (q ? 2 : 1); pass++) {
3060 NEON_GET_REG(T0, rm, pass * 2);
3061 NEON_GET_REG(T1, rm, pass * 2 + 1);
3062 gen_neon_movl_scratch_T0(0);
3063 gen_neon_movl_scratch_T1(1);
3064 NEON_GET_REG(T0, rn, pass * 2);
3065 NEON_GET_REG(T1, rn, pass * 2 + 1);
3066 switch (op) {
3067 case 1: /* VQADD */
3068 if (u) {
3069 gen_op_neon_addl_saturate_u64();
3070 } else {
3071 gen_op_neon_addl_saturate_s64();
3073 break;
3074 case 5: /* VQSUB */
3075 if (u) {
3076 gen_op_neon_subl_saturate_u64();
3077 } else {
3078 gen_op_neon_subl_saturate_s64();
3080 break;
3081 case 16:
3082 if (u) {
3083 gen_op_neon_subl_u64();
3084 } else {
3085 gen_op_neon_addl_u64();
3087 break;
3088 default:
3089 abort();
3091 NEON_SET_REG(T0, rd, pass * 2);
3092 NEON_SET_REG(T1, rd, pass * 2 + 1);
3094 return 0;
3096 switch (op) {
3097 case 8: /* VSHL */
3098 case 9: /* VQSHL */
3099 case 10: /* VRSHL */
3100 case 11: /* VQRSHL */
3101 /* Shift operations have Rn and Rm reversed. */
3103 int tmp;
3104 tmp = rn;
3105 rn = rm;
3106 rm = tmp;
3107 pairwise = 0;
3109 break;
3110 case 20: /* VPMAX */
3111 case 21: /* VPMIN */
3112 case 23: /* VPADD */
3113 pairwise = 1;
3114 break;
3115 case 26: /* VPADD (float) */
3116 pairwise = (u && size < 2);
3117 break;
3118 case 30: /* VPMIN/VPMAX (float) */
3119 pairwise = u;
3120 break;
3121 default:
3122 pairwise = 0;
3123 break;
3125 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3127 if (pairwise) {
3128 /* Pairwise. */
3129 if (q)
3130 n = (pass & 1) * 2;
3131 else
3132 n = 0;
3133 if (pass < q + 1) {
3134 NEON_GET_REG(T0, rn, n);
3135 NEON_GET_REG(T1, rn, n + 1);
3136 } else {
3137 NEON_GET_REG(T0, rm, n);
3138 NEON_GET_REG(T1, rm, n + 1);
3140 } else {
3141 /* Elementwise. */
3142 NEON_GET_REG(T0, rn, pass);
3143 NEON_GET_REG(T1, rm, pass);
3145 switch (op) {
3146 case 0: /* VHADD */
3147 GEN_NEON_INTEGER_OP(hadd);
3148 break;
3149 case 1: /* VQADD */
3150 switch ((size << 1) | u) {
3151 case 0: gen_op_neon_qadd_s8(); break;
3152 case 1: gen_op_neon_qadd_u8(); break;
3153 case 2: gen_op_neon_qadd_s16(); break;
3154 case 3: gen_op_neon_qadd_u16(); break;
3155 case 4: gen_op_addl_T0_T1_saturate(); break;
3156 case 5: gen_op_addl_T0_T1_usaturate(); break;
3157 default: abort();
3159 break;
3160 case 2: /* VRHADD */
3161 GEN_NEON_INTEGER_OP(rhadd);
3162 break;
3163 case 3: /* Logic ops. */
3164 switch ((u << 2) | size) {
3165 case 0: /* VAND */
3166 gen_op_andl_T0_T1();
3167 break;
3168 case 1: /* BIC */
3169 gen_op_bicl_T0_T1();
3170 break;
3171 case 2: /* VORR */
3172 gen_op_orl_T0_T1();
3173 break;
3174 case 3: /* VORN */
3175 gen_op_notl_T1();
3176 gen_op_orl_T0_T1();
3177 break;
3178 case 4: /* VEOR */
3179 gen_op_xorl_T0_T1();
3180 break;
3181 case 5: /* VBSL */
3182 NEON_GET_REG(T2, rd, pass);
3183 gen_op_neon_bsl();
3184 break;
3185 case 6: /* VBIT */
3186 NEON_GET_REG(T2, rd, pass);
3187 gen_op_neon_bit();
3188 break;
3189 case 7: /* VBIF */
3190 NEON_GET_REG(T2, rd, pass);
3191 gen_op_neon_bif();
3192 break;
3194 break;
3195 case 4: /* VHSUB */
3196 GEN_NEON_INTEGER_OP(hsub);
3197 break;
3198 case 5: /* VQSUB */
3199 switch ((size << 1) | u) {
3200 case 0: gen_op_neon_qsub_s8(); break;
3201 case 1: gen_op_neon_qsub_u8(); break;
3202 case 2: gen_op_neon_qsub_s16(); break;
3203 case 3: gen_op_neon_qsub_u16(); break;
3204 case 4: gen_op_subl_T0_T1_saturate(); break;
3205 case 5: gen_op_subl_T0_T1_usaturate(); break;
3206 default: abort();
3208 break;
3209 case 6: /* VCGT */
3210 GEN_NEON_INTEGER_OP(cgt);
3211 break;
3212 case 7: /* VCGE */
3213 GEN_NEON_INTEGER_OP(cge);
3214 break;
3215 case 8: /* VSHL */
3216 switch ((size << 1) | u) {
3217 case 0: gen_op_neon_shl_s8(); break;
3218 case 1: gen_op_neon_shl_u8(); break;
3219 case 2: gen_op_neon_shl_s16(); break;
3220 case 3: gen_op_neon_shl_u16(); break;
3221 case 4: gen_op_neon_shl_s32(); break;
3222 case 5: gen_op_neon_shl_u32(); break;
3223 #if 0
3224 /* ??? Implementing these is tricky because the vector ops work
3225 on 32-bit pieces. */
3226 case 6: gen_op_neon_shl_s64(); break;
3227 case 7: gen_op_neon_shl_u64(); break;
3228 #else
3229 case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3230 #endif
3232 break;
3233 case 9: /* VQSHL */
3234 switch ((size << 1) | u) {
3235 case 0: gen_op_neon_qshl_s8(); break;
3236 case 1: gen_op_neon_qshl_u8(); break;
3237 case 2: gen_op_neon_qshl_s16(); break;
3238 case 3: gen_op_neon_qshl_u16(); break;
3239 case 4: gen_op_neon_qshl_s32(); break;
3240 case 5: gen_op_neon_qshl_u32(); break;
3241 #if 0
3242 /* ??? Implementing these is tricky because the vector ops work
3243 on 32-bit pieces. */
3244 case 6: gen_op_neon_qshl_s64(); break;
3245 case 7: gen_op_neon_qshl_u64(); break;
3246 #else
3247 case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3248 #endif
3250 break;
3251 case 10: /* VRSHL */
3252 switch ((size << 1) | u) {
3253 case 0: gen_op_neon_rshl_s8(); break;
3254 case 1: gen_op_neon_rshl_u8(); break;
3255 case 2: gen_op_neon_rshl_s16(); break;
3256 case 3: gen_op_neon_rshl_u16(); break;
3257 case 4: gen_op_neon_rshl_s32(); break;
3258 case 5: gen_op_neon_rshl_u32(); break;
3259 #if 0
3260 /* ??? Implementing these is tricky because the vector ops work
3261 on 32-bit pieces. */
3262 case 6: gen_op_neon_rshl_s64(); break;
3263 case 7: gen_op_neon_rshl_u64(); break;
3264 #else
3265 case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3266 #endif
3268 break;
3269 case 11: /* VQRSHL */
3270 switch ((size << 1) | u) {
3271 case 0: gen_op_neon_qrshl_s8(); break;
3272 case 1: gen_op_neon_qrshl_u8(); break;
3273 case 2: gen_op_neon_qrshl_s16(); break;
3274 case 3: gen_op_neon_qrshl_u16(); break;
3275 case 4: gen_op_neon_qrshl_s32(); break;
3276 case 5: gen_op_neon_qrshl_u32(); break;
3277 #if 0
3278 /* ??? Implementing these is tricky because the vector ops work
3279 on 32-bit pieces. */
3280 case 6: gen_op_neon_qrshl_s64(); break;
3281 case 7: gen_op_neon_qrshl_u64(); break;
3282 #else
3283 case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3284 #endif
3286 break;
3287 case 12: /* VMAX */
3288 GEN_NEON_INTEGER_OP(max);
3289 break;
3290 case 13: /* VMIN */
3291 GEN_NEON_INTEGER_OP(min);
3292 break;
3293 case 14: /* VABD */
3294 GEN_NEON_INTEGER_OP(abd);
3295 break;
3296 case 15: /* VABA */
3297 GEN_NEON_INTEGER_OP(abd);
3298 NEON_GET_REG(T1, rd, pass);
3299 gen_neon_add(size);
3300 break;
3301 case 16:
3302 if (!u) { /* VADD */
3303 if (gen_neon_add(size))
3304 return 1;
3305 } else { /* VSUB */
3306 switch (size) {
3307 case 0: gen_op_neon_sub_u8(); break;
3308 case 1: gen_op_neon_sub_u16(); break;
3309 case 2: gen_op_subl_T0_T1(); break;
3310 default: return 1;
3313 break;
3314 case 17:
3315 if (!u) { /* VTST */
3316 switch (size) {
3317 case 0: gen_op_neon_tst_u8(); break;
3318 case 1: gen_op_neon_tst_u16(); break;
3319 case 2: gen_op_neon_tst_u32(); break;
3320 default: return 1;
3322 } else { /* VCEQ */
3323 switch (size) {
3324 case 0: gen_op_neon_ceq_u8(); break;
3325 case 1: gen_op_neon_ceq_u16(); break;
3326 case 2: gen_op_neon_ceq_u32(); break;
3327 default: return 1;
3330 break;
3331 case 18: /* Multiply. */
3332 switch (size) {
3333 case 0: gen_op_neon_mul_u8(); break;
3334 case 1: gen_op_neon_mul_u16(); break;
3335 case 2: gen_op_mul_T0_T1(); break;
3336 default: return 1;
3338 NEON_GET_REG(T1, rd, pass);
3339 if (u) { /* VMLS */
3340 switch (size) {
3341 case 0: gen_op_neon_rsb_u8(); break;
3342 case 1: gen_op_neon_rsb_u16(); break;
3343 case 2: gen_op_rsbl_T0_T1(); break;
3344 default: return 1;
3346 } else { /* VMLA */
3347 gen_neon_add(size);
3349 break;
3350 case 19: /* VMUL */
3351 if (u) { /* polynomial */
3352 gen_op_neon_mul_p8();
3353 } else { /* Integer */
3354 switch (size) {
3355 case 0: gen_op_neon_mul_u8(); break;
3356 case 1: gen_op_neon_mul_u16(); break;
3357 case 2: gen_op_mul_T0_T1(); break;
3358 default: return 1;
3361 break;
3362 case 20: /* VPMAX */
3363 GEN_NEON_INTEGER_OP(pmax);
3364 break;
3365 case 21: /* VPMIN */
3366 GEN_NEON_INTEGER_OP(pmin);
3367 break;
3368 case 22: /* Multiply high. */
3369 if (!u) { /* VQDMULH */
3370 switch (size) {
3371 case 1: gen_op_neon_qdmulh_s16(); break;
3372 case 2: gen_op_neon_qdmulh_s32(); break;
3373 default: return 1;
3375 } else { /* VQRDMULH */
3376 switch (size) {
3377 case 1: gen_op_neon_qrdmulh_s16(); break;
3378 case 2: gen_op_neon_qrdmulh_s32(); break;
3379 default: return 1;
3382 break;
3383 case 23: /* VPADD */
3384 if (u)
3385 return 1;
3386 switch (size) {
3387 case 0: gen_op_neon_padd_u8(); break;
3388 case 1: gen_op_neon_padd_u16(); break;
3389 case 2: gen_op_addl_T0_T1(); break;
3390 default: return 1;
3392 break;
3393 case 26: /* Floating point arithmetic. */
3394 switch ((u << 2) | size) {
3395 case 0: /* VADD */
3396 gen_op_neon_add_f32();
3397 break;
3398 case 2: /* VSUB */
3399 gen_op_neon_sub_f32();
3400 break;
3401 case 4: /* VPADD */
3402 gen_op_neon_add_f32();
3403 break;
3404 case 6: /* VABD */
3405 gen_op_neon_abd_f32();
3406 break;
3407 default:
3408 return 1;
3410 break;
3411 case 27: /* Float multiply. */
3412 gen_op_neon_mul_f32();
3413 if (!u) {
3414 NEON_GET_REG(T1, rd, pass);
3415 if (size == 0) {
3416 gen_op_neon_add_f32();
3417 } else {
3418 gen_op_neon_rsb_f32();
3421 break;
3422 case 28: /* Float compare. */
3423 if (!u) {
3424 gen_op_neon_ceq_f32();
3425 } else {
3426 if (size == 0)
3427 gen_op_neon_cge_f32();
3428 else
3429 gen_op_neon_cgt_f32();
3431 break;
3432 case 29: /* Float compare absolute. */
3433 if (!u)
3434 return 1;
3435 if (size == 0)
3436 gen_op_neon_acge_f32();
3437 else
3438 gen_op_neon_acgt_f32();
3439 break;
3440 case 30: /* Float min/max. */
3441 if (size == 0)
3442 gen_op_neon_max_f32();
3443 else
3444 gen_op_neon_min_f32();
3445 break;
3446 case 31:
3447 if (size == 0)
3448 gen_op_neon_recps_f32();
3449 else
3450 gen_op_neon_rsqrts_f32();
3451 break;
3452 default:
3453 abort();
3455 /* Save the result. For elementwise operations we can put it
3456 straight into the destination register. For pairwise operations
3457 we have to be careful to avoid clobbering the source operands. */
3458 if (pairwise && rd == rm) {
3459 gen_neon_movl_scratch_T0(pass);
3460 } else {
3461 NEON_SET_REG(T0, rd, pass);
3464 } /* for pass */
3465 if (pairwise && rd == rm) {
3466 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3467 gen_neon_movl_T0_scratch(pass);
3468 NEON_SET_REG(T0, rd, pass);
3471 } else if (insn & (1 << 4)) {
3472 if ((insn & 0x00380080) != 0) {
3473 /* Two registers and shift. */
3474 op = (insn >> 8) & 0xf;
3475 if (insn & (1 << 7)) {
3476 /* 64-bit shift. */
3477 size = 3;
3478 } else {
3479 size = 2;
3480 while ((insn & (1 << (size + 19))) == 0)
3481 size--;
3483 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3484 /* To avoid excessive duplication of ops we implement shift
3485 by immediate using the variable shift operations. */
3486 if (op < 8) {
3487 /* Shift by immediate:
3488 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
3489 /* Right shifts are encoded as N - shift, where N is the
3490 element size in bits. */
3491 if (op <= 4)
3492 shift = shift - (1 << (size + 3));
3493 else
3494 shift++;
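/* Worked example: VSHR.S8 #3 encodes a shift field of 8 - 3 = 5, so the
   subtraction above yields -3, and the variable shift op performs a right
   shift by 3 when handed the negative count. */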
3495 if (size == 3) {
3496 count = q + 1;
3497 } else {
3498 count = q ? 4: 2;
3500 switch (size) {
3501 case 0:
3502 imm = (uint8_t) shift;
3503 imm |= imm << 8;
3504 imm |= imm << 16;
3505 break;
3506 case 1:
3507 imm = (uint16_t) shift;
3508 imm |= imm << 16;
3509 break;
3510 case 2:
3511 case 3:
3512 imm = shift;
3513 break;
3514 default:
3515 abort();
3518 for (pass = 0; pass < count; pass++) {
3519 if (size < 3) {
3520 /* Operands in T0 and T1. */
3521 gen_op_movl_T1_im(imm);
3522 NEON_GET_REG(T0, rm, pass);
3523 } else {
3524 /* Operands in {T0, T1} and env->vfp.scratch. */
3525 gen_op_movl_T0_im(imm);
3526 gen_neon_movl_scratch_T0(0);
3527 gen_op_movl_T0_im((int32_t)imm >> 31);
3528 gen_neon_movl_scratch_T0(1);
3529 NEON_GET_REG(T0, rm, pass * 2);
3530 NEON_GET_REG(T1, rm, pass * 2 + 1);
3533 if (gen_neon_shift_im[op][u][size] == NULL)
3534 return 1;
3535 gen_neon_shift_im[op][u][size]();
3537 if (op == 1 || op == 3) {
3538 /* Accumulate. */
3539 if (size == 3) {
3540 gen_neon_movl_scratch_T0(0);
3541 gen_neon_movl_scratch_T1(1);
3542 NEON_GET_REG(T0, rd, pass * 2);
3543 NEON_GET_REG(T1, rd, pass * 2 + 1);
3544 gen_op_neon_addl_u64();
3545 } else {
3546 NEON_GET_REG(T1, rd, pass);
3547 gen_neon_add(size);
3549 } else if (op == 4 || (op == 5 && u)) {
3550 /* Insert */
3551 if (size == 3) {
3552 cpu_abort(env, "VS[LR]I.64 not implemented");
3554 switch (size) {
3555 case 0:
3556 if (op == 4)
3557 imm = 0xff >> -shift;
3558 else
3559 imm = (uint8_t)(0xff << shift);
3560 imm |= imm << 8;
3561 imm |= imm << 16;
3562 break;
3563 case 1:
3564 if (op == 4)
3565 imm = 0xffff >> -shift;
3566 else
3567 imm = (uint16_t)(0xffff << shift);
3568 imm |= imm << 16;
3569 break;
3570 case 2:
3571 if (op == 4)
3572 imm = 0xffffffffu >> -shift;
3573 else
3574 imm = 0xffffffffu << shift;
3575 break;
3576 default:
3577 abort();
3579 NEON_GET_REG(T1, rd, pass);
3580 gen_op_movl_T2_im(imm);
3581 gen_op_neon_bsl();
3583 if (size == 3) {
3584 NEON_SET_REG(T0, rd, pass * 2);
3585 NEON_SET_REG(T1, rd, pass * 2 + 1);
3586 } else {
3587 NEON_SET_REG(T0, rd, pass);
3589 } /* for pass */
3590 } else if (op < 10) {
3591 /* Shift by immediate and narrow:
3592 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
3593 shift = shift - (1 << (size + 3));
3594 size++;
3595 if (size == 3) {
3596 count = q + 1;
3597 } else {
3598 count = q ? 4: 2;
3600 switch (size) {
3601 case 1:
3602 imm = (uint16_t) shift;
3603 imm |= imm << 16;
3604 break;
3605 case 2:
3606 case 3:
3607 imm = shift;
3608 break;
3609 default:
3610 abort();
3613 /* Processing MSB first means we need to do less shuffling at
3614 the end. */
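/* When rd == rm each narrowed result overwrites a 32-bit chunk of the
   source, so the top-down order, combined with the index remapping below,
   helps ensure every source chunk is read before its slot is written. */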
3615 for (pass = count - 1; pass >= 0; pass--) {
3616 /* Avoid clobbering the second operand before it has been
3617 written. */
3618 n = pass;
3619 if (rd == rm)
3620 n ^= (count - 1);
3624 if (size < 3) {
3625 /* Operands in T0 and T1. */
3626 gen_op_movl_T1_im(imm);
3627 NEON_GET_REG(T0, rm, n);
3628 } else {
3629 /* Operands in {T0, T1} and env->vfp.scratch. */
3630 gen_op_movl_T0_im(imm);
3631 gen_neon_movl_scratch_T0(0);
3632 gen_op_movl_T0_im((int32_t)imm >> 31);
3633 gen_neon_movl_scratch_T0(1);
3634 NEON_GET_REG(T0, rm, n * 2);
3635 NEON_GET_REG(T1, rm, n * 2 + 1);
3638 gen_neon_shift_im_narrow[q][u][size - 1]();
3640 if (size < 3 && (pass & 1) == 0) {
3641 gen_neon_movl_scratch_T0(0);
3642 } else {
3643 uint32_t offset;
3645 if (size < 3)
3646 gen_neon_movl_T1_scratch(0);
3648 if (op == 8 && !u) {
3649 gen_neon_narrow[size - 1]();
3650 } else {
3651 if (op == 8)
3652 gen_neon_narrow_sats[size - 2]();
3653 else
3654 gen_neon_narrow_satu[size - 1]();
3656 if (size == 3)
3657 offset = neon_reg_offset(rd, n);
3658 else
3659 offset = neon_reg_offset(rd, n >> 1);
3660 gen_op_neon_setreg_T0(offset);
3662 } /* for pass */
3663 } else if (op == 10) {
3664 /* VSHLL */
3665 if (q)
3666 return 1;
3667 for (pass = 0; pass < 2; pass++) {
3668 /* Avoid clobbering the input operand. */
3669 if (rd == rm)
3670 n = 1 - pass;
3671 else
3672 n = pass;
3674 NEON_GET_REG(T0, rm, n);
3675 GEN_NEON_INTEGER_OP(widen);
3676 if (shift != 0) {
3677 /* The shift is less than the width of the source
3678 type, so in some cases we can just
3679 shift the whole register. */
3680 if (size == 1 || (size == 0 && u)) {
3681 gen_op_shll_T0_im(shift);
3682 gen_op_shll_T1_im(shift);
3683 } else {
3684 switch (size) {
3685 case 0: gen_op_neon_shll_u16(shift); break;
3686 case 2: gen_op_neon_shll_u64(shift); break;
3687 default: abort();
3691 NEON_SET_REG(T0, rd, n * 2);
3692 NEON_SET_REG(T1, rd, n * 2 + 1);
3694 } else if (op == 15 || op == 16) {
3695 /* VCVT fixed-point. */
3696 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3697 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
3698 if (op & 1) {
3699 if (u)
3700 gen_op_vfp_ultos(shift);
3701 else
3702 gen_op_vfp_sltos(shift);
3703 } else {
3704 if (u)
3705 gen_op_vfp_touls(shift);
3706 else
3707 gen_op_vfp_tosls(shift);
3709 gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
3711 } else {
3712 return 1;
3714 } else { /* (insn & 0x00380080) == 0 */
3715 int invert;
3717 op = (insn >> 8) & 0xf;
3718 /* One register and immediate. */
3719 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
3720 invert = (insn & (1 << 5)) != 0;
3721 switch (op) {
3722 case 0: case 1:
3723 /* no-op */
3724 break;
3725 case 2: case 3:
3726 imm <<= 8;
3727 break;
3728 case 4: case 5:
3729 imm <<= 16;
3730 break;
3731 case 6: case 7:
3732 imm <<= 24;
3733 break;
3734 case 8: case 9:
3735 imm |= imm << 16;
3736 break;
3737 case 10: case 11:
3738 imm = (imm << 8) | (imm << 24);
3739 break;
3740 case 12:
3741 imm = (imm << 8) | 0xff;
3742 break;
3743 case 13:
3744 imm = (imm << 16) | 0xffff;
3745 break;
3746 case 14:
3747 imm |= (imm << 8) | (imm << 16) | (imm << 24);
3748 if (invert)
3749 imm = ~imm;
3750 break;
3751 case 15:
3752 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
3753 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
3754 break;
3756 if (invert)
3757 imm = ~imm;
3759 if (op != 14 || !invert)
3760 gen_op_movl_T1_im(imm);
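/* Example expansions: op 2/3 with imm = 0xab gives 0x0000ab00, op 10/11
   gives 0xab00ab00, and op 12 gives 0x0000abff. When the invert bit is set
   (the VMVN/VBIC forms) the final pattern is complemented; op 14's
   per-byte mask form is handled separately in the loop below. */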
3762 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3763 if (op & 1 && op < 12) {
3764 NEON_GET_REG(T0, rd, pass);
3765 if (invert) {
3766 /* The immediate value has already been inverted, so
3767 BIC becomes AND. */
3768 gen_op_andl_T0_T1();
3769 } else {
3770 gen_op_orl_T0_T1();
3772 NEON_SET_REG(T0, rd, pass);
3773 } else {
3774 if (op == 14 && invert) {
3775 uint32_t tmp;
3776 tmp = 0;
3777 for (n = 0; n < 4; n++) {
3778 if (imm & (1 << (n + (pass & 1) * 4)))
3779 tmp |= 0xff << (n * 8);
3781 gen_op_movl_T1_im(tmp);
3783 /* VMOV, VMVN. */
3784 NEON_SET_REG(T1, rd, pass);
3788 } else { /* (insn & 0x00800010 == 0x00800010) */
3789 if (size != 3) {
3790 op = (insn >> 8) & 0xf;
3791 if ((insn & (1 << 6)) == 0) {
3792 /* Three registers of different lengths. */
3793 int src1_wide;
3794 int src2_wide;
3795 int prewiden;
3796 /* prewiden, src1_wide, src2_wide */
3797 static const int neon_3reg_wide[16][3] = {
3798 {1, 0, 0}, /* VADDL */
3799 {1, 1, 0}, /* VADDW */
3800 {1, 0, 0}, /* VSUBL */
3801 {1, 1, 0}, /* VSUBW */
3802 {0, 1, 1}, /* VADDHN */
3803 {0, 0, 0}, /* VABAL */
3804 {0, 1, 1}, /* VSUBHN */
3805 {0, 0, 0}, /* VABDL */
3806 {0, 0, 0}, /* VMLAL */
3807 {0, 0, 0}, /* VQDMLAL */
3808 {0, 0, 0}, /* VMLSL */
3809 {0, 0, 0}, /* VQDMLSL */
3810 {0, 0, 0}, /* Integer VMULL */
3811 {0, 0, 0}, /* VQDMULL */
3812 {0, 0, 0} /* Polynomial VMULL */
3815 prewiden = neon_3reg_wide[op][0];
3816 src1_wide = neon_3reg_wide[op][1];
3817 src2_wide = neon_3reg_wide[op][2];
3819 /* Avoid overlapping operands. Wide source operands are
3820 always aligned so will never overlap with wide
3821 destinations in problematic ways. */
3822 if (rd == rm) {
3823 NEON_GET_REG(T2, rm, 1);
3824 } else if (rd == rn) {
3825 NEON_GET_REG(T2, rn, 1);
3827 for (pass = 0; pass < 2; pass++) {
3828 /* Load the second operand into env->vfp.scratch.
3829 Also widen narrow operands. */
3830 if (pass == 1 && rd == rm) {
3831 if (prewiden) {
3832 gen_op_movl_T0_T2();
3833 } else {
3834 gen_op_movl_T1_T2();
3836 } else {
3837 if (src2_wide) {
3838 NEON_GET_REG(T0, rm, pass * 2);
3839 NEON_GET_REG(T1, rm, pass * 2 + 1);
3840 } else {
3841 if (prewiden) {
3842 NEON_GET_REG(T0, rm, pass);
3843 } else {
3844 NEON_GET_REG(T1, rm, pass);
3848 if (prewiden && !src2_wide) {
3849 GEN_NEON_INTEGER_OP(widen);
3851 if (prewiden || src2_wide) {
3852 gen_neon_movl_scratch_T0(0);
3853 gen_neon_movl_scratch_T1(1);
3856 /* Load the first operand. */
3857 if (pass == 1 && rd == rn) {
3858 gen_op_movl_T0_T2();
3859 } else {
3860 if (src1_wide) {
3861 NEON_GET_REG(T0, rn, pass * 2);
3862 NEON_GET_REG(T1, rn, pass * 2 + 1);
3863 } else {
3864 NEON_GET_REG(T0, rn, pass);
3867 if (prewiden && !src1_wide) {
3868 GEN_NEON_INTEGER_OP(widen);
3870 switch (op) {
3871 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
3872 switch (size) {
3873 case 0: gen_op_neon_addl_u16(); break;
3874 case 1: gen_op_neon_addl_u32(); break;
3875 case 2: gen_op_neon_addl_u64(); break;
3876 default: abort();
3878 break;
3879 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
3880 switch (size) {
3881 case 0: gen_op_neon_subl_u16(); break;
3882 case 1: gen_op_neon_subl_u32(); break;
3883 case 2: gen_op_neon_subl_u64(); break;
3884 default: abort();
3886 break;
3887 case 5: case 7: /* VABAL, VABDL */
3888 switch ((size << 1) | u) {
3889 case 0: gen_op_neon_abdl_s16(); break;
3890 case 1: gen_op_neon_abdl_u16(); break;
3891 case 2: gen_op_neon_abdl_s32(); break;
3892 case 3: gen_op_neon_abdl_u32(); break;
3893 case 4: gen_op_neon_abdl_s64(); break;
3894 case 5: gen_op_neon_abdl_u64(); break;
3895 default: abort();
3897 break;
3898 case 8: case 9: case 10: case 11: case 12: case 13:
3899 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
3900 switch ((size << 1) | u) {
3901 case 0: gen_op_neon_mull_s8(); break;
3902 case 1: gen_op_neon_mull_u8(); break;
3903 case 2: gen_op_neon_mull_s16(); break;
3904 case 3: gen_op_neon_mull_u16(); break;
3905 case 4: gen_op_imull_T0_T1(); break;
3906 case 5: gen_op_mull_T0_T1(); break;
3907 default: abort();
3909 break;
3910 case 14: /* Polynomial VMULL */
3911 cpu_abort(env, "Polynomial VMULL not implemented");
3913 default: /* 15 is RESERVED. */
3914 return 1;
3916 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
3917 /* Accumulate. */
3918 if (op == 10 || op == 11) {
3919 switch (size) {
3920 case 0: gen_op_neon_negl_u16(); break;
3921 case 1: gen_op_neon_negl_u32(); break;
3922 case 2: gen_op_neon_negl_u64(); break;
3923 default: abort();
3927 gen_neon_movl_scratch_T0(0);
3928 gen_neon_movl_scratch_T1(1);
3930 if (op != 13) {
3931 NEON_GET_REG(T0, rd, pass * 2);
3932 NEON_GET_REG(T1, rd, pass * 2 + 1);
3935 switch (op) {
3936 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
3937 switch (size) {
3938 case 0: gen_op_neon_addl_u16(); break;
3939 case 1: gen_op_neon_addl_u32(); break;
3940 case 2: gen_op_neon_addl_u64(); break;
3941 default: abort();
3943 break;
3944 case 9: case 11: /* VQDMLAL, VQDMLSL */
3945 switch (size) {
3946 case 1: gen_op_neon_addl_saturate_s32(); break;
3947 case 2: gen_op_neon_addl_saturate_s64(); break;
3948 default: abort();
3950 /* Fall through. */
3951 case 13: /* VQDMULL */
3952 switch (size) {
3953 case 1: gen_op_neon_addl_saturate_s32(); break;
3954 case 2: gen_op_neon_addl_saturate_s64(); break;
3955 default: abort();
3957 break;
3958 default:
3959 abort();
3961 NEON_SET_REG(T0, rd, pass * 2);
3962 NEON_SET_REG(T1, rd, pass * 2 + 1);
3963 } else if (op == 4 || op == 6) {
3964 /* Narrowing operation. */
3965 if (u) {
3966 switch (size) {
3967 case 0: gen_op_neon_narrow_high_u8(); break;
3968 case 1: gen_op_neon_narrow_high_u16(); break;
3969 case 2: gen_op_movl_T0_T1(); break;
3970 default: abort();
3972 } else {
3973 switch (size) {
3974 case 0: gen_op_neon_narrow_high_round_u8(); break;
3975 case 1: gen_op_neon_narrow_high_round_u16(); break;
3976 case 2: gen_op_neon_narrow_high_round_u32(); break;
3977 default: abort();
3980 NEON_SET_REG(T0, rd, pass);
3981 } else {
3982 /* Write back the result. */
3983 NEON_SET_REG(T0, rd, pass * 2);
3984 NEON_SET_REG(T1, rd, pass * 2 + 1);
3987 } else {
3988 /* Two registers and a scalar. */
3989 switch (op) {
3990 case 0: /* Integer VMLA scalar */
3991 case 1: /* Float VMLA scalar */
3992 case 4: /* Integer VMLS scalar */
3993 case 5: /* Floating point VMLS scalar */
3994 case 8: /* Integer VMUL scalar */
3995 case 9: /* Floating point VMUL scalar */
3996 case 12: /* VQDMULH scalar */
3997 case 13: /* VQRDMULH scalar */
3998 gen_neon_get_scalar(size, rm);
3999 gen_op_movl_T2_T0();
4000 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4001 if (pass != 0)
4002 gen_op_movl_T0_T2();
4003 NEON_GET_REG(T1, rn, pass);
4004 if (op == 12) {
4005 if (size == 1) {
4006 gen_op_neon_qdmulh_s16();
4007 } else {
4008 gen_op_neon_qdmulh_s32();
4010 } else if (op == 13) {
4011 if (size == 1) {
4012 gen_op_neon_qrdmulh_s16();
4013 } else {
4014 gen_op_neon_qrdmulh_s32();
4016 } else if (op & 1) {
4017 gen_op_neon_mul_f32();
4018 } else {
4019 switch (size) {
4020 case 0: gen_op_neon_mul_u8(); break;
4021 case 1: gen_op_neon_mul_u16(); break;
4022 case 2: gen_op_mul_T0_T1(); break;
4023 default: return 1;
4026 if (op < 8) {
4027 /* Accumulate. */
4028 NEON_GET_REG(T1, rd, pass);
4029 switch (op) {
4030 case 0:
4031 gen_neon_add(size);
4032 break;
4033 case 1:
4034 gen_op_neon_add_f32();
4035 break;
4036 case 4:
4037 switch (size) {
4038 case 0: gen_op_neon_rsb_u8(); break;
4039 case 1: gen_op_neon_rsb_u16(); break;
4040 case 2: gen_op_rsbl_T0_T1(); break;
4041 default: return 1;
4043 break;
4044 case 5:
4045 gen_op_neon_rsb_f32();
4046 break;
4047 default:
4048 abort();
4051 NEON_SET_REG(T0, rd, pass);
4053 break;
4054 case 2: /* VMLAL scalar */
4055 case 3: /* VQDMLAL scalar */
4056 case 6: /* VMLSL scalar */
4057 case 7: /* VQDMLSL scalar */
4058 case 10: /* VMULL scalar */
4059 case 11: /* VQDMULL scalar */
4060 if (rd == rn) {
4061 /* Save overlapping operands before they are
4062 clobbered. */
4063 NEON_GET_REG(T0, rn, 1);
4064 gen_neon_movl_scratch_T0(2);
4066 gen_neon_get_scalar(size, rm);
4067 gen_op_movl_T2_T0();
4068 for (pass = 0; pass < 2; pass++) {
4069 if (pass != 0) {
4070 gen_op_movl_T0_T2();
4072 if (pass != 0 && rd == rn) {
4073 gen_neon_movl_T1_scratch(2);
4074 } else {
4075 NEON_GET_REG(T1, rn, pass);
4077 switch ((size << 1) | u) {
4078 case 0: gen_op_neon_mull_s8(); break;
4079 case 1: gen_op_neon_mull_u8(); break;
4080 case 2: gen_op_neon_mull_s16(); break;
4081 case 3: gen_op_neon_mull_u16(); break;
4082 case 4: gen_op_imull_T0_T1(); break;
4083 case 5: gen_op_mull_T0_T1(); break;
4084 default: abort();
4086 if (op == 6 || op == 7) {
4087 switch (size) {
4088 case 0: gen_op_neon_negl_u16(); break;
4089 case 1: gen_op_neon_negl_u32(); break;
4090 case 2: gen_op_neon_negl_u64(); break;
4091 default: abort();
4094 gen_neon_movl_scratch_T0(0);
4095 gen_neon_movl_scratch_T1(1);
4096 NEON_GET_REG(T0, rd, pass * 2);
4097 NEON_GET_REG(T1, rd, pass * 2 + 1);
4098 switch (op) {
4099 case 2: case 6:
4100 switch (size) {
4101 case 0: gen_op_neon_addl_u16(); break;
4102 case 1: gen_op_neon_addl_u32(); break;
4103 case 2: gen_op_neon_addl_u64(); break;
4104 default: abort();
4106 break;
4107 case 3: case 7:
4108 switch (size) {
4109 case 1:
4110 gen_op_neon_addl_saturate_s32();
4111 gen_op_neon_addl_saturate_s32();
4112 break;
4113 case 2:
4114 gen_op_neon_addl_saturate_s64();
4115 gen_op_neon_addl_saturate_s64();
4116 break;
4117 default: abort();
4119 break;
4120 case 10:
4121 /* no-op */
4122 break;
4123 case 11:
4124 switch (size) {
4125 case 1: gen_op_neon_addl_saturate_s32(); break;
4126 case 2: gen_op_neon_addl_saturate_s64(); break;
4127 default: abort();
4129 break;
4130 default:
4131 abort();
4133 NEON_SET_REG(T0, rd, pass * 2);
4134 NEON_SET_REG(T1, rd, pass * 2 + 1);
4136 break;
4137 default: /* 14 and 15 are RESERVED */
4138 return 1;
4141 } else { /* size == 3 */
4142 if (!u) {
4143 /* Extract. */
4144 int reg;
4145 imm = (insn >> 8) & 0xf;
4146 reg = rn;
4147 count = q ? 4 : 2;
4148 n = imm >> 2;
4149 NEON_GET_REG(T0, reg, n);
4150 for (pass = 0; pass < count; pass++) {
4151 n++;
4152 if (n >= count) {
4153 reg = rm;
4154 n -= count;
4156 if (imm & 3) {
4157 NEON_GET_REG(T1, reg, n);
4158 gen_op_neon_extract((insn << 3) & 0x1f);
4160 /* ??? This is broken if rd and rm overlap */
4161 NEON_SET_REG(T0, rd, pass);
4162 if (imm & 3) {
4163 gen_op_movl_T0_T1();
4164 } else {
4165 NEON_GET_REG(T0, reg, n);
4168 } else if ((insn & (1 << 11)) == 0) {
4169 /* Two register misc. */
4170 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4171 size = (insn >> 18) & 3;
4172 switch (op) {
4173 case 0: /* VREV64 */
4174 if (size == 3)
4175 return 1;
4176 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4177 NEON_GET_REG(T0, rm, pass * 2);
4178 NEON_GET_REG(T1, rm, pass * 2 + 1);
4179 switch (size) {
4180 case 0: gen_op_rev_T0(); break;
4181 case 1: gen_op_revh_T0(); break;
4182 case 2: /* no-op */ break;
4183 default: abort();
4185 NEON_SET_REG(T0, rd, pass * 2 + 1);
4186 if (size == 2) {
4187 NEON_SET_REG(T1, rd, pass * 2);
4188 } else {
4189 gen_op_movl_T0_T1();
4190 switch (size) {
4191 case 0: gen_op_rev_T0(); break;
4192 case 1: gen_op_revh_T0(); break;
4193 default: abort();
4195 NEON_SET_REG(T0, rd, pass * 2);
4198 break;
4199 case 4: case 5: /* VPADDL */
4200 case 12: case 13: /* VPADAL */
4201 if (size < 2)
4202 goto elementwise;
4203 if (size == 3)
4204 return 1;
4205 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4206 NEON_GET_REG(T0, rm, pass * 2);
4207 NEON_GET_REG(T1, rm, pass * 2 + 1);
4208 if (op & 1)
4209 gen_op_neon_paddl_u32();
4210 else
4211 gen_op_neon_paddl_s32();
4212 if (op >= 12) {
4213 /* Accumulate. */
4214 gen_neon_movl_scratch_T0(0);
4215 gen_neon_movl_scratch_T1(1);
4217 NEON_GET_REG(T0, rd, pass * 2);
4218 NEON_GET_REG(T1, rd, pass * 2 + 1);
4219 gen_op_neon_addl_u64();
4221 NEON_SET_REG(T0, rd, pass * 2);
4222 NEON_SET_REG(T1, rd, pass * 2 + 1);
4224 break;
4225 case 33: /* VTRN */
4226 if (size == 2) {
4227 for (n = 0; n < (q ? 4 : 2); n += 2) {
4228 NEON_GET_REG(T0, rm, n);
4229 NEON_GET_REG(T1, rd, n + 1);
4230 NEON_SET_REG(T1, rm, n);
4231 NEON_SET_REG(T0, rd, n + 1);
4233 } else {
4234 goto elementwise;
4236 break;
4237 case 34: /* VUZP */
4238 /* Reg Before After
4239 Rd A3 A2 A1 A0 B2 B0 A2 A0
4240 Rm B3 B2 B1 B0 B3 B1 A3 A1
4242 if (size == 3)
4243 return 1;
4244 gen_neon_unzip(rd, q, 0, size);
4245 gen_neon_unzip(rm, q, 4, size);
4246 if (q) {
4247 static int unzip_order_q[8] =
4248 {0, 2, 4, 6, 1, 3, 5, 7};
4249 for (n = 0; n < 8; n++) {
4250 int reg = (n < 4) ? rd : rm;
4251 gen_neon_movl_T0_scratch(unzip_order_q[n]);
4252 NEON_SET_REG(T0, reg, n % 4);
4254 } else {
4255 static int unzip_order[4] =
4256 {0, 4, 1, 5};
4257 for (n = 0; n < 4; n++) {
4258 int reg = (n < 2) ? rd : rm;
4259 gen_neon_movl_T0_scratch(unzip_order[n]);
4260 NEON_SET_REG(T0, reg, n % 2);
4263 break;
4264 case 35: /* VZIP */
4265 /* Reg Before After
4266 Rd A3 A2 A1 A0 B1 A1 B0 A0
4267 Rm B3 B2 B1 B0 B3 A3 B2 A2
4269 if (size == 3)
4270 return 1;
4271 count = (q ? 4 : 2);
4272 for (n = 0; n < count; n++) {
4273 NEON_GET_REG(T0, rd, n);
4274 NEON_GET_REG(T1, rm, n);
4275 switch (size) {
4276 case 0: gen_op_neon_zip_u8(); break;
4277 case 1: gen_op_neon_zip_u16(); break;
4278 case 2: /* no-op */; break;
4279 default: abort();
4281 gen_neon_movl_scratch_T0(n * 2);
4282 gen_neon_movl_scratch_T1(n * 2 + 1);
4284 for (n = 0; n < count * 2; n++) {
4285 int reg = (n < count) ? rd : rm;
4286 gen_neon_movl_T0_scratch(n);
4287 NEON_SET_REG(T0, reg, n % count);
4289 break;
4290 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4291 for (pass = 0; pass < 2; pass++) {
4292 if (rd == rm + 1) {
4293 n = 1 - pass;
4294 } else {
4295 n = pass;
4297 NEON_GET_REG(T0, rm, n * 2);
4298 NEON_GET_REG(T1, rm, n * 2 + 1);
4299 if (op == 36 && q == 0) {
4300 switch (size) {
4301 case 0: gen_op_neon_narrow_u8(); break;
4302 case 1: gen_op_neon_narrow_u16(); break;
4303 case 2: /* no-op */ break;
4304 default: return 1;
4306 } else if (q) {
4307 switch (size) {
4308 case 0: gen_op_neon_narrow_sat_u8(); break;
4309 case 1: gen_op_neon_narrow_sat_u16(); break;
4310 case 2: gen_op_neon_narrow_sat_u32(); break;
4311 default: return 1;
4313 } else {
4314 switch (size) {
4315 case 0: gen_op_neon_narrow_sat_s8(); break;
4316 case 1: gen_op_neon_narrow_sat_s16(); break;
4317 case 2: gen_op_neon_narrow_sat_s32(); break;
4318 default: return 1;
4321 NEON_SET_REG(T0, rd, n);
4323 break;
4324 case 38: /* VSHLL */
4325 if (q)
4326 return 1;
4327 if (rm == rd) {
4328 NEON_GET_REG(T2, rm, 1);
4330 for (pass = 0; pass < 2; pass++) {
4331 if (pass == 1 && rm == rd) {
4332 gen_op_movl_T0_T2();
4333 } else {
4334 NEON_GET_REG(T0, rm, pass);
4336 switch (size) {
4337 case 0: gen_op_neon_widen_high_u8(); break;
4338 case 1: gen_op_neon_widen_high_u16(); break;
4339 case 2:
4340 gen_op_movl_T1_T0();
4341 gen_op_movl_T0_im(0);
4342 break;
4343 default: return 1;
4345 NEON_SET_REG(T0, rd, pass * 2);
4346 NEON_SET_REG(T1, rd, pass * 2 + 1);
4348 break;
4349 default:
4350 elementwise:
4351 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4352 if (op == 30 || op == 31 || op >= 58) {
4353 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4354 } else {
4355 NEON_GET_REG(T0, rm, pass);
4357 switch (op) {
4358 case 1: /* VREV32 */
4359 switch (size) {
4360 case 0: gen_op_rev_T0(); break;
4361 case 1: gen_op_revh_T0(); break;
4362 default: return 1;
4364 break;
4365 case 2: /* VREV16 */
4366 if (size != 0)
4367 return 1;
4368 gen_op_rev16_T0();
4369 break;
4370 case 4: case 5: /* VPADDL */
4371 case 12: case 13: /* VPADAL */
4372 switch ((size << 1) | (op & 1)) {
4373 case 0: gen_op_neon_paddl_s8(); break;
4374 case 1: gen_op_neon_paddl_u8(); break;
4375 case 2: gen_op_neon_paddl_s16(); break;
4376 case 3: gen_op_neon_paddl_u16(); break;
4377 default: abort();
4379 if (op >= 12) {
4380 /* Accumulate */
4381 NEON_GET_REG(T1, rd, pass);
4382 switch (size) {
4383 case 0: gen_op_neon_add_u16(); break;
4384 case 1: gen_op_addl_T0_T1(); break;
4385 default: abort();
4388 break;
4389 case 8: /* CLS */
4390 switch (size) {
4391 case 0: gen_op_neon_cls_s8(); break;
4392 case 1: gen_op_neon_cls_s16(); break;
4393 case 2: gen_op_neon_cls_s32(); break;
4394 default: return 1;
4396 break;
4397 case 9: /* CLZ */
4398 switch (size) {
4399 case 0: gen_op_neon_clz_u8(); break;
4400 case 1: gen_op_neon_clz_u16(); break;
4401 case 2: gen_op_clz_T0(); break;
4402 default: return 1;
4404 break;
4405 case 10: /* CNT */
4406 if (size != 0)
4407 return 1;
4408 gen_op_neon_cnt_u8();
4409 break;
4410 case 11: /* VNOT */
4411 if (size != 0)
4412 return 1;
4413 gen_op_notl_T0();
4414 break;
4415 case 14: /* VQABS */
4416 switch (size) {
4417 case 0: gen_op_neon_qabs_s8(); break;
4418 case 1: gen_op_neon_qabs_s16(); break;
4419 case 2: gen_op_neon_qabs_s32(); break;
4420 default: return 1;
4422 break;
4423 case 15: /* VQNEG */
4424 switch (size) {
4425 case 0: gen_op_neon_qneg_s8(); break;
4426 case 1: gen_op_neon_qneg_s16(); break;
4427 case 2: gen_op_neon_qneg_s32(); break;
4428 default: return 1;
4430 break;
4431 case 16: case 19: /* VCGT #0, VCLE #0 */
4432 gen_op_movl_T1_im(0);
4433 switch(size) {
4434 case 0: gen_op_neon_cgt_s8(); break;
4435 case 1: gen_op_neon_cgt_s16(); break;
4436 case 2: gen_op_neon_cgt_s32(); break;
4437 default: return 1;
4439 if (op == 19)
4440 gen_op_notl_T0();
4441 break;
4442 case 17: case 20: /* VCGE #0, VCLT #0 */
4443 gen_op_movl_T1_im(0);
4444 switch(size) {
4445 case 0: gen_op_neon_cge_s8(); break;
4446 case 1: gen_op_neon_cge_s16(); break;
4447 case 2: gen_op_neon_cge_s32(); break;
4448 default: return 1;
4450 if (op == 20)
4451 gen_op_notl_T0();
4452 break;
4453 case 18: /* VCEQ #0 */
4454 gen_op_movl_T1_im(0);
4455 switch(size) {
4456 case 0: gen_op_neon_ceq_u8(); break;
4457 case 1: gen_op_neon_ceq_u16(); break;
4458 case 2: gen_op_neon_ceq_u32(); break;
4459 default: return 1;
4461 break;
4462 case 22: /* VABS */
4463 switch(size) {
4464 case 0: gen_op_neon_abs_s8(); break;
4465 case 1: gen_op_neon_abs_s16(); break;
4466 case 2: gen_op_neon_abs_s32(); break;
4467 default: return 1;
4469 break;
4470 case 23: /* VNEG */
4471 gen_op_movl_T1_im(0);
4472 switch(size) {
4473 case 0: gen_op_neon_rsb_u8(); break;
4474 case 1: gen_op_neon_rsb_u16(); break;
4475 case 2: gen_op_rsbl_T0_T1(); break;
4476 default: return 1;
4478 break;
4479 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4480 gen_op_movl_T1_im(0);
4481 gen_op_neon_cgt_f32();
4482 if (op == 27)
4483 gen_op_notl_T0();
4484 break;
4485 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4486 gen_op_movl_T1_im(0);
4487 gen_op_neon_cge_f32();
4488 if (op == 28)
4489 gen_op_notl_T0();
4490 break;
4491 case 26: /* Float VCEQ #0 */
4492 gen_op_movl_T1_im(0);
4493 gen_op_neon_ceq_f32();
4494 break;
4495 case 30: /* Float VABS */
4496 gen_op_vfp_abss();
4497 break;
4498 case 31: /* Float VNEG */
4499 gen_op_vfp_negs();
4500 break;
4501 case 32: /* VSWP */
4502 NEON_GET_REG(T1, rd, pass);
4503 NEON_SET_REG(T1, rm, pass);
4504 break;
4505 case 33: /* VTRN */
4506 NEON_GET_REG(T1, rd, pass);
4507 switch (size) {
4508 case 0: gen_op_neon_trn_u8(); break;
4509 case 1: gen_op_neon_trn_u16(); break;
4510 case 2: abort();
4511 default: return 1;
4513 NEON_SET_REG(T1, rm, pass);
4514 break;
4515 case 56: /* Integer VRECPE */
4516 gen_op_neon_recpe_u32();
4517 break;
4518 case 57: /* Integer VRSQRTE */
4519 gen_op_neon_rsqrte_u32();
4520 break;
4521 case 58: /* Float VRECPE */
4522 gen_op_neon_recpe_f32();
4523 break;
4524 case 59: /* Float VRSQRTE */
4525 gen_op_neon_rsqrte_f32();
4526 break;
4527 case 60: /* VCVT.F32.S32 */
4528 gen_op_vfp_tosizs();
4529 break;
4530 case 61: /* VCVT.F32.U32 */
4531 gen_op_vfp_touizs();
4532 break;
4533 case 62: /* VCVT.S32.F32 */
4534 gen_op_vfp_sitos();
4535 break;
4536 case 63: /* VCVT.U32.F32 */
4537 gen_op_vfp_uitos();
4538 break;
4539 default:
4540 /* Reserved: 21, 29, 39-56 */
4541 return 1;
4543 if (op == 30 || op == 31 || op >= 58) {
4544 gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
4545 } else {
4546 NEON_SET_REG(T0, rd, pass);
4549 break;
4551 } else if ((insn & (1 << 10)) == 0) {
4552 /* VTBL, VTBX. */
4553 n = (insn >> 5) & 0x18;
4554 NEON_GET_REG(T1, rm, 0);
4555 if (insn & (1 << 6)) {
4556 NEON_GET_REG(T0, rd, 0);
4557 } else {
4558 gen_op_movl_T0_im(0);
4560 gen_op_neon_tbl(rn, n);
4561 gen_op_movl_T2_T0();
4562 NEON_GET_REG(T1, rm, 1);
4563 if (insn & (1 << 6)) {
4564 NEON_GET_REG(T0, rd, 0);
4565 } else {
4566 gen_op_movl_T0_im(0);
4568 gen_op_neon_tbl(rn, n);
4569 NEON_SET_REG(T2, rd, 0);
4570 NEON_SET_REG(T0, rd, 1);
4571 } else if ((insn & 0x380) == 0) {
4572 /* VDUP */
4573 if (insn & (1 << 19)) {
4574 NEON_SET_REG(T0, rm, 1);
4575 } else {
4576 NEON_GET_REG(T0, rm, 0);
4578 if (insn & (1 << 16)) {
4579 gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
4580 } else if (insn & (1 << 17)) {
4581 if ((insn >> 18) & 1)
4582 gen_op_neon_dup_high16();
4583 else
4584 gen_op_neon_dup_low16();
4586 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4587 NEON_SET_REG(T0, rd, pass);
4589 } else {
4590 return 1;
4594 return 0;
4597 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
4599 int cpnum;
4601 cpnum = (insn >> 8) & 0xf;
4602 if (arm_feature(env, ARM_FEATURE_XSCALE)
4603 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
4604 return 1;
4606 switch (cpnum) {
4607 case 0:
4608 case 1:
4609 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4610 return disas_iwmmxt_insn(env, s, insn);
4611 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4612 return disas_dsp_insn(env, s, insn);
4614 return 1;
4615 case 10:
4616 case 11:
4617 return disas_vfp_insn (env, s, insn);
4618 case 15:
4619 return disas_cp15_insn (env, s, insn);
4620 default:
4621 /* Unknown coprocessor. See if the board has hooked it. */
4622 return disas_cp_insn (env, s, insn);
4626 static void disas_arm_insn(CPUState * env, DisasContext *s)
4628 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4630 insn = ldl_code(s->pc);
4631 s->pc += 4;
4633 /* M variants do not implement ARM mode. */
4634 if (IS_M(env))
4635 goto illegal_op;
4636 cond = insn >> 28;
4637 if (cond == 0xf){
4638 /* Unconditional instructions. */
4639 if (((insn >> 25) & 7) == 1) {
4640 /* NEON Data processing. */
4641 if (!arm_feature(env, ARM_FEATURE_NEON))
4642 goto illegal_op;
4644 if (disas_neon_data_insn(env, s, insn))
4645 goto illegal_op;
4646 return;
4648 if ((insn & 0x0f100000) == 0x04000000) {
4649 /* NEON load/store. */
4650 if (!arm_feature(env, ARM_FEATURE_NEON))
4651 goto illegal_op;
4653 if (disas_neon_ls_insn(env, s, insn))
4654 goto illegal_op;
4655 return;
4657 if ((insn & 0x0d70f000) == 0x0550f000)
4658 return; /* PLD */
4659 else if ((insn & 0x0ffffdff) == 0x01010000) {
4660 ARCH(6);
4661 /* setend */
4662 if (insn & (1 << 9)) {
4663 /* BE8 mode not implemented. */
4664 goto illegal_op;
4666 return;
4667 } else if ((insn & 0x0fffff00) == 0x057ff000) {
4668 switch ((insn >> 4) & 0xf) {
4669 case 1: /* clrex */
4670 ARCH(6K);
4671 gen_op_clrex();
4672 return;
4673 case 4: /* dsb */
4674 case 5: /* dmb */
4675 case 6: /* isb */
4676 ARCH(7);
4677 /* We don't emulate caches so these are a no-op. */
4678 return;
4679 default:
4680 goto illegal_op;
4682 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
4683 /* srs */
4684 uint32_t offset;
4685 if (IS_USER(s))
4686 goto illegal_op;
4687 ARCH(6);
4688 op1 = (insn & 0x1f);
4689 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4690 gen_movl_T1_reg(s, 13);
4691 } else {
4692 gen_op_movl_T1_r13_banked(op1);
4694 i = (insn >> 23) & 3;
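/* SRS stores two words: r14 at the lower address and the current
status word at the higher one. The pre-adjustment below positions
T1 so that the two stl ops land correctly for each addressing
mode (DA/DB/IA/IB). */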
4695 switch (i) {
4696 case 0: offset = -4; break; /* DA */
4697 case 1: offset = -8; break; /* DB */
4698 case 2: offset = 0; break; /* IA */
4699 case 3: offset = 4; break; /* IB */
4700 default: abort();
4702 if (offset)
4703 gen_op_addl_T1_im(offset);
4704 gen_movl_T0_reg(s, 14);
4705 gen_ldst(stl, s);
4706 gen_op_movl_T0_cpsr();
4707 gen_op_addl_T1_im(4);
4708 gen_ldst(stl, s);
4709 if (insn & (1 << 21)) {
4710 /* Base writeback. */
4711 switch (i) {
4712 case 0: offset = -8; break;
4713 case 1: offset = -4; break;
4714 case 2: offset = 4; break;
4715 case 3: offset = 0; break;
4716 default: abort();
4718 if (offset)
4719 gen_op_addl_T1_im(offset);
4720 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4721 gen_movl_reg_T1(s, 13);
4722 } else {
4723 gen_op_movl_r13_T1_banked(op1);
4726 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
4727 /* rfe */
4728 uint32_t offset;
4729 if (IS_USER(s))
4730 goto illegal_op;
4731 ARCH(6);
4732 rn = (insn >> 16) & 0xf;
4733 gen_movl_T1_reg(s, rn);
4734 i = (insn >> 23) & 3;
4735 switch (i) {
4736 case 0: offset = 0; break; /* DA */
4737 case 1: offset = -4; break; /* DB */
4738 case 2: offset = 4; break; /* IA */
4739 case 3: offset = 8; break; /* IB */
4740 default: abort();
4742 if (offset)
4743 gen_op_addl_T1_im(offset);
4744 /* Load CPSR into T2 and PC into T0. */
4745 gen_ldst(ldl, s);
4746 gen_op_movl_T2_T0();
4747 gen_op_addl_T1_im(-4);
4748 gen_ldst(ldl, s);
4749 if (insn & (1 << 21)) {
4750 /* Base writeback. */
4751 switch (i) {
4752 case 0: offset = -4; break;
4753 case 1: offset = 0; break;
4754 case 2: offset = 8; break;
4755 case 3: offset = 4; break;
4756 default: abort();
4758 if (offset)
4759 gen_op_addl_T1_im(offset);
4760 gen_movl_reg_T1(s, rn);
4762 gen_rfe(s);
4763 } else if ((insn & 0x0e000000) == 0x0a000000) {
4764 /* branch link and change to thumb (blx <offset>) */
4765 int32_t offset;
4767 val = (uint32_t)s->pc;
4768 gen_op_movl_T0_im(val);
4769 gen_movl_reg_T0(s, 14);
4770 /* Sign-extend the 24-bit offset */
4771 offset = (((int32_t)insn) << 8) >> 8;
4772 /* offset * 4 + bit24 * 2 + (thumb bit) */
4773 val += (offset << 2) | ((insn >> 23) & 2) | 1;
4774 /* pipeline offset */
4775 val += 4;
4776 gen_op_movl_T0_im(val);
4777 gen_bx(s);
4778 return;
4779 } else if ((insn & 0x0e000f00) == 0x0c000100) {
4780 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4781 /* iWMMXt register transfer. */
4782 if (env->cp15.c15_cpar & (1 << 1))
4783 if (!disas_iwmmxt_insn(env, s, insn))
4784 return;
4786 } else if ((insn & 0x0fe00000) == 0x0c400000) {
4787 /* Coprocessor double register transfer. */
4788 } else if ((insn & 0x0f000010) == 0x0e000010) {
4789 /* Additional coprocessor register transfer. */
4790 } else if ((insn & 0x0ff10010) == 0x01000000) {
4791 uint32_t mask;
4792 uint32_t val;
4793 /* cps (privileged) */
4794 if (IS_USER(s))
4795 return;
4796 mask = val = 0;
4797 if (insn & (1 << 19)) {
4798 if (insn & (1 << 8))
4799 mask |= CPSR_A;
4800 if (insn & (1 << 7))
4801 mask |= CPSR_I;
4802 if (insn & (1 << 6))
4803 mask |= CPSR_F;
4804 if (insn & (1 << 18))
4805 val |= mask;
4807 if (insn & (1 << 14)) {
4808 mask |= CPSR_M;
4809 val |= (insn & 0x1f);
4811 if (mask) {
4812 gen_op_movl_T0_im(val);
4813 gen_set_psr_T0(s, mask, 0);
4815 return;
4817 goto illegal_op;
4819 if (cond != 0xe) {
4820 /* If the condition is not "always", generate a conditional jump
4821 to the next instruction. */
4822 s->condlabel = gen_new_label();
4823 gen_test_cc[cond ^ 1](s->condlabel);
4824 s->condjmp = 1;
4826 if ((insn & 0x0f900000) == 0x03000000) {
4827 if ((insn & (1 << 21)) == 0) {
4828 ARCH(6T2);
4829 rd = (insn >> 12) & 0xf;
4830 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
4831 if ((insn & (1 << 22)) == 0) {
4832 /* MOVW */
4833 gen_op_movl_T0_im(val);
4834 } else {
4835 /* MOVT */
4836 gen_movl_T0_reg(s, rd);
4837 gen_op_movl_T1_im(0xffff);
4838 gen_op_andl_T0_T1();
4839 gen_op_movl_T1_im(val << 16);
4840 gen_op_orl_T0_T1();
4842 gen_movl_reg_T0(s, rd);
4843 } else {
4844 if (((insn >> 12) & 0xf) != 0xf)
4845 goto illegal_op;
4846 if (((insn >> 16) & 0xf) == 0) {
4847 gen_nop_hint(s, insn & 0xff);
4848 } else {
4849 /* CPSR = immediate */
4850 val = insn & 0xff;
4851 shift = ((insn >> 8) & 0xf) * 2;
4852 if (shift)
4853 val = (val >> shift) | (val << (32 - shift));
4854 gen_op_movl_T0_im(val);
4855 i = ((insn & (1 << 22)) != 0);
4856 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4857 goto illegal_op;
4860 } else if ((insn & 0x0f900000) == 0x01000000
4861 && (insn & 0x00000090) != 0x00000090) {
4862 /* miscellaneous instructions */
4863 op1 = (insn >> 21) & 3;
4864 sh = (insn >> 4) & 0xf;
4865 rm = insn & 0xf;
4866 switch (sh) {
4867 case 0x0: /* move program status register */
4868 if (op1 & 1) {
4869 /* PSR = reg */
4870 gen_movl_T0_reg(s, rm);
4871 i = ((op1 & 2) != 0);
4872 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4873 goto illegal_op;
4874 } else {
4875 /* reg = PSR */
4876 rd = (insn >> 12) & 0xf;
4877 if (op1 & 2) {
4878 if (IS_USER(s))
4879 goto illegal_op;
4880 gen_op_movl_T0_spsr();
4881 } else {
4882 gen_op_movl_T0_cpsr();
4884 gen_movl_reg_T0(s, rd);
4886 break;
4887 case 0x1:
4888 if (op1 == 1) {
4889 /* branch/exchange thumb (bx). */
4890 gen_movl_T0_reg(s, rm);
4891 gen_bx(s);
4892 } else if (op1 == 3) {
4893 /* clz */
4894 rd = (insn >> 12) & 0xf;
4895 gen_movl_T0_reg(s, rm);
4896 gen_op_clz_T0();
4897 gen_movl_reg_T0(s, rd);
4898 } else {
4899 goto illegal_op;
4901 break;
4902 case 0x2:
4903 if (op1 == 1) {
4904 ARCH(5J); /* bxj */
4905 /* Trivial implementation equivalent to bx. */
4906 gen_movl_T0_reg(s, rm);
4907 gen_bx(s);
4908 } else {
4909 goto illegal_op;
4911 break;
4912 case 0x3:
4913 if (op1 != 1)
4914 goto illegal_op;
4916 /* branch link/exchange thumb (blx) */
4917 val = (uint32_t)s->pc;
4918 gen_op_movl_T1_im(val);
4919 gen_movl_T0_reg(s, rm);
4920 gen_movl_reg_T1(s, 14);
4921 gen_bx(s);
4922 break;
4923 case 0x5: /* saturating add/subtract */
4924 rd = (insn >> 12) & 0xf;
4925 rn = (insn >> 16) & 0xf;
4926 gen_movl_T0_reg(s, rm);
4927 gen_movl_T1_reg(s, rn);
4928 if (op1 & 2)
4929 gen_op_double_T1_saturate();
4930 if (op1 & 1)
4931 gen_op_subl_T0_T1_saturate();
4932 else
4933 gen_op_addl_T0_T1_saturate();
4934 gen_movl_reg_T0(s, rd);
4935 break;
4936 case 7: /* bkpt */
4937 gen_set_condexec(s);
4938 gen_op_movl_T0_im((long)s->pc - 4);
4939 gen_op_movl_reg_TN[0][15]();
4940 gen_op_bkpt();
4941 s->is_jmp = DISAS_JUMP;
4942 break;
4943 case 0x8: /* signed multiply */
4944 case 0xa:
4945 case 0xc:
4946 case 0xe:
4947 rs = (insn >> 8) & 0xf;
4948 rn = (insn >> 12) & 0xf;
4949 rd = (insn >> 16) & 0xf;
4950 if (op1 == 1) {
4951 /* (32 * 16) >> 16 */
4952 gen_movl_T0_reg(s, rm);
4953 gen_movl_T1_reg(s, rs);
4954 if (sh & 4)
4955 gen_op_sarl_T1_im(16);
4956 else
4957 gen_op_sxth_T1();
4958 gen_op_imulw_T0_T1();
4959 if ((sh & 2) == 0) {
4960 gen_movl_T1_reg(s, rn);
4961 gen_op_addl_T0_T1_setq();
4963 gen_movl_reg_T0(s, rd);
4964 } else {
4965 /* 16 * 16 */
4966 gen_movl_T0_reg(s, rm);
4967 gen_movl_T1_reg(s, rs);
4968 gen_mulxy(sh & 2, sh & 4);
4969 if (op1 == 2) {
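/* smlalxy: sign-extend the 32-bit product into T1:T0, then do a
64-bit accumulate; rn receives the low word and rd the high word. */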
4970 gen_op_signbit_T1_T0();
4971 gen_op_addq_T0_T1(rn, rd);
4972 gen_movl_reg_T0(s, rn);
4973 gen_movl_reg_T1(s, rd);
4974 } else {
4975 if (op1 == 0) {
4976 gen_movl_T1_reg(s, rn);
4977 gen_op_addl_T0_T1_setq();
4979 gen_movl_reg_T0(s, rd);
4982 break;
4983 default:
4984 goto illegal_op;
4986 } else if (((insn & 0x0e000000) == 0 &&
4987 (insn & 0x00000090) != 0x90) ||
4988 ((insn & 0x0e000000) == (1 << 25))) {
4989 int set_cc, logic_cc, shiftop;
4991 op1 = (insn >> 21) & 0xf;
4992 set_cc = (insn >> 20) & 1;
4993 logic_cc = table_logic_cc[op1] & set_cc;
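/* Logical ops set N and Z from the result (C comes from the
shifter); table_logic_cc marks those opcodes, and set_cc is the
S bit of the instruction. */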
4995 /* data processing instruction */
4996 if (insn & (1 << 25)) {
4997 /* immediate operand */
4998 val = insn & 0xff;
4999 shift = ((insn >> 8) & 0xf) * 2;
5000 if (shift)
5001 val = (val >> shift) | (val << (32 - shift));
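/* Standard ARM modified immediate: an 8-bit value rotated right by
twice the 4-bit rotate field. E.g. imm8=0xff with rotate field 2
gives 0xff ror 4 = 0xf000000f. */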
5002 gen_op_movl_T1_im(val);
5003 if (logic_cc && shift)
5004 gen_op_mov_CF_T1();
5005 } else {
5006 /* register */
5007 rm = (insn) & 0xf;
5008 gen_movl_T1_reg(s, rm);
5009 shiftop = (insn >> 5) & 3;
5010 if (!(insn & (1 << 4))) {
5011 shift = (insn >> 7) & 0x1f;
5012 if (shift != 0) {
5013 if (logic_cc) {
5014 gen_shift_T1_im_cc[shiftop](shift);
5015 } else {
5016 gen_shift_T1_im[shiftop](shift);
5018 } else if (shiftop != 0) {
5019 if (logic_cc) {
5020 gen_shift_T1_0_cc[shiftop]();
5021 } else {
5022 gen_shift_T1_0[shiftop]();
5025 } else {
5026 rs = (insn >> 8) & 0xf;
5027 gen_movl_T0_reg(s, rs);
5028 if (logic_cc) {
5029 gen_shift_T1_T0_cc[shiftop]();
5030 } else {
5031 gen_shift_T1_T0[shiftop]();
5035 if (op1 != 0x0f && op1 != 0x0d) {
5036 rn = (insn >> 16) & 0xf;
5037 gen_movl_T0_reg(s, rn);
5039 rd = (insn >> 12) & 0xf;
5040 switch(op1) {
5041 case 0x00:
5042 gen_op_andl_T0_T1();
5043 gen_movl_reg_T0(s, rd);
5044 if (logic_cc)
5045 gen_op_logic_T0_cc();
5046 break;
5047 case 0x01:
5048 gen_op_xorl_T0_T1();
5049 gen_movl_reg_T0(s, rd);
5050 if (logic_cc)
5051 gen_op_logic_T0_cc();
5052 break;
5053 case 0x02:
5054 if (set_cc && rd == 15) {
5055 /* SUBS r15, ... is used for exception return. */
5056 if (IS_USER(s))
5057 goto illegal_op;
5058 gen_op_subl_T0_T1_cc();
5059 gen_exception_return(s);
5060 } else {
5061 if (set_cc)
5062 gen_op_subl_T0_T1_cc();
5063 else
5064 gen_op_subl_T0_T1();
5065 gen_movl_reg_T0(s, rd);
5067 break;
5068 case 0x03:
5069 if (set_cc)
5070 gen_op_rsbl_T0_T1_cc();
5071 else
5072 gen_op_rsbl_T0_T1();
5073 gen_movl_reg_T0(s, rd);
5074 break;
5075 case 0x04:
5076 if (set_cc)
5077 gen_op_addl_T0_T1_cc();
5078 else
5079 gen_op_addl_T0_T1();
5080 gen_movl_reg_T0(s, rd);
5081 break;
5082 case 0x05:
5083 if (set_cc)
5084 gen_op_adcl_T0_T1_cc();
5085 else
5086 gen_op_adcl_T0_T1();
5087 gen_movl_reg_T0(s, rd);
5088 break;
5089 case 0x06:
5090 if (set_cc)
5091 gen_op_sbcl_T0_T1_cc();
5092 else
5093 gen_op_sbcl_T0_T1();
5094 gen_movl_reg_T0(s, rd);
5095 break;
5096 case 0x07:
5097 if (set_cc)
5098 gen_op_rscl_T0_T1_cc();
5099 else
5100 gen_op_rscl_T0_T1();
5101 gen_movl_reg_T0(s, rd);
5102 break;
5103 case 0x08:
5104 if (set_cc) {
5105 gen_op_andl_T0_T1();
5106 gen_op_logic_T0_cc();
5108 break;
5109 case 0x09:
5110 if (set_cc) {
5111 gen_op_xorl_T0_T1();
5112 gen_op_logic_T0_cc();
5114 break;
5115 case 0x0a:
5116 if (set_cc) {
5117 gen_op_subl_T0_T1_cc();
5119 break;
5120 case 0x0b:
5121 if (set_cc) {
5122 gen_op_addl_T0_T1_cc();
5124 break;
5125 case 0x0c:
5126 gen_op_orl_T0_T1();
5127 gen_movl_reg_T0(s, rd);
5128 if (logic_cc)
5129 gen_op_logic_T0_cc();
5130 break;
5131 case 0x0d:
5132 if (logic_cc && rd == 15) {
5133 /* MOVS r15, ... is used for exception return. */
5134 if (IS_USER(s))
5135 goto illegal_op;
5136 gen_op_movl_T0_T1();
5137 gen_exception_return(s);
5138 } else {
5139 gen_movl_reg_T1(s, rd);
5140 if (logic_cc)
5141 gen_op_logic_T1_cc();
5143 break;
5144 case 0x0e:
5145 gen_op_bicl_T0_T1();
5146 gen_movl_reg_T0(s, rd);
5147 if (logic_cc)
5148 gen_op_logic_T0_cc();
5149 break;
5150 default:
5151 case 0x0f:
5152 gen_op_notl_T1();
5153 gen_movl_reg_T1(s, rd);
5154 if (logic_cc)
5155 gen_op_logic_T1_cc();
5156 break;
5158 } else {
5159 /* other instructions */
5160 op1 = (insn >> 24) & 0xf;
5161 switch(op1) {
5162 case 0x0:
5163 case 0x1:
5164 /* multiplies, extra load/stores */
5165 sh = (insn >> 5) & 3;
5166 if (sh == 0) {
5167 if (op1 == 0x0) {
5168 rd = (insn >> 16) & 0xf;
5169 rn = (insn >> 12) & 0xf;
5170 rs = (insn >> 8) & 0xf;
5171 rm = (insn) & 0xf;
5172 op1 = (insn >> 20) & 0xf;
5173 switch (op1) {
5174 case 0: case 1: case 2: case 3: case 6:
5175 /* 32 bit mul */
5176 gen_movl_T0_reg(s, rs);
5177 gen_movl_T1_reg(s, rm);
5178 gen_op_mul_T0_T1();
5179 if (insn & (1 << 22)) {
5180 /* Subtract (mls) */
5181 ARCH(6T2);
5182 gen_movl_T1_reg(s, rn);
5183 gen_op_rsbl_T0_T1();
5184 } else if (insn & (1 << 21)) {
5185 /* Add */
5186 gen_movl_T1_reg(s, rn);
5187 gen_op_addl_T0_T1();
5189 if (insn & (1 << 20))
5190 gen_op_logic_T0_cc();
5191 gen_movl_reg_T0(s, rd);
5192 break;
5193 default:
5194 /* 64 bit mul */
5195 gen_movl_T0_reg(s, rs);
5196 gen_movl_T1_reg(s, rm);
5197 if (insn & (1 << 22))
5198 gen_op_imull_T0_T1();
5199 else
5200 gen_op_mull_T0_T1();
5201 if (insn & (1 << 21)) /* mult accumulate */
5202 gen_op_addq_T0_T1(rn, rd);
5203 if (!(insn & (1 << 23))) { /* double accumulate */
5204 ARCH(6);
5205 gen_op_addq_lo_T0_T1(rn);
5206 gen_op_addq_lo_T0_T1(rd);
5208 if (insn & (1 << 20))
5209 gen_op_logicq_cc();
5210 gen_movl_reg_T0(s, rn);
5211 gen_movl_reg_T1(s, rd);
5212 break;
5214 } else {
5215 rn = (insn >> 16) & 0xf;
5216 rd = (insn >> 12) & 0xf;
5217 if (insn & (1 << 23)) {
5218 /* load/store exclusive */
5219 gen_movl_T1_reg(s, rn);
5220 if (insn & (1 << 20)) {
5221 gen_ldst(ldlex, s);
5222 } else {
5223 rm = insn & 0xf;
5224 gen_movl_T0_reg(s, rm);
5225 gen_ldst(stlex, s);
5227 gen_movl_reg_T0(s, rd);
5228 } else {
5229 /* SWP instruction */
5230 rm = (insn) & 0xf;
5232 gen_movl_T0_reg(s, rm);
5233 gen_movl_T1_reg(s, rn);
5234 if (insn & (1 << 22)) {
5235 gen_ldst(swpb, s);
5236 } else {
5237 gen_ldst(swpl, s);
5239 gen_movl_reg_T0(s, rd);
5242 } else {
5243 int address_offset;
5244 int load;
5245 /* Misc load/store */
5246 rn = (insn >> 16) & 0xf;
5247 rd = (insn >> 12) & 0xf;
5248 gen_movl_T1_reg(s, rn);
5249 if (insn & (1 << 24))
5250 gen_add_datah_offset(s, insn, 0);
5251 address_offset = 0;
5252 if (insn & (1 << 20)) {
5253 /* load */
5254 switch(sh) {
5255 case 1:
5256 gen_ldst(lduw, s);
5257 break;
5258 case 2:
5259 gen_ldst(ldsb, s);
5260 break;
5261 default:
5262 case 3:
5263 gen_ldst(ldsw, s);
5264 break;
5266 load = 1;
5267 } else if (sh & 2) {
5268 /* doubleword */
5269 if (sh & 1) {
5270 /* store */
5271 gen_movl_T0_reg(s, rd);
5272 gen_ldst(stl, s);
5273 gen_op_addl_T1_im(4);
5274 gen_movl_T0_reg(s, rd + 1);
5275 gen_ldst(stl, s);
5276 load = 0;
5277 } else {
5278 /* load */
5279 gen_ldst(ldl, s);
5280 gen_movl_reg_T0(s, rd);
5281 gen_op_addl_T1_im(4);
5282 gen_ldst(ldl, s);
5283 rd++;
5284 load = 1;
5286 address_offset = -4;
5287 } else {
5288 /* store */
5289 gen_movl_T0_reg(s, rd);
5290 gen_ldst(stw, s);
5291 load = 0;
5293 /* Perform base writeback before the loaded value to
5294 ensure correct behavior with overlapping index registers.
5295 ldrd with base writeback is undefined if the
5296 destination and index registers overlap. */
5297 if (!(insn & (1 << 24))) {
5298 gen_add_datah_offset(s, insn, address_offset);
5299 gen_movl_reg_T1(s, rn);
5300 } else if (insn & (1 << 21)) {
5301 if (address_offset)
5302 gen_op_addl_T1_im(address_offset);
5303 gen_movl_reg_T1(s, rn);
5305 if (load) {
5306 /* Complete the load. */
5307 gen_movl_reg_T0(s, rd);
5310 break;
5311 case 0x4:
5312 case 0x5:
5313 goto do_ldst;
5314 case 0x6:
5315 case 0x7:
5316 if (insn & (1 << 4)) {
5317 ARCH(6);
5318 /* Armv6 Media instructions. */
5319 rm = insn & 0xf;
5320 rn = (insn >> 16) & 0xf;
5321 rd = (insn >> 12) & 0xf;
5322 rs = (insn >> 8) & 0xf;
5323 switch ((insn >> 23) & 3) {
5324 case 0: /* Parallel add/subtract. */
5325 op1 = (insn >> 20) & 7;
5326 gen_movl_T0_reg(s, rn);
5327 gen_movl_T1_reg(s, rm);
5328 sh = (insn >> 5) & 7;
5329 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5330 goto illegal_op;
5331 gen_arm_parallel_addsub[op1][sh]();
5332 gen_movl_reg_T0(s, rd);
5333 break;
5334 case 1:
5335 if ((insn & 0x00700020) == 0) {
5336 /* Halfword pack. */
5337 gen_movl_T0_reg(s, rn);
5338 gen_movl_T1_reg(s, rm);
5339 shift = (insn >> 7) & 0x1f;
5340 if (shift)
5341 gen_op_shll_T1_im(shift);
5342 if (insn & (1 << 6))
5343 gen_op_pkhtb_T0_T1();
5344 else
5345 gen_op_pkhbt_T0_T1();
5346 gen_movl_reg_T0(s, rd);
5347 } else if ((insn & 0x00200020) == 0x00200000) {
5348 /* [us]sat */
5349 gen_movl_T1_reg(s, rm);
5350 shift = (insn >> 7) & 0x1f;
5351 if (insn & (1 << 6)) {
5352 if (shift == 0)
5353 shift = 31;
5354 gen_op_sarl_T1_im(shift);
5355 } else {
5356 gen_op_shll_T1_im(shift);
5358 sh = (insn >> 16) & 0x1f;
5359 if (sh != 0) {
5360 if (insn & (1 << 22))
5361 gen_op_usat_T1(sh);
5362 else
5363 gen_op_ssat_T1(sh);
5365 gen_movl_T1_reg(s, rd);
5366 } else if ((insn & 0x00300fe0) == 0x00200f20) {
5367 /* [us]sat16 */
5368 gen_movl_T1_reg(s, rm);
5369 sh = (insn >> 16) & 0x1f;
5370 if (sh != 0) {
5371 if (insn & (1 << 22))
5372 gen_op_usat16_T1(sh);
5373 else
5374 gen_op_ssat16_T1(sh);
5376 gen_movl_T1_reg(s, rd);
5377 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5378 /* Select bytes. */
5379 gen_movl_T0_reg(s, rn);
5380 gen_movl_T1_reg(s, rm);
5381 gen_op_sel_T0_T1();
5382 gen_movl_reg_T0(s, rd);
5383 } else if ((insn & 0x000003e0) == 0x00000060) {
5384 gen_movl_T1_reg(s, rm);
5385 shift = (insn >> 10) & 3;
5386 /* ??? In many cases it's not necessary to do a
5387 rotate, a shift is sufficient. */
5388 if (shift != 0)
5389 gen_op_rorl_T1_im(shift * 8);
5390 op1 = (insn >> 20) & 7;
5391 switch (op1) {
5392 case 0: gen_op_sxtb16_T1(); break;
5393 case 2: gen_op_sxtb_T1(); break;
5394 case 3: gen_op_sxth_T1(); break;
5395 case 4: gen_op_uxtb16_T1(); break;
5396 case 6: gen_op_uxtb_T1(); break;
5397 case 7: gen_op_uxth_T1(); break;
5398 default: goto illegal_op;
5400 if (rn != 15) {
5401 gen_movl_T2_reg(s, rn);
5402 if ((op1 & 3) == 0) {
5403 gen_op_add16_T1_T2();
5404 } else {
5405 gen_op_addl_T1_T2();
5408 gen_movl_reg_T1(s, rd);
5409 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5410 /* rev */
5411 gen_movl_T0_reg(s, rm);
5412 if (insn & (1 << 22)) {
5413 if (insn & (1 << 7)) {
5414 gen_op_revsh_T0();
5415 } else {
5416 ARCH(6T2);
5417 gen_op_rbit_T0();
5419 } else {
5420 if (insn & (1 << 7))
5421 gen_op_rev16_T0();
5422 else
5423 gen_op_rev_T0();
5425 gen_movl_reg_T0(s, rd);
5426 } else {
5427 goto illegal_op;
5429 break;
5430 case 2: /* Multiplies (Type 3). */
5431 gen_movl_T0_reg(s, rm);
5432 gen_movl_T1_reg(s, rs);
5433 if (insn & (1 << 20)) {
5434 /* Signed multiply most significant [accumulate]. */
5435 gen_op_imull_T0_T1();
5436 if (insn & (1 << 5))
5437 gen_op_roundqd_T0_T1();
5438 else
5439 gen_op_movl_T0_T1();
5440 if (rn != 15) {
5441 gen_movl_T1_reg(s, rn);
5442 if (insn & (1 << 6)) {
5443 gen_op_addl_T0_T1();
5444 } else {
5445 gen_op_rsbl_T0_T1();
5448 gen_movl_reg_T0(s, rd);
5449 } else {
5450 if (insn & (1 << 5))
5451 gen_op_swap_half_T1();
5452 gen_op_mul_dual_T0_T1();
5453 if (insn & (1 << 22)) {
5454 if (insn & (1 << 6)) {
5455 /* smlald */
5456 gen_op_addq_T0_T1_dual(rn, rd);
5457 } else {
5458 /* smlsld */
5459 gen_op_subq_T0_T1_dual(rn, rd);
5461 } else {
5462 /* This addition cannot overflow. */
5463 if (insn & (1 << 6)) {
5464 /* sm[ul]sd */
5465 gen_op_subl_T0_T1();
5466 } else {
5467 /* sm[ul]ad */
5468 gen_op_addl_T0_T1();
5470 if (rn != 15)
5472 gen_movl_T1_reg(s, rn);
5473 gen_op_addl_T0_T1_setq();
5475 gen_movl_reg_T0(s, rd);
5478 break;
5479 case 3:
5480 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5481 switch (op1) {
5482 case 0: /* Unsigned sum of absolute differences. */
5484 gen_movl_T0_reg(s, rm);
5485 gen_movl_T1_reg(s, rs);
5486 gen_op_usad8_T0_T1();
5487 if (rn != 15) {
5488 gen_movl_T1_reg(s, rn);
5489 gen_op_addl_T0_T1();
5491 gen_movl_reg_T0(s, rd);
5492 break;
5493 case 0x20: case 0x24: case 0x28: case 0x2c:
5494 /* Bitfield insert/clear. */
5495 ARCH(6T2);
5496 shift = (insn >> 7) & 0x1f;
5497 i = (insn >> 16) & 0x1f;
5498 i = i + 1 - shift;
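/* insn[20:16] holds the MSB of the field, so this yields the
field width (msb + 1 - lsb). */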
5499 if (rm == 15) {
5500 gen_op_movl_T1_im(0);
5501 } else {
5502 gen_movl_T1_reg(s, rm);
5504 if (i != 32) {
5505 gen_movl_T0_reg(s, rd);
5506 gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
5508 gen_movl_reg_T1(s, rd);
5509 break;
5510 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5511 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5512 gen_movl_T1_reg(s, rm);
5513 shift = (insn >> 7) & 0x1f;
5514 i = ((insn >> 16) & 0x1f) + 1;
5515 if (shift + i > 32)
5516 goto illegal_op;
5517 if (i < 32) {
5518 if (op1 & 0x20) {
5519 gen_op_ubfx_T1(shift, (1u << i) - 1);
5520 } else {
5521 gen_op_sbfx_T1(shift, i);
5524 gen_movl_reg_T1(s, rd);
5525 break;
5526 default:
5527 goto illegal_op;
5529 break;
5531 break;
5533 do_ldst:
5534 /* Check for undefined extension instructions
5535 * per the ARM Bible, i.e.:
5536 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
5538 sh = (0xf << 20) | (0xf << 4);
5539 if (op1 == 0x7 && ((insn & sh) == sh))
5541 goto illegal_op;
5543 /* load/store byte/word */
5544 rn = (insn >> 16) & 0xf;
5545 rd = (insn >> 12) & 0xf;
5546 gen_movl_T1_reg(s, rn);
5547 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
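/* Use the unprivileged (user) view of memory either when actually
in user mode or for the ldrt/strt forms (post-indexed with the
W bit set). */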
5548 if (insn & (1 << 24))
5549 gen_add_data_offset(s, insn);
5550 if (insn & (1 << 20)) {
5551 /* load */
5552 s->is_mem = 1;
5553 #if defined(CONFIG_USER_ONLY)
5554 if (insn & (1 << 22))
5555 gen_op_ldub_raw();
5556 else
5557 gen_op_ldl_raw();
5558 #else
5559 if (insn & (1 << 22)) {
5560 if (i)
5561 gen_op_ldub_user();
5562 else
5563 gen_op_ldub_kernel();
5564 } else {
5565 if (i)
5566 gen_op_ldl_user();
5567 else
5568 gen_op_ldl_kernel();
5570 #endif
5571 } else {
5572 /* store */
5573 gen_movl_T0_reg(s, rd);
5574 #if defined(CONFIG_USER_ONLY)
5575 if (insn & (1 << 22))
5576 gen_op_stb_raw();
5577 else
5578 gen_op_stl_raw();
5579 #else
5580 if (insn & (1 << 22)) {
5581 if (i)
5582 gen_op_stb_user();
5583 else
5584 gen_op_stb_kernel();
5585 } else {
5586 if (i)
5587 gen_op_stl_user();
5588 else
5589 gen_op_stl_kernel();
5591 #endif
5593 if (!(insn & (1 << 24))) {
5594 gen_add_data_offset(s, insn);
5595 gen_movl_reg_T1(s, rn);
5596 } else if (insn & (1 << 21))
5597 gen_movl_reg_T1(s, rn);
5599 if (insn & (1 << 20)) {
5600 /* Complete the load. */
5601 if (rd == 15)
5602 gen_bx(s);
5603 else
5604 gen_movl_reg_T0(s, rd);
5606 break;
5607 case 0x08:
5608 case 0x09:
5610 int j, n, user, loaded_base;
5611 /* load/store multiple words */
5612 /* XXX: store correct base if write back */
5613 user = 0;
5614 if (insn & (1 << 22)) {
5615 if (IS_USER(s))
5616 goto illegal_op; /* only usable in supervisor mode */
5618 if ((insn & (1 << 15)) == 0)
5619 user = 1;
5621 rn = (insn >> 16) & 0xf;
5622 gen_movl_T1_reg(s, rn);
5624 /* compute total size */
5625 loaded_base = 0;
5626 n = 0;
5627 for(i=0;i<16;i++) {
5628 if (insn & (1 << i))
5629 n++;
5631 /* XXX: test invalid n == 0 case ? */
5632 if (insn & (1 << 23)) {
5633 if (insn & (1 << 24)) {
5634 /* pre increment */
5635 gen_op_addl_T1_im(4);
5636 } else {
5637 /* post increment */
5639 } else {
5640 if (insn & (1 << 24)) {
5641 /* pre decrement */
5642 gen_op_addl_T1_im(-(n * 4));
5643 } else {
5644 /* post decrement */
5645 if (n != 1)
5646 gen_op_addl_T1_im(-((n - 1) * 4));
5649 j = 0;
5650 for(i=0;i<16;i++) {
5651 if (insn & (1 << i)) {
5652 if (insn & (1 << 20)) {
5653 /* load */
5654 gen_ldst(ldl, s);
5655 if (i == 15) {
5656 gen_bx(s);
5657 } else if (user) {
5658 gen_op_movl_user_T0(i);
5659 } else if (i == rn) {
5660 gen_op_movl_T2_T0();
5661 loaded_base = 1;
5662 } else {
5663 gen_movl_reg_T0(s, i);
5665 } else {
5666 /* store */
5667 if (i == 15) {
5668 /* special case: r15 = PC + 8 */
5669 val = (long)s->pc + 4;
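/* s->pc already points past this instruction, so +4 yields the
architectural PC+8 value that stm stores for r15. */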
5670 gen_op_movl_TN_im[0](val);
5671 } else if (user) {
5672 gen_op_movl_T0_user(i);
5673 } else {
5674 gen_movl_T0_reg(s, i);
5676 gen_ldst(stl, s);
5678 j++;
5679 /* no need to add after the last transfer */
5680 if (j != n)
5681 gen_op_addl_T1_im(4);
5684 if (insn & (1 << 21)) {
5685 /* write back */
5686 if (insn & (1 << 23)) {
5687 if (insn & (1 << 24)) {
5688 /* pre increment */
5689 } else {
5690 /* post increment */
5691 gen_op_addl_T1_im(4);
5693 } else {
5694 if (insn & (1 << 24)) {
5695 /* pre decrement */
5696 if (n != 1)
5697 gen_op_addl_T1_im(-((n - 1) * 4));
5698 } else {
5699 /* post decrement */
5700 gen_op_addl_T1_im(-(n * 4));
5703 gen_movl_reg_T1(s, rn);
5705 if (loaded_base) {
5706 gen_op_movl_T0_T2();
5707 gen_movl_reg_T0(s, rn);
5709 if ((insn & (1 << 22)) && !user) {
5710 /* Restore CPSR from SPSR. */
5711 gen_op_movl_T0_spsr();
5712 gen_op_movl_cpsr_T0(0xffffffff);
5713 s->is_jmp = DISAS_UPDATE;
5716 break;
5717 case 0xa:
5718 case 0xb:
5720 int32_t offset;
5722 /* branch (and link) */
5723 val = (int32_t)s->pc;
5724 if (insn & (1 << 24)) {
5725 gen_op_movl_T0_im(val);
5726 gen_op_movl_reg_TN[0][14]();
5728 offset = (((int32_t)insn << 8) >> 8);
5729 val += (offset << 2) + 4;
5730 gen_jmp(s, val);
5732 break;
5733 case 0xc:
5734 case 0xd:
5735 case 0xe:
5736 /* Coprocessor. */
5737 if (disas_coproc_insn(env, s, insn))
5738 goto illegal_op;
5739 break;
5740 case 0xf:
5741 /* swi */
5742 gen_op_movl_T0_im((long)s->pc);
5743 gen_op_movl_reg_TN[0][15]();
5744 s->is_jmp = DISAS_SWI;
5745 break;
5746 default:
5747 illegal_op:
5748 gen_set_condexec(s);
5749 gen_op_movl_T0_im((long)s->pc - 4);
5750 gen_op_movl_reg_TN[0][15]();
5751 gen_op_undef_insn();
5752 s->is_jmp = DISAS_JUMP;
5753 break;
5758 /* Return true if this is a Thumb-2 logical op. */
5759 static int
5760 thumb2_logic_op(int op)
5762 return (op < 8);
5765 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
5766 then set condition code flags based on the result of the operation.
5767 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
5768 to the high bit of T1.
5769 Returns zero if the opcode is valid. */
5771 static int
5772 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
5774 int logic_cc;
5776 logic_cc = 0;
5777 switch (op) {
5778 case 0: /* and */
5779 gen_op_andl_T0_T1();
5780 logic_cc = conds;
5781 break;
5782 case 1: /* bic */
5783 gen_op_bicl_T0_T1();
5784 logic_cc = conds;
5785 break;
5786 case 2: /* orr */
5787 gen_op_orl_T0_T1();
5788 logic_cc = conds;
5789 break;
5790 case 3: /* orn */
5791 gen_op_notl_T1();
5792 gen_op_orl_T0_T1();
5793 logic_cc = conds;
5794 break;
5795 case 4: /* eor */
5796 gen_op_xorl_T0_T1();
5797 logic_cc = conds;
5798 break;
5799 case 8: /* add */
5800 if (conds)
5801 gen_op_addl_T0_T1_cc();
5802 else
5803 gen_op_addl_T0_T1();
5804 break;
5805 case 10: /* adc */
5806 if (conds)
5807 gen_op_adcl_T0_T1_cc();
5808 else
5809 gen_op_adcl_T0_T1();
5810 break;
5811 case 11: /* sbc */
5812 if (conds)
5813 gen_op_sbcl_T0_T1_cc();
5814 else
5815 gen_op_sbcl_T0_T1();
5816 break;
5817 case 13: /* sub */
5818 if (conds)
5819 gen_op_subl_T0_T1_cc();
5820 else
5821 gen_op_subl_T0_T1();
5822 break;
5823 case 14: /* rsb */
5824 if (conds)
5825 gen_op_rsbl_T0_T1_cc();
5826 else
5827 gen_op_rsbl_T0_T1();
5828 break;
5829 default: /* 5, 6, 7, 9, 12, 15. */
5830 return 1;
5832 if (logic_cc) {
5833 gen_op_logic_T0_cc();
5834 if (shifter_out)
5835 gen_op_mov_CF_T1();
5837 return 0;
5840 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
5841 is not legal. */
5842 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
5844 uint32_t insn, imm, shift, offset, addr;
5845 uint32_t rd, rn, rm, rs;
5846 int op;
5847 int shiftop;
5848 int conds;
5849 int logic_cc;
5851 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
5852 || arm_feature (env, ARM_FEATURE_M))) {
5853 /* Thumb-1 cores may need to treat bl and blx as a pair of
5854 16-bit instructions to get correct prefetch abort behavior. */
5855 insn = insn_hw1;
5856 if ((insn & (1 << 12)) == 0) {
5857 /* Second half of blx. */
5858 offset = ((insn & 0x7ff) << 1);
5859 gen_movl_T0_reg(s, 14);
5860 gen_op_movl_T1_im(offset);
5861 gen_op_addl_T0_T1();
5862 gen_op_movl_T1_im(0xfffffffc);
5863 gen_op_andl_T0_T1();
5865 addr = (uint32_t)s->pc;
5866 gen_op_movl_T1_im(addr | 1);
5867 gen_movl_reg_T1(s, 14);
5868 gen_bx(s);
5869 return 0;
5871 if (insn & (1 << 11)) {
5872 /* Second half of bl. */
5873 offset = ((insn & 0x7ff) << 1) | 1;
5874 gen_movl_T0_reg(s, 14);
5875 gen_op_movl_T1_im(offset);
5876 gen_op_addl_T0_T1();
5878 addr = (uint32_t)s->pc;
5879 gen_op_movl_T1_im(addr | 1);
5880 gen_movl_reg_T1(s, 14);
5881 gen_bx(s);
5882 return 0;
5884 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
5885 /* Instruction spans a page boundary. Implement it as two
5886 16-bit instructions in case the second half causes a
5887 prefetch abort. */
5888 offset = ((int32_t)insn << 21) >> 9;
5889 addr = s->pc + 2 + offset;
5890 gen_op_movl_T0_im(addr);
5891 gen_movl_reg_T0(s, 14);
5892 return 0;
5894 /* Fall through to 32-bit decode. */
5897 insn = lduw_code(s->pc);
5898 s->pc += 2;
5899 insn |= (uint32_t)insn_hw1 << 16;
5901 if ((insn & 0xf800e800) != 0xf000e800) {
5902 ARCH(6T2);
5905 rn = (insn >> 16) & 0xf;
5906 rs = (insn >> 12) & 0xf;
5907 rd = (insn >> 8) & 0xf;
5908 rm = insn & 0xf;
5909 switch ((insn >> 25) & 0xf) {
5910 case 0: case 1: case 2: case 3:
5911 /* 16-bit instructions. Should never happen. */
5912 abort();
5913 case 4:
5914 if (insn & (1 << 22)) {
5915 /* Other load/store, table branch. */
5916 if (insn & 0x01200000) {
5917 /* Load/store doubleword. */
5918 if (rn == 15) {
5919 gen_op_movl_T1_im(s->pc & ~3);
5920 } else {
5921 gen_movl_T1_reg(s, rn);
5923 offset = (insn & 0xff) * 4;
5924 if ((insn & (1 << 23)) == 0)
5925 offset = -offset;
5926 if (insn & (1 << 24)) {
5927 gen_op_addl_T1_im(offset);
5928 offset = 0;
5930 if (insn & (1 << 20)) {
5931 /* ldrd */
5932 gen_ldst(ldl, s);
5933 gen_movl_reg_T0(s, rs);
5934 gen_op_addl_T1_im(4);
5935 gen_ldst(ldl, s);
5936 gen_movl_reg_T0(s, rd);
5937 } else {
5938 /* strd */
5939 gen_movl_T0_reg(s, rs);
5940 gen_ldst(stl, s);
5941 gen_op_addl_T1_im(4);
5942 gen_movl_T0_reg(s, rd);
5943 gen_ldst(stl, s);
5945 if (insn & (1 << 21)) {
5946 /* Base writeback. */
5947 if (rn == 15)
5948 goto illegal_op;
5949 gen_op_addl_T1_im(offset - 4);
5950 gen_movl_reg_T1(s, rn);
5952 } else if ((insn & (1 << 23)) == 0) {
5953 /* Load/store exclusive word. */
5954 gen_movl_T0_reg(s, rd);
5955 gen_movl_T1_reg(s, rn);
5956 if (insn & (1 << 20)) {
5957 gen_ldst(ldlex, s);
5958 } else {
5959 gen_ldst(stlex, s);
5961 gen_movl_reg_T0(s, rd);
5962 } else if ((insn & (1 << 6)) == 0) {
5963 /* Table Branch. */
5964 if (rn == 15) {
5965 gen_op_movl_T1_im(s->pc);
5966 } else {
5967 gen_movl_T1_reg(s, rn);
5969 gen_movl_T2_reg(s, rm);
5970 gen_op_addl_T1_T2();
5971 if (insn & (1 << 4)) {
5972 /* tbh */
5973 gen_op_addl_T1_T2();
5974 gen_ldst(lduw, s);
5975 } else { /* tbb */
5976 gen_ldst(ldub, s);
5978 gen_op_jmp_T0_im(s->pc);
5979 s->is_jmp = DISAS_JUMP;
5980 } else {
5981 /* Load/store exclusive byte/halfword/doubleword. */
5982 op = (insn >> 4) & 0x3;
5983 gen_movl_T1_reg(s, rn);
5984 if (insn & (1 << 20)) {
5985 switch (op) {
5986 case 0:
5987 gen_ldst(ldbex, s);
5988 break;
5989 case 1:
5990 gen_ldst(ldwex, s);
5991 break;
5992 case 3:
5993 gen_ldst(ldqex, s);
5994 gen_movl_reg_T1(s, rd);
5995 break;
5996 default:
5997 goto illegal_op;
5999 gen_movl_reg_T0(s, rs);
6000 } else {
6001 gen_movl_T0_reg(s, rs);
6002 switch (op) {
6003 case 0:
6004 gen_ldst(stbex, s);
6005 break;
6006 case 1:
6007 gen_ldst(stwex, s);
6008 break;
6009 case 3:
6010 gen_movl_T2_reg(s, rd);
6011 gen_ldst(stqex, s);
6012 break;
6013 default:
6014 goto illegal_op;
6016 gen_movl_reg_T0(s, rm);
6019 } else {
6020 /* Load/store multiple, RFE, SRS. */
6021 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6022 /* Not available in user mode. */
6023 if (IS_USER(s))
6024 goto illegal_op;
6025 if (insn & (1 << 20)) {
6026 /* rfe */
6027 gen_movl_T1_reg(s, rn);
6028 if (insn & (1 << 24)) {
6029 gen_op_addl_T1_im(4);
6030 } else {
6031 gen_op_addl_T1_im(-4);
6033 /* Load CPSR into T2 and PC into T0. */
6034 gen_ldst(ldl, s);
6035 gen_op_movl_T2_T0();
6036 gen_op_addl_T1_im(-4);
6037 gen_ldst(ldl, s);
6038 if (insn & (1 << 21)) {
6039 /* Base writeback. */
6040 if (insn & (1 << 24))
6041 gen_op_addl_T1_im(8);
6042 gen_movl_reg_T1(s, rn);
6044 gen_rfe(s);
6045 } else {
6046 /* srs */
6047 op = (insn & 0x1f);
6048 if (op == (env->uncached_cpsr & CPSR_M)) {
6049 gen_movl_T1_reg(s, 13);
6050 } else {
6051 gen_op_movl_T1_r13_banked(op);
6053 if ((insn & (1 << 24)) == 0) {
6054 gen_op_addl_T1_im(-8);
6056 gen_movl_T0_reg(s, 14);
6057 gen_ldst(stl, s);
6058 gen_op_movl_T0_cpsr();
6059 gen_op_addl_T1_im(4);
6060 gen_ldst(stl, s);
6061 if (insn & (1 << 21)) {
6062 if ((insn & (1 << 24)) == 0) {
6063 gen_op_addl_T1_im(-4);
6064 } else {
6065 gen_op_addl_T1_im(4);
6067 if (op == (env->uncached_cpsr & CPSR_M)) {
6068 gen_movl_reg_T1(s, 13);
6069 } else {
6070 gen_op_movl_r13_T1_banked(op);
6074 } else {
6075 int i;
6076 /* Load/store multiple. */
6077 gen_movl_T1_reg(s, rn);
6078 offset = 0;
6079 for (i = 0; i < 16; i++) {
6080 if (insn & (1 << i))
6081 offset += 4;
6083 if (insn & (1 << 24)) {
6084 gen_op_addl_T1_im(-offset);
6087 for (i = 0; i < 16; i++) {
6088 if ((insn & (1 << i)) == 0)
6089 continue;
6090 if (insn & (1 << 20)) {
6091 /* Load. */
6092 gen_ldst(ldl, s);
6093 if (i == 15) {
6094 gen_bx(s);
6095 } else {
6096 gen_movl_reg_T0(s, i);
6098 } else {
6099 /* Store. */
6100 gen_movl_T0_reg(s, i);
6101 gen_ldst(stl, s);
6103 gen_op_addl_T1_im(4);
6105 if (insn & (1 << 21)) {
6106 /* Base register writeback. */
6107 if (insn & (1 << 24)) {
6108 gen_op_addl_T1_im(-offset);
6110 /* Fault if writeback register is in register list. */
6111 if (insn & (1 << rn))
6112 goto illegal_op;
6113 gen_movl_reg_T1(s, rn);
6117 break;
6118 case 5: /* Data processing register constant shift. */
6119 if (rn == 15)
6120 gen_op_movl_T0_im(0);
6121 else
6122 gen_movl_T0_reg(s, rn);
6123 gen_movl_T1_reg(s, rm);
6124 op = (insn >> 21) & 0xf;
6125 shiftop = (insn >> 4) & 3;
6126 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
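/* The 5-bit shift amount is split across the encoding:
imm2 in insn[7:6] and imm3 in insn[14:12]. */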
6127 conds = (insn & (1 << 20)) != 0;
6128 logic_cc = (conds && thumb2_logic_op(op));
6129 if (shift != 0) {
6130 if (logic_cc) {
6131 gen_shift_T1_im_cc[shiftop](shift);
6132 } else {
6133 gen_shift_T1_im[shiftop](shift);
6135 } else if (shiftop != 0) {
6136 if (logic_cc) {
6137 gen_shift_T1_0_cc[shiftop]();
6138 } else {
6139 gen_shift_T1_0[shiftop]();
6142 if (gen_thumb2_data_op(s, op, conds, 0))
6143 goto illegal_op;
6144 if (rd != 15)
6145 gen_movl_reg_T0(s, rd);
6146 break;
6147 case 13: /* Misc data processing. */
6148 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6149 if (op < 4 && (insn & 0xf000) != 0xf000)
6150 goto illegal_op;
6151 switch (op) {
6152 case 0: /* Register controlled shift. */
6153 gen_movl_T0_reg(s, rm);
6154 gen_movl_T1_reg(s, rn);
6155 if ((insn & 0x70) != 0)
6156 goto illegal_op;
6157 op = (insn >> 21) & 3;
6158 if (insn & (1 << 20)) {
6159 gen_shift_T1_T0_cc[op]();
6160 gen_op_logic_T1_cc();
6161 } else {
6162 gen_shift_T1_T0[op]();
6164 gen_movl_reg_T1(s, rd);
6165 break;
6166 case 1: /* Sign/zero extend. */
6167 gen_movl_T1_reg(s, rm);
6168 shift = (insn >> 4) & 3;
6169 /* ??? In many cases it's not necessary to do a
6170 rotate, a shift is sufficient. */
6171 if (shift != 0)
6172 gen_op_rorl_T1_im(shift * 8);
6173 op = (insn >> 20) & 7;
6174 switch (op) {
6175 case 0: gen_op_sxth_T1(); break;
6176 case 1: gen_op_uxth_T1(); break;
6177 case 2: gen_op_sxtb16_T1(); break;
6178 case 3: gen_op_uxtb16_T1(); break;
6179 case 4: gen_op_sxtb_T1(); break;
6180 case 5: gen_op_uxtb_T1(); break;
6181 default: goto illegal_op;
6183 if (rn != 15) {
6184 gen_movl_T2_reg(s, rn);
6185 if ((op >> 1) == 1) {
6186 gen_op_add16_T1_T2();
6187 } else {
6188 gen_op_addl_T1_T2();
6191 gen_movl_reg_T1(s, rd);
6192 break;
6193 case 2: /* SIMD add/subtract. */
6194 op = (insn >> 20) & 7;
6195 shift = (insn >> 4) & 7;
6196 if ((op & 3) == 3 || (shift & 3) == 3)
6197 goto illegal_op;
6198 gen_movl_T0_reg(s, rn);
6199 gen_movl_T1_reg(s, rm);
6200 gen_thumb2_parallel_addsub[op][shift]();
6201 gen_movl_reg_T0(s, rd);
6202 break;
6203 case 3: /* Other data processing. */
6204 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6205 if (op < 4) {
6206 /* Saturating add/subtract. */
6207 gen_movl_T0_reg(s, rm);
6208 gen_movl_T1_reg(s, rn);
6209 if (op & 2)
6210 gen_op_double_T1_saturate();
6211 if (op & 1)
6212 gen_op_subl_T0_T1_saturate();
6213 else
6214 gen_op_addl_T0_T1_saturate();
6215 } else {
6216 gen_movl_T0_reg(s, rn);
6217 switch (op) {
6218 case 0x0a: /* rbit */
6219 gen_op_rbit_T0();
6220 break;
6221 case 0x08: /* rev */
6222 gen_op_rev_T0();
6223 break;
6224 case 0x09: /* rev16 */
6225 gen_op_rev16_T0();
6226 break;
6227 case 0x0b: /* revsh */
6228 gen_op_revsh_T0();
6229 break;
6230 case 0x10: /* sel */
6231 gen_movl_T1_reg(s, rm);
6232 gen_op_sel_T0_T1();
6233 break;
6234 case 0x18: /* clz */
6235 gen_op_clz_T0();
6236 break;
6237 default:
6238 goto illegal_op;
6241 gen_movl_reg_T0(s, rd);
6242 break;
6243 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6244 op = (insn >> 4) & 0xf;
6245 gen_movl_T0_reg(s, rn);
6246 gen_movl_T1_reg(s, rm);
6247 switch ((insn >> 20) & 7) {
6248 case 0: /* 32 x 32 -> 32 */
6249 gen_op_mul_T0_T1();
6250 if (rs != 15) {
6251 gen_movl_T1_reg(s, rs);
6252 if (op)
6253 gen_op_rsbl_T0_T1();
6254 else
6255 gen_op_addl_T0_T1();
6257 gen_movl_reg_T0(s, rd);
6258 break;
6259 case 1: /* 16 x 16 -> 32 */
6260 gen_mulxy(op & 2, op & 1);
6261 if (rs != 15) {
6262 gen_movl_T1_reg(s, rs);
6263 gen_op_addl_T0_T1_setq();
6265 gen_movl_reg_T0(s, rd);
6266 break;
6267 case 2: /* Dual multiply add. */
6268 case 4: /* Dual multiply subtract. */
6269 if (op)
6270 gen_op_swap_half_T1();
6271 gen_op_mul_dual_T0_T1();
6272 /* This addition cannot overflow. */
6273 if (insn & (1 << 22)) {
6274 gen_op_subl_T0_T1();
6275 } else {
6276 gen_op_addl_T0_T1();
6278 if (rs != 15)
6280 gen_movl_T1_reg(s, rs);
6281 gen_op_addl_T0_T1_setq();
6283 gen_movl_reg_T0(s, rd);
6284 break;
6285 case 3: /* 32 * 16 -> 32msb */
6286 if (op)
6287 gen_op_sarl_T1_im(16);
6288 else
6289 gen_op_sxth_T1();
6290 gen_op_imulw_T0_T1();
6291 if (rs != 15)
6293 gen_movl_T1_reg(s, rs);
6294 gen_op_addl_T0_T1_setq();
6296 gen_movl_reg_T0(s, rd);
6297 break;
6298 case 5: case 6: /* 32 * 32 -> 32msb */
6299 gen_op_imull_T0_T1();
6300 if (insn & (1 << 5))
6301 gen_op_roundqd_T0_T1();
6302 else
6303 gen_op_movl_T0_T1();
6304 if (rs != 15) {
6305 gen_movl_T1_reg(s, rs);
6306 if (insn & (1 << 21)) {
6307 gen_op_addl_T0_T1();
6308 } else {
6309 gen_op_rsbl_T0_T1();
6312 gen_movl_reg_T0(s, rd);
6313 break;
6314 case 7: /* Unsigned sum of absolute differences. */
6315 gen_op_usad8_T0_T1();
6316 if (rs != 15) {
6317 gen_movl_T1_reg(s, rs);
6318 gen_op_addl_T0_T1();
6320 gen_movl_reg_T0(s, rd);
6321 break;
6323 break;
6324 case 6: case 7: /* 64-bit multiply, Divide. */
6325 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
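/* Combine op2 (insn[7:4]) and op1 (insn[22:20]) into a single
selector value. */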
6326 gen_movl_T0_reg(s, rn);
6327 gen_movl_T1_reg(s, rm);
6328 if ((op & 0x50) == 0x10) {
6329 /* sdiv, udiv */
6330 if (!arm_feature(env, ARM_FEATURE_DIV))
6331 goto illegal_op;
6332 if (op & 0x20)
6333 gen_op_udivl_T0_T1();
6334 else
6335 gen_op_sdivl_T0_T1();
6336 gen_movl_reg_T0(s, rd);
6337 } else if ((op & 0xe) == 0xc) {
6338 /* Dual multiply accumulate long. */
6339 if (op & 1)
6340 gen_op_swap_half_T1();
6341 gen_op_mul_dual_T0_T1();
6342 if (op & 0x10) {
6343 gen_op_subl_T0_T1();
6344 } else {
6345 gen_op_addl_T0_T1();
6347 gen_op_signbit_T1_T0();
6348 gen_op_addq_T0_T1(rs, rd);
6349 gen_movl_reg_T0(s, rs);
6350 gen_movl_reg_T1(s, rd);
6351 } else {
6352 if (op & 0x20) {
6353 /* Unsigned 64-bit multiply */
6354 gen_op_mull_T0_T1();
6355 } else {
6356 if (op & 8) {
6357 /* smlalxy */
6358 gen_mulxy(op & 2, op & 1);
6359 gen_op_signbit_T1_T0();
6360 } else {
6361 /* Signed 64-bit multiply */
6362 gen_op_imull_T0_T1();
6365 if (op & 4) {
6366 /* umaal */
6367 gen_op_addq_lo_T0_T1(rs);
6368 gen_op_addq_lo_T0_T1(rd);
6369 } else if (op & 0x40) {
6370 /* 64-bit accumulate. */
6371 gen_op_addq_T0_T1(rs, rd);
6373 gen_movl_reg_T0(s, rs);
6374 gen_movl_reg_T1(s, rd);
6376 break;
6378 break;
6379 case 6: case 7: case 14: case 15:
6380 /* Coprocessor. */
6381 if (((insn >> 24) & 3) == 3) {
6382 /* Translate into the equivalent ARM encoding. */
6383 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6384 if (disas_neon_data_insn(env, s, insn))
6385 goto illegal_op;
6386 } else {
6387 if (insn & (1 << 28))
6388 goto illegal_op;
6389 if (disas_coproc_insn (env, s, insn))
6390 goto illegal_op;
6392 break;
6393 case 8: case 9: case 10: case 11:
6394 if (insn & (1 << 15)) {
6395 /* Branches, misc control. */
6396 if (insn & 0x5000) {
6397 /* Unconditional branch. */
6398 /* signextend(hw1[10:0]) -> offset[:12]. */
6399 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6400 /* hw1[10:0] -> offset[11:1]. */
6401 offset |= (insn & 0x7ff) << 1;
6402 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6403 offset[24:22] already have the same value because of the
6404 sign extension above. */
6405 offset ^= ((~insn) & (1 << 13)) << 10;
6406 offset ^= ((~insn) & (1 << 11)) << 11;
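/* This computes I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S): offset
bits 23 and 22 currently hold S from the sign extension, so
xoring them with the inverted J bits yields I1:I2. */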
6408 addr = s->pc;
6409 if (insn & (1 << 14)) {
6410 /* Branch and link. */
6411 gen_op_movl_T1_im(addr | 1);
6412 gen_movl_reg_T1(s, 14);
6415 addr += offset;
6416 if (insn & (1 << 12)) {
6417 /* b/bl */
6418 gen_jmp(s, addr);
6419 } else {
6420 /* blx */
6421 addr &= ~(uint32_t)2;
6422 gen_op_movl_T0_im(addr);
6423 gen_bx(s);
6425 } else if (((insn >> 23) & 7) == 7) {
6426 /* Misc control */
6427 if (insn & (1 << 13))
6428 goto illegal_op;
6430 if (insn & (1 << 26)) {
6431 /* Secure monitor call (v6Z) */
6432 goto illegal_op; /* not implemented. */
6433 } else {
6434 op = (insn >> 20) & 7;
6435 switch (op) {
6436 case 0: /* msr cpsr. */
6437 if (IS_M(env)) {
6438 gen_movl_T0_reg(s, rn);
6439 gen_op_v7m_msr_T0(insn & 0xff);
6440 gen_lookup_tb(s);
6441 break;
6443 /* fall through */
6444 case 1: /* msr spsr. */
6445 if (IS_M(env))
6446 goto illegal_op;
6447 gen_movl_T0_reg(s, rn);
6448 if (gen_set_psr_T0(s,
6449 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6450 op == 1))
6451 goto illegal_op;
6452 break;
6453 case 2: /* cps, nop-hint. */
6454 if (((insn >> 8) & 7) == 0) {
6455 gen_nop_hint(s, insn & 0xff);
6457 /* Implemented as NOP in user mode. */
6458 if (IS_USER(s))
6459 break;
6460 offset = 0;
6461 imm = 0;
6462 if (insn & (1 << 10)) {
6463 if (insn & (1 << 7))
6464 offset |= CPSR_A;
6465 if (insn & (1 << 6))
6466 offset |= CPSR_I;
6467 if (insn & (1 << 5))
6468 offset |= CPSR_F;
6469 if (insn & (1 << 9))
6470 imm = CPSR_A | CPSR_I | CPSR_F;
6472 if (insn & (1 << 8)) {
6473 offset |= 0x1f;
6474 imm |= (insn & 0x1f);
6476 if (offset) {
6477 gen_op_movl_T0_im(imm);
6478 gen_set_psr_T0(s, offset, 0);
6480 break;
6481 case 3: /* Special control operations. */
6482 op = (insn >> 4) & 0xf;
6483 switch (op) {
6484 case 2: /* clrex */
6485 gen_op_clrex();
6486 break;
6487 case 4: /* dsb */
6488 case 5: /* dmb */
6489 case 6: /* isb */
6490 /* These execute as NOPs. */
6491 ARCH(7);
6492 break;
6493 default:
6494 goto illegal_op;
6496 break;
6497 case 4: /* bxj */
6498 /* Trivial implementation equivalent to bx. */
6499 gen_movl_T0_reg(s, rn);
6500 gen_bx(s);
6501 break;
6502 case 5: /* Exception return. */
6503 /* Unpredictable in user mode. */
6504 goto illegal_op;
6505 case 6: /* mrs cpsr. */
6506 if (IS_M(env)) {
6507 gen_op_v7m_mrs_T0(insn & 0xff);
6508 } else {
6509 gen_op_movl_T0_cpsr();
6511 gen_movl_reg_T0(s, rd);
6512 break;
6513 case 7: /* mrs spsr. */
6514 /* Not accessible in user mode. */
6515 if (IS_USER(s) || IS_M(env))
6516 goto illegal_op;
6517 gen_op_movl_T0_spsr();
6518 gen_movl_reg_T0(s, rd);
6519 break;
6522 } else {
6523 /* Conditional branch. */
6524 op = (insn >> 22) & 0xf;
6525 /* Generate a conditional jump to next instruction. */
6526 s->condlabel = gen_new_label();
6527 gen_test_cc[op ^ 1](s->condlabel);
6528 s->condjmp = 1;
6530 /* offset[11:1] = insn[10:0] */
6531 offset = (insn & 0x7ff) << 1;
6532 /* offset[17:12] = insn[21:16]. */
6533 offset |= (insn & 0x003f0000) >> 4;
6534 /* offset[31:20] = insn[26]. */
6535 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
6536 /* offset[18] = insn[13]. */
6537 offset |= (insn & (1 << 13)) << 5;
6538 /* offset[19] = insn[11]. */
6539 offset |= (insn & (1 << 11)) << 8;
6541 /* jump to the offset */
6542 addr = s->pc + offset;
6543 gen_jmp(s, addr);
6545 } else {
6546 /* Data processing immediate. */
6547 if (insn & (1 << 25)) {
6548 if (insn & (1 << 24)) {
6549 if (insn & (1 << 20))
6550 goto illegal_op;
6551 /* Bitfield/Saturate. */
6552 op = (insn >> 21) & 7;
6553 imm = insn & 0x1f;
6554 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6555 if (rn == 15)
6556 gen_op_movl_T1_im(0);
6557 else
6558 gen_movl_T1_reg(s, rn);
6559 switch (op) {
6560 case 2: /* Signed bitfield extract. */
6561 imm++;
6562 if (shift + imm > 32)
6563 goto illegal_op;
6564 if (imm < 32)
6565 gen_op_sbfx_T1(shift, imm);
6566 break;
6567 case 6: /* Unsigned bitfield extract. */
6568 imm++;
6569 if (shift + imm > 32)
6570 goto illegal_op;
6571 if (imm < 32)
6572 gen_op_ubfx_T1(shift, (1u << imm) - 1);
6573 break;
6574 case 3: /* Bitfield insert/clear. */
6575 if (imm < shift)
6576 goto illegal_op;
6577 imm = imm + 1 - shift;
6578 if (imm != 32) {
6579 gen_movl_T0_reg(s, rd);
6580 gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
6582 break;
6583 case 7:
6584 goto illegal_op;
6585 default: /* Saturate. */
6586 gen_movl_T1_reg(s, rn);
6587 if (shift) {
6588 if (op & 1)
6589 gen_op_sarl_T1_im(shift);
6590 else
6591 gen_op_shll_T1_im(shift);
6593 if (op & 4) {
6594 /* Unsigned. */
6596 if ((op & 1) && shift == 0)
6597 gen_op_usat16_T1(imm);
6598 else
6599 gen_op_usat_T1(imm);
6600 } else {
6601 /* Signed. */
6603 if ((op & 1) && shift == 0)
6604 gen_op_ssat16_T1(imm);
6605 else
6606 gen_op_ssat_T1(imm);
6608 break;
6610 gen_movl_reg_T1(s, rd);
6611 } else {
6612 imm = ((insn & 0x04000000) >> 15)
6613 | ((insn & 0x7000) >> 4) | (insn & 0xff);
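/* Assemble the 12-bit immediate i:imm3:imm8 from insn[26],
insn[14:12] and insn[7:0]. */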
6614 if (insn & (1 << 22)) {
6615 /* 16-bit immediate. */
6616 imm |= (insn >> 4) & 0xf000;
6617 if (insn & (1 << 23)) {
6618 /* movt */
6619 gen_movl_T0_reg(s, rd);
6620 gen_op_movtop_T0_im(imm << 16);
6621 } else {
6622 /* movw */
6623 gen_op_movl_T0_im(imm);
6625 } else {
6626 /* Add/sub 12-bit immediate. */
6627 if (rn == 15) {
6628 addr = s->pc & ~(uint32_t)3;
6629 if (insn & (1 << 23))
6630 addr -= imm;
6631 else
6632 addr += imm;
6633 gen_op_movl_T0_im(addr);
6634 } else {
6635 gen_movl_T0_reg(s, rn);
6636 gen_op_movl_T1_im(imm);
6637 if (insn & (1 << 23))
6638 gen_op_subl_T0_T1();
6639 else
6640 gen_op_addl_T0_T1();
6643 gen_movl_reg_T0(s, rd);
6645 } else {
6646 int shifter_out = 0;
6647 /* modified 12-bit immediate. */
6648 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
6649 imm = (insn & 0xff);
6650 switch (shift) {
6651 case 0: /* XY */
6652 /* Nothing to do. */
6653 break;
6654 case 1: /* 00XY00XY */
6655 imm |= imm << 16;
6656 break;
6657 case 2: /* XY00XY00 */
6658 imm |= imm << 16;
6659 imm <<= 8;
6660 break;
6661 case 3: /* XYXYXYXY */
6662 imm |= imm << 16;
6663 imm |= imm << 8;
6664 break;
6665 default: /* Rotated constant. */
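/* Bit 7 of imm8 is forced to 1 and the result is rotated right
by the 5-bit count. E.g. a count of 16 with imm8=0x00 encodes
0x00800000. */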
6666 shift = (shift << 1) | (imm >> 7);
6667 imm |= 0x80;
6668 imm = imm << (32 - shift);
6669 shifter_out = 1;
6670 break;
6672 gen_op_movl_T1_im(imm);
6673 rn = (insn >> 16) & 0xf;
6674 if (rn == 15)
6675 gen_op_movl_T0_im(0);
6676 else
6677 gen_movl_T0_reg(s, rn);
6678 op = (insn >> 21) & 0xf;
6679 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
6680 shifter_out))
6681 goto illegal_op;
6682 rd = (insn >> 8) & 0xf;
6683 if (rd != 15) {
6684 gen_movl_reg_T0(s, rd);
6688 break;
6689 case 12: /* Load/store single data item. */
6691 int postinc = 0;
6692 int writeback = 0;
6693 if ((insn & 0x01100000) == 0x01000000) {
6694 if (disas_neon_ls_insn(env, s, insn))
6695 goto illegal_op;
6696 break;
6698 if (rn == 15) {
6699 /* PC relative. */
6700 /* s->pc has already been incremented by 4. */
6701 imm = s->pc & 0xfffffffc;
6702 if (insn & (1 << 23))
6703 imm += insn & 0xfff;
6704 else
6705 imm -= insn & 0xfff;
6706 gen_op_movl_T1_im(imm);
6707 } else {
6708 gen_movl_T1_reg(s, rn);
6709 if (insn & (1 << 23)) {
6710 /* Positive offset. */
6711 imm = insn & 0xfff;
6712 gen_op_addl_T1_im(imm);
6713 } else {
6714 op = (insn >> 8) & 7;
6715 imm = insn & 0xff;
6716 switch (op) {
6717 case 0: case 8: /* Shifted Register. */
6718 shift = (insn >> 4) & 0xf;
6719 if (shift > 3)
6720 goto illegal_op;
6721 gen_movl_T2_reg(s, rm);
6722 if (shift)
6723 gen_op_shll_T2_im(shift);
6724 gen_op_addl_T1_T2();
6725 break;
6726 case 4: /* Negative offset. */
6727 gen_op_addl_T1_im(-imm);
6728 break;
6729 case 6: /* User privilege. */
6730 gen_op_addl_T1_im(imm);
6731 break;
6732 case 1: /* Post-decrement. */
6733 imm = -imm;
6734 /* Fall through. */
6735 case 3: /* Post-increment. */
6736 gen_op_movl_T2_im(imm);
6737 postinc = 1;
6738 writeback = 1;
6739 break;
6740 case 5: /* Pre-decrement. */
6741 imm = -imm;
6742 /* Fall through. */
6743 case 7: /* Pre-increment. */
6744 gen_op_addl_T1_im(imm);
6745 writeback = 1;
6746 break;
6747 default:
6748 goto illegal_op;
6752 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
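/* op[2] is the sign bit (insn[24]) and op[1:0] the size
(insn[22:21]). */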
6753 if (insn & (1 << 20)) {
6754 /* Load. */
6755 if (rs == 15 && op != 2) {
6756 if (op & 2)
6757 goto illegal_op;
6758 /* Memory hint. Implemented as NOP. */
6759 } else {
6760 switch (op) {
6761 case 0: gen_ldst(ldub, s); break;
6762 case 4: gen_ldst(ldsb, s); break;
6763 case 1: gen_ldst(lduw, s); break;
6764 case 5: gen_ldst(ldsw, s); break;
6765 case 2: gen_ldst(ldl, s); break;
6766 default: goto illegal_op;
6768 if (rs == 15) {
6769 gen_bx(s);
6770 } else {
6771 gen_movl_reg_T0(s, rs);
6774 } else {
6775 /* Store. */
6776 if (rs == 15)
6777 goto illegal_op;
6778 gen_movl_T0_reg(s, rs);
6779 switch (op) {
6780 case 0: gen_ldst(stb, s); break;
6781 case 1: gen_ldst(stw, s); break;
6782 case 2: gen_ldst(stl, s); break;
6783 default: goto illegal_op;
6786 if (postinc)
6787 gen_op_addl_T1_im(imm);
6788 if (writeback)
6789 gen_movl_reg_T1(s, rn);
6791 break;
6792 default:
6793 goto illegal_op;
6795 return 0;
6796 illegal_op:
6797 return 1;
6800 static void disas_thumb_insn(CPUState *env, DisasContext *s)
6802 uint32_t val, insn, op, rm, rn, rd, shift, cond;
6803 int32_t offset;
6804 int i;
6806 if (s->condexec_mask) {
6807 cond = s->condexec_cond;
6808 s->condlabel = gen_new_label();
6809 gen_test_cc[cond ^ 1](s->condlabel);
6810 s->condjmp = 1;
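/* Inside an IT block every instruction executes under the block's
current condition, so emit the conditional skip before decoding
the instruction itself. */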
6813 insn = lduw_code(s->pc);
6814 s->pc += 2;
6816 switch (insn >> 12) {
6817 case 0: case 1:
6818 rd = insn & 7;
6819 op = (insn >> 11) & 3;
6820 if (op == 3) {
6821 /* add/subtract */
6822 rn = (insn >> 3) & 7;
6823 gen_movl_T0_reg(s, rn);
6824 if (insn & (1 << 10)) {
6825 /* immediate */
6826 gen_op_movl_T1_im((insn >> 6) & 7);
6827 } else {
6828 /* reg */
6829 rm = (insn >> 6) & 7;
6830 gen_movl_T1_reg(s, rm);
6832 if (insn & (1 << 9)) {
6833 if (s->condexec_mask)
6834 gen_op_subl_T0_T1();
6835 else
6836 gen_op_subl_T0_T1_cc();
6837 } else {
6838 if (s->condexec_mask)
6839 gen_op_addl_T0_T1();
6840 else
6841 gen_op_addl_T0_T1_cc();
6843 gen_movl_reg_T0(s, rd);
6844 } else {
6845 /* shift immediate */
6846 rm = (insn >> 3) & 7;
6847 shift = (insn >> 6) & 0x1f;
6848 gen_movl_T0_reg(s, rm);
6849 if (s->condexec_mask)
6850 gen_shift_T0_im_thumb[op](shift);
6851 else
6852 gen_shift_T0_im_thumb_cc[op](shift);
6853 gen_movl_reg_T0(s, rd);
6855 break;
6856 case 2: case 3:
6857 /* arithmetic large immediate */
6858 op = (insn >> 11) & 3;
6859 rd = (insn >> 8) & 0x7;
6860 if (op == 0) {
6861 gen_op_movl_T0_im(insn & 0xff);
6862 } else {
6863 gen_movl_T0_reg(s, rd);
6864 gen_op_movl_T1_im(insn & 0xff);
6866 switch (op) {
6867 case 0: /* mov */
6868 if (!s->condexec_mask)
6869 gen_op_logic_T0_cc();
6870 break;
6871 case 1: /* cmp */
6872 gen_op_subl_T0_T1_cc();
6873 break;
6874 case 2: /* add */
6875 if (s->condexec_mask)
6876 gen_op_addl_T0_T1();
6877 else
6878 gen_op_addl_T0_T1_cc();
6879 break;
6880 case 3: /* sub */
6881 if (s->condexec_mask)
6882 gen_op_subl_T0_T1();
6883 else
6884 gen_op_subl_T0_T1_cc();
6885 break;
6887 if (op != 1)
6888 gen_movl_reg_T0(s, rd);
6889 break;
6890 case 4:
6891 if (insn & (1 << 11)) {
6892 rd = (insn >> 8) & 7;
6893 /* load pc-relative. Bit 1 of PC is ignored. */
6894 val = s->pc + 2 + ((insn & 0xff) * 4);
6895 val &= ~(uint32_t)2;
6896 gen_op_movl_T1_im(val);
6897 gen_ldst(ldl, s);
6898 gen_movl_reg_T0(s, rd);
6899 break;
6901 if (insn & (1 << 10)) {
6902 /* data processing extended or blx */
6903 rd = (insn & 7) | ((insn >> 4) & 8);
6904 rm = (insn >> 3) & 0xf;
6905 op = (insn >> 8) & 3;
6906 switch (op) {
6907 case 0: /* add */
6908 gen_movl_T0_reg(s, rd);
6909 gen_movl_T1_reg(s, rm);
6910 gen_op_addl_T0_T1();
6911 gen_movl_reg_T0(s, rd);
6912 break;
6913 case 1: /* cmp */
6914 gen_movl_T0_reg(s, rd);
6915 gen_movl_T1_reg(s, rm);
6916 gen_op_subl_T0_T1_cc();
6917 break;
6918 case 2: /* mov/cpy */
6919 gen_movl_T0_reg(s, rm);
6920 gen_movl_reg_T0(s, rd);
6921 break;
6922 case 3:/* branch [and link] exchange thumb register */
6923 if (insn & (1 << 7)) {
6924 val = (uint32_t)s->pc | 1;
6925 gen_op_movl_T1_im(val);
6926 gen_movl_reg_T1(s, 14);
6928 gen_movl_T0_reg(s, rm);
6929 gen_bx(s);
6930 break;
6932 break;
6935 /* data processing register */
6936 rd = insn & 7;
6937 rm = (insn >> 3) & 7;
6938 op = (insn >> 6) & 0xf;
6939 if (op == 2 || op == 3 || op == 4 || op == 7) {
6940 /* the shift/rotate ops want the operands backwards */
6941 val = rm;
6942 rm = rd;
6943 rd = val;
6944 val = 1;
6945 } else {
6946 val = 0;
6949 if (op == 9) /* neg */
6950 gen_op_movl_T0_im(0);
6951 else if (op != 0xf) /* mvn doesn't read its first operand */
6952 gen_movl_T0_reg(s, rd);
6954 gen_movl_T1_reg(s, rm);
6955 switch (op) {
6956 case 0x0: /* and */
6957 gen_op_andl_T0_T1();
6958 if (!s->condexec_mask)
6959 gen_op_logic_T0_cc();
6960 break;
6961 case 0x1: /* eor */
6962 gen_op_xorl_T0_T1();
6963 if (!s->condexec_mask)
6964 gen_op_logic_T0_cc();
6965 break;
6966 case 0x2: /* lsl */
6967 if (s->condexec_mask) {
6968 gen_op_shll_T1_T0();
6969 } else {
6970 gen_op_shll_T1_T0_cc();
6971 gen_op_logic_T1_cc();
6973 break;
6974 case 0x3: /* lsr */
6975 if (s->condexec_mask) {
6976 gen_op_shrl_T1_T0();
6977 } else {
6978 gen_op_shrl_T1_T0_cc();
6979 gen_op_logic_T1_cc();
6981 break;
6982 case 0x4: /* asr */
6983 if (s->condexec_mask) {
6984 gen_op_sarl_T1_T0();
6985 } else {
6986 gen_op_sarl_T1_T0_cc();
6987 gen_op_logic_T1_cc();
6989 break;
6990 case 0x5: /* adc */
6991 if (s->condexec_mask)
6992 gen_op_adcl_T0_T1();
6993 else
6994 gen_op_adcl_T0_T1_cc();
6995 break;
6996 case 0x6: /* sbc */
6997 if (s->condexec_mask)
6998 gen_op_sbcl_T0_T1();
6999 else
7000 gen_op_sbcl_T0_T1_cc();
7001 break;
7002 case 0x7: /* ror */
7003 if (s->condexec_mask) {
7004 gen_op_rorl_T1_T0();
7005 } else {
7006 gen_op_rorl_T1_T0_cc();
7007 gen_op_logic_T1_cc();
7009 break;
7010 case 0x8: /* tst */
7011 gen_op_andl_T0_T1();
7012 gen_op_logic_T0_cc();
7013 rd = 16;
7014 break;
7015 case 0x9: /* neg */
7016 if (s->condexec_mask)
7017 gen_op_subl_T0_T1();
7018 else
7019 gen_op_subl_T0_T1_cc();
7020 break;
7021 case 0xa: /* cmp */
7022 gen_op_subl_T0_T1_cc();
7023 rd = 16;
7024 break;
7025 case 0xb: /* cmn */
7026 gen_op_addl_T0_T1_cc();
7027 rd = 16;
7028 break;
7029 case 0xc: /* orr */
7030 gen_op_orl_T0_T1();
7031 if (!s->condexec_mask)
7032 gen_op_logic_T0_cc();
7033 break;
7034 case 0xd: /* mul */
7035 gen_op_mull_T0_T1();
7036 if (!s->condexec_mask)
7037 gen_op_logic_T0_cc();
7038 break;
7039 case 0xe: /* bic */
7040 gen_op_bicl_T0_T1();
7041 if (!s->condexec_mask)
7042 gen_op_logic_T0_cc();
7043 break;
7044 case 0xf: /* mvn */
7045 gen_op_notl_T1();
7046 if (!s->condexec_mask)
7047 gen_op_logic_T1_cc();
7048 val = 1;
7049 rm = rd;
7050 break;
7052 if (rd != 16) {
7053 if (val)
7054 gen_movl_reg_T1(s, rm);
7055 else
7056 gen_movl_reg_T0(s, rd);
7058 break;
7060 case 5:
7061 /* load/store register offset. */
7062 rd = insn & 7;
7063 rn = (insn >> 3) & 7;
7064 rm = (insn >> 6) & 7;
7065 op = (insn >> 9) & 7;
7066 gen_movl_T1_reg(s, rn);
7067 gen_movl_T2_reg(s, rm);
7068 gen_op_addl_T1_T2();
7070 if (op < 3) /* store */
7071 gen_movl_T0_reg(s, rd);
7073 switch (op) {
7074 case 0: /* str */
7075 gen_ldst(stl, s);
7076 break;
7077 case 1: /* strh */
7078 gen_ldst(stw, s);
7079 break;
7080 case 2: /* strb */
7081 gen_ldst(stb, s);
7082 break;
7083 case 3: /* ldrsb */
7084 gen_ldst(ldsb, s);
7085 break;
7086 case 4: /* ldr */
7087 gen_ldst(ldl, s);
7088 break;
7089 case 5: /* ldrh */
7090 gen_ldst(lduw, s);
7091 break;
7092 case 6: /* ldrb */
7093 gen_ldst(ldub, s);
7094 break;
7095 case 7: /* ldrsh */
7096 gen_ldst(ldsw, s);
7097 break;
7099 if (op >= 3) /* load */
7100 gen_movl_reg_T0(s, rd);
7101 break;
7103 case 6:
7104 /* load/store word immediate offset */
7105 rd = insn & 7;
7106 rn = (insn >> 3) & 7;
7107 gen_movl_T1_reg(s, rn);
7108 val = (insn >> 4) & 0x7c;
7109 gen_op_movl_T2_im(val);
7110 gen_op_addl_T1_T2();
7112 if (insn & (1 << 11)) {
7113 /* load */
7114 gen_ldst(ldl, s);
7115 gen_movl_reg_T0(s, rd);
7116 } else {
7117 /* store */
7118 gen_movl_T0_reg(s, rd);
7119 gen_ldst(stl, s);
7121 break;
7123 case 7:
7124 /* load/store byte immediate offset */
7125 rd = insn & 7;
7126 rn = (insn >> 3) & 7;
7127 gen_movl_T1_reg(s, rn);
7128 val = (insn >> 6) & 0x1f;
7129 gen_op_movl_T2_im(val);
7130 gen_op_addl_T1_T2();
7132 if (insn & (1 << 11)) {
7133 /* load */
7134 gen_ldst(ldub, s);
7135 gen_movl_reg_T0(s, rd);
7136 } else {
7137 /* store */
7138 gen_movl_T0_reg(s, rd);
7139 gen_ldst(stb, s);
7140 }
7141 break;
7143 case 8:
7144 /* load/store halfword immediate offset */
7145 rd = insn & 7;
7146 rn = (insn >> 3) & 7;
7147 gen_movl_T1_reg(s, rn);
7148 val = (insn >> 5) & 0x3e;
7149 gen_op_movl_T2_im(val);
7150 gen_op_addl_T1_T2();
7152 if (insn & (1 << 11)) {
7153 /* load */
7154 gen_ldst(lduw, s);
7155 gen_movl_reg_T0(s, rd);
7156 } else {
7157 /* store */
7158 gen_movl_T0_reg(s, rd);
7159 gen_ldst(stw, s);
7160 }
7161 break;
7163 case 9:
7164 /* load/store from stack */
7165 rd = (insn >> 8) & 7;
7166 gen_movl_T1_reg(s, 13);
7167 val = (insn & 0xff) * 4;
7168 gen_op_movl_T2_im(val);
7169 gen_op_addl_T1_T2();
7171 if (insn & (1 << 11)) {
7172 /* load */
7173 gen_ldst(ldl, s);
7174 gen_movl_reg_T0(s, rd);
7175 } else {
7176 /* store */
7177 gen_movl_T0_reg(s, rd);
7178 gen_ldst(stl, s);
7179 }
7180 break;
7182 case 10:
7183 /* add to high reg */
7184 rd = (insn >> 8) & 7;
7185 if (insn & (1 << 11)) {
7186 /* SP */
7187 gen_movl_T0_reg(s, 13);
7188 } else {
7189 /* PC. bit 1 is ignored. */
7190 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7191 }
7192 val = (insn & 0xff) * 4;
7193 gen_op_movl_T1_im(val);
7194 gen_op_addl_T0_T1();
7195 gen_movl_reg_T0(s, rd);
7196 break;
7198 case 11:
7199 /* misc */
7200 op = (insn >> 8) & 0xf;
7201 switch (op) {
7202 case 0:
7203 /* adjust stack pointer */
7204 gen_movl_T1_reg(s, 13);
7205 val = (insn & 0x7f) * 4;
7206 if (insn & (1 << 7))
7207 val = -(int32_t)val;
7208 gen_op_movl_T2_im(val);
7209 gen_op_addl_T1_T2();
7210 gen_movl_reg_T1(s, 13);
7211 break;
7213 case 2: /* sign/zero extend. */
7214 ARCH(6);
7215 rd = insn & 7;
7216 rm = (insn >> 3) & 7;
7217 gen_movl_T1_reg(s, rm);
7218 switch ((insn >> 6) & 3) {
7219 case 0: gen_op_sxth_T1(); break;
7220 case 1: gen_op_sxtb_T1(); break;
7221 case 2: gen_op_uxth_T1(); break;
7222 case 3: gen_op_uxtb_T1(); break;
7223 }
7224 gen_movl_reg_T1(s, rd);
7225 break;
7226 case 4: case 5: case 0xc: case 0xd:
7227 /* push/pop */
7228 gen_movl_T1_reg(s, 13);
7229 if (insn & (1 << 8))
7230 offset = 4;
7231 else
7232 offset = 0;
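/* Pre-compute the total transfer size: 4 bytes per register in the
   low-register mask, plus 4 more when bit 8 adds lr (push) or pc
   (pop), so the base can be lowered in one step for a push. */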
7233 for (i = 0; i < 8; i++) {
7234 if (insn & (1 << i))
7235 offset += 4;
7236 }
7237 if ((insn & (1 << 11)) == 0) {
7238 gen_op_movl_T2_im(-offset);
7239 gen_op_addl_T1_T2();
7240 }
7241 gen_op_movl_T2_im(4);
7242 for (i = 0; i < 8; i++) {
7243 if (insn & (1 << i)) {
7244 if (insn & (1 << 11)) {
7245 /* pop */
7246 gen_ldst(ldl, s);
7247 gen_movl_reg_T0(s, i);
7248 } else {
7249 /* push */
7250 gen_movl_T0_reg(s, i);
7251 gen_ldst(stl, s);
7252 }
7253 /* advance to the next address. */
7254 gen_op_addl_T1_T2();
7255 }
7256 }
7257 if (insn & (1 << 8)) {
7258 if (insn & (1 << 11)) {
7259 /* pop pc */
7260 gen_ldst(ldl, s);
7261 /* don't set the pc until the rest of the instruction
7262 has completed */
7263 } else {
7264 /* push lr */
7265 gen_movl_T0_reg(s, 14);
7266 gen_ldst(stl, s);
7267 }
7268 gen_op_addl_T1_T2();
7269 }
7270 if ((insn & (1 << 11)) == 0) {
7271 gen_op_movl_T2_im(-offset);
7272 gen_op_addl_T1_T2();
7273 }
7274 /* write back the new stack pointer */
7275 gen_movl_reg_T1(s, 13);
7276 /* set the new PC value */
7277 if ((insn & 0x0900) == 0x0900)
7278 gen_bx(s);
7279 break;
7281 case 1: case 3: case 9: case 11: /* czb */
7282 rm = insn & 7;
7283 gen_movl_T0_reg(s, rm);
7284 s->condlabel = gen_new_label();
7285 s->condjmp = 1;
7286 if (insn & (1 << 11))
7287 gen_op_testn_T0(s->condlabel);
7288 else
7289 gen_op_test_T0(s->condlabel);
7291 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
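/* cbz/cbnz target: insn[7:3] supplies offset bits 5:1 and insn[9]
   supplies bit 6, a forward halfword-aligned offset of 0..126 bytes
   from the architectural PC (this insn's address + 4). */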
7292 val = (uint32_t)s->pc + 2;
7293 val += offset;
7294 gen_jmp(s, val);
7295 break;
7297 case 15: /* IT, nop-hint. */
7298 if ((insn & 0xf) == 0) {
7299 gen_nop_hint(s, (insn >> 4) & 0xf);
7300 break;
7301 }
7302 /* If Then. */
7303 s->condexec_cond = (insn >> 4) & 0xe;
7304 s->condexec_mask = insn & 0x1f;
7305 /* No actual code generated for this insn, just setup state. */
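/* condexec_cond holds the top three bits of the IT first-condition;
   condexec_mask holds the remaining length/else bits and is shifted
   along by the translation loop after each subsequent insn. */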
7306 break;
7308 case 0xe: /* bkpt */
7309 gen_set_condexec(s);
7310 gen_op_movl_T0_im((long)s->pc - 2);
7311 gen_op_movl_reg_TN[0][15]();
7312 gen_op_bkpt();
7313 s->is_jmp = DISAS_JUMP;
7314 break;
7316 case 0xa: /* rev */
7317 ARCH(6);
7318 rn = (insn >> 3) & 0x7;
7319 rd = insn & 0x7;
7320 gen_movl_T0_reg(s, rn);
7321 switch ((insn >> 6) & 3) {
7322 case 0: gen_op_rev_T0(); break;
7323 case 1: gen_op_rev16_T0(); break;
7324 case 3: gen_op_revsh_T0(); break;
7325 default: goto illegal_op;
7326 }
7327 gen_movl_reg_T0(s, rd);
7328 break;
7330 case 6: /* cps */
7331 ARCH(6);
7332 if (IS_USER(s))
7333 break;
7334 if (IS_M(env)) {
7335 val = (insn & (1 << 4)) != 0;
7336 gen_op_movl_T0_im(val);
7337 /* PRIMASK */
7338 if (insn & 1)
7339 gen_op_v7m_msr_T0(16);
7340 /* FAULTMASK */
7341 if (insn & 2)
7342 gen_op_v7m_msr_T0(17);
7344 gen_lookup_tb(s);
7345 } else {
7346 if (insn & (1 << 4))
7347 shift = CPSR_A | CPSR_I | CPSR_F;
7348 else
7349 shift = 0;
7351 val = ((insn & 7) << 6) & shift;
7352 gen_op_movl_T0_im(val);
7353 gen_set_psr_T0(s, shift, 0);
7354 }
7355 break;
7357 default:
7358 goto undef;
7359 }
7360 break;
7362 case 12:
7363 /* load/store multiple */
7364 rn = (insn >> 8) & 0x7;
7365 gen_movl_T1_reg(s, rn);
7366 gen_op_movl_T2_im(4);
7367 for (i = 0; i < 8; i++) {
7368 if (insn & (1 << i)) {
7369 if (insn & (1 << 11)) {
7370 /* load */
7371 gen_ldst(ldl, s);
7372 gen_movl_reg_T0(s, i);
7373 } else {
7374 /* store */
7375 gen_movl_T0_reg(s, i);
7376 gen_ldst(stl, s);
7377 }
7378 /* advance to the next address */
7379 gen_op_addl_T1_T2();
7380 }
7381 }
7382 /* Base register writeback. */
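/* Writeback is suppressed when the base register appears in the
   register list, so a value just loaded into it is not clobbered. */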
7383 if ((insn & (1 << rn)) == 0)
7384 gen_movl_reg_T1(s, rn);
7385 break;
7387 case 13:
7388 /* conditional branch or swi */
7389 cond = (insn >> 8) & 0xf;
7390 if (cond == 0xe)
7391 goto undef;
7393 if (cond == 0xf) {
7394 /* swi */
7395 gen_set_condexec(s);
7396 gen_op_movl_T0_im((long)s->pc | 1);
7397 /* Don't set r15. */
7398 gen_op_movl_reg_TN[0][15]();
7399 s->is_jmp = DISAS_SWI;
7400 break;
7401 }
7402 /* generate a conditional jump to next instruction */
7403 s->condlabel = gen_new_label();
7404 gen_test_cc[cond ^ 1](s->condlabel);
7405 s->condjmp = 1;
7406 gen_movl_T1_reg(s, 15);
7408 /* jump to the offset */
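/* The 8-bit immediate is sign-extended by the shift pair below and
   scaled to halfwords; s->pc already points past this insn, so the
   extra +2 forms the architectural PC (insn address + 4). */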
7409 val = (uint32_t)s->pc + 2;
7410 offset = ((int32_t)insn << 24) >> 24;
7411 val += offset << 1;
7412 gen_jmp(s, val);
7413 break;
7415 case 14:
7416 if (insn & (1 << 11)) {
7417 if (disas_thumb2_insn(env, s, insn))
7418 goto undef32;
7419 break;
7420 }
7421 /* unconditional branch */
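/* Same sign-extension idiom for the 11-bit immediate:
   ((int32_t)insn << 21) >> 21 recovers the signed halfword offset. */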
7422 val = (uint32_t)s->pc;
7423 offset = ((int32_t)insn << 21) >> 21;
7424 val += (offset << 1) + 2;
7425 gen_jmp(s, val);
7426 break;
7428 case 15:
7429 if (disas_thumb2_insn(env, s, insn))
7430 goto undef32;
7431 break;
7432 }
7433 return;
7434 undef32:
7435 gen_set_condexec(s);
7436 gen_op_movl_T0_im((long)s->pc - 4);
7437 gen_op_movl_reg_TN[0][15]();
7438 gen_op_undef_insn();
7439 s->is_jmp = DISAS_JUMP;
7440 return;
7441 illegal_op:
7442 undef:
7443 gen_set_condexec(s);
7444 gen_op_movl_T0_im((long)s->pc - 2);
7445 gen_op_movl_reg_TN[0][15]();
7446 gen_op_undef_insn();
7447 s->is_jmp = DISAS_JUMP;
7448 }
7450 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7451 basic block 'tb'. If search_pc is TRUE, also generate PC
7452 information for each intermediate instruction. */
7453 static inline int gen_intermediate_code_internal(CPUState *env,
7454 TranslationBlock *tb,
7455 int search_pc)
7456 {
7457 DisasContext dc1, *dc = &dc1;
7458 uint16_t *gen_opc_end;
7459 int j, lj;
7460 target_ulong pc_start;
7461 uint32_t next_page_start;
7463 /* generate intermediate code */
7464 pc_start = tb->pc;
7466 dc->tb = tb;
7468 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7470 dc->is_jmp = DISAS_NEXT;
7471 dc->pc = pc_start;
7472 dc->singlestep_enabled = env->singlestep_enabled;
7473 dc->condjmp = 0;
7474 dc->thumb = env->thumb;
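/* env->condexec_bits packs the IT state as cond[7:4] | mask[3:0];
   widen the mask back to the 5-bit form the decoder works with. */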
7475 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
7476 dc->condexec_cond = env->condexec_bits >> 4;
7477 dc->is_mem = 0;
7478 #if !defined(CONFIG_USER_ONLY)
7479 if (IS_M(env)) {
7480 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
7481 } else {
7482 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
7483 }
7484 #endif
7485 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
7486 lj = -1;
7487 /* Reset the conditional execution bits immediately. This avoids
7488 complications trying to do it at the end of the block. */
7489 if (env->condexec_bits)
7490 gen_op_set_condexec(0);
7491 do {
7492 #ifndef CONFIG_USER_ONLY
7493 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
7494 /* We always get here via a jump, so we know we are not in a
7495 conditional execution block. */
7496 gen_op_exception_exit();
7497 }
7498 #endif
7500 if (env->nb_breakpoints > 0) {
7501 for(j = 0; j < env->nb_breakpoints; j++) {
7502 if (env->breakpoints[j] == dc->pc) {
7503 gen_set_condexec(dc);
7504 gen_op_movl_T0_im((long)dc->pc);
7505 gen_op_movl_reg_TN[0][15]();
7506 gen_op_debug();
7507 dc->is_jmp = DISAS_JUMP;
7508 /* Advance PC so that clearing the breakpoint will
7509 invalidate this TB. */
7510 dc->pc += 2;
7511 goto done_generating;
7512 break;
7513 }
7514 }
7515 }
7516 if (search_pc) {
7517 j = gen_opc_ptr - gen_opc_buf;
7518 if (lj < j) {
7519 lj++;
7520 while (lj < j)
7521 gen_opc_instr_start[lj++] = 0;
7522 }
7523 gen_opc_pc[lj] = dc->pc;
7524 gen_opc_instr_start[lj] = 1;
7525 }
7527 if (env->thumb) {
7528 disas_thumb_insn(env, dc);
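/* Step the IT state after each thumb insn: the next mask bit moves
   into the condition's low bit and the mask shifts up; when the mask
   empties, conditional execution is finished. */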
7529 if (dc->condexec_mask) {
7530 dc->condexec_cond = (dc->condexec_cond & 0xe)
7531 | ((dc->condexec_mask >> 4) & 1);
7532 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7533 if (dc->condexec_mask == 0) {
7534 dc->condexec_cond = 0;
7535 }
7536 }
7537 } else {
7538 disas_arm_insn(env, dc);
7539 }
7541 if (dc->condjmp && !dc->is_jmp) {
7542 gen_set_label(dc->condlabel);
7543 dc->condjmp = 0;
7544 }
7545 /* Terminate the TB on memory ops if watchpoints are present. */
7546 /* FIXME: This should be replaced by the deterministic execution
7547 * IRQ raising bits. */
7548 if (dc->is_mem && env->nb_watchpoints)
7549 break;
7551 /* Translation stops when a conditional branch is encountered.
7552 * Otherwise the subsequent code could get translated several times.
7553 * Also stop translation when a page boundary is reached. This
7554 * ensures prefetch aborts occur at the right place. */
7555 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
7556 !env->singlestep_enabled &&
7557 dc->pc < next_page_start);
7559 /* At this stage dc->condjmp will only be set when the skipped
7560 instruction was a conditional branch or trap, and the PC has
7561 already been written. */
7562 if (__builtin_expect(env->singlestep_enabled, 0)) {
7563 /* Make sure the pc is updated, and raise a debug exception. */
7564 if (dc->condjmp) {
7565 gen_set_condexec(dc);
7566 if (dc->is_jmp == DISAS_SWI) {
7567 gen_op_swi();
7568 } else {
7569 gen_op_debug();
7570 }
7571 gen_set_label(dc->condlabel);
7572 }
7573 if (dc->condjmp || !dc->is_jmp) {
7574 gen_op_movl_T0_im((long)dc->pc);
7575 gen_op_movl_reg_TN[0][15]();
7576 dc->condjmp = 0;
7577 }
7578 gen_set_condexec(dc);
7579 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
7580 gen_op_swi();
7581 } else {
7582 /* FIXME: Single stepping a WFI insn will not halt
7583 the CPU. */
7584 gen_op_debug();
7585 }
7586 } else {
7587 /* While branches must always occur at the end of an IT block,
7588 there are a few other things that can cause us to terminate
7589 the TB in the middle of an IT block:
7590 - Exception generating instructions (bkpt, swi, undefined).
7591 - Page boundaries.
7592 - Hardware watchpoints.
7593 Hardware breakpoints have already been handled and skip this code.
7594 */
7595 gen_set_condexec(dc);
7596 switch(dc->is_jmp) {
7597 case DISAS_NEXT:
7598 gen_goto_tb(dc, 1, dc->pc);
7599 break;
7600 default:
7601 case DISAS_JUMP:
7602 case DISAS_UPDATE:
7603 /* indicate that the hash table must be used to find the next TB */
7604 tcg_gen_exit_tb(0);
7605 break;
7606 case DISAS_TB_JUMP:
7607 /* nothing more to generate */
7608 break;
7609 case DISAS_WFI:
7610 gen_op_wfi();
7611 break;
7612 case DISAS_SWI:
7613 gen_op_swi();
7614 break;
7615 }
7616 if (dc->condjmp) {
7617 gen_set_label(dc->condlabel);
7618 gen_set_condexec(dc);
7619 gen_goto_tb(dc, 1, dc->pc);
7620 dc->condjmp = 0;
7621 }
7622 }
7623 done_generating:
7624 *gen_opc_ptr = INDEX_op_end;
7626 #ifdef DEBUG_DISAS
7627 if (loglevel & CPU_LOG_TB_IN_ASM) {
7628 fprintf(logfile, "----------------\n");
7629 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
7630 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
7631 fprintf(logfile, "\n");
7632 }
7633 #endif
7634 if (search_pc) {
7635 j = gen_opc_ptr - gen_opc_buf;
7636 lj++;
7637 while (lj <= j)
7638 gen_opc_instr_start[lj++] = 0;
7639 } else {
7640 tb->size = dc->pc - pc_start;
7641 }
7642 return 0;
7643 }
7645 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7646 {
7647 return gen_intermediate_code_internal(env, tb, 0);
7648 }
7650 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7651 {
7652 return gen_intermediate_code_internal(env, tb, 1);
7653 }
7655 static const char *cpu_mode_names[16] = {
7656 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
7657 "???", "???", "???", "und", "???", "???", "???", "sys"
7658 };
7660 void cpu_dump_state(CPUState *env, FILE *f,
7661 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
7662 int flags)
7663 {
7664 int i;
7665 union {
7666 uint32_t i;
7667 float s;
7668 } s0, s1;
7669 CPU_DoubleU d;
7670 /* ??? This assumes float64 and double have the same layout.
7671 Oh well, it's only debug dumps. */
7672 union {
7673 float64 f64;
7674 double d;
7675 } d0;
7676 uint32_t psr;
7678 for(i=0;i<16;i++) {
7679 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
7680 if ((i % 4) == 3)
7681 cpu_fprintf(f, "\n");
7682 else
7683 cpu_fprintf(f, " ");
7684 }
7685 psr = cpsr_read(env);
7686 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
7687 psr,
7688 psr & (1 << 31) ? 'N' : '-',
7689 psr & (1 << 30) ? 'Z' : '-',
7690 psr & (1 << 29) ? 'C' : '-',
7691 psr & (1 << 28) ? 'V' : '-',
7692 psr & CPSR_T ? 'T' : 'A',
7693 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
7695 for (i = 0; i < 16; i++) {
7696 d.d = env->vfp.regs[i];
7697 s0.i = d.l.lower;
7698 s1.i = d.l.upper;
7699 d0.f64 = d.d;
7700 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
7701 i * 2, (int)s0.i, s0.s,
7702 i * 2 + 1, (int)s1.i, s1.s,
7703 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
7704 d0.d);
7706 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);