target-arm: convert gen_lookup_tb not to use cpu_T
[qemu/stefanha.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
37 #define ENABLE_ARCH_5J 0
38 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
39 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
40 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
41 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
43 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
45 /* internal defines */
46 typedef struct DisasContext {
47 target_ulong pc;
48 int is_jmp;
49 /* Nonzero if this instruction has been conditionally skipped. */
50 int condjmp;
51 /* The label that will be jumped to when the instruction is skipped. */
52 int condlabel;
   53 /* Thumb-2 conditional execution bits.  */
54 int condexec_mask;
55 int condexec_cond;
56 struct TranslationBlock *tb;
57 int singlestep_enabled;
58 int thumb;
59 #if !defined(CONFIG_USER_ONLY)
60 int user;
61 #endif
62 } DisasContext;
64 #if defined(CONFIG_USER_ONLY)
65 #define IS_USER(s) 1
66 #else
67 #define IS_USER(s) (s->user)
68 #endif
   70 /* These instructions trap after executing, so defer them until after the
   71    conditional execution state has been updated.  */
72 #define DISAS_WFI 4
73 #define DISAS_SWI 5
75 static TCGv_ptr cpu_env;
76 /* We reuse the same 64-bit temporaries for efficiency. */
77 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
78 static TCGv_i32 cpu_R[16];
80 /* FIXME: These should be removed. */
81 static TCGv cpu_T[2];
82 static TCGv cpu_F0s, cpu_F1s;
83 static TCGv_i64 cpu_F0d, cpu_F1d;
85 #define ICOUNT_TEMP cpu_T[0]
86 #include "gen-icount.h"
88 static const char *regnames[] =
89 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
90 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
92 /* initialize TCG globals. */
93 void arm_translate_init(void)
95 int i;
97 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
99 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
100 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
102 for (i = 0; i < 16; i++) {
103 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
104 offsetof(CPUState, regs[i]),
105 regnames[i]);
108 #define GEN_HELPER 2
109 #include "helpers.h"
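/* Number of currently live temporaries: incremented by new_tmp() and
   decremented by dead_tmp(), so an imbalance after an instruction
   indicates a leaked temporary.  */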
112 static int num_temps;
114 /* Allocate a temporary variable. */
115 static TCGv_i32 new_tmp(void)
117 num_temps++;
118 return tcg_temp_new_i32();
121 /* Release a temporary variable. */
122 static void dead_tmp(TCGv tmp)
124 tcg_temp_free(tmp);
125 num_temps--;
128 static inline TCGv load_cpu_offset(int offset)
130 TCGv tmp = new_tmp();
131 tcg_gen_ld_i32(tmp, cpu_env, offset);
132 return tmp;
135 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
137 static inline void store_cpu_offset(TCGv var, int offset)
139 tcg_gen_st_i32(var, cpu_env, offset);
140 dead_tmp(var);
143 #define store_cpu_field(var, name) \
144 store_cpu_offset(var, offsetof(CPUState, name))
146 /* Set a variable to the value of a CPU register. */
147 static void load_reg_var(DisasContext *s, TCGv var, int reg)
149 if (reg == 15) {
150 uint32_t addr;
  151         /* normally, since we have already updated PC, we need only add one
                 more insn: reads of r15 yield the insn address plus 8 (ARM)
                 or plus 4 (Thumb) */
152 if (s->thumb)
153 addr = (long)s->pc + 2;
154 else
155 addr = (long)s->pc + 4;
156 tcg_gen_movi_i32(var, addr);
157 } else {
158 tcg_gen_mov_i32(var, cpu_R[reg]);
162 /* Create a new temporary and set it to the value of a CPU register. */
163 static inline TCGv load_reg(DisasContext *s, int reg)
165 TCGv tmp = new_tmp();
166 load_reg_var(s, tmp, reg);
167 return tmp;
170 /* Set a CPU register. The source must be a temporary and will be
171 marked as dead. */
172 static void store_reg(DisasContext *s, int reg, TCGv var)
174 if (reg == 15) {
175 tcg_gen_andi_i32(var, var, ~1);
176 s->is_jmp = DISAS_JUMP;
178 tcg_gen_mov_i32(cpu_R[reg], var);
179 dead_tmp(var);
183 /* Basic operations. */
184 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
185 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
186 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
188 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
189 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
190 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
191 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
193 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
194 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
195 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
196 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
197 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
199 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
203 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
204 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
205 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
207 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
208 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
210 /* Value extensions. */
211 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
212 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
213 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
214 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
216 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
217 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
219 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
221 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
222 /* Set NZCV flags from the high 4 bits of var. */
223 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
225 static void gen_exception(int excp)
227 TCGv tmp = new_tmp();
228 tcg_gen_movi_i32(tmp, excp);
229 gen_helper_exception(tmp);
230 dead_tmp(tmp);
233 static void gen_smul_dual(TCGv a, TCGv b)
235 TCGv tmp1 = new_tmp();
236 TCGv tmp2 = new_tmp();
237 tcg_gen_ext16s_i32(tmp1, a);
238 tcg_gen_ext16s_i32(tmp2, b);
239 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
240 dead_tmp(tmp2);
241 tcg_gen_sari_i32(a, a, 16);
242 tcg_gen_sari_i32(b, b, 16);
243 tcg_gen_mul_i32(b, b, a);
244 tcg_gen_mov_i32(a, tmp1);
245 dead_tmp(tmp1);
248 /* Byteswap each halfword. */
249 static void gen_rev16(TCGv var)
251 TCGv tmp = new_tmp();
252 tcg_gen_shri_i32(tmp, var, 8);
253 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
254 tcg_gen_shli_i32(var, var, 8);
255 tcg_gen_andi_i32(var, var, 0xff00ff00);
256 tcg_gen_or_i32(var, var, tmp);
257 dead_tmp(tmp);
260 /* Byteswap low halfword and sign extend. */
261 static void gen_revsh(TCGv var)
263 TCGv tmp = new_tmp();
264 tcg_gen_shri_i32(tmp, var, 8);
265 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
266 tcg_gen_shli_i32(var, var, 8);
267 tcg_gen_ext8s_i32(var, var);
268 tcg_gen_or_i32(var, var, tmp);
269 dead_tmp(tmp);
272 /* Unsigned bitfield extract. */
273 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
275 if (shift)
276 tcg_gen_shri_i32(var, var, shift);
277 tcg_gen_andi_i32(var, var, mask);
280 /* Signed bitfield extract. */
281 static void gen_sbfx(TCGv var, int shift, int width)
283 uint32_t signbit;
285 if (shift)
286 tcg_gen_sari_i32(var, var, shift);
287 if (shift + width < 32) {
288 signbit = 1u << (width - 1);
289 tcg_gen_andi_i32(var, var, (1u << width) - 1);
290 tcg_gen_xori_i32(var, var, signbit);
291 tcg_gen_subi_i32(var, var, signbit);
295 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
296 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
298 tcg_gen_andi_i32(val, val, mask);
299 tcg_gen_shli_i32(val, val, shift);
300 tcg_gen_andi_i32(base, base, ~(mask << shift));
301 tcg_gen_or_i32(dest, base, val);
304 /* Round the top 32 bits of a 64-bit value. */
305 static void gen_roundqd(TCGv a, TCGv b)
307 tcg_gen_shri_i32(a, a, 31);
308 tcg_gen_add_i32(a, a, b);
311 /* FIXME: Most targets have native widening multiplication.
312 It would be good to use that instead of a full wide multiply. */
313 /* 32x32->64 multiply. Marks inputs as dead. */
314 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
316 TCGv_i64 tmp1 = tcg_temp_new_i64();
317 TCGv_i64 tmp2 = tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp1, a);
320 dead_tmp(a);
321 tcg_gen_extu_i32_i64(tmp2, b);
322 dead_tmp(b);
323 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
324 return tmp1;
327 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
332 tcg_gen_ext_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_ext_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
340 /* Unsigned 32x32->64 multiply. */
341 static void gen_op_mull_T0_T1(void)
343 TCGv_i64 tmp1 = tcg_temp_new_i64();
344 TCGv_i64 tmp2 = tcg_temp_new_i64();
346 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
347 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
348 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
349 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
350 tcg_gen_shri_i64(tmp1, tmp1, 32);
351 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
354 /* Signed 32x32->64 multiply. */
355 static void gen_imull(TCGv a, TCGv b)
357 TCGv_i64 tmp1 = tcg_temp_new_i64();
358 TCGv_i64 tmp2 = tcg_temp_new_i64();
360 tcg_gen_ext_i32_i64(tmp1, a);
361 tcg_gen_ext_i32_i64(tmp2, b);
362 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
363 tcg_gen_trunc_i64_i32(a, tmp1);
364 tcg_gen_shri_i64(tmp1, tmp1, 32);
365 tcg_gen_trunc_i64_i32(b, tmp1);
368 /* Swap low and high halfwords. */
369 static void gen_swap_half(TCGv var)
371 TCGv tmp = new_tmp();
372 tcg_gen_shri_i32(tmp, var, 16);
373 tcg_gen_shli_i32(var, var, 16);
374 tcg_gen_or_i32(var, var, tmp);
375 dead_tmp(tmp);
  378 /* Dual 16-bit add.  The result is placed in t0 and t1 is marked as dead.
379 tmp = (t0 ^ t1) & 0x8000;
380 t0 &= ~0x8000;
381 t1 &= ~0x8000;
382 t0 = (t0 + t1) ^ tmp;
385 static void gen_add16(TCGv t0, TCGv t1)
387 TCGv tmp = new_tmp();
388 tcg_gen_xor_i32(tmp, t0, t1);
389 tcg_gen_andi_i32(tmp, tmp, 0x8000);
390 tcg_gen_andi_i32(t0, t0, ~0x8000);
391 tcg_gen_andi_i32(t1, t1, ~0x8000);
392 tcg_gen_add_i32(t0, t0, t1);
393 tcg_gen_xor_i32(t0, t0, tmp);
394 dead_tmp(tmp);
395 dead_tmp(t1);
398 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
400 /* Set CF to the top bit of var. */
401 static void gen_set_CF_bit31(TCGv var)
403 TCGv tmp = new_tmp();
404 tcg_gen_shri_i32(tmp, var, 31);
405 gen_set_CF(tmp);
406 dead_tmp(tmp);
409 /* Set N and Z flags from var. */
410 static inline void gen_logic_CC(TCGv var)
412 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
413 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
416 /* T0 += T1 + CF. */
417 static void gen_adc_T0_T1(void)
419 TCGv tmp;
420 gen_op_addl_T0_T1();
421 tmp = load_cpu_field(CF);
422 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
423 dead_tmp(tmp);
426 /* dest = T0 + T1 + CF. */
427 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
429 TCGv tmp;
430 tcg_gen_add_i32(dest, t0, t1);
431 tmp = load_cpu_field(CF);
432 tcg_gen_add_i32(dest, dest, tmp);
433 dead_tmp(tmp);
436 /* dest = T0 - T1 + CF - 1. */
437 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
439 TCGv tmp;
440 tcg_gen_sub_i32(dest, t0, t1);
441 tmp = load_cpu_field(CF);
442 tcg_gen_add_i32(dest, dest, tmp);
443 tcg_gen_subi_i32(dest, dest, 1);
444 dead_tmp(tmp);
447 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
448 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
  450 /* dest = t0 & ~t1 (BIC).  The gen_op_bicl_T0_T1 form below clobbers T1.  */
451 /* FIXME: Implement bic natively. */
452 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
454 TCGv tmp = new_tmp();
455 tcg_gen_not_i32(tmp, t1);
456 tcg_gen_and_i32(dest, t0, tmp);
457 dead_tmp(tmp);
459 static inline void gen_op_bicl_T0_T1(void)
461 gen_op_notl_T1();
462 gen_op_andl_T0_T1();
465 /* FIXME: Implement this natively. */
466 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
468 /* FIXME: Implement this natively. */
469 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
471 TCGv tmp;
473 if (i == 0)
474 return;
476 tmp = new_tmp();
477 tcg_gen_shri_i32(tmp, t1, i);
478 tcg_gen_shli_i32(t1, t1, 32 - i);
479 tcg_gen_or_i32(t0, t1, tmp);
480 dead_tmp(tmp);
483 static void shifter_out_im(TCGv var, int shift)
485 TCGv tmp = new_tmp();
486 if (shift == 0) {
487 tcg_gen_andi_i32(tmp, var, 1);
488 } else {
489 tcg_gen_shri_i32(tmp, var, shift);
490 if (shift != 31)
491 tcg_gen_andi_i32(tmp, tmp, 1);
493 gen_set_CF(tmp);
494 dead_tmp(tmp);
497 /* Shift by immediate. Includes special handling for shift == 0. */
498 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
500 switch (shiftop) {
501 case 0: /* LSL */
502 if (shift != 0) {
503 if (flags)
504 shifter_out_im(var, 32 - shift);
505 tcg_gen_shli_i32(var, var, shift);
507 break;
508 case 1: /* LSR */
509 if (shift == 0) {
510 if (flags) {
511 tcg_gen_shri_i32(var, var, 31);
512 gen_set_CF(var);
514 tcg_gen_movi_i32(var, 0);
515 } else {
516 if (flags)
517 shifter_out_im(var, shift - 1);
518 tcg_gen_shri_i32(var, var, shift);
520 break;
521 case 2: /* ASR */
522 if (shift == 0)
523 shift = 32;
524 if (flags)
525 shifter_out_im(var, shift - 1);
526 if (shift == 32)
527 shift = 31;
528 tcg_gen_sari_i32(var, var, shift);
529 break;
530 case 3: /* ROR/RRX */
531 if (shift != 0) {
532 if (flags)
533 shifter_out_im(var, shift - 1);
534 tcg_gen_rori_i32(var, var, shift); break;
535 } else {
536 TCGv tmp = load_cpu_field(CF);
537 if (flags)
538 shifter_out_im(var, 0);
539 tcg_gen_shri_i32(var, var, 1);
540 tcg_gen_shli_i32(tmp, tmp, 31);
541 tcg_gen_or_i32(var, var, tmp);
542 dead_tmp(tmp);
547 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
548 TCGv shift, int flags)
550 if (flags) {
551 switch (shiftop) {
552 case 0: gen_helper_shl_cc(var, var, shift); break;
553 case 1: gen_helper_shr_cc(var, var, shift); break;
554 case 2: gen_helper_sar_cc(var, var, shift); break;
555 case 3: gen_helper_ror_cc(var, var, shift); break;
557 } else {
558 switch (shiftop) {
559 case 0: gen_helper_shl(var, var, shift); break;
560 case 1: gen_helper_shr(var, var, shift); break;
561 case 2: gen_helper_sar(var, var, shift); break;
562 case 3: gen_helper_ror(var, var, shift); break;
565 dead_tmp(shift);
568 #define PAS_OP(pfx) \
569 switch (op2) { \
570 case 0: gen_pas_helper(glue(pfx,add16)); break; \
571 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
572 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
573 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
574 case 4: gen_pas_helper(glue(pfx,add8)); break; \
575 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
577 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
579 TCGv_ptr tmp;
581 switch (op1) {
582 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
583 case 1:
584 tmp = tcg_temp_new_ptr();
585 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
586 PAS_OP(s)
587 break;
588 case 5:
589 tmp = tcg_temp_new_ptr();
590 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
591 PAS_OP(u)
592 break;
593 #undef gen_pas_helper
594 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
595 case 2:
596 PAS_OP(q);
597 break;
598 case 3:
599 PAS_OP(sh);
600 break;
601 case 6:
602 PAS_OP(uq);
603 break;
604 case 7:
605 PAS_OP(uh);
606 break;
607 #undef gen_pas_helper
610 #undef PAS_OP
612 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
613 #define PAS_OP(pfx) \
614 switch (op2) { \
615 case 0: gen_pas_helper(glue(pfx,add8)); break; \
616 case 1: gen_pas_helper(glue(pfx,add16)); break; \
617 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
618 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
619 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
620 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
622 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
624 TCGv_ptr tmp;
626 switch (op1) {
627 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
628 case 0:
629 tmp = tcg_temp_new_ptr();
630 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
631 PAS_OP(s)
632 break;
633 case 4:
634 tmp = tcg_temp_new_ptr();
635 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
636 PAS_OP(u)
637 break;
638 #undef gen_pas_helper
639 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
640 case 1:
641 PAS_OP(q);
642 break;
643 case 2:
644 PAS_OP(sh);
645 break;
646 case 5:
647 PAS_OP(uq);
648 break;
649 case 6:
650 PAS_OP(uh);
651 break;
652 #undef gen_pas_helper
655 #undef PAS_OP
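/* The condition flags are kept in separate CPUState fields: CF holds 0 or 1,
   N and V are bit 31 of NF and VF respectively, and Z is set exactly when
   ZF == 0.  gen_test_cc below branches to 'label' when condition 'cc' holds.  */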
657 static void gen_test_cc(int cc, int label)
659 TCGv tmp;
660 TCGv tmp2;
661 int inv;
663 switch (cc) {
664 case 0: /* eq: Z */
665 tmp = load_cpu_field(ZF);
666 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
667 break;
668 case 1: /* ne: !Z */
669 tmp = load_cpu_field(ZF);
670 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
671 break;
672 case 2: /* cs: C */
673 tmp = load_cpu_field(CF);
674 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
675 break;
676 case 3: /* cc: !C */
677 tmp = load_cpu_field(CF);
678 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
679 break;
680 case 4: /* mi: N */
681 tmp = load_cpu_field(NF);
682 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
683 break;
684 case 5: /* pl: !N */
685 tmp = load_cpu_field(NF);
686 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
687 break;
688 case 6: /* vs: V */
689 tmp = load_cpu_field(VF);
690 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
691 break;
692 case 7: /* vc: !V */
693 tmp = load_cpu_field(VF);
694 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
695 break;
696 case 8: /* hi: C && !Z */
697 inv = gen_new_label();
698 tmp = load_cpu_field(CF);
699 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
700 dead_tmp(tmp);
701 tmp = load_cpu_field(ZF);
702 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
703 gen_set_label(inv);
704 break;
705 case 9: /* ls: !C || Z */
706 tmp = load_cpu_field(CF);
707 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
708 dead_tmp(tmp);
709 tmp = load_cpu_field(ZF);
710 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
711 break;
712 case 10: /* ge: N == V -> N ^ V == 0 */
713 tmp = load_cpu_field(VF);
714 tmp2 = load_cpu_field(NF);
715 tcg_gen_xor_i32(tmp, tmp, tmp2);
716 dead_tmp(tmp2);
717 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
718 break;
719 case 11: /* lt: N != V -> N ^ V != 0 */
720 tmp = load_cpu_field(VF);
721 tmp2 = load_cpu_field(NF);
722 tcg_gen_xor_i32(tmp, tmp, tmp2);
723 dead_tmp(tmp2);
724 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
725 break;
726 case 12: /* gt: !Z && N == V */
727 inv = gen_new_label();
728 tmp = load_cpu_field(ZF);
729 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
730 dead_tmp(tmp);
731 tmp = load_cpu_field(VF);
732 tmp2 = load_cpu_field(NF);
733 tcg_gen_xor_i32(tmp, tmp, tmp2);
734 dead_tmp(tmp2);
735 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
736 gen_set_label(inv);
737 break;
738 case 13: /* le: Z || N != V */
739 tmp = load_cpu_field(ZF);
740 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
741 dead_tmp(tmp);
742 tmp = load_cpu_field(VF);
743 tmp2 = load_cpu_field(NF);
744 tcg_gen_xor_i32(tmp, tmp, tmp2);
745 dead_tmp(tmp2);
746 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
747 break;
748 default:
749 fprintf(stderr, "Bad condition code 0x%x\n", cc);
750 abort();
752 dead_tmp(tmp);
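/* For each ARM data-processing opcode, 1 means the S variant sets the
   condition codes from the logical result (N and Z, with C coming from the
   shifter) rather than through the add/sub carry helpers.  */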
755 static const uint8_t table_logic_cc[16] = {
756 1, /* and */
757 1, /* xor */
758 0, /* sub */
759 0, /* rsb */
760 0, /* add */
761 0, /* adc */
762 0, /* sbc */
763 0, /* rsc */
764 1, /* andl */
765 1, /* xorl */
766 0, /* cmp */
767 0, /* cmn */
768 1, /* orr */
769 1, /* mov */
770 1, /* bic */
771 1, /* mvn */
774 /* Set PC and Thumb state from an immediate address. */
775 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
777 TCGv tmp;
779 s->is_jmp = DISAS_UPDATE;
780 if (s->thumb != (addr & 1)) {
781 tmp = new_tmp();
782 tcg_gen_movi_i32(tmp, addr & 1);
783 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
784 dead_tmp(tmp);
786 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
789 /* Set PC and Thumb state from var. var is marked as dead. */
790 static inline void gen_bx(DisasContext *s, TCGv var)
792 s->is_jmp = DISAS_UPDATE;
793 tcg_gen_andi_i32(cpu_R[15], var, ~1);
794 tcg_gen_andi_i32(var, var, 1);
795 store_cpu_field(var, thumb);
798 /* Variant of store_reg which uses branch&exchange logic when storing
799 to r15 in ARM architecture v7 and above. The source must be a temporary
800 and will be marked as dead. */
801 static inline void store_reg_bx(CPUState *env, DisasContext *s,
802 int reg, TCGv var)
804 if (reg == 15 && ENABLE_ARCH_7) {
805 gen_bx(s, var);
806 } else {
807 store_reg(s, reg, var);
811 static inline TCGv gen_ld8s(TCGv addr, int index)
813 TCGv tmp = new_tmp();
814 tcg_gen_qemu_ld8s(tmp, addr, index);
815 return tmp;
817 static inline TCGv gen_ld8u(TCGv addr, int index)
819 TCGv tmp = new_tmp();
820 tcg_gen_qemu_ld8u(tmp, addr, index);
821 return tmp;
823 static inline TCGv gen_ld16s(TCGv addr, int index)
825 TCGv tmp = new_tmp();
826 tcg_gen_qemu_ld16s(tmp, addr, index);
827 return tmp;
829 static inline TCGv gen_ld16u(TCGv addr, int index)
831 TCGv tmp = new_tmp();
832 tcg_gen_qemu_ld16u(tmp, addr, index);
833 return tmp;
835 static inline TCGv gen_ld32(TCGv addr, int index)
837 TCGv tmp = new_tmp();
838 tcg_gen_qemu_ld32u(tmp, addr, index);
839 return tmp;
841 static inline void gen_st8(TCGv val, TCGv addr, int index)
843 tcg_gen_qemu_st8(val, addr, index);
844 dead_tmp(val);
846 static inline void gen_st16(TCGv val, TCGv addr, int index)
848 tcg_gen_qemu_st16(val, addr, index);
849 dead_tmp(val);
851 static inline void gen_st32(TCGv val, TCGv addr, int index)
853 tcg_gen_qemu_st32(val, addr, index);
854 dead_tmp(val);
857 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
859 load_reg_var(s, cpu_T[0], reg);
862 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
864 load_reg_var(s, cpu_T[1], reg);
867 static inline void gen_set_pc_im(uint32_t val)
869 tcg_gen_movi_i32(cpu_R[15], val);
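/* Copy T0 or T1 (selected by t) into CPU register reg.  Writes to r15
   clear bit 0 and end the current TB.  */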
872 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
874 TCGv tmp;
875 if (reg == 15) {
876 tmp = new_tmp();
877 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
878 } else {
879 tmp = cpu_T[t];
881 tcg_gen_mov_i32(cpu_R[reg], tmp);
882 if (reg == 15) {
883 dead_tmp(tmp);
884 s->is_jmp = DISAS_JUMP;
888 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
890 gen_movl_reg_TN(s, reg, 0);
893 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
895 gen_movl_reg_TN(s, reg, 1);
898 /* Force a TB lookup after an instruction that changes the CPU state. */
899 static inline void gen_lookup_tb(DisasContext *s)
901 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
902 s->is_jmp = DISAS_UPDATE;
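/* Apply the addressing-mode offset of an ARM single data transfer to var:
   a 12-bit immediate or a shifted register, added or subtracted according
   to insn bit 23 (U).  */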
905 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
906 TCGv var)
908 int val, rm, shift, shiftop;
909 TCGv offset;
911 if (!(insn & (1 << 25))) {
912 /* immediate */
913 val = insn & 0xfff;
914 if (!(insn & (1 << 23)))
915 val = -val;
916 if (val != 0)
917 tcg_gen_addi_i32(var, var, val);
918 } else {
919 /* shift/register */
920 rm = (insn) & 0xf;
921 shift = (insn >> 7) & 0x1f;
922 shiftop = (insn >> 5) & 3;
923 offset = load_reg(s, rm);
924 gen_arm_shift_im(offset, shiftop, shift, 0);
925 if (!(insn & (1 << 23)))
926 tcg_gen_sub_i32(var, var, offset);
927 else
928 tcg_gen_add_i32(var, var, offset);
929 dead_tmp(offset);
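/* Same as gen_add_data_offset, but for the halfword/doubleword transfer
   forms, which use a split 8-bit immediate or a plain register offset;
   'extra' is an additional constant added to the address.  */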
933 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
934 int extra, TCGv var)
936 int val, rm;
937 TCGv offset;
939 if (insn & (1 << 22)) {
940 /* immediate */
941 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
942 if (!(insn & (1 << 23)))
943 val = -val;
944 val += extra;
945 if (val != 0)
946 tcg_gen_addi_i32(var, var, val);
947 } else {
948 /* register */
949 if (extra)
950 tcg_gen_addi_i32(var, var, extra);
951 rm = (insn) & 0xf;
952 offset = load_reg(s, rm);
953 if (!(insn & (1 << 23)))
954 tcg_gen_sub_i32(var, var, offset);
955 else
956 tcg_gen_add_i32(var, var, offset);
957 dead_tmp(offset);
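/* Emit a two-operand VFP arithmetic op on F0 and F1, picking the single-
   or double-precision helper according to dp.  */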
961 #define VFP_OP2(name) \
962 static inline void gen_vfp_##name(int dp) \
964 if (dp) \
965 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
966 else \
967 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
970 VFP_OP2(add)
971 VFP_OP2(sub)
972 VFP_OP2(mul)
973 VFP_OP2(div)
975 #undef VFP_OP2
977 static inline void gen_vfp_abs(int dp)
979 if (dp)
980 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
981 else
982 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
985 static inline void gen_vfp_neg(int dp)
987 if (dp)
988 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
989 else
990 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
993 static inline void gen_vfp_sqrt(int dp)
995 if (dp)
996 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
997 else
998 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1001 static inline void gen_vfp_cmp(int dp)
1003 if (dp)
1004 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1005 else
1006 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1009 static inline void gen_vfp_cmpe(int dp)
1011 if (dp)
1012 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1013 else
1014 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1017 static inline void gen_vfp_F1_ld0(int dp)
1019 if (dp)
1020 tcg_gen_movi_i64(cpu_F1d, 0);
1021 else
1022 tcg_gen_movi_i32(cpu_F1s, 0);
1025 static inline void gen_vfp_uito(int dp)
1027 if (dp)
1028 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1029 else
1030 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1033 static inline void gen_vfp_sito(int dp)
1035 if (dp)
1036 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
1037 else
1038 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
1041 static inline void gen_vfp_toui(int dp)
1043 if (dp)
1044 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1045 else
1046 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1049 static inline void gen_vfp_touiz(int dp)
1051 if (dp)
1052 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1053 else
1054 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1057 static inline void gen_vfp_tosi(int dp)
1059 if (dp)
1060 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1061 else
1062 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1065 static inline void gen_vfp_tosiz(int dp)
1067 if (dp)
1068 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1069 else
1070 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1073 #define VFP_GEN_FIX(name) \
1074 static inline void gen_vfp_##name(int dp, int shift) \
1076 if (dp) \
1077 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1078 else \
1079 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1081 VFP_GEN_FIX(tosh)
1082 VFP_GEN_FIX(tosl)
1083 VFP_GEN_FIX(touh)
1084 VFP_GEN_FIX(toul)
1085 VFP_GEN_FIX(shto)
1086 VFP_GEN_FIX(slto)
1087 VFP_GEN_FIX(uhto)
1088 VFP_GEN_FIX(ulto)
1089 #undef VFP_GEN_FIX
1091 static inline void gen_vfp_ld(DisasContext *s, int dp)
1093 if (dp)
1094 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
1095 else
1096 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
1099 static inline void gen_vfp_st(DisasContext *s, int dp)
1101 if (dp)
1102 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
1103 else
1104 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
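/* Offset of a VFP register within CPUARMState: double-precision registers
   map directly onto vfp.regs[], while single-precision registers are the
   32-bit halves of those entries (even register numbers in the low half).  */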
1107 static inline long
1108 vfp_reg_offset (int dp, int reg)
1110 if (dp)
1111 return offsetof(CPUARMState, vfp.regs[reg]);
1112 else if (reg & 1) {
1113 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1114 + offsetof(CPU_DoubleU, l.upper);
1115 } else {
1116 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1117 + offsetof(CPU_DoubleU, l.lower);
1121 /* Return the offset of a 32-bit piece of a NEON register.
 1122    Zero is the least significant end of the register.  */
1123 static inline long
1124 neon_reg_offset (int reg, int n)
1126 int sreg;
1127 sreg = reg * 2 + n;
1128 return vfp_reg_offset(0, sreg);
1131 /* FIXME: Remove these. */
1132 #define neon_T0 cpu_T[0]
1133 #define neon_T1 cpu_T[1]
1134 #define NEON_GET_REG(T, reg, n) \
1135 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1136 #define NEON_SET_REG(T, reg, n) \
1137 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1139 static TCGv neon_load_reg(int reg, int pass)
1141 TCGv tmp = new_tmp();
1142 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1143 return tmp;
1146 static void neon_store_reg(int reg, int pass, TCGv var)
1148 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1149 dead_tmp(var);
1152 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1154 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1157 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1159 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1162 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1163 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1164 #define tcg_gen_st_f32 tcg_gen_st_i32
1165 #define tcg_gen_st_f64 tcg_gen_st_i64
1167 static inline void gen_mov_F0_vreg(int dp, int reg)
1169 if (dp)
1170 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1171 else
1172 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1175 static inline void gen_mov_F1_vreg(int dp, int reg)
1177 if (dp)
1178 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1179 else
1180 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1183 static inline void gen_mov_vreg_F0(int dp, int reg)
1185 if (dp)
1186 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1187 else
1188 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1191 #define ARM_CP_RW_BIT (1 << 20)
1193 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1195 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1198 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1200 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1203 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1205 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1208 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1210 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1213 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1215 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1218 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1220 iwmmxt_store_reg(cpu_M0, rn);
1223 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1225 iwmmxt_load_reg(cpu_M0, rn);
1228 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1230 iwmmxt_load_reg(cpu_V1, rn);
1231 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1234 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1236 iwmmxt_load_reg(cpu_V1, rn);
1237 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1240 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1242 iwmmxt_load_reg(cpu_V1, rn);
1243 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
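/* Generate an iwMMXt binary op on M0 and wRn, leaving the result in M0.
   The _ENV variants also pass cpu_env for helpers that need CPU state
   (e.g. saturation flags); _ENV_SIZE instantiates byte/word/long forms.  */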
1246 #define IWMMXT_OP(name) \
1247 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1249 iwmmxt_load_reg(cpu_V1, rn); \
1250 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1253 #define IWMMXT_OP_ENV(name) \
1254 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1256 iwmmxt_load_reg(cpu_V1, rn); \
1257 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1260 #define IWMMXT_OP_ENV_SIZE(name) \
1261 IWMMXT_OP_ENV(name##b) \
1262 IWMMXT_OP_ENV(name##w) \
1263 IWMMXT_OP_ENV(name##l)
1265 #define IWMMXT_OP_ENV1(name) \
1266 static inline void gen_op_iwmmxt_##name##_M0(void) \
1268 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1271 IWMMXT_OP(maddsq)
1272 IWMMXT_OP(madduq)
1273 IWMMXT_OP(sadb)
1274 IWMMXT_OP(sadw)
1275 IWMMXT_OP(mulslw)
1276 IWMMXT_OP(mulshw)
1277 IWMMXT_OP(mululw)
1278 IWMMXT_OP(muluhw)
1279 IWMMXT_OP(macsw)
1280 IWMMXT_OP(macuw)
1282 IWMMXT_OP_ENV_SIZE(unpackl)
1283 IWMMXT_OP_ENV_SIZE(unpackh)
1285 IWMMXT_OP_ENV1(unpacklub)
1286 IWMMXT_OP_ENV1(unpackluw)
1287 IWMMXT_OP_ENV1(unpacklul)
1288 IWMMXT_OP_ENV1(unpackhub)
1289 IWMMXT_OP_ENV1(unpackhuw)
1290 IWMMXT_OP_ENV1(unpackhul)
1291 IWMMXT_OP_ENV1(unpacklsb)
1292 IWMMXT_OP_ENV1(unpacklsw)
1293 IWMMXT_OP_ENV1(unpacklsl)
1294 IWMMXT_OP_ENV1(unpackhsb)
1295 IWMMXT_OP_ENV1(unpackhsw)
1296 IWMMXT_OP_ENV1(unpackhsl)
1298 IWMMXT_OP_ENV_SIZE(cmpeq)
1299 IWMMXT_OP_ENV_SIZE(cmpgtu)
1300 IWMMXT_OP_ENV_SIZE(cmpgts)
1302 IWMMXT_OP_ENV_SIZE(mins)
1303 IWMMXT_OP_ENV_SIZE(minu)
1304 IWMMXT_OP_ENV_SIZE(maxs)
1305 IWMMXT_OP_ENV_SIZE(maxu)
1307 IWMMXT_OP_ENV_SIZE(subn)
1308 IWMMXT_OP_ENV_SIZE(addn)
1309 IWMMXT_OP_ENV_SIZE(subu)
1310 IWMMXT_OP_ENV_SIZE(addu)
1311 IWMMXT_OP_ENV_SIZE(subs)
1312 IWMMXT_OP_ENV_SIZE(adds)
1314 IWMMXT_OP_ENV(avgb0)
1315 IWMMXT_OP_ENV(avgb1)
1316 IWMMXT_OP_ENV(avgw0)
1317 IWMMXT_OP_ENV(avgw1)
1319 IWMMXT_OP(msadb)
1321 IWMMXT_OP_ENV(packuw)
1322 IWMMXT_OP_ENV(packul)
1323 IWMMXT_OP_ENV(packuq)
1324 IWMMXT_OP_ENV(packsw)
1325 IWMMXT_OP_ENV(packsl)
1326 IWMMXT_OP_ENV(packsq)
1328 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1330 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1333 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1335 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1338 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1340 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1343 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1345 iwmmxt_load_reg(cpu_V1, rn);
1346 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1349 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1351 TCGv tmp = tcg_const_i32(shift);
1352 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1355 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1357 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1358 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1359 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1362 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1364 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1365 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1366 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1369 static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1371 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1372 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1373 if (mask != ~0u)
1374 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
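/* Note in wCon that the wR data registers (bit 1, set_mup) or the control
   registers (bit 0, set_cup) have been updated.  */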
1377 static void gen_op_iwmmxt_set_mup(void)
1379 TCGv tmp;
1380 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1381 tcg_gen_ori_i32(tmp, tmp, 2);
1382 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1385 static void gen_op_iwmmxt_set_cup(void)
1387 TCGv tmp;
1388 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1389 tcg_gen_ori_i32(tmp, tmp, 1);
1390 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1393 static void gen_op_iwmmxt_setpsr_nz(void)
1395 TCGv tmp = new_tmp();
1396 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1397 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1400 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1402 iwmmxt_load_reg(cpu_V1, rn);
1403 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1404 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1408 static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1410 iwmmxt_load_reg(cpu_V0, rn);
1411 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1412 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1413 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1416 static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1418 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
1419 iwmmxt_store_reg(cpu_V0, rn);
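/* Compute the address for an iwMMXt load/store into T1, handling pre/post
   indexing and base-register writeback.  Returns nonzero for an invalid
   addressing form.  */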
1422 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1424 int rd;
1425 uint32_t offset;
1427 rd = (insn >> 16) & 0xf;
1428 gen_movl_T1_reg(s, rd);
1430 offset = (insn & 0xff) << ((insn >> 7) & 2);
1431 if (insn & (1 << 24)) {
1432 /* Pre indexed */
1433 if (insn & (1 << 23))
1434 gen_op_addl_T1_im(offset);
1435 else
1436 gen_op_addl_T1_im(-offset);
1438 if (insn & (1 << 21))
1439 gen_movl_reg_T1(s, rd);
1440 } else if (insn & (1 << 21)) {
1441 /* Post indexed */
1442 if (insn & (1 << 23))
1443 gen_op_movl_T0_im(offset);
1444 else
1445 gen_op_movl_T0_im(- offset);
1446 gen_op_addl_T0_T1();
1447 gen_movl_reg_T0(s, rd);
1448 } else if (!(insn & (1 << 23)))
1449 return 1;
1450 return 0;
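/* Fetch an iwMMXt shift amount into T0: from a wCGR control register when
   insn bit 8 is set, otherwise from the low half of a wR register, then
   mask it.  Returns nonzero for an invalid register.  */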
1453 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1455 int rd = (insn >> 0) & 0xf;
1457 if (insn & (1 << 8))
1458 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1459 return 1;
1460 else
1461 gen_op_iwmmxt_movl_T0_wCx(rd);
1462 else
1463 gen_iwmmxt_movl_T0_T1_wRn(rd);
1465 gen_op_movl_T1_im(mask);
1466 gen_op_andl_T0_T1();
1467 return 0;
 1470 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
 1471    (i.e. an undefined instruction).  */
1472 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1474 int rd, wrd;
1475 int rdhi, rdlo, rd0, rd1, i;
1476 TCGv tmp;
1478 if ((insn & 0x0e000e00) == 0x0c000000) {
1479 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1480 wrd = insn & 0xf;
1481 rdlo = (insn >> 12) & 0xf;
1482 rdhi = (insn >> 16) & 0xf;
1483 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1484 gen_iwmmxt_movl_T0_T1_wRn(wrd);
1485 gen_movl_reg_T0(s, rdlo);
1486 gen_movl_reg_T1(s, rdhi);
1487 } else { /* TMCRR */
1488 gen_movl_T0_reg(s, rdlo);
1489 gen_movl_T1_reg(s, rdhi);
1490 gen_iwmmxt_movl_wRn_T0_T1(wrd);
1491 gen_op_iwmmxt_set_mup();
1493 return 0;
1496 wrd = (insn >> 12) & 0xf;
1497 if (gen_iwmmxt_address(s, insn))
1498 return 1;
1499 if (insn & ARM_CP_RW_BIT) {
1500 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1501 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1502 tcg_gen_mov_i32(cpu_T[0], tmp);
1503 dead_tmp(tmp);
1504 gen_op_iwmmxt_movl_wCx_T0(wrd);
1505 } else {
1506 i = 1;
1507 if (insn & (1 << 8)) {
1508 if (insn & (1 << 22)) { /* WLDRD */
1509 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1510 i = 0;
1511 } else { /* WLDRW wRd */
1512 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1514 } else {
1515 if (insn & (1 << 22)) { /* WLDRH */
1516 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1517 } else { /* WLDRB */
1518 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1521 if (i) {
1522 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1523 dead_tmp(tmp);
1525 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 } else {
1528 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1529 gen_op_iwmmxt_movl_T0_wCx(wrd);
1530 tmp = new_tmp();
1531 tcg_gen_mov_i32(tmp, cpu_T[0]);
1532 gen_st32(tmp, cpu_T[1], IS_USER(s));
1533 } else {
1534 gen_op_iwmmxt_movq_M0_wRn(wrd);
1535 tmp = new_tmp();
1536 if (insn & (1 << 8)) {
1537 if (insn & (1 << 22)) { /* WSTRD */
1538 dead_tmp(tmp);
1539 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1540 } else { /* WSTRW wRd */
1541 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1542 gen_st32(tmp, cpu_T[1], IS_USER(s));
1544 } else {
1545 if (insn & (1 << 22)) { /* WSTRH */
1546 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1547 gen_st16(tmp, cpu_T[1], IS_USER(s));
1548 } else { /* WSTRB */
1549 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1550 gen_st8(tmp, cpu_T[1], IS_USER(s));
1555 return 0;
1558 if ((insn & 0x0f000000) != 0x0e000000)
1559 return 1;
1561 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1562 case 0x000: /* WOR */
1563 wrd = (insn >> 12) & 0xf;
1564 rd0 = (insn >> 0) & 0xf;
1565 rd1 = (insn >> 16) & 0xf;
1566 gen_op_iwmmxt_movq_M0_wRn(rd0);
1567 gen_op_iwmmxt_orq_M0_wRn(rd1);
1568 gen_op_iwmmxt_setpsr_nz();
1569 gen_op_iwmmxt_movq_wRn_M0(wrd);
1570 gen_op_iwmmxt_set_mup();
1571 gen_op_iwmmxt_set_cup();
1572 break;
1573 case 0x011: /* TMCR */
1574 if (insn & 0xf)
1575 return 1;
1576 rd = (insn >> 12) & 0xf;
1577 wrd = (insn >> 16) & 0xf;
1578 switch (wrd) {
1579 case ARM_IWMMXT_wCID:
1580 case ARM_IWMMXT_wCASF:
1581 break;
1582 case ARM_IWMMXT_wCon:
1583 gen_op_iwmmxt_set_cup();
1584 /* Fall through. */
1585 case ARM_IWMMXT_wCSSF:
1586 gen_op_iwmmxt_movl_T0_wCx(wrd);
1587 gen_movl_T1_reg(s, rd);
1588 gen_op_bicl_T0_T1();
1589 gen_op_iwmmxt_movl_wCx_T0(wrd);
1590 break;
1591 case ARM_IWMMXT_wCGR0:
1592 case ARM_IWMMXT_wCGR1:
1593 case ARM_IWMMXT_wCGR2:
1594 case ARM_IWMMXT_wCGR3:
1595 gen_op_iwmmxt_set_cup();
1596 gen_movl_reg_T0(s, rd);
1597 gen_op_iwmmxt_movl_wCx_T0(wrd);
1598 break;
1599 default:
1600 return 1;
1602 break;
1603 case 0x100: /* WXOR */
1604 wrd = (insn >> 12) & 0xf;
1605 rd0 = (insn >> 0) & 0xf;
1606 rd1 = (insn >> 16) & 0xf;
1607 gen_op_iwmmxt_movq_M0_wRn(rd0);
1608 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1609 gen_op_iwmmxt_setpsr_nz();
1610 gen_op_iwmmxt_movq_wRn_M0(wrd);
1611 gen_op_iwmmxt_set_mup();
1612 gen_op_iwmmxt_set_cup();
1613 break;
1614 case 0x111: /* TMRC */
1615 if (insn & 0xf)
1616 return 1;
1617 rd = (insn >> 12) & 0xf;
1618 wrd = (insn >> 16) & 0xf;
1619 gen_op_iwmmxt_movl_T0_wCx(wrd);
1620 gen_movl_reg_T0(s, rd);
1621 break;
1622 case 0x300: /* WANDN */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 0) & 0xf;
1625 rd1 = (insn >> 16) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1628 gen_op_iwmmxt_andq_M0_wRn(rd1);
1629 gen_op_iwmmxt_setpsr_nz();
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x200: /* WAND */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 0) & 0xf;
1637 rd1 = (insn >> 16) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1644 break;
1645 case 0x810: case 0xa10: /* WMADD */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 if (insn & (1 << 21))
1651 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1652 else
1653 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1654 gen_op_iwmmxt_movq_wRn_M0(wrd);
1655 gen_op_iwmmxt_set_mup();
1656 break;
1657 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1658 wrd = (insn >> 12) & 0xf;
1659 rd0 = (insn >> 16) & 0xf;
1660 rd1 = (insn >> 0) & 0xf;
1661 gen_op_iwmmxt_movq_M0_wRn(rd0);
1662 switch ((insn >> 22) & 3) {
1663 case 0:
1664 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1665 break;
1666 case 1:
1667 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1668 break;
1669 case 2:
1670 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1671 break;
1672 case 3:
1673 return 1;
1675 gen_op_iwmmxt_movq_wRn_M0(wrd);
1676 gen_op_iwmmxt_set_mup();
1677 gen_op_iwmmxt_set_cup();
1678 break;
1679 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1680 wrd = (insn >> 12) & 0xf;
1681 rd0 = (insn >> 16) & 0xf;
1682 rd1 = (insn >> 0) & 0xf;
1683 gen_op_iwmmxt_movq_M0_wRn(rd0);
1684 switch ((insn >> 22) & 3) {
1685 case 0:
1686 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1687 break;
1688 case 1:
1689 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1690 break;
1691 case 2:
1692 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1693 break;
1694 case 3:
1695 return 1;
1697 gen_op_iwmmxt_movq_wRn_M0(wrd);
1698 gen_op_iwmmxt_set_mup();
1699 gen_op_iwmmxt_set_cup();
1700 break;
1701 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1702 wrd = (insn >> 12) & 0xf;
1703 rd0 = (insn >> 16) & 0xf;
1704 rd1 = (insn >> 0) & 0xf;
1705 gen_op_iwmmxt_movq_M0_wRn(rd0);
1706 if (insn & (1 << 22))
1707 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1708 else
1709 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1710 if (!(insn & (1 << 20)))
1711 gen_op_iwmmxt_addl_M0_wRn(wrd);
1712 gen_op_iwmmxt_movq_wRn_M0(wrd);
1713 gen_op_iwmmxt_set_mup();
1714 break;
1715 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1716 wrd = (insn >> 12) & 0xf;
1717 rd0 = (insn >> 16) & 0xf;
1718 rd1 = (insn >> 0) & 0xf;
1719 gen_op_iwmmxt_movq_M0_wRn(rd0);
1720 if (insn & (1 << 21)) {
1721 if (insn & (1 << 20))
1722 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1723 else
1724 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1725 } else {
1726 if (insn & (1 << 20))
1727 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1728 else
1729 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1731 gen_op_iwmmxt_movq_wRn_M0(wrd);
1732 gen_op_iwmmxt_set_mup();
1733 break;
1734 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1735 wrd = (insn >> 12) & 0xf;
1736 rd0 = (insn >> 16) & 0xf;
1737 rd1 = (insn >> 0) & 0xf;
1738 gen_op_iwmmxt_movq_M0_wRn(rd0);
1739 if (insn & (1 << 21))
1740 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1741 else
1742 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1743 if (!(insn & (1 << 20))) {
1744 iwmmxt_load_reg(cpu_V1, wrd);
1745 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 break;
1750 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 16) & 0xf;
1753 rd1 = (insn >> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
1755 switch ((insn >> 22) & 3) {
1756 case 0:
1757 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1758 break;
1759 case 1:
1760 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1761 break;
1762 case 2:
1763 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1764 break;
1765 case 3:
1766 return 1;
1768 gen_op_iwmmxt_movq_wRn_M0(wrd);
1769 gen_op_iwmmxt_set_mup();
1770 gen_op_iwmmxt_set_cup();
1771 break;
1772 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1773 wrd = (insn >> 12) & 0xf;
1774 rd0 = (insn >> 16) & 0xf;
1775 rd1 = (insn >> 0) & 0xf;
1776 gen_op_iwmmxt_movq_M0_wRn(rd0);
1777 if (insn & (1 << 22)) {
1778 if (insn & (1 << 20))
1779 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1780 else
1781 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1782 } else {
1783 if (insn & (1 << 20))
1784 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1785 else
1786 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1788 gen_op_iwmmxt_movq_wRn_M0(wrd);
1789 gen_op_iwmmxt_set_mup();
1790 gen_op_iwmmxt_set_cup();
1791 break;
1792 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1793 wrd = (insn >> 12) & 0xf;
1794 rd0 = (insn >> 16) & 0xf;
1795 rd1 = (insn >> 0) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0);
1797 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1798 gen_op_movl_T1_im(7);
1799 gen_op_andl_T0_T1();
1800 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1801 gen_op_iwmmxt_movq_wRn_M0(wrd);
1802 gen_op_iwmmxt_set_mup();
1803 break;
1804 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1805 rd = (insn >> 12) & 0xf;
1806 wrd = (insn >> 16) & 0xf;
1807 gen_movl_T0_reg(s, rd);
1808 gen_op_iwmmxt_movq_M0_wRn(wrd);
1809 switch ((insn >> 6) & 3) {
1810 case 0:
1811 gen_op_movl_T1_im(0xff);
1812 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1813 break;
1814 case 1:
1815 gen_op_movl_T1_im(0xffff);
1816 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1817 break;
1818 case 2:
1819 gen_op_movl_T1_im(0xffffffff);
1820 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1821 break;
1822 case 3:
1823 return 1;
1825 gen_op_iwmmxt_movq_wRn_M0(wrd);
1826 gen_op_iwmmxt_set_mup();
1827 break;
1828 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1829 rd = (insn >> 12) & 0xf;
1830 wrd = (insn >> 16) & 0xf;
1831 if (rd == 15)
1832 return 1;
1833 gen_op_iwmmxt_movq_M0_wRn(wrd);
1834 switch ((insn >> 22) & 3) {
1835 case 0:
1836 if (insn & 8)
1837 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1838 else {
1839 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
1841 break;
1842 case 1:
1843 if (insn & 8)
1844 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1845 else {
1846 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
1848 break;
1849 case 2:
1850 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
1851 break;
1852 case 3:
1853 return 1;
1855 gen_movl_reg_T0(s, rd);
1856 break;
1857 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1858 if ((insn & 0x000ff008) != 0x0003f000)
1859 return 1;
1860 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1861 switch ((insn >> 22) & 3) {
1862 case 0:
1863 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1864 break;
1865 case 1:
1866 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1867 break;
1868 case 2:
1869 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1870 break;
1871 case 3:
1872 return 1;
1874 gen_op_shll_T1_im(28);
1875 gen_set_nzcv(cpu_T[1]);
1876 break;
1877 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1878 rd = (insn >> 12) & 0xf;
1879 wrd = (insn >> 16) & 0xf;
1880 gen_movl_T0_reg(s, rd);
1881 switch ((insn >> 6) & 3) {
1882 case 0:
1883 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
1884 break;
1885 case 1:
1886 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
1887 break;
1888 case 2:
1889 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
1890 break;
1891 case 3:
1892 return 1;
1894 gen_op_iwmmxt_movq_wRn_M0(wrd);
1895 gen_op_iwmmxt_set_mup();
1896 break;
1897 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1898 if ((insn & 0x000ff00f) != 0x0003f000)
1899 return 1;
1900 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1901 switch ((insn >> 22) & 3) {
1902 case 0:
1903 for (i = 0; i < 7; i ++) {
1904 gen_op_shll_T1_im(4);
1905 gen_op_andl_T0_T1();
1907 break;
1908 case 1:
1909 for (i = 0; i < 3; i ++) {
1910 gen_op_shll_T1_im(8);
1911 gen_op_andl_T0_T1();
1913 break;
1914 case 2:
1915 gen_op_shll_T1_im(16);
1916 gen_op_andl_T0_T1();
1917 break;
1918 case 3:
1919 return 1;
1921 gen_set_nzcv(cpu_T[0]);
1922 break;
1923 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0);
1927 switch ((insn >> 22) & 3) {
1928 case 0:
1929 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1930 break;
1931 case 1:
1932 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1933 break;
1934 case 2:
1935 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1936 break;
1937 case 3:
1938 return 1;
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 break;
1943 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1944 if ((insn & 0x000ff00f) != 0x0003f000)
1945 return 1;
1946 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1947 switch ((insn >> 22) & 3) {
1948 case 0:
1949 for (i = 0; i < 7; i ++) {
1950 gen_op_shll_T1_im(4);
1951 gen_op_orl_T0_T1();
1953 break;
1954 case 1:
1955 for (i = 0; i < 3; i ++) {
1956 gen_op_shll_T1_im(8);
1957 gen_op_orl_T0_T1();
1959 break;
1960 case 2:
1961 gen_op_shll_T1_im(16);
1962 gen_op_orl_T0_T1();
1963 break;
1964 case 3:
1965 return 1;
1967 gen_set_nzcv(cpu_T[0]);
1968 break;
1969 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1970 rd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 if ((insn & 0xf) != 0)
1973 return 1;
1974 gen_op_iwmmxt_movq_M0_wRn(rd0);
1975 switch ((insn >> 22) & 3) {
1976 case 0:
1977 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
1978 break;
1979 case 1:
1980 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
1984 break;
1985 case 3:
1986 return 1;
1988 gen_movl_reg_T0(s, rd);
1989 break;
1990 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1991 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1992 wrd = (insn >> 12) & 0xf;
1993 rd0 = (insn >> 16) & 0xf;
1994 rd1 = (insn >> 0) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
1996 switch ((insn >> 22) & 3) {
1997 case 0:
1998 if (insn & (1 << 21))
1999 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2000 else
2001 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2002 break;
2003 case 1:
2004 if (insn & (1 << 21))
2005 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2006 else
2007 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2008 break;
2009 case 2:
2010 if (insn & (1 << 21))
2011 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2012 else
2013 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2014 break;
2015 case 3:
2016 return 1;
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2023 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
2027 switch ((insn >> 22) & 3) {
2028 case 0:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_unpacklsb_M0();
2031 else
2032 gen_op_iwmmxt_unpacklub_M0();
2033 break;
2034 case 1:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_unpacklsw_M0();
2037 else
2038 gen_op_iwmmxt_unpackluw_M0();
2039 break;
2040 case 2:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_unpacklsl_M0();
2043 else
2044 gen_op_iwmmxt_unpacklul_M0();
2045 break;
2046 case 3:
2047 return 1;
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 gen_op_iwmmxt_set_cup();
2052 break;
2053 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2054 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 gen_op_iwmmxt_movq_M0_wRn(rd0);
2058 switch ((insn >> 22) & 3) {
2059 case 0:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpackhsb_M0();
2062 else
2063 gen_op_iwmmxt_unpackhub_M0();
2064 break;
2065 case 1:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpackhsw_M0();
2068 else
2069 gen_op_iwmmxt_unpackhuw_M0();
2070 break;
2071 case 2:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_unpackhsl_M0();
2074 else
2075 gen_op_iwmmxt_unpackhul_M0();
2076 break;
2077 case 3:
2078 return 1;
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2085 case 0x214: case 0x614: case 0xa14: case 0xe14:
2086 wrd = (insn >> 12) & 0xf;
2087 rd0 = (insn >> 16) & 0xf;
2088 gen_op_iwmmxt_movq_M0_wRn(rd0);
2089 if (gen_iwmmxt_shift(insn, 0xff))
2090 return 1;
2091 switch ((insn >> 22) & 3) {
2092 case 0:
2093 return 1;
2094 case 1:
2095 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2096 break;
2097 case 2:
2098 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2099 break;
2100 case 3:
2101 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2102 break;
2104 gen_op_iwmmxt_movq_wRn_M0(wrd);
2105 gen_op_iwmmxt_set_mup();
2106 gen_op_iwmmxt_set_cup();
2107 break;
2108 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2109 case 0x014: case 0x414: case 0x814: case 0xc14:
2110 wrd = (insn >> 12) & 0xf;
2111 rd0 = (insn >> 16) & 0xf;
2112 gen_op_iwmmxt_movq_M0_wRn(rd0);
2113 if (gen_iwmmxt_shift(insn, 0xff))
2114 return 1;
2115 switch ((insn >> 22) & 3) {
2116 case 0:
2117 return 1;
2118 case 1:
2119 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2120 break;
2121 case 2:
2122 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2123 break;
2124 case 3:
2125 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2126 break;
2128 gen_op_iwmmxt_movq_wRn_M0(wrd);
2129 gen_op_iwmmxt_set_mup();
2130 gen_op_iwmmxt_set_cup();
2131 break;
2132 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2133 case 0x114: case 0x514: case 0x914: case 0xd14:
2134 wrd = (insn >> 12) & 0xf;
2135 rd0 = (insn >> 16) & 0xf;
2136 gen_op_iwmmxt_movq_M0_wRn(rd0);
2137 if (gen_iwmmxt_shift(insn, 0xff))
2138 return 1;
2139 switch ((insn >> 22) & 3) {
2140 case 0:
2141 return 1;
2142 case 1:
2143 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2144 break;
2145 case 2:
2146 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2147 break;
2148 case 3:
2149 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2150 break;
2152 gen_op_iwmmxt_movq_wRn_M0(wrd);
2153 gen_op_iwmmxt_set_mup();
2154 gen_op_iwmmxt_set_cup();
2155 break;
2156 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2157 case 0x314: case 0x714: case 0xb14: case 0xf14:
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 gen_op_iwmmxt_movq_M0_wRn(rd0);
2161 switch ((insn >> 22) & 3) {
2162 case 0:
2163 return 1;
2164 case 1:
2165 if (gen_iwmmxt_shift(insn, 0xf))
2166 return 1;
2167 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2168 break;
2169 case 2:
2170 if (gen_iwmmxt_shift(insn, 0x1f))
2171 return 1;
2172 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2173 break;
2174 case 3:
2175 if (gen_iwmmxt_shift(insn, 0x3f))
2176 return 1;
2177 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2178 break;
2180 gen_op_iwmmxt_movq_wRn_M0(wrd);
2181 gen_op_iwmmxt_set_mup();
2182 gen_op_iwmmxt_set_cup();
2183 break;
2184 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2185 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2186 wrd = (insn >> 12) & 0xf;
2187 rd0 = (insn >> 16) & 0xf;
2188 rd1 = (insn >> 0) & 0xf;
2189 gen_op_iwmmxt_movq_M0_wRn(rd0);
2190 switch ((insn >> 22) & 3) {
2191 case 0:
2192 if (insn & (1 << 21))
2193 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2194 else
2195 gen_op_iwmmxt_minub_M0_wRn(rd1);
2196 break;
2197 case 1:
2198 if (insn & (1 << 21))
2199 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2200 else
2201 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2202 break;
2203 case 2:
2204 if (insn & (1 << 21))
2205 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2206 else
2207 gen_op_iwmmxt_minul_M0_wRn(rd1);
2208 break;
2209 case 3:
2210 return 1;
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 break;
2215 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2216 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2217 wrd = (insn >> 12) & 0xf;
2218 rd0 = (insn >> 16) & 0xf;
2219 rd1 = (insn >> 0) & 0xf;
2220 gen_op_iwmmxt_movq_M0_wRn(rd0);
2221 switch ((insn >> 22) & 3) {
2222 case 0:
2223 if (insn & (1 << 21))
2224 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2225 else
2226 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2227 break;
2228 case 1:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2233 break;
2234 case 2:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2239 break;
2240 case 3:
2241 return 1;
2243 gen_op_iwmmxt_movq_wRn_M0(wrd);
2244 gen_op_iwmmxt_set_mup();
2245 break;
2246 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2247 case 0x402: case 0x502: case 0x602: case 0x702:
2248 wrd = (insn >> 12) & 0xf;
2249 rd0 = (insn >> 16) & 0xf;
2250 rd1 = (insn >> 0) & 0xf;
2251 gen_op_iwmmxt_movq_M0_wRn(rd0);
2252 gen_op_movl_T0_im((insn >> 20) & 3);
2253 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 break;
2257 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2258 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2259 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2260 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2261 wrd = (insn >> 12) & 0xf;
2262 rd0 = (insn >> 16) & 0xf;
2263 rd1 = (insn >> 0) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
2265 switch ((insn >> 20) & 0xf) {
2266 case 0x0:
2267 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2268 break;
2269 case 0x1:
2270 gen_op_iwmmxt_subub_M0_wRn(rd1);
2271 break;
2272 case 0x3:
2273 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2274 break;
2275 case 0x4:
2276 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2277 break;
2278 case 0x5:
2279 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2280 break;
2281 case 0x7:
2282 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2283 break;
2284 case 0x8:
2285 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2286 break;
2287 case 0x9:
2288 gen_op_iwmmxt_subul_M0_wRn(rd1);
2289 break;
2290 case 0xb:
2291 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2292 break;
2293 default:
2294 return 1;
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 gen_op_iwmmxt_set_cup();
2299 break;
2300 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2301 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2302 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2303 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2304 wrd = (insn >> 12) & 0xf;
2305 rd0 = (insn >> 16) & 0xf;
2306 gen_op_iwmmxt_movq_M0_wRn(rd0);
2307 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
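/* The 8-bit WSHUFH lane selector is split across the encoding: bits
   [19:16] supply the high nibble and bits [3:0] the low nibble. */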
2308 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2309 gen_op_iwmmxt_movq_wRn_M0(wrd);
2310 gen_op_iwmmxt_set_mup();
2311 gen_op_iwmmxt_set_cup();
2312 break;
2313 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2314 case 0x418: case 0x518: case 0x618: case 0x718:
2315 case 0x818: case 0x918: case 0xa18: case 0xb18:
2316 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2317 wrd = (insn >> 12) & 0xf;
2318 rd0 = (insn >> 16) & 0xf;
2319 rd1 = (insn >> 0) & 0xf;
2320 gen_op_iwmmxt_movq_M0_wRn(rd0);
2321 switch ((insn >> 20) & 0xf) {
2322 case 0x0:
2323 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2324 break;
2325 case 0x1:
2326 gen_op_iwmmxt_addub_M0_wRn(rd1);
2327 break;
2328 case 0x3:
2329 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2330 break;
2331 case 0x4:
2332 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2333 break;
2334 case 0x5:
2335 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2336 break;
2337 case 0x7:
2338 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2339 break;
2340 case 0x8:
2341 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2342 break;
2343 case 0x9:
2344 gen_op_iwmmxt_addul_M0_wRn(rd1);
2345 break;
2346 case 0xb:
2347 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2348 break;
2349 default:
2350 return 1;
2352 gen_op_iwmmxt_movq_wRn_M0(wrd);
2353 gen_op_iwmmxt_set_mup();
2354 gen_op_iwmmxt_set_cup();
2355 break;
2356 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2357 case 0x408: case 0x508: case 0x608: case 0x708:
2358 case 0x808: case 0x908: case 0xa08: case 0xb08:
2359 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2360 wrd = (insn >> 12) & 0xf;
2361 rd0 = (insn >> 16) & 0xf;
2362 rd1 = (insn >> 0) & 0xf;
2363 gen_op_iwmmxt_movq_M0_wRn(rd0);
2364 if (!(insn & (1 << 20)))
2365 return 1;
2366 switch ((insn >> 22) & 3) {
2367 case 0:
2368 return 1;
2369 case 1:
2370 if (insn & (1 << 21))
2371 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2372 else
2373 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2374 break;
2375 case 2:
2376 if (insn & (1 << 21))
2377 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2378 else
2379 gen_op_iwmmxt_packul_M0_wRn(rd1);
2380 break;
2381 case 3:
2382 if (insn & (1 << 21))
2383 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2384 else
2385 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2386 break;
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 gen_op_iwmmxt_set_cup();
2391 break;
2392 case 0x201: case 0x203: case 0x205: case 0x207:
2393 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2394 case 0x211: case 0x213: case 0x215: case 0x217:
2395 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2396 wrd = (insn >> 5) & 0xf;
2397 rd0 = (insn >> 12) & 0xf;
2398 rd1 = (insn >> 0) & 0xf;
2399 if (rd0 == 0xf || rd1 == 0xf)
2400 return 1;
2401 gen_op_iwmmxt_movq_M0_wRn(wrd);
2402 switch ((insn >> 16) & 0xf) {
2403 case 0x0: /* TMIA */
2404 gen_movl_T0_reg(s, rd0);
2405 gen_movl_T1_reg(s, rd1);
2406 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2407 break;
2408 case 0x8: /* TMIAPH */
2409 gen_movl_T0_reg(s, rd0);
2410 gen_movl_T1_reg(s, rd1);
2411 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2412 break;
2413 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2414 gen_movl_T1_reg(s, rd0);
2415 if (insn & (1 << 16))
2416 gen_op_shrl_T1_im(16);
2417 gen_op_movl_T0_T1();
2418 gen_movl_T1_reg(s, rd1);
2419 if (insn & (1 << 17))
2420 gen_op_shrl_T1_im(16);
2421 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2422 break;
2423 default:
2424 return 1;
2426 gen_op_iwmmxt_movq_wRn_M0(wrd);
2427 gen_op_iwmmxt_set_mup();
2428 break;
2429 default:
2430 return 1;
2433 return 0;
2436 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2437 (i.e. an undefined instruction). */
2438 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2440 int acc, rd0, rd1, rdhi, rdlo;
2442 if ((insn & 0x0ff00f10) == 0x0e200010) {
2443 /* Multiply with Internal Accumulate Format */
2444 rd0 = (insn >> 12) & 0xf;
2445 rd1 = insn & 0xf;
2446 acc = (insn >> 5) & 7;
2448 if (acc != 0)
2449 return 1;
2451 switch ((insn >> 16) & 0xf) {
2452 case 0x0: /* MIA */
2453 gen_movl_T0_reg(s, rd0);
2454 gen_movl_T1_reg(s, rd1);
2455 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2456 break;
2457 case 0x8: /* MIAPH */
2458 gen_movl_T0_reg(s, rd0);
2459 gen_movl_T1_reg(s, rd1);
2460 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2461 break;
2462 case 0xc: /* MIABB */
2463 case 0xd: /* MIABT */
2464 case 0xe: /* MIATB */
2465 case 0xf: /* MIATT */
2466 gen_movl_T1_reg(s, rd0);
2467 if (insn & (1 << 16))
2468 gen_op_shrl_T1_im(16);
2469 gen_op_movl_T0_T1();
2470 gen_movl_T1_reg(s, rd1);
2471 if (insn & (1 << 17))
2472 gen_op_shrl_T1_im(16);
2473 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2474 break;
2475 default:
2476 return 1;
2479 gen_op_iwmmxt_movq_wRn_M0(acc);
2480 return 0;
2483 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2484 /* Internal Accumulator Access Format */
2485 rdhi = (insn >> 16) & 0xf;
2486 rdlo = (insn >> 12) & 0xf;
2487 acc = insn & 7;
2489 if (acc != 0)
2490 return 1;
2492 if (insn & ARM_CP_RW_BIT) { /* MRA */
2493 gen_iwmmxt_movl_T0_T1_wRn(acc);
2494 gen_movl_reg_T0(s, rdlo);
2495 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
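/* The accumulator is 40 bits wide: only bits [39:32] of the high word
   are defined, so mask them before writing rdhi. */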
2496 gen_op_andl_T0_T1();
2497 gen_movl_reg_T0(s, rdhi);
2498 } else { /* MAR */
2499 gen_movl_T0_reg(s, rdlo);
2500 gen_movl_T1_reg(s, rdhi);
2501 gen_iwmmxt_movl_wRn_T0_T1(acc);
2503 return 0;
2506 return 1;
2509 /* Disassemble system coprocessor instruction. Return nonzero if the
2510 instruction is not defined. */
2511 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2513 TCGv tmp;
2514 uint32_t rd = (insn >> 12) & 0xf;
2515 uint32_t cp = (insn >> 8) & 0xf;
2516 if (IS_USER(s)) {
2517 return 1;
2520 if (insn & ARM_CP_RW_BIT) {
2521 if (!env->cp[cp].cp_read)
2522 return 1;
2523 gen_set_pc_im(s->pc);
2524 tmp = new_tmp();
2525 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2526 store_reg(s, rd, tmp);
2527 } else {
2528 if (!env->cp[cp].cp_write)
2529 return 1;
2530 gen_set_pc_im(s->pc);
2531 tmp = load_reg(s, rd);
2532 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2533 dead_tmp(tmp);
2535 return 0;
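/* Decide whether a cp15 access may be performed from user mode.  Only
   the TLS register accesses and the ISB/DSB/DMB encodings in c7 are
   permitted; everything else must trap. */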
2538 static int cp15_user_ok(uint32_t insn)
2540 int cpn = (insn >> 16) & 0xf;
2541 int cpm = insn & 0xf;
2542 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2544 if (cpn == 13 && cpm == 0) {
2545 /* TLS register. */
2546 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2547 return 1;
2549 if (cpn == 7) {
2550 /* ISB, DSB, DMB. */
2551 if ((cpm == 5 && op == 4)
2552 || (cpm == 10 && (op == 4 || op == 5)))
2553 return 1;
2555 return 0;
2558 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if the
2559 instruction is not defined. */
2560 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2562 uint32_t rd;
2563 TCGv tmp;
2565 /* M profile cores use memory mapped registers instead of cp15. */
2566 if (arm_feature(env, ARM_FEATURE_M))
2567 return 1;
2569 if ((insn & (1 << 25)) == 0) {
2570 if (insn & (1 << 20)) {
2571 /* mrrc */
2572 return 1;
2574 /* mcrr. Used for block cache operations, so implement as no-op. */
2575 return 0;
2577 if ((insn & (1 << 4)) == 0) {
2578 /* cdp */
2579 return 1;
2581 if (IS_USER(s) && !cp15_user_ok(insn)) {
2582 return 1;
2584 if ((insn & 0x0fff0fff) == 0x0e070f90
2585 || (insn & 0x0fff0fff) == 0x0e070f58) {
2586 /* Wait for interrupt. */
2587 gen_set_pc_im(s->pc);
2588 s->is_jmp = DISAS_WFI;
2589 return 0;
2591 rd = (insn >> 12) & 0xf;
2592 if (insn & ARM_CP_RW_BIT) {
2593 tmp = new_tmp();
2594 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2595 /* If the destination register is r15 then set the condition codes. */
2596 if (rd != 15)
2597 store_reg(s, rd, tmp);
2598 else
2599 dead_tmp(tmp);
2600 } else {
2601 tmp = load_reg(s, rd);
2602 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2603 dead_tmp(tmp);
2604 /* Normally we would always end the TB here, but Linux
2605 * arch/arm/mach-pxa/sleep.S expects two instructions following
2606 * an MMU enable to execute from cache. Imitate this behaviour. */
2607 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2608 (insn & 0x0fff0fff) != 0x0e010f10)
2609 gen_lookup_tb(s);
2611 return 0;
2614 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2615 #define VFP_SREG(insn, bigbit, smallbit) \
2616 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2617 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2618 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2619 reg = (((insn) >> (bigbit)) & 0x0f) \
2620 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2621 } else { \
2622 if (insn & (1 << (smallbit))) \
2623 return 1; \
2624 reg = ((insn) >> (bigbit)) & 0x0f; \
2625 }} while (0)
2627 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2628 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2629 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2630 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2631 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2632 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
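/* VFP register numbers are split between a 4-bit field and one extra bit.
   For single precision the extra bit is the LSB, e.g. Vd=0b0101, D=1
   decodes to s11.  For double precision it is the MSB (d16-d31), which is
   only valid with VFP3, so Vd=0b0101, D=1 decodes to d21. */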
2634 /* Move between integer and VFP cores. */
2635 static TCGv gen_vfp_mrs(void)
2637 TCGv tmp = new_tmp();
2638 tcg_gen_mov_i32(tmp, cpu_F0s);
2639 return tmp;
2642 static void gen_vfp_msr(TCGv tmp)
2644 tcg_gen_mov_i32(cpu_F0s, tmp);
2645 dead_tmp(tmp);
2648 static inline int
2649 vfp_enabled(CPUState * env)
2651 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2654 static void gen_neon_dup_u8(TCGv var, int shift)
2656 TCGv tmp = new_tmp();
2657 if (shift)
2658 tcg_gen_shri_i32(var, var, shift);
2659 tcg_gen_ext8u_i32(var, var);
2660 tcg_gen_shli_i32(tmp, var, 8);
2661 tcg_gen_or_i32(var, var, tmp);
2662 tcg_gen_shli_i32(tmp, var, 16);
2663 tcg_gen_or_i32(var, var, tmp);
2664 dead_tmp(tmp);
2667 static void gen_neon_dup_low16(TCGv var)
2669 TCGv tmp = new_tmp();
2670 tcg_gen_ext16u_i32(var, var);
2671 tcg_gen_shli_i32(tmp, var, 16);
2672 tcg_gen_or_i32(var, var, tmp);
2673 dead_tmp(tmp);
2676 static void gen_neon_dup_high16(TCGv var)
2678 TCGv tmp = new_tmp();
2679 tcg_gen_andi_i32(var, var, 0xffff0000);
2680 tcg_gen_shri_i32(tmp, var, 16);
2681 tcg_gen_or_i32(var, var, tmp);
2682 dead_tmp(tmp);
2685 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2686 (i.e. an undefined instruction). */
2687 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2689 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2690 int dp, veclen;
2691 TCGv tmp;
2692 TCGv tmp2;
2694 if (!arm_feature(env, ARM_FEATURE_VFP))
2695 return 1;
2697 if (!vfp_enabled(env)) {
2698 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2699 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2700 return 1;
2701 rn = (insn >> 16) & 0xf;
2702 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2703 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2704 return 1;
2706 dp = ((insn & 0xf00) == 0xb00);
2707 switch ((insn >> 24) & 0xf) {
2708 case 0xe:
2709 if (insn & (1 << 4)) {
2710 /* single register transfer */
2711 rd = (insn >> 12) & 0xf;
2712 if (dp) {
2713 int size;
2714 int pass;
2716 VFP_DREG_N(rn, insn);
2717 if (insn & 0xf)
2718 return 1;
2719 if (insn & 0x00c00060
2720 && !arm_feature(env, ARM_FEATURE_NEON))
2721 return 1;
2723 pass = (insn >> 21) & 1;
2724 if (insn & (1 << 22)) {
2725 size = 0;
2726 offset = ((insn >> 5) & 3) * 8;
2727 } else if (insn & (1 << 5)) {
2728 size = 1;
2729 offset = (insn & (1 << 6)) ? 16 : 0;
2730 } else {
2731 size = 2;
2732 offset = 0;
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 tmp = neon_load_reg(rn, pass);
2737 switch (size) {
2738 case 0:
2739 if (offset)
2740 tcg_gen_shri_i32(tmp, tmp, offset);
2741 if (insn & (1 << 23))
2742 gen_uxtb(tmp);
2743 else
2744 gen_sxtb(tmp);
2745 break;
2746 case 1:
2747 if (insn & (1 << 23)) {
2748 if (offset) {
2749 tcg_gen_shri_i32(tmp, tmp, 16);
2750 } else {
2751 gen_uxth(tmp);
2753 } else {
2754 if (offset) {
2755 tcg_gen_sari_i32(tmp, tmp, 16);
2756 } else {
2757 gen_sxth(tmp);
2760 break;
2761 case 2:
2762 break;
2764 store_reg(s, rd, tmp);
2765 } else {
2766 /* arm->vfp */
2767 tmp = load_reg(s, rd);
2768 if (insn & (1 << 23)) {
2769 /* VDUP */
2770 if (size == 0) {
2771 gen_neon_dup_u8(tmp, 0);
2772 } else if (size == 1) {
2773 gen_neon_dup_low16(tmp);
2775 for (n = 0; n <= pass * 2; n++) {
2776 tmp2 = new_tmp();
2777 tcg_gen_mov_i32(tmp2, tmp);
2778 neon_store_reg(rn, n, tmp2);
2780 neon_store_reg(rn, n, tmp);
2781 } else {
2782 /* VMOV */
2783 switch (size) {
2784 case 0:
2785 tmp2 = neon_load_reg(rn, pass);
2786 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2787 dead_tmp(tmp2);
2788 break;
2789 case 1:
2790 tmp2 = neon_load_reg(rn, pass);
2791 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2792 dead_tmp(tmp2);
2793 break;
2794 case 2:
2795 break;
2797 neon_store_reg(rn, pass, tmp);
2800 } else { /* !dp */
2801 if ((insn & 0x6f) != 0x00)
2802 return 1;
2803 rn = VFP_SREG_N(insn);
2804 if (insn & ARM_CP_RW_BIT) {
2805 /* vfp->arm */
2806 if (insn & (1 << 21)) {
2807 /* system register */
2808 rn >>= 1;
2810 switch (rn) {
2811 case ARM_VFP_FPSID:
2812 /* VFP2 allows access to FPSID from userspace.
2813 VFP3 restricts all id registers to privileged
2814 accesses. */
2815 if (IS_USER(s)
2816 && arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 tmp = load_cpu_field(vfp.xregs[rn]);
2819 break;
2820 case ARM_VFP_FPEXC:
2821 if (IS_USER(s))
2822 return 1;
2823 tmp = load_cpu_field(vfp.xregs[rn]);
2824 break;
2825 case ARM_VFP_FPINST:
2826 case ARM_VFP_FPINST2:
2827 /* Not present in VFP3. */
2828 if (IS_USER(s)
2829 || arm_feature(env, ARM_FEATURE_VFP3))
2830 return 1;
2831 tmp = load_cpu_field(vfp.xregs[rn]);
2832 break;
2833 case ARM_VFP_FPSCR:
2834 if (rd == 15) {
2835 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2836 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2837 } else {
2838 tmp = new_tmp();
2839 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2841 break;
2842 case ARM_VFP_MVFR0:
2843 case ARM_VFP_MVFR1:
2844 if (IS_USER(s)
2845 || !arm_feature(env, ARM_FEATURE_VFP3))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 default:
2850 return 1;
2852 } else {
2853 gen_mov_F0_vreg(0, rn);
2854 tmp = gen_vfp_mrs();
2856 if (rd == 15) {
2857 /* Set the 4 flag bits in the CPSR. */
2858 gen_set_nzcv(tmp);
2859 dead_tmp(tmp);
2860 } else {
2861 store_reg(s, rd, tmp);
2863 } else {
2864 /* arm->vfp */
2865 tmp = load_reg(s, rd);
2866 if (insn & (1 << 21)) {
2867 rn >>= 1;
2868 /* system register */
2869 switch (rn) {
2870 case ARM_VFP_FPSID:
2871 case ARM_VFP_MVFR0:
2872 case ARM_VFP_MVFR1:
2873 /* Writes are ignored. */
2874 break;
2875 case ARM_VFP_FPSCR:
2876 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2877 dead_tmp(tmp);
2878 gen_lookup_tb(s);
2879 break;
2880 case ARM_VFP_FPEXC:
2881 if (IS_USER(s))
2882 return 1;
2883 store_cpu_field(tmp, vfp.xregs[rn]);
2884 gen_lookup_tb(s);
2885 break;
2886 case ARM_VFP_FPINST:
2887 case ARM_VFP_FPINST2:
2888 store_cpu_field(tmp, vfp.xregs[rn]);
2889 break;
2890 default:
2891 return 1;
2893 } else {
2894 gen_vfp_msr(tmp);
2895 gen_mov_vreg_F0(0, rn);
2899 } else {
2900 /* data processing */
2901 /* The opcode is in bits 23, 21, 20 and 6. */
2902 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2903 if (dp) {
2904 if (op == 15) {
2905 /* rn is opcode */
2906 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2907 } else {
2908 /* rn is register number */
2909 VFP_DREG_N(rn, insn);
2912 if (op == 15 && (rn == 15 || rn > 17)) {
2913 /* Integer or single precision destination. */
2914 rd = VFP_SREG_D(insn);
2915 } else {
2916 VFP_DREG_D(rd, insn);
2919 if (op == 15 && (rn == 16 || rn == 17)) {
2920 /* Integer source. */
2921 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2922 } else {
2923 VFP_DREG_M(rm, insn);
2925 } else {
2926 rn = VFP_SREG_N(insn);
2927 if (op == 15 && rn == 15) {
2928 /* Double precision destination. */
2929 VFP_DREG_D(rd, insn);
2930 } else {
2931 rd = VFP_SREG_D(insn);
2933 rm = VFP_SREG_M(insn);
2936 veclen = env->vfp.vec_len;
2937 if (op == 15 && rn > 3)
2938 veclen = 0;
2940 /* Shut up compiler warnings. */
2941 delta_m = 0;
2942 delta_d = 0;
2943 bank_mask = 0;
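/* VFP short vectors: the register file is split into banks (8 singles or
   4 doubles each).  A destination in the first bank makes the operation
   scalar; otherwise it repeats veclen times, stepping rd (and rm, unless
   rm is in the first bank) by the configured stride, wrapping within the
   bank. */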
2945 if (veclen > 0) {
2946 if (dp)
2947 bank_mask = 0xc;
2948 else
2949 bank_mask = 0x18;
2951 /* Figure out what type of vector operation this is. */
2952 if ((rd & bank_mask) == 0) {
2953 /* scalar */
2954 veclen = 0;
2955 } else {
2956 if (dp)
2957 delta_d = (env->vfp.vec_stride >> 1) + 1;
2958 else
2959 delta_d = env->vfp.vec_stride + 1;
2961 if ((rm & bank_mask) == 0) {
2962 /* mixed scalar/vector */
2963 delta_m = 0;
2964 } else {
2965 /* vector */
2966 delta_m = delta_d;
2971 /* Load the initial operands. */
2972 if (op == 15) {
2973 switch (rn) {
2974 case 16:
2975 case 17:
2976 /* Integer source */
2977 gen_mov_F0_vreg(0, rm);
2978 break;
2979 case 8:
2980 case 9:
2981 /* Compare */
2982 gen_mov_F0_vreg(dp, rd);
2983 gen_mov_F1_vreg(dp, rm);
2984 break;
2985 case 10:
2986 case 11:
2987 /* Compare with zero */
2988 gen_mov_F0_vreg(dp, rd);
2989 gen_vfp_F1_ld0(dp);
2990 break;
2991 case 20:
2992 case 21:
2993 case 22:
2994 case 23:
2995 case 28:
2996 case 29:
2997 case 30:
2998 case 31:
2999 /* Source and destination the same. */
3000 gen_mov_F0_vreg(dp, rd);
3001 break;
3002 default:
3003 /* One source operand. */
3004 gen_mov_F0_vreg(dp, rm);
3005 break;
3007 } else {
3008 /* Two source operands. */
3009 gen_mov_F0_vreg(dp, rn);
3010 gen_mov_F1_vreg(dp, rm);
3013 for (;;) {
3014 /* Perform the calculation. */
3015 switch (op) {
3016 case 0: /* mac: fd + (fn * fm) */
3017 gen_vfp_mul(dp);
3018 gen_mov_F1_vreg(dp, rd);
3019 gen_vfp_add(dp);
3020 break;
3021 case 1: /* nmac: fd - (fn * fm) */
3022 gen_vfp_mul(dp);
3023 gen_vfp_neg(dp);
3024 gen_mov_F1_vreg(dp, rd);
3025 gen_vfp_add(dp);
3026 break;
3027 case 2: /* msc: -fd + (fn * fm) */
3028 gen_vfp_mul(dp);
3029 gen_mov_F1_vreg(dp, rd);
3030 gen_vfp_sub(dp);
3031 break;
3032 case 3: /* nmsc: -fd - (fn * fm) */
3033 gen_vfp_mul(dp);
3034 gen_vfp_neg(dp);
3035 gen_mov_F1_vreg(dp, rd);
3036 gen_vfp_sub(dp);
3037 break;
3038 case 4: /* mul: fn * fm */
3039 gen_vfp_mul(dp);
3040 break;
3041 case 5: /* nmul: -(fn * fm) */
3042 gen_vfp_mul(dp);
3043 gen_vfp_neg(dp);
3044 break;
3045 case 6: /* add: fn + fm */
3046 gen_vfp_add(dp);
3047 break;
3048 case 7: /* sub: fn - fm */
3049 gen_vfp_sub(dp);
3050 break;
3051 case 8: /* div: fn / fm */
3052 gen_vfp_div(dp);
3053 break;
3054 case 14: /* fconst */
3055 if (!arm_feature(env, ARM_FEATURE_VFP3))
3056 return 1;
3058 n = (insn << 12) & 0x80000000;
3059 i = ((insn >> 12) & 0x70) | (insn & 0xf);
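/* VFP3 VMOV immediate: expand the 8-bit modified immediate (1 sign bit,
   3 exponent bits, 4 fraction bits) into a full single or double constant.
   The top encoded exponent bit is inverted and then replicated to pad the
   exponent field out to full width. */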
3060 if (dp) {
3061 if (i & 0x40)
3062 i |= 0x3f80;
3063 else
3064 i |= 0x4000;
3065 n |= i << 16;
3066 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3067 } else {
3068 if (i & 0x40)
3069 i |= 0x780;
3070 else
3071 i |= 0x800;
3072 n |= i << 19;
3073 tcg_gen_movi_i32(cpu_F0s, n);
3075 break;
3076 case 15: /* extension space */
3077 switch (rn) {
3078 case 0: /* cpy */
3079 /* no-op */
3080 break;
3081 case 1: /* abs */
3082 gen_vfp_abs(dp);
3083 break;
3084 case 2: /* neg */
3085 gen_vfp_neg(dp);
3086 break;
3087 case 3: /* sqrt */
3088 gen_vfp_sqrt(dp);
3089 break;
3090 case 8: /* cmp */
3091 gen_vfp_cmp(dp);
3092 break;
3093 case 9: /* cmpe */
3094 gen_vfp_cmpe(dp);
3095 break;
3096 case 10: /* cmpz */
3097 gen_vfp_cmp(dp);
3098 break;
3099 case 11: /* cmpez */
3100 gen_vfp_F1_ld0(dp);
3101 gen_vfp_cmpe(dp);
3102 break;
3103 case 15: /* single<->double conversion */
3104 if (dp)
3105 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3106 else
3107 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3108 break;
3109 case 16: /* fuito */
3110 gen_vfp_uito(dp);
3111 break;
3112 case 17: /* fsito */
3113 gen_vfp_sito(dp);
3114 break;
3115 case 20: /* fshto */
3116 if (!arm_feature(env, ARM_FEATURE_VFP3))
3117 return 1;
3118 gen_vfp_shto(dp, 16 - rm);
3119 break;
3120 case 21: /* fslto */
3121 if (!arm_feature(env, ARM_FEATURE_VFP3))
3122 return 1;
3123 gen_vfp_slto(dp, 32 - rm);
3124 break;
3125 case 22: /* fuhto */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
3128 gen_vfp_uhto(dp, 16 - rm);
3129 break;
3130 case 23: /* fulto */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
3133 gen_vfp_ulto(dp, 32 - rm);
3134 break;
3135 case 24: /* ftoui */
3136 gen_vfp_toui(dp);
3137 break;
3138 case 25: /* ftouiz */
3139 gen_vfp_touiz(dp);
3140 break;
3141 case 26: /* ftosi */
3142 gen_vfp_tosi(dp);
3143 break;
3144 case 27: /* ftosiz */
3145 gen_vfp_tosiz(dp);
3146 break;
3147 case 28: /* ftosh */
3148 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 return 1;
3150 gen_vfp_tosh(dp, 16 - rm);
3151 break;
3152 case 29: /* ftosl */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
3155 gen_vfp_tosl(dp, 32 - rm);
3156 break;
3157 case 30: /* ftouh */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
3160 gen_vfp_touh(dp, 16 - rm);
3161 break;
3162 case 31: /* ftoul */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
3165 gen_vfp_toul(dp, 32 - rm);
3166 break;
3167 default: /* undefined */
3168 printf ("rn:%d\n", rn);
3169 return 1;
3171 break;
3172 default: /* undefined */
3173 printf ("op:%d\n", op);
3174 return 1;
3177 /* Write back the result. */
3178 if (op == 15 && (rn >= 8 && rn <= 11))
3179 ; /* Comparison, do nothing. */
3180 else if (op == 15 && rn > 17)
3181 /* Integer result. */
3182 gen_mov_vreg_F0(0, rd);
3183 else if (op == 15 && rn == 15)
3184 /* conversion */
3185 gen_mov_vreg_F0(!dp, rd);
3186 else
3187 gen_mov_vreg_F0(dp, rd);
3189 /* break out of the loop if we have finished */
3190 if (veclen == 0)
3191 break;
3193 if (op == 15 && delta_m == 0) {
3194 /* single source one-many */
3195 while (veclen--) {
3196 rd = ((rd + delta_d) & (bank_mask - 1))
3197 | (rd & bank_mask);
3198 gen_mov_vreg_F0(dp, rd);
3200 break;
3202 /* Set up the next operands. */
3203 veclen--;
3204 rd = ((rd + delta_d) & (bank_mask - 1))
3205 | (rd & bank_mask);
3207 if (op == 15) {
3208 /* One source operand. */
3209 rm = ((rm + delta_m) & (bank_mask - 1))
3210 | (rm & bank_mask);
3211 gen_mov_F0_vreg(dp, rm);
3212 } else {
3213 /* Two source operands. */
3214 rn = ((rn + delta_d) & (bank_mask - 1))
3215 | (rn & bank_mask);
3216 gen_mov_F0_vreg(dp, rn);
3217 if (delta_m) {
3218 rm = ((rm + delta_m) & (bank_mask - 1))
3219 | (rm & bank_mask);
3220 gen_mov_F1_vreg(dp, rm);
3225 break;
3226 case 0xc:
3227 case 0xd:
3228 if (dp && (insn & 0x03e00000) == 0x00400000) {
3229 /* two-register transfer */
3230 rn = (insn >> 16) & 0xf;
3231 rd = (insn >> 12) & 0xf;
3232 if (dp) {
3233 VFP_DREG_M(rm, insn);
3234 } else {
3235 rm = VFP_SREG_M(insn);
3238 if (insn & ARM_CP_RW_BIT) {
3239 /* vfp->arm */
3240 if (dp) {
3241 gen_mov_F0_vreg(0, rm * 2);
3242 tmp = gen_vfp_mrs();
3243 store_reg(s, rd, tmp);
3244 gen_mov_F0_vreg(0, rm * 2 + 1);
3245 tmp = gen_vfp_mrs();
3246 store_reg(s, rn, tmp);
3247 } else {
3248 gen_mov_F0_vreg(0, rm);
3249 tmp = gen_vfp_mrs();
3250 store_reg(s, rn, tmp);
3251 gen_mov_F0_vreg(0, rm + 1);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rd, tmp);
3255 } else {
3256 /* arm->vfp */
3257 if (dp) {
3258 tmp = load_reg(s, rd);
3259 gen_vfp_msr(tmp);
3260 gen_mov_vreg_F0(0, rm * 2);
3261 tmp = load_reg(s, rn);
3262 gen_vfp_msr(tmp);
3263 gen_mov_vreg_F0(0, rm * 2 + 1);
3264 } else {
3265 tmp = load_reg(s, rn);
3266 gen_vfp_msr(tmp);
3267 gen_mov_vreg_F0(0, rm);
3268 tmp = load_reg(s, rd);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm + 1);
3273 } else {
3274 /* Load/store */
3275 rn = (insn >> 16) & 0xf;
3276 if (dp)
3277 VFP_DREG_D(rd, insn);
3278 else
3279 rd = VFP_SREG_D(insn);
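/* PC-relative VFP loads/stores use the word-aligned PC value in Thumb
   mode. */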
3280 if (s->thumb && rn == 15) {
3281 gen_op_movl_T1_im(s->pc & ~2);
3282 } else {
3283 gen_movl_T1_reg(s, rn);
3285 if ((insn & 0x01200000) == 0x01000000) {
3286 /* Single load/store */
3287 offset = (insn & 0xff) << 2;
3288 if ((insn & (1 << 23)) == 0)
3289 offset = -offset;
3290 gen_op_addl_T1_im(offset);
3291 if (insn & (1 << 20)) {
3292 gen_vfp_ld(s, dp);
3293 gen_mov_vreg_F0(dp, rd);
3294 } else {
3295 gen_mov_F0_vreg(dp, rd);
3296 gen_vfp_st(s, dp);
3298 } else {
3299 /* load/store multiple */
3300 if (dp)
3301 n = (insn >> 1) & 0x7f;
3302 else
3303 n = insn & 0xff;
3305 if (insn & (1 << 24)) /* pre-decrement */
3306 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3308 if (dp)
3309 offset = 8;
3310 else
3311 offset = 4;
3312 for (i = 0; i < n; i++) {
3313 if (insn & ARM_CP_RW_BIT) {
3314 /* load */
3315 gen_vfp_ld(s, dp);
3316 gen_mov_vreg_F0(dp, rd + i);
3317 } else {
3318 /* store */
3319 gen_mov_F0_vreg(dp, rd + i);
3320 gen_vfp_st(s, dp);
3322 gen_op_addl_T1_im(offset);
3324 if (insn & (1 << 21)) {
3325 /* writeback */
3326 if (insn & (1 << 24))
3327 offset = -offset * n;
3328 else if (dp && (insn & 1))
3329 offset = 4;
3330 else
3331 offset = 0;
3333 if (offset != 0)
3334 gen_op_addl_T1_im(offset);
3335 gen_movl_reg_T1(s, rn);
3339 break;
3340 default:
3341 /* Should never happen. */
3342 return 1;
3344 return 0;
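/* Emit a jump to dest.  If the destination lies in the same guest page as
   the current TB we can chain directly to the next TB via goto_tb/exit_tb;
   otherwise just update the PC and return to the main loop, since the
   target page's translation may not be valid. */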
3347 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3349 TranslationBlock *tb;
3351 tb = s->tb;
3352 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3353 tcg_gen_goto_tb(n);
3354 gen_set_pc_im(dest);
3355 tcg_gen_exit_tb((long)tb + n);
3356 } else {
3357 gen_set_pc_im(dest);
3358 tcg_gen_exit_tb(0);
3362 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3364 if (unlikely(s->singlestep_enabled)) {
3365 /* An indirect jump so that we still trigger the debug exception. */
3366 if (s->thumb)
3367 dest |= 1;
3368 gen_bx_im(s, dest);
3369 } else {
3370 gen_goto_tb(s, 0, dest);
3371 s->is_jmp = DISAS_TB_JUMP;
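/* Helper for the signed 16x16->32 multiplies (SMULxy and friends): x and
   y select the top (1) or bottom (0) halfword of t0 and t1, which are
   sign extended before the multiply. */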
3375 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3377 if (x)
3378 tcg_gen_sari_i32(t0, t0, 16);
3379 else
3380 gen_sxth(t0);
3381 if (y)
3382 tcg_gen_sari_i32(t1, t1, 16);
3383 else
3384 gen_sxth(t1);
3385 tcg_gen_mul_i32(t0, t0, t1);
3388 /* Return the mask of PSR bits set by a MSR instruction. */
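/* 'flags' is the instruction's field mask: bit 0 = c (PSR[7:0]),
   bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]). */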
3389 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3390 uint32_t mask;
3392 mask = 0;
3393 if (flags & (1 << 0))
3394 mask |= 0xff;
3395 if (flags & (1 << 1))
3396 mask |= 0xff00;
3397 if (flags & (1 << 2))
3398 mask |= 0xff0000;
3399 if (flags & (1 << 3))
3400 mask |= 0xff000000;
3402 /* Mask out undefined bits. */
3403 mask &= ~CPSR_RESERVED;
3404 if (!arm_feature(env, ARM_FEATURE_V6))
3405 mask &= ~(CPSR_E | CPSR_GE);
3406 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3407 mask &= ~CPSR_IT;
3408 /* Mask out execution state bits. */
3409 if (!spsr)
3410 mask &= ~CPSR_EXEC;
3411 /* Mask out privileged bits. */
3412 if (IS_USER(s))
3413 mask &= CPSR_USER;
3414 return mask;
3417 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3418 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3420 TCGv tmp;
3421 if (spsr) {
3422 /* ??? This is also undefined in system mode. */
3423 if (IS_USER(s))
3424 return 1;
3426 tmp = load_cpu_field(spsr);
3427 tcg_gen_andi_i32(tmp, tmp, ~mask);
3428 tcg_gen_andi_i32(t0, t0, mask);
3429 tcg_gen_or_i32(tmp, tmp, t0);
3430 store_cpu_field(tmp, spsr);
3431 } else {
3432 gen_set_cpsr(t0, mask);
3434 dead_tmp(t0);
3435 gen_lookup_tb(s);
3436 return 0;
3439 /* Returns nonzero if access to the PSR is not permitted. */
3440 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3442 TCGv tmp;
3443 tmp = new_tmp();
3444 tcg_gen_movi_i32(tmp, val);
3445 return gen_set_psr(s, mask, spsr, tmp);
3448 /* Generate an old-style exception return. Marks pc as dead. */
3449 static void gen_exception_return(DisasContext *s, TCGv pc)
3451 TCGv tmp;
3452 store_reg(s, 15, pc);
3453 tmp = load_cpu_field(spsr);
3454 gen_set_cpsr(tmp, 0xffffffff);
3455 dead_tmp(tmp);
3456 s->is_jmp = DISAS_UPDATE;
3459 /* Generate a v6 exception return. Marks both values as dead. */
3460 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3462 gen_set_cpsr(cpsr, 0xffffffff);
3463 dead_tmp(cpsr);
3464 store_reg(s, 15, pc);
3465 s->is_jmp = DISAS_UPDATE;
3468 static inline void
3469 gen_set_condexec (DisasContext *s)
3471 if (s->condexec_mask) {
3472 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3473 TCGv tmp = new_tmp();
3474 tcg_gen_movi_i32(tmp, val);
3475 store_cpu_field(tmp, condexec_bits);
3479 static void gen_nop_hint(DisasContext *s, int val)
3481 switch (val) {
3482 case 3: /* wfi */
3483 gen_set_pc_im(s->pc);
3484 s->is_jmp = DISAS_WFI;
3485 break;
3486 case 2: /* wfe */
3487 case 4: /* sev */
3488 /* TODO: Implement SEV and WFE. May help SMP performance. */
3489 default: /* nop */
3490 break;
3494 /* These macros help make the code more readable when migrating from the
3495 old dyngen helpers. They should probably be removed when
3496 T0/T1 are removed. */
3497 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3498 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3500 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3502 static inline int gen_neon_add(int size)
3504 switch (size) {
3505 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3506 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3507 case 2: gen_op_addl_T0_T1(); break;
3508 default: return 1;
3510 return 0;
3513 static inline void gen_neon_rsb(int size)
3515 switch (size) {
3516 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3517 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3518 case 2: gen_op_rsbl_T0_T1(); break;
3519 default: return;
3523 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3524 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3525 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3526 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3527 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3529 /* FIXME: This is wrong. They set the wrong overflow bit. */
3530 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3531 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3532 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3533 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
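/* Dispatch to the per-element-size, signed or unsigned Neon helper
   operating on T0/T1: 'size' is 0/1/2 for 8/16/32-bit elements and 'u'
   selects the unsigned variant.  The _ENV form passes cpu_env for helpers
   that update the saturation flag. */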
3535 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3536 switch ((size << 1) | u) { \
3537 case 0: \
3538 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3539 break; \
3540 case 1: \
3541 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3542 break; \
3543 case 2: \
3544 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3545 break; \
3546 case 3: \
3547 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3548 break; \
3549 case 4: \
3550 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3551 break; \
3552 case 5: \
3553 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3554 break; \
3555 default: return 1; \
3556 }} while (0)
3558 #define GEN_NEON_INTEGER_OP(name) do { \
3559 switch ((size << 1) | u) { \
3560 case 0: \
3561 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3562 break; \
3563 case 1: \
3564 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3565 break; \
3566 case 2: \
3567 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3568 break; \
3569 case 3: \
3570 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3571 break; \
3572 case 4: \
3573 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3574 break; \
3575 case 5: \
3576 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3577 break; \
3578 default: return 1; \
3579 }} while (0)
3581 static inline void
3582 gen_neon_movl_scratch_T0(int scratch)
3584 uint32_t offset;
3586 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3587 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3590 static inline void
3591 gen_neon_movl_scratch_T1(int scratch)
3593 uint32_t offset;
3595 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3596 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3599 static inline void
3600 gen_neon_movl_T0_scratch(int scratch)
3602 uint32_t offset;
3604 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3605 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3608 static inline void
3609 gen_neon_movl_T1_scratch(int scratch)
3611 uint32_t offset;
3613 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3614 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3617 static inline void gen_neon_get_scalar(int size, int reg)
3619 if (size == 1) {
3620 NEON_GET_REG(T0, reg >> 1, reg & 1);
3621 } else {
3622 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3623 if (reg & 1)
3624 gen_neon_dup_low16(cpu_T[0]);
3625 else
3626 gen_neon_dup_high16(cpu_T[0]);
3630 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3632 int n;
3634 for (n = 0; n < q + 1; n += 2) {
3635 NEON_GET_REG(T0, reg, n);
3636 NEON_GET_REG(T1, reg, n + 1);
3637 switch (size) {
3638 case 0: gen_helper_neon_unzip_u8(); break;
3639 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3640 case 2: /* no-op */; break;
3641 default: abort();
3643 gen_neon_movl_scratch_T0(tmp + n);
3644 gen_neon_movl_scratch_T1(tmp + n + 1);
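/* Indexed by the 4-bit 'type' field of a VLDn/VSTn (multiple structures)
   instruction: number of registers transferred, their interleave pattern
   and the register spacing between structure elements. */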
3648 static struct {
3649 int nregs;
3650 int interleave;
3651 int spacing;
3652 } neon_ls_element_type[11] = {
3653 {4, 4, 1},
3654 {4, 4, 2},
3655 {4, 1, 1},
3656 {4, 2, 1},
3657 {3, 3, 1},
3658 {3, 3, 2},
3659 {3, 1, 1},
3660 {1, 1, 1},
3661 {2, 2, 1},
3662 {2, 2, 2},
3663 {2, 1, 1}
3666 /* Translate a NEON load/store element instruction. Return nonzero if the
3667 instruction is invalid. */
3668 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3670 int rd, rn, rm;
3671 int op;
3672 int nregs;
3673 int interleave;
3674 int stride;
3675 int size;
3676 int reg;
3677 int pass;
3678 int load;
3679 int shift;
3680 int n;
3681 TCGv tmp;
3682 TCGv tmp2;
3684 if (!vfp_enabled(env))
3685 return 1;
3686 VFP_DREG_D(rd, insn);
3687 rn = (insn >> 16) & 0xf;
3688 rm = insn & 0xf;
3689 load = (insn & (1 << 21)) != 0;
3690 if ((insn & (1 << 23)) == 0) {
3691 /* Load store all elements. */
3692 op = (insn >> 8) & 0xf;
3693 size = (insn >> 6) & 3;
3694 if (op > 10 || size == 3)
3695 return 1;
3696 nregs = neon_ls_element_type[op].nregs;
3697 interleave = neon_ls_element_type[op].interleave;
3698 gen_movl_T1_reg(s, rn);
3699 stride = (1 << size) * interleave;
3700 for (reg = 0; reg < nregs; reg++) {
3701 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3702 gen_movl_T1_reg(s, rn);
3703 gen_op_addl_T1_im((1 << size) * reg);
3704 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3705 gen_movl_T1_reg(s, rn);
3706 gen_op_addl_T1_im(1 << size);
3708 for (pass = 0; pass < 2; pass++) {
3709 if (size == 2) {
3710 if (load) {
3711 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3712 neon_store_reg(rd, pass, tmp);
3713 } else {
3714 tmp = neon_load_reg(rd, pass);
3715 gen_st32(tmp, cpu_T[1], IS_USER(s));
3717 gen_op_addl_T1_im(stride);
3718 } else if (size == 1) {
3719 if (load) {
3720 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3721 gen_op_addl_T1_im(stride);
3722 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3723 gen_op_addl_T1_im(stride);
3724 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3725 dead_tmp(tmp2);
3726 neon_store_reg(rd, pass, tmp);
3727 } else {
3728 tmp = neon_load_reg(rd, pass);
3729 tmp2 = new_tmp();
3730 tcg_gen_shri_i32(tmp2, tmp, 16);
3731 gen_st16(tmp, cpu_T[1], IS_USER(s));
3732 gen_op_addl_T1_im(stride);
3733 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3734 gen_op_addl_T1_im(stride);
3736 } else /* size == 0 */ {
3737 if (load) {
3738 TCGV_UNUSED(tmp2);
3739 for (n = 0; n < 4; n++) {
3740 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3741 gen_op_addl_T1_im(stride);
3742 if (n == 0) {
3743 tmp2 = tmp;
3744 } else {
3745 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3746 dead_tmp(tmp);
3749 neon_store_reg(rd, pass, tmp2);
3750 } else {
3751 tmp2 = neon_load_reg(rd, pass);
3752 for (n = 0; n < 4; n++) {
3753 tmp = new_tmp();
3754 if (n == 0) {
3755 tcg_gen_mov_i32(tmp, tmp2);
3756 } else {
3757 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3759 gen_st8(tmp, cpu_T[1], IS_USER(s));
3760 gen_op_addl_T1_im(stride);
3762 dead_tmp(tmp2);
3766 rd += neon_ls_element_type[op].spacing;
3768 stride = nregs * 8;
3769 } else {
3770 size = (insn >> 10) & 3;
3771 if (size == 3) {
3772 /* Load single element to all lanes. */
3773 if (!load)
3774 return 1;
3775 size = (insn >> 6) & 3;
3776 nregs = ((insn >> 8) & 3) + 1;
3777 stride = (insn & (1 << 5)) ? 2 : 1;
3778 gen_movl_T1_reg(s, rn);
3779 for (reg = 0; reg < nregs; reg++) {
3780 switch (size) {
3781 case 0:
3782 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3783 gen_neon_dup_u8(tmp, 0);
3784 break;
3785 case 1:
3786 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3787 gen_neon_dup_low16(tmp);
3788 break;
3789 case 2:
3790 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3791 break;
3792 case 3:
3793 return 1;
3794 default: /* Avoid compiler warnings. */
3795 abort();
3797 gen_op_addl_T1_im(1 << size);
3798 tmp2 = new_tmp();
3799 tcg_gen_mov_i32(tmp2, tmp);
3800 neon_store_reg(rd, 0, tmp2);
3801 neon_store_reg(rd, 1, tmp);
3802 rd += stride;
3804 stride = (1 << size) * nregs;
3805 } else {
3806 /* Single element. */
3807 pass = (insn >> 7) & 1;
3808 switch (size) {
3809 case 0:
3810 shift = ((insn >> 5) & 3) * 8;
3811 stride = 1;
3812 break;
3813 case 1:
3814 shift = ((insn >> 6) & 1) * 16;
3815 stride = (insn & (1 << 5)) ? 2 : 1;
3816 break;
3817 case 2:
3818 shift = 0;
3819 stride = (insn & (1 << 6)) ? 2 : 1;
3820 break;
3821 default:
3822 abort();
3824 nregs = ((insn >> 8) & 3) + 1;
3825 gen_movl_T1_reg(s, rn);
3826 for (reg = 0; reg < nregs; reg++) {
3827 if (load) {
3828 switch (size) {
3829 case 0:
3830 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3831 break;
3832 case 1:
3833 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3834 break;
3835 case 2:
3836 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3837 break;
3838 default: /* Avoid compiler warnings. */
3839 abort();
3841 if (size != 2) {
3842 tmp2 = neon_load_reg(rd, pass);
3843 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3844 dead_tmp(tmp2);
3846 neon_store_reg(rd, pass, tmp);
3847 } else { /* Store */
3848 tmp = neon_load_reg(rd, pass);
3849 if (shift)
3850 tcg_gen_shri_i32(tmp, tmp, shift);
3851 switch (size) {
3852 case 0:
3853 gen_st8(tmp, cpu_T[1], IS_USER(s));
3854 break;
3855 case 1:
3856 gen_st16(tmp, cpu_T[1], IS_USER(s));
3857 break;
3858 case 2:
3859 gen_st32(tmp, cpu_T[1], IS_USER(s));
3860 break;
3863 rd += stride;
3864 gen_op_addl_T1_im(1 << size);
3866 stride = nregs * (1 << size);
3869 if (rm != 15) {
3870 TCGv base;
3872 base = load_reg(s, rn);
3873 if (rm == 13) {
3874 tcg_gen_addi_i32(base, base, stride);
3875 } else {
3876 TCGv index;
3877 index = load_reg(s, rm);
3878 tcg_gen_add_i32(base, base, index);
3879 dead_tmp(index);
3881 store_reg(s, rn, base);
3883 return 0;
3886 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3887 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3889 tcg_gen_and_i32(t, t, c);
3890 tcg_gen_bic_i32(f, f, c);
3891 tcg_gen_or_i32(dest, t, f);
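/* Narrowing helpers: take a 64-bit vector and produce a 32-bit result of
   half-width elements, either plain, signed-saturating or
   unsigned-saturating. */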
3894 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3896 switch (size) {
3897 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3898 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3899 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3900 default: abort();
3904 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3906 switch (size) {
3907 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3908 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3909 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3910 default: abort();
3914 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3916 switch (size) {
3917 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3918 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3919 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3920 default: abort();
3924 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3925 int q, int u)
3927 if (q) {
3928 if (u) {
3929 switch (size) {
3930 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3931 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3932 default: abort();
3934 } else {
3935 switch (size) {
3936 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3937 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3938 default: abort();
3941 } else {
3942 if (u) {
3943 switch (size) {
3944 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3945 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3946 default: abort();
3948 } else {
3949 switch (size) {
3950 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3951 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3952 default: abort();
3958 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3960 if (u) {
3961 switch (size) {
3962 case 0: gen_helper_neon_widen_u8(dest, src); break;
3963 case 1: gen_helper_neon_widen_u16(dest, src); break;
3964 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3965 default: abort();
3967 } else {
3968 switch (size) {
3969 case 0: gen_helper_neon_widen_s8(dest, src); break;
3970 case 1: gen_helper_neon_widen_s16(dest, src); break;
3971 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3972 default: abort();
3975 dead_tmp(src);
3978 static inline void gen_neon_addl(int size)
3980 switch (size) {
3981 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3982 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3983 case 2: tcg_gen_add_i64(CPU_V001); break;
3984 default: abort();
3988 static inline void gen_neon_subl(int size)
3990 switch (size) {
3991 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3992 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3993 case 2: tcg_gen_sub_i64(CPU_V001); break;
3994 default: abort();
3998 static inline void gen_neon_negl(TCGv_i64 var, int size)
4000 switch (size) {
4001 case 0: gen_helper_neon_negl_u16(var, var); break;
4002 case 1: gen_helper_neon_negl_u32(var, var); break;
4003 case 2: gen_helper_neon_negl_u64(var, var); break;
4004 default: abort();
4008 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4010 switch (size) {
4011 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4012 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4013 default: abort();
4017 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4019 TCGv_i64 tmp;
4021 switch ((size << 1) | u) {
4022 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4023 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4024 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4025 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4026 case 4:
4027 tmp = gen_muls_i64_i32(a, b);
4028 tcg_gen_mov_i64(dest, tmp);
4029 break;
4030 case 5:
4031 tmp = gen_mulu_i64_i32(a, b);
4032 tcg_gen_mov_i64(dest, tmp);
4033 break;
4034 default: abort();
4036 if (size < 2) {
4037 dead_tmp(b);
4038 dead_tmp(a);
4042 /* Translate a NEON data processing instruction. Return nonzero if the
4043 instruction is invalid.
4044 We process data in a mixture of 32-bit and 64-bit chunks.
4045 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4047 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4049 int op;
4050 int q;
4051 int rd, rn, rm;
4052 int size;
4053 int shift;
4054 int pass;
4055 int count;
4056 int pairwise;
4057 int u;
4058 int n;
4059 uint32_t imm;
4060 TCGv tmp;
4061 TCGv tmp2;
4062 TCGv tmp3;
4063 TCGv_i64 tmp64;
4065 if (!vfp_enabled(env))
4066 return 1;
4067 q = (insn & (1 << 6)) != 0;
4068 u = (insn >> 24) & 1;
4069 VFP_DREG_D(rd, insn);
4070 VFP_DREG_N(rn, insn);
4071 VFP_DREG_M(rm, insn);
4072 size = (insn >> 20) & 3;
4073 if ((insn & (1 << 23)) == 0) {
4074 /* Three register same length. */
4075 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4076 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4077 || op == 10 || op == 11 || op == 16)) {
4078 /* 64-bit element instructions. */
4079 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4080 neon_load_reg64(cpu_V0, rn + pass);
4081 neon_load_reg64(cpu_V1, rm + pass);
4082 switch (op) {
4083 case 1: /* VQADD */
4084 if (u) {
4085 gen_helper_neon_add_saturate_u64(CPU_V001);
4086 } else {
4087 gen_helper_neon_add_saturate_s64(CPU_V001);
4089 break;
4090 case 5: /* VQSUB */
4091 if (u) {
4092 gen_helper_neon_sub_saturate_u64(CPU_V001);
4093 } else {
4094 gen_helper_neon_sub_saturate_s64(CPU_V001);
4096 break;
4097 case 8: /* VSHL */
4098 if (u) {
4099 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4100 } else {
4101 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4103 break;
4104 case 9: /* VQSHL */
4105 if (u) {
4106 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4107 cpu_V1, cpu_V0);
4108 } else {
4109 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4110 cpu_V1, cpu_V0);
4112 break;
4113 case 10: /* VRSHL */
4114 if (u) {
4115 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4116 } else {
4117 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4119 break;
4120 case 11: /* VQRSHL */
4121 if (u) {
4122 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4123 cpu_V1, cpu_V0);
4124 } else {
4125 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4126 cpu_V1, cpu_V0);
4128 break;
4129 case 16:
4130 if (u) {
4131 tcg_gen_sub_i64(CPU_V001);
4132 } else {
4133 tcg_gen_add_i64(CPU_V001);
4135 break;
4136 default:
4137 abort();
4139 neon_store_reg64(cpu_V0, rd + pass);
4141 return 0;
4143 switch (op) {
4144 case 8: /* VSHL */
4145 case 9: /* VQSHL */
4146 case 10: /* VRSHL */
4147 case 11: /* VQRSHL */
4149 int rtmp;
4150 /* Shift instruction operands are reversed. */
4151 rtmp = rn;
4152 rn = rm;
4153 rm = rtmp;
4154 pairwise = 0;
4156 break;
4157 case 20: /* VPMAX */
4158 case 21: /* VPMIN */
4159 case 23: /* VPADD */
4160 pairwise = 1;
4161 break;
4162 case 26: /* VPADD (float) */
4163 pairwise = (u && size < 2);
4164 break;
4165 case 30: /* VPMIN/VPMAX (float) */
4166 pairwise = u;
4167 break;
4168 default:
4169 pairwise = 0;
4170 break;
4172 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4174 if (pairwise) {
4175 /* Pairwise. */
4176 if (q)
4177 n = (pass & 1) * 2;
4178 else
4179 n = 0;
4180 if (pass < q + 1) {
4181 NEON_GET_REG(T0, rn, n);
4182 NEON_GET_REG(T1, rn, n + 1);
4183 } else {
4184 NEON_GET_REG(T0, rm, n);
4185 NEON_GET_REG(T1, rm, n + 1);
4187 } else {
4188 /* Elementwise. */
4189 NEON_GET_REG(T0, rn, pass);
4190 NEON_GET_REG(T1, rm, pass);
4192 switch (op) {
4193 case 0: /* VHADD */
4194 GEN_NEON_INTEGER_OP(hadd);
4195 break;
4196 case 1: /* VQADD */
4197 GEN_NEON_INTEGER_OP_ENV(qadd);
4198 break;
4199 case 2: /* VRHADD */
4200 GEN_NEON_INTEGER_OP(rhadd);
4201 break;
4202 case 3: /* Logic ops. */
4203 switch ((u << 2) | size) {
4204 case 0: /* VAND */
4205 gen_op_andl_T0_T1();
4206 break;
4207 case 1: /* BIC */
4208 gen_op_bicl_T0_T1();
4209 break;
4210 case 2: /* VORR */
4211 gen_op_orl_T0_T1();
4212 break;
4213 case 3: /* VORN */
4214 gen_op_notl_T1();
4215 gen_op_orl_T0_T1();
4216 break;
4217 case 4: /* VEOR */
4218 gen_op_xorl_T0_T1();
4219 break;
4220 case 5: /* VBSL */
4221 tmp = neon_load_reg(rd, pass);
4222 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4223 dead_tmp(tmp);
4224 break;
4225 case 6: /* VBIT */
4226 tmp = neon_load_reg(rd, pass);
4227 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4228 dead_tmp(tmp);
4229 break;
4230 case 7: /* VBIF */
4231 tmp = neon_load_reg(rd, pass);
4232 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4233 dead_tmp(tmp);
4234 break;
4236 break;
4237 case 4: /* VHSUB */
4238 GEN_NEON_INTEGER_OP(hsub);
4239 break;
4240 case 5: /* VQSUB */
4241 GEN_NEON_INTEGER_OP_ENV(qsub);
4242 break;
4243 case 6: /* VCGT */
4244 GEN_NEON_INTEGER_OP(cgt);
4245 break;
4246 case 7: /* VCGE */
4247 GEN_NEON_INTEGER_OP(cge);
4248 break;
4249 case 8: /* VSHL */
4250 GEN_NEON_INTEGER_OP(shl);
4251 break;
4252 case 9: /* VQSHL */
4253 GEN_NEON_INTEGER_OP_ENV(qshl);
4254 break;
4255 case 10: /* VRSHL */
4256 GEN_NEON_INTEGER_OP(rshl);
4257 break;
4258 case 11: /* VQRSHL */
4259 GEN_NEON_INTEGER_OP_ENV(qrshl);
4260 break;
4261 case 12: /* VMAX */
4262 GEN_NEON_INTEGER_OP(max);
4263 break;
4264 case 13: /* VMIN */
4265 GEN_NEON_INTEGER_OP(min);
4266 break;
4267 case 14: /* VABD */
4268 GEN_NEON_INTEGER_OP(abd);
4269 break;
4270 case 15: /* VABA */
4271 GEN_NEON_INTEGER_OP(abd);
4272 NEON_GET_REG(T1, rd, pass);
4273 gen_neon_add(size);
4274 break;
4275 case 16:
4276 if (!u) { /* VADD */
4277 if (gen_neon_add(size))
4278 return 1;
4279 } else { /* VSUB */
4280 switch (size) {
4281 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4282 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4283 case 2: gen_op_subl_T0_T1(); break;
4284 default: return 1;
4287 break;
4288 case 17:
4289 if (!u) { /* VTST */
4290 switch (size) {
4291 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4292 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4293 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4294 default: return 1;
4296 } else { /* VCEQ */
4297 switch (size) {
4298 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4299 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4300 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4301 default: return 1;
4304 break;
4305 case 18: /* Multiply. */
4306 switch (size) {
4307 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4308 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4309 case 2: gen_op_mul_T0_T1(); break;
4310 default: return 1;
4312 NEON_GET_REG(T1, rd, pass);
4313 if (u) { /* VMLS */
4314 gen_neon_rsb(size);
4315 } else { /* VMLA */
4316 gen_neon_add(size);
4318 break;
4319 case 19: /* VMUL */
4320 if (u) { /* polynomial */
4321 gen_helper_neon_mul_p8(CPU_T001);
4322 } else { /* Integer */
4323 switch (size) {
4324 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4325 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4326 case 2: gen_op_mul_T0_T1(); break;
4327 default: return 1;
4330 break;
4331 case 20: /* VPMAX */
4332 GEN_NEON_INTEGER_OP(pmax);
4333 break;
4334 case 21: /* VPMIN */
4335 GEN_NEON_INTEGER_OP(pmin);
4336 break;
4337 case 22: /* Multiply high. */
4338 if (!u) { /* VQDMULH */
4339 switch (size) {
4340 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4341 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4342 default: return 1;
4344 } else { /* VQRDMULH */
4345 switch (size) {
4346 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4347 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4348 default: return 1;
4351 break;
4352 case 23: /* VPADD */
4353 if (u)
4354 return 1;
4355 switch (size) {
4356 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4357 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4358 case 2: gen_op_addl_T0_T1(); break;
4359 default: return 1;
4361 break;
4362 case 26: /* Floating point arithmetic. */
4363 switch ((u << 2) | size) {
4364 case 0: /* VADD */
4365 gen_helper_neon_add_f32(CPU_T001);
4366 break;
4367 case 2: /* VSUB */
4368 gen_helper_neon_sub_f32(CPU_T001);
4369 break;
4370 case 4: /* VPADD */
4371 gen_helper_neon_add_f32(CPU_T001);
4372 break;
4373 case 6: /* VABD */
4374 gen_helper_neon_abd_f32(CPU_T001);
4375 break;
4376 default:
4377 return 1;
4379 break;
4380 case 27: /* Float multiply. */
4381 gen_helper_neon_mul_f32(CPU_T001);
4382 if (!u) {
4383 NEON_GET_REG(T1, rd, pass);
4384 if (size == 0) {
4385 gen_helper_neon_add_f32(CPU_T001);
4386 } else {
4387 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4390 break;
4391 case 28: /* Float compare. */
4392 if (!u) {
4393 gen_helper_neon_ceq_f32(CPU_T001);
4394 } else {
4395 if (size == 0)
4396 gen_helper_neon_cge_f32(CPU_T001);
4397 else
4398 gen_helper_neon_cgt_f32(CPU_T001);
4400 break;
4401 case 29: /* Float compare absolute. */
4402 if (!u)
4403 return 1;
4404 if (size == 0)
4405 gen_helper_neon_acge_f32(CPU_T001);
4406 else
4407 gen_helper_neon_acgt_f32(CPU_T001);
4408 break;
4409 case 30: /* Float min/max. */
4410 if (size == 0)
4411 gen_helper_neon_max_f32(CPU_T001);
4412 else
4413 gen_helper_neon_min_f32(CPU_T001);
4414 break;
4415 case 31:
4416 if (size == 0)
4417 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4418 else
4419 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4420 break;
4421 default:
4422 abort();
4424 /* Save the result. For elementwise operations we can put it
4425 straight into the destination register. For pairwise operations
4426 we have to be careful to avoid clobbering the source operands. */
4427 if (pairwise && rd == rm) {
4428 gen_neon_movl_scratch_T0(pass);
4429 } else {
4430 NEON_SET_REG(T0, rd, pass);
4433 } /* for pass */
4434 if (pairwise && rd == rm) {
4435 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4436 gen_neon_movl_T0_scratch(pass);
4437 NEON_SET_REG(T0, rd, pass);
4440 /* End of 3 register same size operations. */
4441 } else if (insn & (1 << 4)) {
4442 if ((insn & 0x00380080) != 0) {
4443 /* Two registers and shift. */
4444 op = (insn >> 8) & 0xf;
4445 if (insn & (1 << 7)) {
4446 /* 64-bit shift. */
4447 size = 3;
4448 } else {
4449 size = 2;
4450 while ((insn & (1 << (size + 19))) == 0)
4451 size--;
4453 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4454 /* To avoid excessive duplication of ops we implement shift
4455 by immediate using the variable shift operations. */
4456 if (op < 8) {
4457 /* Shift by immediate:
4458 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4459 /* Right shifts are encoded as N - shift, where N is the
4460 element size in bits. */
4461 if (op <= 4)
4462 shift = shift - (1 << (size + 3));
4463 if (size == 3) {
4464 count = q + 1;
4465 } else {
4466 count = q ? 4: 2;
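/* Replicate the shift amount into every element lane so the variable-shift
   helpers below receive a per-element count (negative counts encode right
   shifts). */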
4468 switch (size) {
4469 case 0:
4470 imm = (uint8_t) shift;
4471 imm |= imm << 8;
4472 imm |= imm << 16;
4473 break;
4474 case 1:
4475 imm = (uint16_t) shift;
4476 imm |= imm << 16;
4477 break;
4478 case 2:
4479 case 3:
4480 imm = shift;
4481 break;
4482 default:
4483 abort();
4486 for (pass = 0; pass < count; pass++) {
4487 if (size == 3) {
4488 neon_load_reg64(cpu_V0, rm + pass);
4489 tcg_gen_movi_i64(cpu_V1, imm);
4490 switch (op) {
4491 case 0: /* VSHR */
4492 case 1: /* VSRA */
4493 if (u)
4494 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4495 else
4496 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4497 break;
4498 case 2: /* VRSHR */
4499 case 3: /* VRSRA */
4500 if (u)
4501 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4502 else
4503 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4504 break;
4505 case 4: /* VSRI */
4506 if (!u)
4507 return 1;
4508 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4509 break;
4510 case 5: /* VSHL, VSLI */
4511 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4512 break;
4513 case 6: /* VQSHL */
4514 if (u)
4515 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4516 else
4517 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4518 break;
4519 case 7: /* VQSHLU */
4520 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4521 break;
4523 if (op == 1 || op == 3) {
4524 /* Accumulate. */
4525 neon_load_reg64(cpu_V0, rd + pass);
4526 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4527 } else if (op == 4 || (op == 5 && u)) {
4528 /* Insert */
4529 cpu_abort(env, "VS[LR]I.64 not implemented");
4531 neon_store_reg64(cpu_V0, rd + pass);
4532 } else { /* size < 3 */
4533 /* Operands in T0 and T1. */
4534 gen_op_movl_T1_im(imm);
4535 NEON_GET_REG(T0, rm, pass);
4536 switch (op) {
4537 case 0: /* VSHR */
4538 case 1: /* VSRA */
4539 GEN_NEON_INTEGER_OP(shl);
4540 break;
4541 case 2: /* VRSHR */
4542 case 3: /* VRSRA */
4543 GEN_NEON_INTEGER_OP(rshl);
4544 break;
4545 case 4: /* VSRI */
4546 if (!u)
4547 return 1;
4548 GEN_NEON_INTEGER_OP(shl);
4549 break;
4550 case 5: /* VSHL, VSLI */
4551 switch (size) {
4552 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4553 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4554 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4555 default: return 1;
4557 break;
4558 case 6: /* VQSHL */
4559 GEN_NEON_INTEGER_OP_ENV(qshl);
4560 break;
4561 case 7: /* VQSHLU */
4562 switch (size) {
4563 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4564 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4565 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4566 default: return 1;
4568 break;
4571 if (op == 1 || op == 3) {
4572 /* Accumulate. */
4573 NEON_GET_REG(T1, rd, pass);
4574 gen_neon_add(size);
4575 } else if (op == 4 || (op == 5 && u)) {
4576 /* Insert */
4577 switch (size) {
4578 case 0:
4579 if (op == 4)
4580 imm = 0xff >> -shift;
4581 else
4582 imm = (uint8_t)(0xff << shift);
4583 imm |= imm << 8;
4584 imm |= imm << 16;
4585 break;
4586 case 1:
4587 if (op == 4)
4588 imm = 0xffff >> -shift;
4589 else
4590 imm = (uint16_t)(0xffff << shift);
4591 imm |= imm << 16;
4592 break;
4593 case 2:
4594 if (op == 4)
4595 imm = 0xffffffffu >> -shift;
4596 else
4597 imm = 0xffffffffu << shift;
4598 break;
4599 default:
4600 abort();
4602 tmp = neon_load_reg(rd, pass);
4603 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4604 tcg_gen_andi_i32(tmp, tmp, ~imm);
4605 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4607 NEON_SET_REG(T0, rd, pass);
4609 } /* for pass */
4610 } else if (op < 10) {
4611 /* Shift by immediate and narrow:
4612 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4613 shift = shift - (1 << (size + 3));
4614 size++;
4615 switch (size) {
4616 case 1:
4617 imm = (uint16_t)shift;
4618 imm |= imm << 16;
4619 tmp2 = tcg_const_i32(imm);
4620 TCGV_UNUSED_I64(tmp64);
4621 break;
4622 case 2:
4623 imm = (uint32_t)shift;
4624 tmp2 = tcg_const_i32(imm);
4625 TCGV_UNUSED_I64(tmp64);
4626 break;
4627 case 3:
4628 tmp64 = tcg_const_i64(shift);
4629 TCGV_UNUSED(tmp2);
4630 break;
4631 default:
4632 abort();
4635 for (pass = 0; pass < 2; pass++) {
4636 if (size == 3) {
4637 neon_load_reg64(cpu_V0, rm + pass);
4638 if (q) {
4639 if (u)
4640 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4641 else
4642 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4643 } else {
4644 if (u)
4645 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4646 else
4647 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4649 } else {
4650 tmp = neon_load_reg(rm + pass, 0);
4651 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4652 tmp3 = neon_load_reg(rm + pass, 1);
4653 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4654 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4655 dead_tmp(tmp);
4656 dead_tmp(tmp3);
4658 tmp = new_tmp();
4659 if (op == 8 && !u) {
4660 gen_neon_narrow(size - 1, tmp, cpu_V0);
4661 } else {
4662 if (op == 8)
4663 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4664 else
4665 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4667 if (pass == 0) {
4668 tmp2 = tmp;
4669 } else {
4670 neon_store_reg(rd, 0, tmp2);
4671 neon_store_reg(rd, 1, tmp);
4673 } /* for pass */
4674 } else if (op == 10) {
4675 /* VSHLL */
4676 if (q || size == 3)
4677 return 1;
4678 tmp = neon_load_reg(rm, 0);
4679 tmp2 = neon_load_reg(rm, 1);
4680 for (pass = 0; pass < 2; pass++) {
4681 if (pass == 1)
4682 tmp = tmp2;
4684 gen_neon_widen(cpu_V0, tmp, size, u);
4686 if (shift != 0) {
4687 /* The shift is less than the width of the source
4688 type, so we can just shift the whole register. */
4689 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4690 if (size < 2 || !u) {
4691 uint64_t imm64;
4692 if (size == 0) {
4693 imm = (0xffu >> (8 - shift));
4694 imm |= imm << 16;
4695 } else {
4696 imm = 0xffff >> (16 - shift);
4698 imm64 = imm | (((uint64_t)imm) << 32);
4699 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
4702 neon_store_reg64(cpu_V0, rd + pass);
4704 } else if (op == 15 || op == 16) {
4705 /* VCVT fixed-point. */
4706 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4707 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4708 if (op & 1) {
4709 if (u)
4710 gen_vfp_ulto(0, shift);
4711 else
4712 gen_vfp_slto(0, shift);
4713 } else {
4714 if (u)
4715 gen_vfp_toul(0, shift);
4716 else
4717 gen_vfp_tosl(0, shift);
4719 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4721 } else {
4722 return 1;
4724 } else { /* (insn & 0x00380080) == 0 */
4725 int invert;
4727 op = (insn >> 8) & 0xf;
4728 /* One register and immediate. */
4729 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4730 invert = (insn & (1 << 5)) != 0;
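/* Expand the 8-bit immediate according to the op (cmode) field below:
   ops 0-7 place the byte in one of the four byte positions, 8-11 replicate
   it into both halfwords, 12/13 append trailing ones, 14 forms a per-byte
   mask, and 15 is the floating-point immediate expansion. */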
4731 switch (op) {
4732 case 0: case 1:
4733 /* no-op */
4734 break;
4735 case 2: case 3:
4736 imm <<= 8;
4737 break;
4738 case 4: case 5:
4739 imm <<= 16;
4740 break;
4741 case 6: case 7:
4742 imm <<= 24;
4743 break;
4744 case 8: case 9:
4745 imm |= imm << 16;
4746 break;
4747 case 10: case 11:
4748 imm = (imm << 8) | (imm << 24);
4749 break;
4750 case 12:
4751 imm = (imm << 8) | 0xff;
4752 break;
4753 case 13:
4754 imm = (imm << 16) | 0xffff;
4755 break;
4756 case 14:
4757 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4758 if (invert)
4759 imm = ~imm;
4760 break;
4761 case 15:
4762 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4763 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4764 break;
4766 if (invert)
4767 imm = ~imm;
4769 if (op != 14 || !invert)
4770 gen_op_movl_T1_im(imm);
4772 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4773 if (op & 1 && op < 12) {
4774 tmp = neon_load_reg(rd, pass);
4775 if (invert) {
4776 /* The immediate value has already been inverted, so
4777 BIC becomes AND. */
4778 tcg_gen_andi_i32(tmp, tmp, imm);
4779 } else {
4780 tcg_gen_ori_i32(tmp, tmp, imm);
4782 } else {
4783 /* VMOV, VMVN. */
4784 tmp = new_tmp();
4785 if (op == 14 && invert) {
4786 uint32_t val;
4787 val = 0;
4788 for (n = 0; n < 4; n++) {
4789 if (imm & (1 << (n + (pass & 1) * 4)))
4790 val |= 0xff << (n * 8);
4792 tcg_gen_movi_i32(tmp, val);
4793 } else {
4794 tcg_gen_movi_i32(tmp, imm);
4797 neon_store_reg(rd, pass, tmp);
4800 } else { /* (insn & 0x00800010 == 0x00800000) */
4801 if (size != 3) {
4802 op = (insn >> 8) & 0xf;
4803 if ((insn & (1 << 6)) == 0) {
4804 /* Three registers of different lengths. */
4805 int src1_wide;
4806 int src2_wide;
4807 int prewiden;
4808 /* prewiden, src1_wide, src2_wide */
4809 static const int neon_3reg_wide[16][3] = {
4810 {1, 0, 0}, /* VADDL */
4811 {1, 1, 0}, /* VADDW */
4812 {1, 0, 0}, /* VSUBL */
4813 {1, 1, 0}, /* VSUBW */
4814 {0, 1, 1}, /* VADDHN */
4815 {0, 0, 0}, /* VABAL */
4816 {0, 1, 1}, /* VSUBHN */
4817 {0, 0, 0}, /* VABDL */
4818 {0, 0, 0}, /* VMLAL */
4819 {0, 0, 0}, /* VQDMLAL */
4820 {0, 0, 0}, /* VMLSL */
4821 {0, 0, 0}, /* VQDMLSL */
4822 {0, 0, 0}, /* Integer VMULL */
4823 {0, 0, 0}, /* VQDMULL */
4824 {0, 0, 0} /* Polynomial VMULL */
4827 prewiden = neon_3reg_wide[op][0];
4828 src1_wide = neon_3reg_wide[op][1];
4829 src2_wide = neon_3reg_wide[op][2];
4831 if (size == 0 && (op == 9 || op == 11 || op == 13))
4832 return 1;
4834 /* Avoid overlapping operands. Wide source operands are
4835 always aligned so will never overlap with wide
4836 destinations in problematic ways. */
4837 if (rd == rm && !src2_wide) {
4838 NEON_GET_REG(T0, rm, 1);
4839 gen_neon_movl_scratch_T0(2);
4840 } else if (rd == rn && !src1_wide) {
4841 NEON_GET_REG(T0, rn, 1);
4842 gen_neon_movl_scratch_T0(2);
4844 TCGV_UNUSED(tmp3);
4845 for (pass = 0; pass < 2; pass++) {
4846 if (src1_wide) {
4847 neon_load_reg64(cpu_V0, rn + pass);
4848 TCGV_UNUSED(tmp);
4849 } else {
4850 if (pass == 1 && rd == rn) {
4851 gen_neon_movl_T0_scratch(2);
4852 tmp = new_tmp();
4853 tcg_gen_mov_i32(tmp, cpu_T[0]);
4854 } else {
4855 tmp = neon_load_reg(rn, pass);
4857 if (prewiden) {
4858 gen_neon_widen(cpu_V0, tmp, size, u);
4861 if (src2_wide) {
4862 neon_load_reg64(cpu_V1, rm + pass);
4863 TCGV_UNUSED(tmp2);
4864 } else {
4865 if (pass == 1 && rd == rm) {
4866 gen_neon_movl_T0_scratch(2);
4867 tmp2 = new_tmp();
4868 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4869 } else {
4870 tmp2 = neon_load_reg(rm, pass);
4872 if (prewiden) {
4873 gen_neon_widen(cpu_V1, tmp2, size, u);
4876 switch (op) {
4877 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4878 gen_neon_addl(size);
4879 break;
4880 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4881 gen_neon_subl(size);
4882 break;
4883 case 5: case 7: /* VABAL, VABDL */
4884 switch ((size << 1) | u) {
4885 case 0:
4886 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4887 break;
4888 case 1:
4889 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4890 break;
4891 case 2:
4892 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4893 break;
4894 case 3:
4895 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4896 break;
4897 case 4:
4898 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4899 break;
4900 case 5:
4901 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4902 break;
4903 default: abort();
4905 dead_tmp(tmp2);
4906 dead_tmp(tmp);
4907 break;
4908 case 8: case 9: case 10: case 11: case 12: case 13:
4909 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4910 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4911 break;
4912 case 14: /* Polynomial VMULL */
4913 cpu_abort(env, "Polynomial VMULL not implemented");
4915 default: /* 15 is RESERVED. */
4916 return 1;
4918 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4919 /* Accumulate. */
4920 if (op == 10 || op == 11) {
4921 gen_neon_negl(cpu_V0, size);
4924 if (op != 13) {
4925 neon_load_reg64(cpu_V1, rd + pass);
4928 switch (op) {
4929 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4930 gen_neon_addl(size);
4931 break;
4932 case 9: case 11: /* VQDMLAL, VQDMLSL */
4933 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4934 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4935 break;
4936 /* Fall through. */
4937 case 13: /* VQDMULL */
4938 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4939 break;
4940 default:
4941 abort();
4943 neon_store_reg64(cpu_V0, rd + pass);
4944 } else if (op == 4 || op == 6) {
4945 /* Narrowing operation. */
4946 tmp = new_tmp();
4947 if (u) {
4948 switch (size) {
4949 case 0:
4950 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4951 break;
4952 case 1:
4953 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4954 break;
4955 case 2:
4956 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4957 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4958 break;
4959 default: abort();
4961 } else {
4962 switch (size) {
4963 case 0:
4964 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4965 break;
4966 case 1:
4967 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4968 break;
4969 case 2:
4970 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4971 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4972 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4973 break;
4974 default: abort();
4977 if (pass == 0) {
4978 tmp3 = tmp;
4979 } else {
4980 neon_store_reg(rd, 0, tmp3);
4981 neon_store_reg(rd, 1, tmp);
4983 } else {
4984 /* Write back the result. */
4985 neon_store_reg64(cpu_V0, rd + pass);
4988 } else {
4989 /* Two registers and a scalar. */
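/* The scalar operand is fetched once (gen_neon_get_scalar) and reused for
   every pass over the vector operand; results accumulate into rd where the
   op requires it. */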
4990 switch (op) {
4991 case 0: /* Integer VMLA scalar */
4992 case 1: /* Float VMLA scalar */
4993 case 4: /* Integer VMLS scalar */
4994 case 5: /* Floating point VMLS scalar */
4995 case 8: /* Integer VMUL scalar */
4996 case 9: /* Floating point VMUL scalar */
4997 case 12: /* VQDMULH scalar */
4998 case 13: /* VQRDMULH scalar */
4999 gen_neon_get_scalar(size, rm);
5000 gen_neon_movl_scratch_T0(0);
5001 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5002 if (pass != 0)
5003 gen_neon_movl_T0_scratch(0);
5004 NEON_GET_REG(T1, rn, pass);
5005 if (op == 12) {
5006 if (size == 1) {
5007 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5008 } else {
5009 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5011 } else if (op == 13) {
5012 if (size == 1) {
5013 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5014 } else {
5015 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5017 } else if (op & 1) {
5018 gen_helper_neon_mul_f32(CPU_T001);
5019 } else {
5020 switch (size) {
5021 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5022 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5023 case 2: gen_op_mul_T0_T1(); break;
5024 default: return 1;
5027 if (op < 8) {
5028 /* Accumulate. */
5029 NEON_GET_REG(T1, rd, pass);
5030 switch (op) {
5031 case 0:
5032 gen_neon_add(size);
5033 break;
5034 case 1:
5035 gen_helper_neon_add_f32(CPU_T001);
5036 break;
5037 case 4:
5038 gen_neon_rsb(size);
5039 break;
5040 case 5:
5041 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5042 break;
5043 default:
5044 abort();
5047 NEON_SET_REG(T0, rd, pass);
5049 break;
5050 case 2: /* VMLAL scalar */
5051 case 3: /* VQDMLAL scalar */
5052 case 6: /* VMLSL scalar */
5053 case 7: /* VQDMLSL scalar */
5054 case 10: /* VMULL scalar */
5055 case 11: /* VQDMULL scalar */
5056 if (size == 0 && (op == 3 || op == 7 || op == 11))
5057 return 1;
5059 gen_neon_get_scalar(size, rm);
5060 NEON_GET_REG(T1, rn, 1);
5062 for (pass = 0; pass < 2; pass++) {
5063 if (pass == 0) {
5064 tmp = neon_load_reg(rn, 0);
5065 } else {
5066 tmp = new_tmp();
5067 tcg_gen_mov_i32(tmp, cpu_T[1]);
5069 tmp2 = new_tmp();
5070 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5071 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5072 if (op == 6 || op == 7) {
5073 gen_neon_negl(cpu_V0, size);
5075 if (op != 11) {
5076 neon_load_reg64(cpu_V1, rd + pass);
5078 switch (op) {
5079 case 2: case 6:
5080 gen_neon_addl(size);
5081 break;
5082 case 3: case 7:
5083 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5084 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5085 break;
5086 case 10:
5087 /* no-op */
5088 break;
5089 case 11:
5090 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5091 break;
5092 default:
5093 abort();
5095 neon_store_reg64(cpu_V0, rd + pass);
5097 break;
5098 default: /* 14 and 15 are RESERVED */
5099 return 1;
5102 } else { /* size == 3 */
5103 if (!u) {
5104 /* Extract. */
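/* VEXT: treat Vn:Vm as one long vector and extract a window starting at
   byte offset imm; the 64-bit shifts below stitch the window together. */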
5105 imm = (insn >> 8) & 0xf;
5106 count = q + 1;
5108 if (imm > 7 && !q)
5109 return 1;
5111 if (imm == 0) {
5112 neon_load_reg64(cpu_V0, rn);
5113 if (q) {
5114 neon_load_reg64(cpu_V1, rn + 1);
5116 } else if (imm == 8) {
5117 neon_load_reg64(cpu_V0, rn + 1);
5118 if (q) {
5119 neon_load_reg64(cpu_V1, rm);
5121 } else if (q) {
5122 tmp64 = tcg_temp_new_i64();
5123 if (imm < 8) {
5124 neon_load_reg64(cpu_V0, rn);
5125 neon_load_reg64(tmp64, rn + 1);
5126 } else {
5127 neon_load_reg64(cpu_V0, rn + 1);
5128 neon_load_reg64(tmp64, rm);
5130 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5131 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5132 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5133 if (imm < 8) {
5134 neon_load_reg64(cpu_V1, rm);
5135 } else {
5136 neon_load_reg64(cpu_V1, rm + 1);
5137 imm -= 8;
5139 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5140 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5141 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5142 } else {
5143 /* BUGFIX */
5144 neon_load_reg64(cpu_V0, rn);
5145 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5146 neon_load_reg64(cpu_V1, rm);
5147 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5148 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5150 neon_store_reg64(cpu_V0, rd);
5151 if (q) {
5152 neon_store_reg64(cpu_V1, rd + 1);
5154 } else if ((insn & (1 << 11)) == 0) {
5155 /* Two register misc. */
5156 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5157 size = (insn >> 18) & 3;
5158 switch (op) {
5159 case 0: /* VREV64 */
5160 if (size == 3)
5161 return 1;
5162 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5163 NEON_GET_REG(T0, rm, pass * 2);
5164 NEON_GET_REG(T1, rm, pass * 2 + 1);
5165 switch (size) {
5166 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5167 case 1: gen_swap_half(cpu_T[0]); break;
5168 case 2: /* no-op */ break;
5169 default: abort();
5171 NEON_SET_REG(T0, rd, pass * 2 + 1);
5172 if (size == 2) {
5173 NEON_SET_REG(T1, rd, pass * 2);
5174 } else {
5175 gen_op_movl_T0_T1();
5176 switch (size) {
5177 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5178 case 1: gen_swap_half(cpu_T[0]); break;
5179 default: abort();
5181 NEON_SET_REG(T0, rd, pass * 2);
5184 break;
5185 case 4: case 5: /* VPADDL */
5186 case 12: case 13: /* VPADAL */
5187 if (size == 3)
5188 return 1;
5189 for (pass = 0; pass < q + 1; pass++) {
5190 tmp = neon_load_reg(rm, pass * 2);
5191 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5192 tmp = neon_load_reg(rm, pass * 2 + 1);
5193 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5194 switch (size) {
5195 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5196 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5197 case 2: tcg_gen_add_i64(CPU_V001); break;
5198 default: abort();
5200 if (op >= 12) {
5201 /* Accumulate. */
5202 neon_load_reg64(cpu_V1, rd + pass);
5203 gen_neon_addl(size);
5205 neon_store_reg64(cpu_V0, rd + pass);
5207 break;
5208 case 33: /* VTRN */
5209 if (size == 2) {
5210 for (n = 0; n < (q ? 4 : 2); n += 2) {
5211 NEON_GET_REG(T0, rm, n);
5212 NEON_GET_REG(T1, rd, n + 1);
5213 NEON_SET_REG(T1, rm, n);
5214 NEON_SET_REG(T0, rd, n + 1);
5216 } else {
5217 goto elementwise;
5219 break;
5220 case 34: /* VUZP */
5221 /* Reg Before After
5222 Rd A3 A2 A1 A0 B2 B0 A2 A0
5223 Rm B3 B2 B1 B0 B3 B1 A3 A1
5225 if (size == 3)
5226 return 1;
5227 gen_neon_unzip(rd, q, 0, size);
5228 gen_neon_unzip(rm, q, 4, size);
5229 if (q) {
5230 static int unzip_order_q[8] =
5231 {0, 2, 4, 6, 1, 3, 5, 7};
5232 for (n = 0; n < 8; n++) {
5233 int reg = (n < 4) ? rd : rm;
5234 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5235 NEON_SET_REG(T0, reg, n % 4);
5237 } else {
5238 static int unzip_order[4] =
5239 {0, 4, 1, 5};
5240 for (n = 0; n < 4; n++) {
5241 int reg = (n < 2) ? rd : rm;
5242 gen_neon_movl_T0_scratch(unzip_order[n]);
5243 NEON_SET_REG(T0, reg, n % 2);
5246 break;
5247 case 35: /* VZIP */
5248 /* Reg Before After
5249 Rd A3 A2 A1 A0 B1 A1 B0 A0
5250 Rm B3 B2 B1 B0 B3 A3 B2 A2
5252 if (size == 3)
5253 return 1;
5254 count = (q ? 4 : 2);
5255 for (n = 0; n < count; n++) {
5256 NEON_GET_REG(T0, rd, n);
5257 NEON_GET_REG(T1, rd, n);
5258 switch (size) {
5259 case 0: gen_helper_neon_zip_u8(); break;
5260 case 1: gen_helper_neon_zip_u16(); break;
5261 case 2: /* no-op */; break;
5262 default: abort();
5264 gen_neon_movl_scratch_T0(n * 2);
5265 gen_neon_movl_scratch_T1(n * 2 + 1);
5267 for (n = 0; n < count * 2; n++) {
5268 int reg = (n < count) ? rd : rm;
5269 gen_neon_movl_T0_scratch(n);
5270 NEON_SET_REG(T0, reg, n % count);
5272 break;
5273 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5274 if (size == 3)
5275 return 1;
5276 TCGV_UNUSED(tmp2);
5277 for (pass = 0; pass < 2; pass++) {
5278 neon_load_reg64(cpu_V0, rm + pass);
5279 tmp = new_tmp();
5280 if (op == 36 && q == 0) {
5281 gen_neon_narrow(size, tmp, cpu_V0);
5282 } else if (q) {
5283 gen_neon_narrow_satu(size, tmp, cpu_V0);
5284 } else {
5285 gen_neon_narrow_sats(size, tmp, cpu_V0);
5287 if (pass == 0) {
5288 tmp2 = tmp;
5289 } else {
5290 neon_store_reg(rd, 0, tmp2);
5291 neon_store_reg(rd, 1, tmp);
5294 break;
5295 case 38: /* VSHLL */
5296 if (q || size == 3)
5297 return 1;
5298 tmp = neon_load_reg(rm, 0);
5299 tmp2 = neon_load_reg(rm, 1);
5300 for (pass = 0; pass < 2; pass++) {
5301 if (pass == 1)
5302 tmp = tmp2;
5303 gen_neon_widen(cpu_V0, tmp, size, 1);
5304 neon_store_reg64(cpu_V0, rd + pass);
5306 break;
5307 default:
5308 elementwise:
5309 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5310 if (op == 30 || op == 31 || op >= 58) {
5311 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5312 neon_reg_offset(rm, pass));
5313 } else {
5314 NEON_GET_REG(T0, rm, pass);
5316 switch (op) {
5317 case 1: /* VREV32 */
5318 switch (size) {
5319 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5320 case 1: gen_swap_half(cpu_T[0]); break;
5321 default: return 1;
5323 break;
5324 case 2: /* VREV16 */
5325 if (size != 0)
5326 return 1;
5327 gen_rev16(cpu_T[0]);
5328 break;
5329 case 8: /* CLS */
5330 switch (size) {
5331 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5332 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5333 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5334 default: return 1;
5336 break;
5337 case 9: /* CLZ */
5338 switch (size) {
5339 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5340 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5341 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5342 default: return 1;
5344 break;
5345 case 10: /* CNT */
5346 if (size != 0)
5347 return 1;
5348 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5349 break;
5350 case 11: /* VNOT */
5351 if (size != 0)
5352 return 1;
5353 gen_op_notl_T0();
5354 break;
5355 case 14: /* VQABS */
5356 switch (size) {
5357 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5358 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5359 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5360 default: return 1;
5362 break;
5363 case 15: /* VQNEG */
5364 switch (size) {
5365 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5366 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5367 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5368 default: return 1;
5370 break;
5371 case 16: case 19: /* VCGT #0, VCLE #0 */
5372 gen_op_movl_T1_im(0);
5373 switch(size) {
5374 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5375 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5376 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5377 default: return 1;
5379 if (op == 19)
5380 gen_op_notl_T0();
5381 break;
5382 case 17: case 20: /* VCGE #0, VCLT #0 */
5383 gen_op_movl_T1_im(0);
5384 switch(size) {
5385 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5386 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5387 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5388 default: return 1;
5390 if (op == 20)
5391 gen_op_notl_T0();
5392 break;
5393 case 18: /* VCEQ #0 */
5394 gen_op_movl_T1_im(0);
5395 switch(size) {
5396 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5397 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5398 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5399 default: return 1;
5401 break;
5402 case 22: /* VABS */
5403 switch(size) {
5404 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5405 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5406 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5407 default: return 1;
5409 break;
5410 case 23: /* VNEG */
5411 gen_op_movl_T1_im(0);
5412 if (size == 3)
5413 return 1;
5414 gen_neon_rsb(size);
5415 break;
5416 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5417 gen_op_movl_T1_im(0);
5418 gen_helper_neon_cgt_f32(CPU_T001);
5419 if (op == 27)
5420 gen_op_notl_T0();
5421 break;
5422 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5423 gen_op_movl_T1_im(0);
5424 gen_helper_neon_cge_f32(CPU_T001);
5425 if (op == 28)
5426 gen_op_notl_T0();
5427 break;
5428 case 26: /* Float VCEQ #0 */
5429 gen_op_movl_T1_im(0);
5430 gen_helper_neon_ceq_f32(CPU_T001);
5431 break;
5432 case 30: /* Float VABS */
5433 gen_vfp_abs(0);
5434 break;
5435 case 31: /* Float VNEG */
5436 gen_vfp_neg(0);
5437 break;
5438 case 32: /* VSWP */
5439 NEON_GET_REG(T1, rd, pass);
5440 NEON_SET_REG(T1, rm, pass);
5441 break;
5442 case 33: /* VTRN */
5443 NEON_GET_REG(T1, rd, pass);
5444 switch (size) {
5445 case 0: gen_helper_neon_trn_u8(); break;
5446 case 1: gen_helper_neon_trn_u16(); break;
5447 case 2: abort();
5448 default: return 1;
5450 NEON_SET_REG(T1, rm, pass);
5451 break;
5452 case 56: /* Integer VRECPE */
5453 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5454 break;
5455 case 57: /* Integer VRSQRTE */
5456 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5457 break;
5458 case 58: /* Float VRECPE */
5459 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5460 break;
5461 case 59: /* Float VRSQRTE */
5462 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5463 break;
5464 case 60: /* VCVT.F32.S32 */
5465 gen_vfp_tosiz(0);
5466 break;
5467 case 61: /* VCVT.F32.U32 */
5468 gen_vfp_touiz(0);
5469 break;
5470 case 62: /* VCVT.S32.F32 */
5471 gen_vfp_sito(0);
5472 break;
5473 case 63: /* VCVT.U32.F32 */
5474 gen_vfp_uito(0);
5475 break;
5476 default:
5477 /* Reserved: 21, 29, 39-56 */
5478 return 1;
5480 if (op == 30 || op == 31 || op >= 58) {
5481 tcg_gen_st_f32(cpu_F0s, cpu_env,
5482 neon_reg_offset(rd, pass));
5483 } else {
5484 NEON_SET_REG(T0, rd, pass);
5487 break;
5489 } else if ((insn & (1 << 10)) == 0) {
5490 /* VTBL, VTBX. */
5491 n = ((insn >> 5) & 0x18) + 8;
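/* VTBL/VTBX: n is the table length in bytes (one to four doubleword
   registers).  Bit 6 distinguishes VTBX (out-of-range indexes keep the old
   destination, loaded into tmp) from VTBL (out-of-range indexes give zero). */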
5492 if (insn & (1 << 6)) {
5493 tmp = neon_load_reg(rd, 0);
5494 } else {
5495 tmp = new_tmp();
5496 tcg_gen_movi_i32(tmp, 0);
5498 tmp2 = neon_load_reg(rm, 0);
5499 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5500 tcg_const_i32(n));
5501 dead_tmp(tmp);
5502 if (insn & (1 << 6)) {
5503 tmp = neon_load_reg(rd, 1);
5504 } else {
5505 tmp = new_tmp();
5506 tcg_gen_movi_i32(tmp, 0);
5508 tmp3 = neon_load_reg(rm, 1);
5509 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5510 tcg_const_i32(n));
5511 neon_store_reg(rd, 0, tmp2);
5512 neon_store_reg(rd, 1, tmp3);
5513 dead_tmp(tmp);
5514 } else if ((insn & 0x380) == 0) {
5515 /* VDUP */
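/* VDUP (scalar): bit 19 selects which 32-bit half of the source register
   holds the scalar; bits [17:16] then pick the element size for the dup
   helpers that broadcast it into every pass of the destination. */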
5516 if (insn & (1 << 19)) {
5517 NEON_GET_REG(T0, rm, 1);
5518 } else {
5519 NEON_GET_REG(T0, rm, 0);
5521 if (insn & (1 << 16)) {
5522 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5523 } else if (insn & (1 << 17)) {
5524 if ((insn >> 18) & 1)
5525 gen_neon_dup_high16(cpu_T[0]);
5526 else
5527 gen_neon_dup_low16(cpu_T[0]);
5529 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5530 NEON_SET_REG(T0, rd, pass);
5532 } else {
5533 return 1;
5537 return 0;
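/* cp14 reads: only the ThumbEE registers TEECR and TEEHBR are handled;
   anything else is reported on stderr and treated as UNDEFINED. */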
5540 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5542 int crn = (insn >> 16) & 0xf;
5543 int crm = insn & 0xf;
5544 int op1 = (insn >> 21) & 7;
5545 int op2 = (insn >> 5) & 7;
5546 int rt = (insn >> 12) & 0xf;
5547 TCGv tmp;
5549 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5550 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5551 /* TEECR */
5552 if (IS_USER(s))
5553 return 1;
5554 tmp = load_cpu_field(teecr);
5555 store_reg(s, rt, tmp);
5556 return 0;
5558 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5559 /* TEEHBR */
5560 if (IS_USER(s) && (env->teecr & 1))
5561 return 1;
5562 tmp = load_cpu_field(teehbr);
5563 store_reg(s, rt, tmp);
5564 return 0;
5567 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5568 op1, crn, crm, op2);
5569 return 1;
5572 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5574 int crn = (insn >> 16) & 0xf;
5575 int crm = insn & 0xf;
5576 int op1 = (insn >> 21) & 7;
5577 int op2 = (insn >> 5) & 7;
5578 int rt = (insn >> 12) & 0xf;
5579 TCGv tmp;
5581 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5582 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5583 /* TEECR */
5584 if (IS_USER(s))
5585 return 1;
5586 tmp = load_reg(s, rt);
5587 gen_helper_set_teecr(cpu_env, tmp);
5588 dead_tmp(tmp);
5589 return 0;
5591 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5592 /* TEEHBR */
5593 if (IS_USER(s) && (env->teecr & 1))
5594 return 1;
5595 tmp = load_reg(s, rt);
5596 store_cpu_field(tmp, teehbr);
5597 return 0;
5600 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5601 op1, crn, crm, op2);
5602 return 1;
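/* Dispatch a generic coprocessor instruction by coprocessor number:
   cp0/cp1 go to the iwMMXt/XScale DSP handlers, cp10/cp11 to VFP, cp14 to
   the read/write handlers above, cp15 to the system control handler, and
   anything else to a board-specific hook. */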
5605 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5607 int cpnum;
5609 cpnum = (insn >> 8) & 0xf;
5610 if (arm_feature(env, ARM_FEATURE_XSCALE)
5611 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5612 return 1;
5614 switch (cpnum) {
5615 case 0:
5616 case 1:
5617 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5618 return disas_iwmmxt_insn(env, s, insn);
5619 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5620 return disas_dsp_insn(env, s, insn);
5622 return 1;
5623 case 10:
5624 case 11:
5625 return disas_vfp_insn (env, s, insn);
5626 case 14:
5627 /* Coprocessors 7-15 are architecturally reserved by ARM.
5628 Unfortunately Intel decided to ignore this. */
5629 if (arm_feature(env, ARM_FEATURE_XSCALE))
5630 goto board;
5631 if (insn & (1 << 20))
5632 return disas_cp14_read(env, s, insn);
5633 else
5634 return disas_cp14_write(env, s, insn);
5635 case 15:
5636 return disas_cp15_insn (env, s, insn);
5637 default:
5638 board:
5639 /* Unknown coprocessor. See if the board has hooked it. */
5640 return disas_cp_insn (env, s, insn);
5645 /* Store a 64-bit value to a register pair. Clobbers val. */
5646 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5648 TCGv tmp;
5649 tmp = new_tmp();
5650 tcg_gen_trunc_i64_i32(tmp, val);
5651 store_reg(s, rlow, tmp);
5652 tmp = new_tmp();
5653 tcg_gen_shri_i64(val, val, 32);
5654 tcg_gen_trunc_i64_i32(tmp, val);
5655 store_reg(s, rhigh, tmp);
5658 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5659 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5661 TCGv_i64 tmp;
5662 TCGv tmp2;
5664 /* Load value and extend to 64 bits. */
5665 tmp = tcg_temp_new_i64();
5666 tmp2 = load_reg(s, rlow);
5667 tcg_gen_extu_i32_i64(tmp, tmp2);
5668 dead_tmp(tmp2);
5669 tcg_gen_add_i64(val, val, tmp);
5672 /* load and add a 64-bit value from a register pair. */
5673 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5675 TCGv_i64 tmp;
5676 TCGv tmpl;
5677 TCGv tmph;
5679 /* Load 64-bit value rd:rn. */
5680 tmpl = load_reg(s, rlow);
5681 tmph = load_reg(s, rhigh);
5682 tmp = tcg_temp_new_i64();
5683 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5684 dead_tmp(tmpl);
5685 dead_tmp(tmph);
5686 tcg_gen_add_i64(val, val, tmp);
5689 /* Set N and Z flags from a 64-bit value. */
5690 static void gen_logicq_cc(TCGv_i64 val)
5692 TCGv tmp = new_tmp();
5693 gen_helper_logicq_cc(tmp, val);
5694 gen_logic_CC(tmp);
5695 dead_tmp(tmp);
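/* Decode and translate a single ARM-mode instruction at s->pc, advancing
   the PC by 4.  Unconditional (cond == 0xf) encodings are handled first;
   everything else gets a conditional skip label when cond != AL. */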
5698 static void disas_arm_insn(CPUState * env, DisasContext *s)
5700 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5701 TCGv tmp;
5702 TCGv tmp2;
5703 TCGv tmp3;
5704 TCGv addr;
5705 TCGv_i64 tmp64;
5707 insn = ldl_code(s->pc);
5708 s->pc += 4;
5710 /* M variants do not implement ARM mode. */
5711 if (IS_M(env))
5712 goto illegal_op;
5713 cond = insn >> 28;
5714 if (cond == 0xf){
5715 /* Unconditional instructions. */
5716 if (((insn >> 25) & 7) == 1) {
5717 /* NEON Data processing. */
5718 if (!arm_feature(env, ARM_FEATURE_NEON))
5719 goto illegal_op;
5721 if (disas_neon_data_insn(env, s, insn))
5722 goto illegal_op;
5723 return;
5725 if ((insn & 0x0f100000) == 0x04000000) {
5726 /* NEON load/store. */
5727 if (!arm_feature(env, ARM_FEATURE_NEON))
5728 goto illegal_op;
5730 if (disas_neon_ls_insn(env, s, insn))
5731 goto illegal_op;
5732 return;
5734 if ((insn & 0x0d70f000) == 0x0550f000)
5735 return; /* PLD */
5736 else if ((insn & 0x0ffffdff) == 0x01010000) {
5737 ARCH(6);
5738 /* setend */
5739 if (insn & (1 << 9)) {
5740 /* BE8 mode not implemented. */
5741 goto illegal_op;
5743 return;
5744 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5745 switch ((insn >> 4) & 0xf) {
5746 case 1: /* clrex */
5747 ARCH(6K);
5748 gen_helper_clrex(cpu_env);
5749 return;
5750 case 4: /* dsb */
5751 case 5: /* dmb */
5752 case 6: /* isb */
5753 ARCH(7);
5754 /* We don't emulate caches so these are a no-op. */
5755 return;
5756 default:
5757 goto illegal_op;
5759 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5760 /* srs */
5761 int32_t offset;
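/* SRS: store LR and SPSR to the stack of the mode given in bits [4:0],
   using the addressing mode from bits [24:23]; bit 21 requests base
   writeback. */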
5762 if (IS_USER(s))
5763 goto illegal_op;
5764 ARCH(6);
5765 op1 = (insn & 0x1f);
5766 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5767 addr = load_reg(s, 13);
5768 } else {
5769 addr = new_tmp();
5770 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5772 i = (insn >> 23) & 3;
5773 switch (i) {
5774 case 0: offset = -4; break; /* DA */
5775 case 1: offset = 0; break; /* IA */
5776 case 2: offset = -8; break; /* DB */
5777 case 3: offset = 4; break; /* IB */
5778 default: abort();
5780 if (offset)
5781 tcg_gen_addi_i32(addr, addr, offset);
5782 tmp = load_reg(s, 14);
5783 gen_st32(tmp, addr, 0);
5784 tmp = load_cpu_field(spsr);
5785 tcg_gen_addi_i32(addr, addr, 4);
5786 gen_st32(tmp, addr, 0);
5787 if (insn & (1 << 21)) {
5788 /* Base writeback. */
5789 switch (i) {
5790 case 0: offset = -8; break;
5791 case 1: offset = 4; break;
5792 case 2: offset = -4; break;
5793 case 3: offset = 0; break;
5794 default: abort();
5796 if (offset)
5797 tcg_gen_addi_i32(addr, addr, offset);
5798 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5799 store_reg(s, 13, addr);
5800 } else {
5801 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5802 dead_tmp(addr);
5804 } else {
5805 dead_tmp(addr);
5807 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5808 /* rfe */
5809 int32_t offset;
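/* RFE: reload the return PC and CPSR from memory and resume via gen_rfe();
   bit 21 requests base writeback. */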
5810 if (IS_USER(s))
5811 goto illegal_op;
5812 ARCH(6);
5813 rn = (insn >> 16) & 0xf;
5814 addr = load_reg(s, rn);
5815 i = (insn >> 23) & 3;
5816 switch (i) {
5817 case 0: offset = -4; break; /* DA */
5818 case 1: offset = 0; break; /* IA */
5819 case 2: offset = -8; break; /* DB */
5820 case 3: offset = 4; break; /* IB */
5821 default: abort();
5823 if (offset)
5824 tcg_gen_addi_i32(addr, addr, offset);
5825 /* Load PC into tmp and CPSR into tmp2. */
5826 tmp = gen_ld32(addr, 0);
5827 tcg_gen_addi_i32(addr, addr, 4);
5828 tmp2 = gen_ld32(addr, 0);
5829 if (insn & (1 << 21)) {
5830 /* Base writeback. */
5831 switch (i) {
5832 case 0: offset = -8; break;
5833 case 1: offset = 4; break;
5834 case 2: offset = -4; break;
5835 case 3: offset = 0; break;
5836 default: abort();
5838 if (offset)
5839 tcg_gen_addi_i32(addr, addr, offset);
5840 store_reg(s, rn, addr);
5841 } else {
5842 dead_tmp(addr);
5844 gen_rfe(s, tmp, tmp2);
5845 return;
5846 } else if ((insn & 0x0e000000) == 0x0a000000) {
5847 /* branch link and change to thumb (blx <offset>) */
5848 int32_t offset;
5850 val = (uint32_t)s->pc;
5851 tmp = new_tmp();
5852 tcg_gen_movi_i32(tmp, val);
5853 store_reg(s, 14, tmp);
5854 /* Sign-extend the 24-bit offset */
5855 offset = (((int32_t)insn) << 8) >> 8;
5856 /* offset * 4 + bit24 * 2 + (thumb bit) */
5857 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5858 /* pipeline offset */
5859 val += 4;
5860 gen_bx_im(s, val);
5861 return;
5862 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5863 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5864 /* iWMMXt register transfer. */
5865 if (env->cp15.c15_cpar & (1 << 1))
5866 if (!disas_iwmmxt_insn(env, s, insn))
5867 return;
5869 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5870 /* Coprocessor double register transfer. */
5871 } else if ((insn & 0x0f000010) == 0x0e000010) {
5872 /* Additional coprocessor register transfer. */
5873 } else if ((insn & 0x0ff10020) == 0x01000000) {
5874 uint32_t mask;
5875 uint32_t val;
5876 /* cps (privileged) */
5877 if (IS_USER(s))
5878 return;
5879 mask = val = 0;
5880 if (insn & (1 << 19)) {
5881 if (insn & (1 << 8))
5882 mask |= CPSR_A;
5883 if (insn & (1 << 7))
5884 mask |= CPSR_I;
5885 if (insn & (1 << 6))
5886 mask |= CPSR_F;
5887 if (insn & (1 << 18))
5888 val |= mask;
5890 if (insn & (1 << 17)) {
5891 mask |= CPSR_M;
5892 val |= (insn & 0x1f);
5894 if (mask) {
5895 gen_set_psr_im(s, mask, 0, val);
5897 return;
5899 goto illegal_op;
5901 if (cond != 0xe) {
5902 /* if not always execute, we generate a conditional jump to
5903 next instruction */
5904 s->condlabel = gen_new_label();
5905 gen_test_cc(cond ^ 1, s->condlabel);
5906 s->condjmp = 1;
5908 if ((insn & 0x0f900000) == 0x03000000) {
5909 if ((insn & (1 << 21)) == 0) {
5910 ARCH(6T2);
5911 rd = (insn >> 12) & 0xf;
5912 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5913 if ((insn & (1 << 22)) == 0) {
5914 /* MOVW */
5915 tmp = new_tmp();
5916 tcg_gen_movi_i32(tmp, val);
5917 } else {
5918 /* MOVT */
5919 tmp = load_reg(s, rd);
5920 tcg_gen_ext16u_i32(tmp, tmp);
5921 tcg_gen_ori_i32(tmp, tmp, val << 16);
5923 store_reg(s, rd, tmp);
5924 } else {
5925 if (((insn >> 12) & 0xf) != 0xf)
5926 goto illegal_op;
5927 if (((insn >> 16) & 0xf) == 0) {
5928 gen_nop_hint(s, insn & 0xff);
5929 } else {
5930 /* CPSR = immediate */
5931 val = insn & 0xff;
5932 shift = ((insn >> 8) & 0xf) * 2;
5933 if (shift)
5934 val = (val >> shift) | (val << (32 - shift));
5935 i = ((insn & (1 << 22)) != 0);
5936 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
5937 goto illegal_op;
5940 } else if ((insn & 0x0f900000) == 0x01000000
5941 && (insn & 0x00000090) != 0x00000090) {
5942 /* miscellaneous instructions */
5943 op1 = (insn >> 21) & 3;
5944 sh = (insn >> 4) & 0xf;
5945 rm = insn & 0xf;
5946 switch (sh) {
5947 case 0x0: /* move program status register */
5948 if (op1 & 1) {
5949 /* PSR = reg */
5950 tmp = load_reg(s, rm);
5951 i = ((op1 & 2) != 0);
5952 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
5953 goto illegal_op;
5954 } else {
5955 /* reg = PSR */
5956 rd = (insn >> 12) & 0xf;
5957 if (op1 & 2) {
5958 if (IS_USER(s))
5959 goto illegal_op;
5960 tmp = load_cpu_field(spsr);
5961 } else {
5962 tmp = new_tmp();
5963 gen_helper_cpsr_read(tmp);
5965 store_reg(s, rd, tmp);
5967 break;
5968 case 0x1:
5969 if (op1 == 1) {
5970 /* branch/exchange thumb (bx). */
5971 tmp = load_reg(s, rm);
5972 gen_bx(s, tmp);
5973 } else if (op1 == 3) {
5974 /* clz */
5975 rd = (insn >> 12) & 0xf;
5976 tmp = load_reg(s, rm);
5977 gen_helper_clz(tmp, tmp);
5978 store_reg(s, rd, tmp);
5979 } else {
5980 goto illegal_op;
5982 break;
5983 case 0x2:
5984 if (op1 == 1) {
5985 ARCH(5J); /* bxj */
5986 /* Trivial implementation equivalent to bx. */
5987 tmp = load_reg(s, rm);
5988 gen_bx(s, tmp);
5989 } else {
5990 goto illegal_op;
5992 break;
5993 case 0x3:
5994 if (op1 != 1)
5995 goto illegal_op;
5997 /* branch link/exchange thumb (blx) */
5998 tmp = load_reg(s, rm);
5999 tmp2 = new_tmp();
6000 tcg_gen_movi_i32(tmp2, s->pc);
6001 store_reg(s, 14, tmp2);
6002 gen_bx(s, tmp);
6003 break;
6004 case 0x5: /* saturating add/subtract */
6005 rd = (insn >> 12) & 0xf;
6006 rn = (insn >> 16) & 0xf;
6007 tmp = load_reg(s, rm);
6008 tmp2 = load_reg(s, rn);
6009 if (op1 & 2)
6010 gen_helper_double_saturate(tmp2, tmp2);
6011 if (op1 & 1)
6012 gen_helper_sub_saturate(tmp, tmp, tmp2);
6013 else
6014 gen_helper_add_saturate(tmp, tmp, tmp2);
6015 dead_tmp(tmp2);
6016 store_reg(s, rd, tmp);
6017 break;
6018 case 7: /* bkpt */
6019 gen_set_condexec(s);
6020 gen_set_pc_im(s->pc - 4);
6021 gen_exception(EXCP_BKPT);
6022 s->is_jmp = DISAS_JUMP;
6023 break;
6024 case 0x8: /* signed multiply */
6025 case 0xa:
6026 case 0xc:
6027 case 0xe:
6028 rs = (insn >> 8) & 0xf;
6029 rn = (insn >> 12) & 0xf;
6030 rd = (insn >> 16) & 0xf;
6031 if (op1 == 1) {
6032 /* (32 * 16) >> 16 */
6033 tmp = load_reg(s, rm);
6034 tmp2 = load_reg(s, rs);
6035 if (sh & 4)
6036 tcg_gen_sari_i32(tmp2, tmp2, 16);
6037 else
6038 gen_sxth(tmp2);
6039 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6040 tcg_gen_shri_i64(tmp64, tmp64, 16);
6041 tmp = new_tmp();
6042 tcg_gen_trunc_i64_i32(tmp, tmp64);
6043 if ((sh & 2) == 0) {
6044 tmp2 = load_reg(s, rn);
6045 gen_helper_add_setq(tmp, tmp, tmp2);
6046 dead_tmp(tmp2);
6048 store_reg(s, rd, tmp);
6049 } else {
6050 /* 16 * 16 */
6051 tmp = load_reg(s, rm);
6052 tmp2 = load_reg(s, rs);
6053 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6054 dead_tmp(tmp2);
6055 if (op1 == 2) {
6056 tmp64 = tcg_temp_new_i64();
6057 tcg_gen_ext_i32_i64(tmp64, tmp);
6058 dead_tmp(tmp);
6059 gen_addq(s, tmp64, rn, rd);
6060 gen_storeq_reg(s, rn, rd, tmp64);
6061 } else {
6062 if (op1 == 0) {
6063 tmp2 = load_reg(s, rn);
6064 gen_helper_add_setq(tmp, tmp, tmp2);
6065 dead_tmp(tmp2);
6067 store_reg(s, rd, tmp);
6070 break;
6071 default:
6072 goto illegal_op;
6074 } else if (((insn & 0x0e000000) == 0 &&
6075 (insn & 0x00000090) != 0x90) ||
6076 ((insn & 0x0e000000) == (1 << 25))) {
6077 int set_cc, logic_cc, shiftop;
6079 op1 = (insn >> 21) & 0xf;
6080 set_cc = (insn >> 20) & 1;
6081 logic_cc = table_logic_cc[op1] & set_cc;
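/* logic_cc is true for the logical data-processing ops with S set: they
   update N and Z from the result (and C from the shifter output) rather
   than from an arithmetic carry or overflow. */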
6083 /* data processing instruction */
6084 if (insn & (1 << 25)) {
6085 /* immediate operand */
6086 val = insn & 0xff;
6087 shift = ((insn >> 8) & 0xf) * 2;
6088 if (shift) {
6089 val = (val >> shift) | (val << (32 - shift));
6091 tmp2 = new_tmp();
6092 tcg_gen_movi_i32(tmp2, val);
6093 if (logic_cc && shift) {
6094 gen_set_CF_bit31(tmp2);
6096 } else {
6097 /* register */
6098 rm = (insn) & 0xf;
6099 tmp2 = load_reg(s, rm);
6100 shiftop = (insn >> 5) & 3;
6101 if (!(insn & (1 << 4))) {
6102 shift = (insn >> 7) & 0x1f;
6103 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6104 } else {
6105 rs = (insn >> 8) & 0xf;
6106 tmp = load_reg(s, rs);
6107 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6110 if (op1 != 0x0f && op1 != 0x0d) {
6111 rn = (insn >> 16) & 0xf;
6112 tmp = load_reg(s, rn);
6113 } else {
6114 TCGV_UNUSED(tmp);
6116 rd = (insn >> 12) & 0xf;
6117 switch(op1) {
6118 case 0x00:
6119 tcg_gen_and_i32(tmp, tmp, tmp2);
6120 if (logic_cc) {
6121 gen_logic_CC(tmp);
6123 store_reg_bx(env, s, rd, tmp);
6124 break;
6125 case 0x01:
6126 tcg_gen_xor_i32(tmp, tmp, tmp2);
6127 if (logic_cc) {
6128 gen_logic_CC(tmp);
6130 store_reg_bx(env, s, rd, tmp);
6131 break;
6132 case 0x02:
6133 if (set_cc && rd == 15) {
6134 /* SUBS r15, ... is used for exception return. */
6135 if (IS_USER(s)) {
6136 goto illegal_op;
6138 gen_helper_sub_cc(tmp, tmp, tmp2);
6139 gen_exception_return(s, tmp);
6140 } else {
6141 if (set_cc) {
6142 gen_helper_sub_cc(tmp, tmp, tmp2);
6143 } else {
6144 tcg_gen_sub_i32(tmp, tmp, tmp2);
6146 store_reg_bx(env, s, rd, tmp);
6148 break;
6149 case 0x03:
6150 if (set_cc) {
6151 gen_helper_sub_cc(tmp, tmp2, tmp);
6152 } else {
6153 tcg_gen_sub_i32(tmp, tmp2, tmp);
6155 store_reg_bx(env, s, rd, tmp);
6156 break;
6157 case 0x04:
6158 if (set_cc) {
6159 gen_helper_add_cc(tmp, tmp, tmp2);
6160 } else {
6161 tcg_gen_add_i32(tmp, tmp, tmp2);
6163 store_reg_bx(env, s, rd, tmp);
6164 break;
6165 case 0x05:
6166 if (set_cc) {
6167 gen_helper_adc_cc(tmp, tmp, tmp2);
6168 } else {
6169 gen_add_carry(tmp, tmp, tmp2);
6171 store_reg_bx(env, s, rd, tmp);
6172 break;
6173 case 0x06:
6174 if (set_cc) {
6175 gen_helper_sbc_cc(tmp, tmp, tmp2);
6176 } else {
6177 gen_sub_carry(tmp, tmp, tmp2);
6179 store_reg_bx(env, s, rd, tmp);
6180 break;
6181 case 0x07:
6182 if (set_cc) {
6183 gen_helper_sbc_cc(tmp, tmp2, tmp);
6184 } else {
6185 gen_sub_carry(tmp, tmp2, tmp);
6187 store_reg_bx(env, s, rd, tmp);
6188 break;
6189 case 0x08:
6190 if (set_cc) {
6191 tcg_gen_and_i32(tmp, tmp, tmp2);
6192 gen_logic_CC(tmp);
6194 dead_tmp(tmp);
6195 break;
6196 case 0x09:
6197 if (set_cc) {
6198 tcg_gen_xor_i32(tmp, tmp, tmp2);
6199 gen_logic_CC(tmp);
6201 dead_tmp(tmp);
6202 break;
6203 case 0x0a:
6204 if (set_cc) {
6205 gen_helper_sub_cc(tmp, tmp, tmp2);
6207 dead_tmp(tmp);
6208 break;
6209 case 0x0b:
6210 if (set_cc) {
6211 gen_helper_add_cc(tmp, tmp, tmp2);
6213 dead_tmp(tmp);
6214 break;
6215 case 0x0c:
6216 tcg_gen_or_i32(tmp, tmp, tmp2);
6217 if (logic_cc) {
6218 gen_logic_CC(tmp);
6220 store_reg_bx(env, s, rd, tmp);
6221 break;
6222 case 0x0d:
6223 if (logic_cc && rd == 15) {
6224 /* MOVS r15, ... is used for exception return. */
6225 if (IS_USER(s)) {
6226 goto illegal_op;
6228 gen_exception_return(s, tmp2);
6229 } else {
6230 if (logic_cc) {
6231 gen_logic_CC(tmp2);
6233 store_reg_bx(env, s, rd, tmp2);
6235 break;
6236 case 0x0e:
6237 tcg_gen_bic_i32(tmp, tmp, tmp2);
6238 if (logic_cc) {
6239 gen_logic_CC(tmp);
6241 store_reg_bx(env, s, rd, tmp);
6242 break;
6243 default:
6244 case 0x0f:
6245 tcg_gen_not_i32(tmp2, tmp2);
6246 if (logic_cc) {
6247 gen_logic_CC(tmp2);
6249 store_reg_bx(env, s, rd, tmp2);
6250 break;
6252 if (op1 != 0x0f && op1 != 0x0d) {
6253 dead_tmp(tmp2);
6255 } else {
6256 /* other instructions */
6257 op1 = (insn >> 24) & 0xf;
6258 switch(op1) {
6259 case 0x0:
6260 case 0x1:
6261 /* multiplies, extra load/stores */
6262 sh = (insn >> 5) & 3;
6263 if (sh == 0) {
6264 if (op1 == 0x0) {
6265 rd = (insn >> 16) & 0xf;
6266 rn = (insn >> 12) & 0xf;
6267 rs = (insn >> 8) & 0xf;
6268 rm = (insn) & 0xf;
6269 op1 = (insn >> 20) & 0xf;
6270 switch (op1) {
6271 case 0: case 1: case 2: case 3: case 6:
6272 /* 32 bit mul */
6273 tmp = load_reg(s, rs);
6274 tmp2 = load_reg(s, rm);
6275 tcg_gen_mul_i32(tmp, tmp, tmp2);
6276 dead_tmp(tmp2);
6277 if (insn & (1 << 22)) {
6278 /* Subtract (mls) */
6279 ARCH(6T2);
6280 tmp2 = load_reg(s, rn);
6281 tcg_gen_sub_i32(tmp, tmp2, tmp);
6282 dead_tmp(tmp2);
6283 } else if (insn & (1 << 21)) {
6284 /* Add */
6285 tmp2 = load_reg(s, rn);
6286 tcg_gen_add_i32(tmp, tmp, tmp2);
6287 dead_tmp(tmp2);
6289 if (insn & (1 << 20))
6290 gen_logic_CC(tmp);
6291 store_reg(s, rd, tmp);
6292 break;
6293 default:
6294 /* 64 bit mul */
6295 tmp = load_reg(s, rs);
6296 tmp2 = load_reg(s, rm);
6297 if (insn & (1 << 22))
6298 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6299 else
6300 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6301 if (insn & (1 << 21)) /* mult accumulate */
6302 gen_addq(s, tmp64, rn, rd);
6303 if (!(insn & (1 << 23))) { /* double accumulate */
6304 ARCH(6);
6305 gen_addq_lo(s, tmp64, rn);
6306 gen_addq_lo(s, tmp64, rd);
6308 if (insn & (1 << 20))
6309 gen_logicq_cc(tmp64);
6310 gen_storeq_reg(s, rn, rd, tmp64);
6311 break;
6313 } else {
6314 rn = (insn >> 16) & 0xf;
6315 rd = (insn >> 12) & 0xf;
6316 if (insn & (1 << 23)) {
6317 /* load/store exclusive */
6318 op1 = (insn >> 21) & 0x3;
6319 if (op1)
6320 ARCH(6K);
6321 else
6322 ARCH(6);
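/* op1 selects the exclusive access width: 0 = word (ldrex/strex),
   1 = doubleword, 2 = byte, 3 = halfword. */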
6323 gen_movl_T1_reg(s, rn);
6324 addr = cpu_T[1];
6325 if (insn & (1 << 20)) {
6326 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6327 switch (op1) {
6328 case 0: /* ldrex */
6329 tmp = gen_ld32(addr, IS_USER(s));
6330 break;
6331 case 1: /* ldrexd */
6332 tmp = gen_ld32(addr, IS_USER(s));
6333 store_reg(s, rd, tmp);
6334 tcg_gen_addi_i32(addr, addr, 4);
6335 tmp = gen_ld32(addr, IS_USER(s));
6336 rd++;
6337 break;
6338 case 2: /* ldrexb */
6339 tmp = gen_ld8u(addr, IS_USER(s));
6340 break;
6341 case 3: /* ldrexh */
6342 tmp = gen_ld16u(addr, IS_USER(s));
6343 break;
6344 default:
6345 abort();
6347 store_reg(s, rd, tmp);
6348 } else {
6349 int label = gen_new_label();
6350 rm = insn & 0xf;
6351 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6352 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6353 0, label);
6354 tmp = load_reg(s,rm);
6355 switch (op1) {
6356 case 0: /* strex */
6357 gen_st32(tmp, addr, IS_USER(s));
6358 break;
6359 case 1: /* strexd */
6360 gen_st32(tmp, addr, IS_USER(s));
6361 tcg_gen_addi_i32(addr, addr, 4);
6362 tmp = load_reg(s, rm + 1);
6363 gen_st32(tmp, addr, IS_USER(s));
6364 break;
6365 case 2: /* strexb */
6366 gen_st8(tmp, addr, IS_USER(s));
6367 break;
6368 case 3: /* strexh */
6369 gen_st16(tmp, addr, IS_USER(s));
6370 break;
6371 default:
6372 abort();
6374 gen_set_label(label);
6375 gen_movl_reg_T0(s, rd);
6377 } else {
6378 /* SWP instruction */
6379 rm = (insn) & 0xf;
6381 /* ??? This is not really atomic. However we know
6382 we never have multiple CPUs running in parallel,
6383 so it is good enough. */
6384 addr = load_reg(s, rn);
6385 tmp = load_reg(s, rm);
6386 if (insn & (1 << 22)) {
6387 tmp2 = gen_ld8u(addr, IS_USER(s));
6388 gen_st8(tmp, addr, IS_USER(s));
6389 } else {
6390 tmp2 = gen_ld32(addr, IS_USER(s));
6391 gen_st32(tmp, addr, IS_USER(s));
6393 dead_tmp(addr);
6394 store_reg(s, rd, tmp2);
6397 } else {
6398 int address_offset;
6399 int load;
6400 /* Misc load/store */
6401 rn = (insn >> 16) & 0xf;
6402 rd = (insn >> 12) & 0xf;
6403 addr = load_reg(s, rn);
6404 if (insn & (1 << 24))
6405 gen_add_datah_offset(s, insn, 0, addr);
6406 address_offset = 0;
6407 if (insn & (1 << 20)) {
6408 /* load */
6409 switch(sh) {
6410 case 1:
6411 tmp = gen_ld16u(addr, IS_USER(s));
6412 break;
6413 case 2:
6414 tmp = gen_ld8s(addr, IS_USER(s));
6415 break;
6416 default:
6417 case 3:
6418 tmp = gen_ld16s(addr, IS_USER(s));
6419 break;
6421 load = 1;
6422 } else if (sh & 2) {
6423 /* doubleword */
6424 if (sh & 1) {
6425 /* store */
6426 tmp = load_reg(s, rd);
6427 gen_st32(tmp, addr, IS_USER(s));
6428 tcg_gen_addi_i32(addr, addr, 4);
6429 tmp = load_reg(s, rd + 1);
6430 gen_st32(tmp, addr, IS_USER(s));
6431 load = 0;
6432 } else {
6433 /* load */
6434 tmp = gen_ld32(addr, IS_USER(s));
6435 store_reg(s, rd, tmp);
6436 tcg_gen_addi_i32(addr, addr, 4);
6437 tmp = gen_ld32(addr, IS_USER(s));
6438 rd++;
6439 load = 1;
6441 address_offset = -4;
6442 } else {
6443 /* store */
6444 tmp = load_reg(s, rd);
6445 gen_st16(tmp, addr, IS_USER(s));
6446 load = 0;
6448 /* Perform base writeback before the loaded value to
6449 ensure correct behavior with overlapping index registers.
6450 ldrd with base writeback is undefined if the
6451 destination and index registers overlap. */
6452 if (!(insn & (1 << 24))) {
6453 gen_add_datah_offset(s, insn, address_offset, addr);
6454 store_reg(s, rn, addr);
6455 } else if (insn & (1 << 21)) {
6456 if (address_offset)
6457 tcg_gen_addi_i32(addr, addr, address_offset);
6458 store_reg(s, rn, addr);
6459 } else {
6460 dead_tmp(addr);
6462 if (load) {
6463 /* Complete the load. */
6464 store_reg(s, rd, tmp);
6467 break;
6468 case 0x4:
6469 case 0x5:
6470 goto do_ldst;
6471 case 0x6:
6472 case 0x7:
6473 if (insn & (1 << 4)) {
6474 ARCH(6);
6475 /* Armv6 Media instructions. */
6476 rm = insn & 0xf;
6477 rn = (insn >> 16) & 0xf;
6478 rd = (insn >> 12) & 0xf;
6479 rs = (insn >> 8) & 0xf;
6480 switch ((insn >> 23) & 3) {
6481 case 0: /* Parallel add/subtract. */
6482 op1 = (insn >> 20) & 7;
6483 tmp = load_reg(s, rn);
6484 tmp2 = load_reg(s, rm);
6485 sh = (insn >> 5) & 7;
6486 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6487 goto illegal_op;
6488 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6489 dead_tmp(tmp2);
6490 store_reg(s, rd, tmp);
6491 break;
6492 case 1:
6493 if ((insn & 0x00700020) == 0) {
6494 /* Halfword pack. */
6495 tmp = load_reg(s, rn);
6496 tmp2 = load_reg(s, rm);
6497 shift = (insn >> 7) & 0x1f;
6498 if (insn & (1 << 6)) {
6499 /* pkhtb */
6500 if (shift == 0)
6501 shift = 31;
6502 tcg_gen_sari_i32(tmp2, tmp2, shift);
6503 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6504 tcg_gen_ext16u_i32(tmp2, tmp2);
6505 } else {
6506 /* pkhbt */
6507 if (shift)
6508 tcg_gen_shli_i32(tmp2, tmp2, shift);
6509 tcg_gen_ext16u_i32(tmp, tmp);
6510 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6512 tcg_gen_or_i32(tmp, tmp, tmp2);
6513 dead_tmp(tmp2);
6514 store_reg(s, rd, tmp);
6515 } else if ((insn & 0x00200020) == 0x00200000) {
6516 /* [us]sat */
6517 tmp = load_reg(s, rm);
6518 shift = (insn >> 7) & 0x1f;
6519 if (insn & (1 << 6)) {
6520 if (shift == 0)
6521 shift = 31;
6522 tcg_gen_sari_i32(tmp, tmp, shift);
6523 } else {
6524 tcg_gen_shli_i32(tmp, tmp, shift);
6526 sh = (insn >> 16) & 0x1f;
6527 if (sh != 0) {
6528 if (insn & (1 << 22))
6529 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6530 else
6531 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6533 store_reg(s, rd, tmp);
6534 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6535 /* [us]sat16 */
6536 tmp = load_reg(s, rm);
6537 sh = (insn >> 16) & 0x1f;
6538 if (sh != 0) {
6539 if (insn & (1 << 22))
6540 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6541 else
6542 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6544 store_reg(s, rd, tmp);
6545 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6546 /* Select bytes. */
6547 tmp = load_reg(s, rn);
6548 tmp2 = load_reg(s, rm);
6549 tmp3 = new_tmp();
6550 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6551 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6552 dead_tmp(tmp3);
6553 dead_tmp(tmp2);
6554 store_reg(s, rd, tmp);
6555 } else if ((insn & 0x000003e0) == 0x00000060) {
6556 tmp = load_reg(s, rm);
6557 shift = (insn >> 10) & 3;
6558 /* ??? In many cases it's not necessary to do a
6559 rotate, a shift is sufficient. */
6560 if (shift != 0)
6561 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6562 op1 = (insn >> 20) & 7;
6563 switch (op1) {
6564 case 0: gen_sxtb16(tmp); break;
6565 case 2: gen_sxtb(tmp); break;
6566 case 3: gen_sxth(tmp); break;
6567 case 4: gen_uxtb16(tmp); break;
6568 case 6: gen_uxtb(tmp); break;
6569 case 7: gen_uxth(tmp); break;
6570 default: goto illegal_op;
6572 if (rn != 15) {
6573 tmp2 = load_reg(s, rn);
6574 if ((op1 & 3) == 0) {
6575 gen_add16(tmp, tmp2);
6576 } else {
6577 tcg_gen_add_i32(tmp, tmp, tmp2);
6578 dead_tmp(tmp2);
6581 store_reg(s, rd, tmp);
6582 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6583 /* rev */
6584 tmp = load_reg(s, rm);
6585 if (insn & (1 << 22)) {
6586 if (insn & (1 << 7)) {
6587 gen_revsh(tmp);
6588 } else {
6589 ARCH(6T2);
6590 gen_helper_rbit(tmp, tmp);
6592 } else {
6593 if (insn & (1 << 7))
6594 gen_rev16(tmp);
6595 else
6596 tcg_gen_bswap32_i32(tmp, tmp);
6598 store_reg(s, rd, tmp);
6599 } else {
6600 goto illegal_op;
6602 break;
6603 case 2: /* Multiplies (Type 3). */
6604 tmp = load_reg(s, rm);
6605 tmp2 = load_reg(s, rs);
6606 if (insn & (1 << 20)) {
6607 /* Signed multiply most significant [accumulate]. */
6608 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6609 if (insn & (1 << 5))
6610 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6611 tcg_gen_shri_i64(tmp64, tmp64, 32);
6612 tmp = new_tmp();
6613 tcg_gen_trunc_i64_i32(tmp, tmp64);
6614 if (rd != 15) {
6615 tmp2 = load_reg(s, rd);
6616 if (insn & (1 << 6)) {
6617 tcg_gen_sub_i32(tmp, tmp, tmp2);
6618 } else {
6619 tcg_gen_add_i32(tmp, tmp, tmp2);
6621 dead_tmp(tmp2);
6623 store_reg(s, rn, tmp);
6624 } else {
6625 if (insn & (1 << 5))
6626 gen_swap_half(tmp2);
6627 gen_smul_dual(tmp, tmp2);
6628 /* This addition cannot overflow. */
6629 if (insn & (1 << 6)) {
6630 tcg_gen_sub_i32(tmp, tmp, tmp2);
6631 } else {
6632 tcg_gen_add_i32(tmp, tmp, tmp2);
6634 dead_tmp(tmp2);
6635 if (insn & (1 << 22)) {
6636 /* smlald, smlsld */
6637 tmp64 = tcg_temp_new_i64();
6638 tcg_gen_ext_i32_i64(tmp64, tmp);
6639 dead_tmp(tmp);
6640 gen_addq(s, tmp64, rd, rn);
6641 gen_storeq_reg(s, rd, rn, tmp64);
6642 } else {
6643 /* smuad, smusd, smlad, smlsd */
6644 if (rd != 15)
6646 tmp2 = load_reg(s, rd);
6647 gen_helper_add_setq(tmp, tmp, tmp2);
6648 dead_tmp(tmp2);
6650 store_reg(s, rn, tmp);
6653 break;
6654 case 3:
6655 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6656 switch (op1) {
6657 case 0: /* Unsigned sum of absolute differences. */
6658 ARCH(6);
6659 tmp = load_reg(s, rm);
6660 tmp2 = load_reg(s, rs);
6661 gen_helper_usad8(tmp, tmp, tmp2);
6662 dead_tmp(tmp2);
6663 if (rd != 15) {
6664 tmp2 = load_reg(s, rd);
6665 tcg_gen_add_i32(tmp, tmp, tmp2);
6666 dead_tmp(tmp2);
6668 store_reg(s, rn, tmp);
6669 break;
6670 case 0x20: case 0x24: case 0x28: case 0x2c:
6671 /* Bitfield insert/clear. */
6672 ARCH(6T2);
6673 shift = (insn >> 7) & 0x1f;
6674 i = (insn >> 16) & 0x1f;
6675 i = i + 1 - shift;
6676 if (rm == 15) {
6677 tmp = new_tmp();
6678 tcg_gen_movi_i32(tmp, 0);
6679 } else {
6680 tmp = load_reg(s, rm);
6682 if (i != 32) {
6683 tmp2 = load_reg(s, rd);
6684 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6685 dead_tmp(tmp2);
6687 store_reg(s, rd, tmp);
6688 break;
6689 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6690 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6691 ARCH(6T2);
6692 tmp = load_reg(s, rm);
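/* shift (insn[11:7]) is the LSB of the field; i is its width in bits. */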
6693 shift = (insn >> 7) & 0x1f;
6694 i = ((insn >> 16) & 0x1f) + 1;
6695 if (shift + i > 32)
6696 goto illegal_op;
6697 if (i < 32) {
6698 if (op1 & 0x20) {
6699 gen_ubfx(tmp, shift, (1u << i) - 1);
6700 } else {
6701 gen_sbfx(tmp, shift, i);
6704 store_reg(s, rd, tmp);
6705 break;
6706 default:
6707 goto illegal_op;
6709 break;
6711 break;
6713 do_ldst:
6714 /* Check for undefined extension instructions
6715 * per the ARM Bible IE:
6716 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6718 sh = (0xf << 20) | (0xf << 4);
6719 if (op1 == 0x7 && ((insn & sh) == sh))
6721 goto illegal_op;
6723 /* load/store byte/word */
6724 rn = (insn >> 16) & 0xf;
6725 rd = (insn >> 12) & 0xf;
6726 tmp2 = load_reg(s, rn);
6727 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6728 if (insn & (1 << 24))
6729 gen_add_data_offset(s, insn, tmp2);
6730 if (insn & (1 << 20)) {
6731 /* load */
6732 if (insn & (1 << 22)) {
6733 tmp = gen_ld8u(tmp2, i);
6734 } else {
6735 tmp = gen_ld32(tmp2, i);
6737 } else {
6738 /* store */
6739 tmp = load_reg(s, rd);
6740 if (insn & (1 << 22))
6741 gen_st8(tmp, tmp2, i);
6742 else
6743 gen_st32(tmp, tmp2, i);
6745 if (!(insn & (1 << 24))) {
6746 gen_add_data_offset(s, insn, tmp2);
6747 store_reg(s, rn, tmp2);
6748 } else if (insn & (1 << 21)) {
6749 store_reg(s, rn, tmp2);
6750 } else {
6751 dead_tmp(tmp2);
6753 if (insn & (1 << 20)) {
6754 /* Complete the load. */
6755 if (rd == 15)
6756 gen_bx(s, tmp);
6757 else
6758 store_reg(s, rd, tmp);
6760 break;
6761 case 0x08:
6762 case 0x09:
6764 int j, n, user, loaded_base;
6765 TCGv loaded_var;
6766 /* load/store multiple words */
6767 /* XXX: store correct base if write back */
6768 user = 0;
6769 if (insn & (1 << 22)) {
6770 if (IS_USER(s))
6771 goto illegal_op; /* only usable in supervisor mode */
6773 if ((insn & (1 << 15)) == 0)
6774 user = 1;
6776 rn = (insn >> 16) & 0xf;
6777 addr = load_reg(s, rn);
6779 /* compute total size */
6780 loaded_base = 0;
6781 TCGV_UNUSED(loaded_var);
6782 n = 0;
6783 for(i=0;i<16;i++) {
6784 if (insn & (1 << i))
6785 n++;
6787 /* XXX: test invalid n == 0 case ? */
6788 if (insn & (1 << 23)) {
6789 if (insn & (1 << 24)) {
6790 /* pre increment */
6791 tcg_gen_addi_i32(addr, addr, 4);
6792 } else {
6793 /* post increment */
6795 } else {
6796 if (insn & (1 << 24)) {
6797 /* pre decrement */
6798 tcg_gen_addi_i32(addr, addr, -(n * 4));
6799 } else {
6800 /* post decrement */
6801 if (n != 1)
6802 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6805 j = 0;
6806 for(i=0;i<16;i++) {
6807 if (insn & (1 << i)) {
6808 if (insn & (1 << 20)) {
6809 /* load */
6810 tmp = gen_ld32(addr, IS_USER(s));
6811 if (i == 15) {
6812 gen_bx(s, tmp);
6813 } else if (user) {
6814 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6815 dead_tmp(tmp);
6816 } else if (i == rn) {
6817 loaded_var = tmp;
6818 loaded_base = 1;
6819 } else {
6820 store_reg(s, i, tmp);
6822 } else {
6823 /* store */
6824 if (i == 15) {
6825 /* special case: r15 = PC + 8 */
6826 val = (long)s->pc + 4;
6827 tmp = new_tmp();
6828 tcg_gen_movi_i32(tmp, val);
6829 } else if (user) {
6830 tmp = new_tmp();
6831 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6832 } else {
6833 tmp = load_reg(s, i);
6835 gen_st32(tmp, addr, IS_USER(s));
6837 j++;
6838 /* no need to add after the last transfer */
6839 if (j != n)
6840 tcg_gen_addi_i32(addr, addr, 4);
6843 if (insn & (1 << 21)) {
6844 /* write back */
6845 if (insn & (1 << 23)) {
6846 if (insn & (1 << 24)) {
6847 /* pre increment */
6848 } else {
6849 /* post increment */
6850 tcg_gen_addi_i32(addr, addr, 4);
6852 } else {
6853 if (insn & (1 << 24)) {
6854 /* pre decrement */
6855 if (n != 1)
6856 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6857 } else {
6858 /* post decrement */
6859 tcg_gen_addi_i32(addr, addr, -(n * 4));
6862 store_reg(s, rn, addr);
6863 } else {
6864 dead_tmp(addr);
6866 if (loaded_base) {
6867 store_reg(s, rn, loaded_var);
6869 if ((insn & (1 << 22)) && !user) {
6870 /* Restore CPSR from SPSR. */
6871 tmp = load_cpu_field(spsr);
6872 gen_set_cpsr(tmp, 0xffffffff);
6873 dead_tmp(tmp);
6874 s->is_jmp = DISAS_UPDATE;
6877 break;
6878 case 0xa:
6879 case 0xb:
6881 int32_t offset;
6883 /* branch (and link) */
6884 val = (int32_t)s->pc;
6885 if (insn & (1 << 24)) {
6886 tmp = new_tmp();
6887 tcg_gen_movi_i32(tmp, val);
6888 store_reg(s, 14, tmp);
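/* offset is the sign-extended 24-bit immediate; the branch target is s->pc + 4 + (offset << 2), i.e. this insn's address + 8 + (offset << 2). */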
6890 offset = (((int32_t)insn << 8) >> 8);
6891 val += (offset << 2) + 4;
6892 gen_jmp(s, val);
6894 break;
6895 case 0xc:
6896 case 0xd:
6897 case 0xe:
6898 /* Coprocessor. */
6899 if (disas_coproc_insn(env, s, insn))
6900 goto illegal_op;
6901 break;
6902 case 0xf:
6903 /* swi */
6904 gen_set_pc_im(s->pc);
6905 s->is_jmp = DISAS_SWI;
6906 break;
6907 default:
6908 illegal_op:
6909 gen_set_condexec(s);
6910 gen_set_pc_im(s->pc - 4);
6911 gen_exception(EXCP_UDEF);
6912 s->is_jmp = DISAS_JUMP;
6913 break;
6918 /* Return true if this is a Thumb-2 logical op. */
6919 static int
6920 thumb2_logic_op(int op)
6922 return (op < 8);
6925 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6926 then set condition code flags based on the result of the operation.
6927 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6928 to the high bit of T1.
6929 Returns zero if the opcode is valid. */
6931 static int
6932 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6934 int logic_cc;
6936 logic_cc = 0;
6937 switch (op) {
6938 case 0: /* and */
6939 gen_op_andl_T0_T1();
6940 logic_cc = conds;
6941 break;
6942 case 1: /* bic */
6943 gen_op_bicl_T0_T1();
6944 logic_cc = conds;
6945 break;
6946 case 2: /* orr */
6947 gen_op_orl_T0_T1();
6948 logic_cc = conds;
6949 break;
6950 case 3: /* orn */
6951 gen_op_notl_T1();
6952 gen_op_orl_T0_T1();
6953 logic_cc = conds;
6954 break;
6955 case 4: /* eor */
6956 gen_op_xorl_T0_T1();
6957 logic_cc = conds;
6958 break;
6959 case 8: /* add */
6960 if (conds)
6961 gen_op_addl_T0_T1_cc();
6962 else
6963 gen_op_addl_T0_T1();
6964 break;
6965 case 10: /* adc */
6966 if (conds)
6967 gen_op_adcl_T0_T1_cc();
6968 else
6969 gen_adc_T0_T1();
6970 break;
6971 case 11: /* sbc */
6972 if (conds)
6973 gen_op_sbcl_T0_T1_cc();
6974 else
6975 gen_sbc_T0_T1();
6976 break;
6977 case 13: /* sub */
6978 if (conds)
6979 gen_op_subl_T0_T1_cc();
6980 else
6981 gen_op_subl_T0_T1();
6982 break;
6983 case 14: /* rsb */
6984 if (conds)
6985 gen_op_rsbl_T0_T1_cc();
6986 else
6987 gen_op_rsbl_T0_T1();
6988 break;
6989 default: /* 5, 6, 7, 9, 12, 15. */
6990 return 1;
6992 if (logic_cc) {
6993 gen_op_logic_T0_cc();
6994 if (shifter_out)
6995 gen_set_CF_bit31(cpu_T[1]);
6997 return 0;
7000 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7001 is not legal. */
7002 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7004 uint32_t insn, imm, shift, offset;
7005 uint32_t rd, rn, rm, rs;
7006 TCGv tmp;
7007 TCGv tmp2;
7008 TCGv tmp3;
7009 TCGv addr;
7010 TCGv_i64 tmp64;
7011 int op;
7012 int shiftop;
7013 int conds;
7014 int logic_cc;
7016 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7017 || arm_feature (env, ARM_FEATURE_M))) {
7018 /* Thumb-1 cores may need to treat bl and blx as a pair of
7019 16-bit instructions to get correct prefetch abort behavior. */
7020 insn = insn_hw1;
7021 if ((insn & (1 << 12)) == 0) {
7022 /* Second half of blx. */
7023 offset = ((insn & 0x7ff) << 1);
7024 tmp = load_reg(s, 14);
7025 tcg_gen_addi_i32(tmp, tmp, offset);
7026 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7028 tmp2 = new_tmp();
7029 tcg_gen_movi_i32(tmp2, s->pc | 1);
7030 store_reg(s, 14, tmp2);
7031 gen_bx(s, tmp);
7032 return 0;
7034 if (insn & (1 << 11)) {
7035 /* Second half of bl. */
7036 offset = ((insn & 0x7ff) << 1) | 1;
7037 tmp = load_reg(s, 14);
7038 tcg_gen_addi_i32(tmp, tmp, offset);
7040 tmp2 = new_tmp();
7041 tcg_gen_movi_i32(tmp2, s->pc | 1);
7042 store_reg(s, 14, tmp2);
7043 gen_bx(s, tmp);
7044 return 0;
7046 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7047 /* Instruction spans a page boundary. Implement it as two
7048 16-bit instructions in case the second half causes a
7049 prefetch abort. */
7050 offset = ((int32_t)insn << 21) >> 9;
7051 gen_op_movl_T0_im(s->pc + 2 + offset);
7052 gen_movl_reg_T0(s, 14);
7053 return 0;
7055 /* Fall through to 32-bit decode. */
7058 insn = lduw_code(s->pc);
7059 s->pc += 2;
7060 insn |= (uint32_t)insn_hw1 << 16;
7062 if ((insn & 0xf800e800) != 0xf000e800) {
7063 ARCH(6T2);
7066 rn = (insn >> 16) & 0xf;
7067 rs = (insn >> 12) & 0xf;
7068 rd = (insn >> 8) & 0xf;
7069 rm = insn & 0xf;
7070 switch ((insn >> 25) & 0xf) {
7071 case 0: case 1: case 2: case 3:
7072 /* 16-bit instructions. Should never happen. */
7073 abort();
7074 case 4:
7075 if (insn & (1 << 22)) {
7076 /* Other load/store, table branch. */
7077 if (insn & 0x01200000) {
7078 /* Load/store doubleword. */
7079 if (rn == 15) {
7080 addr = new_tmp();
7081 tcg_gen_movi_i32(addr, s->pc & ~3);
7082 } else {
7083 addr = load_reg(s, rn);
7085 offset = (insn & 0xff) * 4;
7086 if ((insn & (1 << 23)) == 0)
7087 offset = -offset;
7088 if (insn & (1 << 24)) {
7089 tcg_gen_addi_i32(addr, addr, offset);
7090 offset = 0;
7092 if (insn & (1 << 20)) {
7093 /* ldrd */
7094 tmp = gen_ld32(addr, IS_USER(s));
7095 store_reg(s, rs, tmp);
7096 tcg_gen_addi_i32(addr, addr, 4);
7097 tmp = gen_ld32(addr, IS_USER(s));
7098 store_reg(s, rd, tmp);
7099 } else {
7100 /* strd */
7101 tmp = load_reg(s, rs);
7102 gen_st32(tmp, addr, IS_USER(s));
7103 tcg_gen_addi_i32(addr, addr, 4);
7104 tmp = load_reg(s, rd);
7105 gen_st32(tmp, addr, IS_USER(s));
7107 if (insn & (1 << 21)) {
7108 /* Base writeback. */
7109 if (rn == 15)
7110 goto illegal_op;
7111 tcg_gen_addi_i32(addr, addr, offset - 4);
7112 store_reg(s, rn, addr);
7113 } else {
7114 dead_tmp(addr);
7116 } else if ((insn & (1 << 23)) == 0) {
7117 /* Load/store exclusive word. */
7118 gen_movl_T1_reg(s, rn);
7119 addr = cpu_T[1];
7120 if (insn & (1 << 20)) {
7121 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7122 tmp = gen_ld32(addr, IS_USER(s));
7123 store_reg(s, rd, tmp);
7124 } else {
7125 int label = gen_new_label();
7126 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7127 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7128 0, label);
7129 tmp = load_reg(s, rs);
7130 gen_st32(tmp, cpu_T[1], IS_USER(s));
7131 gen_set_label(label);
7132 gen_movl_reg_T0(s, rd);
7134 } else if ((insn & (1 << 6)) == 0) {
7135 /* Table Branch. */
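/* tbb/tbh: load a byte (tbb) or halfword (tbh) entry from the table at Rn + Rm (Rn + 2*Rm for tbh), double it and add it to the PC. */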
7136 if (rn == 15) {
7137 addr = new_tmp();
7138 tcg_gen_movi_i32(addr, s->pc);
7139 } else {
7140 addr = load_reg(s, rn);
7142 tmp = load_reg(s, rm);
7143 tcg_gen_add_i32(addr, addr, tmp);
7144 if (insn & (1 << 4)) {
7145 /* tbh */
7146 tcg_gen_add_i32(addr, addr, tmp);
7147 dead_tmp(tmp);
7148 tmp = gen_ld16u(addr, IS_USER(s));
7149 } else { /* tbb */
7150 dead_tmp(tmp);
7151 tmp = gen_ld8u(addr, IS_USER(s));
7153 dead_tmp(addr);
7154 tcg_gen_shli_i32(tmp, tmp, 1);
7155 tcg_gen_addi_i32(tmp, tmp, s->pc);
7156 store_reg(s, 15, tmp);
7157 } else {
7158 /* Load/store exclusive byte/halfword/doubleword. */
7159 /* ??? These are not really atomic. However, we know
7160 we never have multiple CPUs running in parallel,
7161 so it is good enough. */
7162 op = (insn >> 4) & 0x3;
7163 /* Must use a global reg for the address because we have
7164 a conditional branch in the store instruction. */
7165 gen_movl_T1_reg(s, rn);
7166 addr = cpu_T[1];
7167 if (insn & (1 << 20)) {
7168 gen_helper_mark_exclusive(cpu_env, addr);
7169 switch (op) {
7170 case 0:
7171 tmp = gen_ld8u(addr, IS_USER(s));
7172 break;
7173 case 1:
7174 tmp = gen_ld16u(addr, IS_USER(s));
7175 break;
7176 case 3:
7177 tmp = gen_ld32(addr, IS_USER(s));
7178 tcg_gen_addi_i32(addr, addr, 4);
7179 tmp2 = gen_ld32(addr, IS_USER(s));
7180 store_reg(s, rd, tmp2);
7181 break;
7182 default:
7183 goto illegal_op;
7185 store_reg(s, rs, tmp);
7186 } else {
7187 int label = gen_new_label();
7188 /* Must use a global that is not killed by the branch. */
7189 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7190 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7191 tmp = load_reg(s, rs);
7192 switch (op) {
7193 case 0:
7194 gen_st8(tmp, addr, IS_USER(s));
7195 break;
7196 case 1:
7197 gen_st16(tmp, addr, IS_USER(s));
7198 break;
7199 case 3:
7200 gen_st32(tmp, addr, IS_USER(s));
7201 tcg_gen_addi_i32(addr, addr, 4);
7202 tmp = load_reg(s, rd);
7203 gen_st32(tmp, addr, IS_USER(s));
7204 break;
7205 default:
7206 goto illegal_op;
7208 gen_set_label(label);
7209 gen_movl_reg_T0(s, rm);
7212 } else {
7213 /* Load/store multiple, RFE, SRS. */
7214 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7215 /* Not available in user mode. */
7216 if (IS_USER(s))
7217 goto illegal_op;
7218 if (insn & (1 << 20)) {
7219 /* rfe */
7220 addr = load_reg(s, rn);
7221 if ((insn & (1 << 24)) == 0)
7222 tcg_gen_addi_i32(addr, addr, -8);
7223 /* Load PC into tmp and CPSR into tmp2. */
7224 tmp = gen_ld32(addr, 0);
7225 tcg_gen_addi_i32(addr, addr, 4);
7226 tmp2 = gen_ld32(addr, 0);
7227 if (insn & (1 << 21)) {
7228 /* Base writeback. */
7229 if (insn & (1 << 24)) {
7230 tcg_gen_addi_i32(addr, addr, 4);
7231 } else {
7232 tcg_gen_addi_i32(addr, addr, -4);
7234 store_reg(s, rn, addr);
7235 } else {
7236 dead_tmp(addr);
7238 gen_rfe(s, tmp, tmp2);
7239 } else {
7240 /* srs */
7241 op = (insn & 0x1f);
7242 if (op == (env->uncached_cpsr & CPSR_M)) {
7243 addr = load_reg(s, 13);
7244 } else {
7245 addr = new_tmp();
7246 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7248 if ((insn & (1 << 24)) == 0) {
7249 tcg_gen_addi_i32(addr, addr, -8);
7251 tmp = load_reg(s, 14);
7252 gen_st32(tmp, addr, 0);
7253 tcg_gen_addi_i32(addr, addr, 4);
7254 tmp = new_tmp();
7255 gen_helper_cpsr_read(tmp);
7256 gen_st32(tmp, addr, 0);
7257 if (insn & (1 << 21)) {
7258 if ((insn & (1 << 24)) == 0) {
7259 tcg_gen_addi_i32(addr, addr, -4);
7260 } else {
7261 tcg_gen_addi_i32(addr, addr, 4);
7263 if (op == (env->uncached_cpsr & CPSR_M)) {
7264 store_reg(s, 13, addr);
7265 } else {
7266 gen_helper_set_r13_banked(cpu_env,
7267 tcg_const_i32(op), addr);
7269 } else {
7270 dead_tmp(addr);
7273 } else {
7274 int i;
7275 /* Load/store multiple. */
7276 addr = load_reg(s, rn);
7277 offset = 0;
7278 for (i = 0; i < 16; i++) {
7279 if (insn & (1 << i))
7280 offset += 4;
7282 if (insn & (1 << 24)) {
7283 tcg_gen_addi_i32(addr, addr, -offset);
7286 for (i = 0; i < 16; i++) {
7287 if ((insn & (1 << i)) == 0)
7288 continue;
7289 if (insn & (1 << 20)) {
7290 /* Load. */
7291 tmp = gen_ld32(addr, IS_USER(s));
7292 if (i == 15) {
7293 gen_bx(s, tmp);
7294 } else {
7295 store_reg(s, i, tmp);
7297 } else {
7298 /* Store. */
7299 tmp = load_reg(s, i);
7300 gen_st32(tmp, addr, IS_USER(s));
7302 tcg_gen_addi_i32(addr, addr, 4);
7304 if (insn & (1 << 21)) {
7305 /* Base register writeback. */
7306 if (insn & (1 << 24)) {
7307 tcg_gen_addi_i32(addr, addr, -offset);
7309 /* Fault if writeback register is in register list. */
7310 if (insn & (1 << rn))
7311 goto illegal_op;
7312 store_reg(s, rn, addr);
7313 } else {
7314 dead_tmp(addr);
7318 break;
7319 case 5: /* Data processing register constant shift. */
7320 if (rn == 15)
7321 gen_op_movl_T0_im(0);
7322 else
7323 gen_movl_T0_reg(s, rn);
7324 gen_movl_T1_reg(s, rm);
7325 op = (insn >> 21) & 0xf;
7326 shiftop = (insn >> 4) & 3;
7327 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7328 conds = (insn & (1 << 20)) != 0;
7329 logic_cc = (conds && thumb2_logic_op(op));
7330 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7331 if (gen_thumb2_data_op(s, op, conds, 0))
7332 goto illegal_op;
7333 if (rd != 15)
7334 gen_movl_reg_T0(s, rd);
7335 break;
7336 case 13: /* Misc data processing. */
7337 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7338 if (op < 4 && (insn & 0xf000) != 0xf000)
7339 goto illegal_op;
7340 switch (op) {
7341 case 0: /* Register controlled shift. */
7342 tmp = load_reg(s, rn);
7343 tmp2 = load_reg(s, rm);
7344 if ((insn & 0x70) != 0)
7345 goto illegal_op;
7346 op = (insn >> 21) & 3;
7347 logic_cc = (insn & (1 << 20)) != 0;
7348 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7349 if (logic_cc)
7350 gen_logic_CC(tmp);
7351 store_reg_bx(env, s, rd, tmp);
7352 break;
7353 case 1: /* Sign/zero extend. */
7354 tmp = load_reg(s, rm);
7355 shift = (insn >> 4) & 3;
7356 /* ??? In many cases it's not necessary to do a
7357 rotate, a shift is sufficient. */
7358 if (shift != 0)
7359 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7360 op = (insn >> 20) & 7;
7361 switch (op) {
7362 case 0: gen_sxth(tmp); break;
7363 case 1: gen_uxth(tmp); break;
7364 case 2: gen_sxtb16(tmp); break;
7365 case 3: gen_uxtb16(tmp); break;
7366 case 4: gen_sxtb(tmp); break;
7367 case 5: gen_uxtb(tmp); break;
7368 default: goto illegal_op;
7370 if (rn != 15) {
7371 tmp2 = load_reg(s, rn);
7372 if ((op >> 1) == 1) {
7373 gen_add16(tmp, tmp2);
7374 } else {
7375 tcg_gen_add_i32(tmp, tmp, tmp2);
7376 dead_tmp(tmp2);
7379 store_reg(s, rd, tmp);
7380 break;
7381 case 2: /* SIMD add/subtract. */
7382 op = (insn >> 20) & 7;
7383 shift = (insn >> 4) & 7;
7384 if ((op & 3) == 3 || (shift & 3) == 3)
7385 goto illegal_op;
7386 tmp = load_reg(s, rn);
7387 tmp2 = load_reg(s, rm);
7388 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7389 dead_tmp(tmp2);
7390 store_reg(s, rd, tmp);
7391 break;
7392 case 3: /* Other data processing. */
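/* op packs insn[22:20] into bits [5:3] and insn[6:4] into bits [2:0]. */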
7393 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7394 if (op < 4) {
7395 /* Saturating add/subtract. */
7396 tmp = load_reg(s, rn);
7397 tmp2 = load_reg(s, rm);
7398 if (op & 2)
7399 gen_helper_double_saturate(tmp, tmp);
7400 if (op & 1)
7401 gen_helper_sub_saturate(tmp, tmp2, tmp);
7402 else
7403 gen_helper_add_saturate(tmp, tmp, tmp2);
7404 dead_tmp(tmp2);
7405 } else {
7406 tmp = load_reg(s, rn);
7407 switch (op) {
7408 case 0x0a: /* rbit */
7409 gen_helper_rbit(tmp, tmp);
7410 break;
7411 case 0x08: /* rev */
7412 tcg_gen_bswap32_i32(tmp, tmp);
7413 break;
7414 case 0x09: /* rev16 */
7415 gen_rev16(tmp);
7416 break;
7417 case 0x0b: /* revsh */
7418 gen_revsh(tmp);
7419 break;
7420 case 0x10: /* sel */
7421 tmp2 = load_reg(s, rm);
7422 tmp3 = new_tmp();
7423 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7424 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7425 dead_tmp(tmp3);
7426 dead_tmp(tmp2);
7427 break;
7428 case 0x18: /* clz */
7429 gen_helper_clz(tmp, tmp);
7430 break;
7431 default:
7432 goto illegal_op;
7435 store_reg(s, rd, tmp);
7436 break;
7437 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7438 op = (insn >> 4) & 0xf;
7439 tmp = load_reg(s, rn);
7440 tmp2 = load_reg(s, rm);
7441 switch ((insn >> 20) & 7) {
7442 case 0: /* 32 x 32 -> 32 */
7443 tcg_gen_mul_i32(tmp, tmp, tmp2);
7444 dead_tmp(tmp2);
7445 if (rs != 15) {
7446 tmp2 = load_reg(s, rs);
7447 if (op)
7448 tcg_gen_sub_i32(tmp, tmp2, tmp);
7449 else
7450 tcg_gen_add_i32(tmp, tmp, tmp2);
7451 dead_tmp(tmp2);
7453 break;
7454 case 1: /* 16 x 16 -> 32 */
7455 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7456 dead_tmp(tmp2);
7457 if (rs != 15) {
7458 tmp2 = load_reg(s, rs);
7459 gen_helper_add_setq(tmp, tmp, tmp2);
7460 dead_tmp(tmp2);
7462 break;
7463 case 2: /* Dual multiply add. */
7464 case 4: /* Dual multiply subtract. */
7465 if (op)
7466 gen_swap_half(tmp2);
7467 gen_smul_dual(tmp, tmp2);
7468 /* This addition cannot overflow. */
7469 if (insn & (1 << 22)) {
7470 tcg_gen_sub_i32(tmp, tmp, tmp2);
7471 } else {
7472 tcg_gen_add_i32(tmp, tmp, tmp2);
7474 dead_tmp(tmp2);
7475 if (rs != 15)
7477 tmp2 = load_reg(s, rs);
7478 gen_helper_add_setq(tmp, tmp, tmp2);
7479 dead_tmp(tmp2);
7481 break;
7482 case 3: /* 32 * 16 -> 32msb */
7483 if (op)
7484 tcg_gen_sari_i32(tmp2, tmp2, 16);
7485 else
7486 gen_sxth(tmp2);
7487 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7488 tcg_gen_shri_i64(tmp64, tmp64, 16);
7489 tmp = new_tmp();
7490 tcg_gen_trunc_i64_i32(tmp, tmp64);
7491 if (rs != 15)
7493 tmp2 = load_reg(s, rs);
7494 gen_helper_add_setq(tmp, tmp, tmp2);
7495 dead_tmp(tmp2);
7497 break;
7498 case 5: case 6: /* 32 * 32 -> 32msb */
7499 gen_imull(tmp, tmp2);
7500 if (insn & (1 << 5)) {
7501 gen_roundqd(tmp, tmp2);
7502 dead_tmp(tmp2);
7503 } else {
7504 dead_tmp(tmp);
7505 tmp = tmp2;
7507 if (rs != 15) {
7508 tmp2 = load_reg(s, rs);
7509 if (insn & (1 << 21)) {
7510 tcg_gen_add_i32(tmp, tmp, tmp2);
7511 } else {
7512 tcg_gen_sub_i32(tmp, tmp2, tmp);
7514 dead_tmp(tmp2);
7516 break;
7517 case 7: /* Unsigned sum of absolute differences. */
7518 gen_helper_usad8(tmp, tmp, tmp2);
7519 dead_tmp(tmp2);
7520 if (rs != 15) {
7521 tmp2 = load_reg(s, rs);
7522 tcg_gen_add_i32(tmp, tmp, tmp2);
7523 dead_tmp(tmp2);
7525 break;
7527 store_reg(s, rd, tmp);
7528 break;
7529 case 6: case 7: /* 64-bit multiply, Divide. */
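/* op packs insn[22:20] into bits [6:4] and insn[7:4] into bits [3:0]. */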
7530 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7531 tmp = load_reg(s, rn);
7532 tmp2 = load_reg(s, rm);
7533 if ((op & 0x50) == 0x10) {
7534 /* sdiv, udiv */
7535 if (!arm_feature(env, ARM_FEATURE_DIV))
7536 goto illegal_op;
7537 if (op & 0x20)
7538 gen_helper_udiv(tmp, tmp, tmp2);
7539 else
7540 gen_helper_sdiv(tmp, tmp, tmp2);
7541 dead_tmp(tmp2);
7542 store_reg(s, rd, tmp);
7543 } else if ((op & 0xe) == 0xc) {
7544 /* Dual multiply accumulate long. */
7545 if (op & 1)
7546 gen_swap_half(tmp2);
7547 gen_smul_dual(tmp, tmp2);
7548 if (op & 0x10) {
7549 tcg_gen_sub_i32(tmp, tmp, tmp2);
7550 } else {
7551 tcg_gen_add_i32(tmp, tmp, tmp2);
7553 dead_tmp(tmp2);
7554 /* BUGFIX */
7555 tmp64 = tcg_temp_new_i64();
7556 tcg_gen_ext_i32_i64(tmp64, tmp);
7557 dead_tmp(tmp);
7558 gen_addq(s, tmp64, rs, rd);
7559 gen_storeq_reg(s, rs, rd, tmp64);
7560 } else {
7561 if (op & 0x20) {
7562 /* Unsigned 64-bit multiply */
7563 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7564 } else {
7565 if (op & 8) {
7566 /* smlalxy */
7567 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7568 dead_tmp(tmp2);
7569 tmp64 = tcg_temp_new_i64();
7570 tcg_gen_ext_i32_i64(tmp64, tmp);
7571 dead_tmp(tmp);
7572 } else {
7573 /* Signed 64-bit multiply */
7574 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7577 if (op & 4) {
7578 /* umaal */
7579 gen_addq_lo(s, tmp64, rs);
7580 gen_addq_lo(s, tmp64, rd);
7581 } else if (op & 0x40) {
7582 /* 64-bit accumulate. */
7583 gen_addq(s, tmp64, rs, rd);
7585 gen_storeq_reg(s, rs, rd, tmp64);
7587 break;
7589 break;
7590 case 6: case 7: case 14: case 15:
7591 /* Coprocessor. */
7592 if (((insn >> 24) & 3) == 3) {
7593 /* Translate into the equivalent ARM encoding. */
7594 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7595 if (disas_neon_data_insn(env, s, insn))
7596 goto illegal_op;
7597 } else {
7598 if (insn & (1 << 28))
7599 goto illegal_op;
7600 if (disas_coproc_insn (env, s, insn))
7601 goto illegal_op;
7603 break;
7604 case 8: case 9: case 10: case 11:
7605 if (insn & (1 << 15)) {
7606 /* Branches, misc control. */
7607 if (insn & 0x5000) {
7608 /* Unconditional branch. */
7609 /* signextend(hw1[10:0]) -> offset[:12]. */
7610 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7611 /* hw1[10:0] -> offset[11:1]. */
7612 offset |= (insn & 0x7ff) << 1;
7613 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7614 offset[24:22] already have the same value because of the
7615 sign extension above. */
7616 offset ^= ((~insn) & (1 << 13)) << 10;
7617 offset ^= ((~insn) & (1 << 11)) << 11;
7619 if (insn & (1 << 14)) {
7620 /* Branch and link. */
7621 gen_op_movl_T1_im(s->pc | 1);
7622 gen_movl_reg_T1(s, 14);
7625 offset += s->pc;
7626 if (insn & (1 << 12)) {
7627 /* b/bl */
7628 gen_jmp(s, offset);
7629 } else {
7630 /* blx */
7631 offset &= ~(uint32_t)2;
7632 gen_bx_im(s, offset);
7634 } else if (((insn >> 23) & 7) == 7) {
7635 /* Misc control */
7636 if (insn & (1 << 13))
7637 goto illegal_op;
7639 if (insn & (1 << 26)) {
7640 /* Secure monitor call (v6Z) */
7641 goto illegal_op; /* not implemented. */
7642 } else {
7643 op = (insn >> 20) & 7;
7644 switch (op) {
7645 case 0: /* msr cpsr. */
7646 if (IS_M(env)) {
7647 tmp = load_reg(s, rn);
7648 addr = tcg_const_i32(insn & 0xff);
7649 gen_helper_v7m_msr(cpu_env, addr, tmp);
7650 gen_lookup_tb(s);
7651 break;
7653 /* fall through */
7654 case 1: /* msr spsr. */
7655 if (IS_M(env))
7656 goto illegal_op;
7657 tmp = load_reg(s, rn);
7658 if (gen_set_psr(s,
7659 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7660 op == 1, tmp))
7661 goto illegal_op;
7662 break;
7663 case 2: /* cps, nop-hint. */
7664 if (((insn >> 8) & 7) == 0) {
7665 gen_nop_hint(s, insn & 0xff);
7667 /* Implemented as NOP in user mode. */
7668 if (IS_USER(s))
7669 break;
7670 offset = 0;
7671 imm = 0;
7672 if (insn & (1 << 10)) {
7673 if (insn & (1 << 7))
7674 offset |= CPSR_A;
7675 if (insn & (1 << 6))
7676 offset |= CPSR_I;
7677 if (insn & (1 << 5))
7678 offset |= CPSR_F;
7679 if (insn & (1 << 9))
7680 imm = CPSR_A | CPSR_I | CPSR_F;
7682 if (insn & (1 << 8)) {
7683 offset |= 0x1f;
7684 imm |= (insn & 0x1f);
7686 if (offset) {
7687 gen_set_psr_im(s, offset, 0, imm);
7689 break;
7690 case 3: /* Special control operations. */
7691 op = (insn >> 4) & 0xf;
7692 switch (op) {
7693 case 2: /* clrex */
7694 gen_helper_clrex(cpu_env);
7695 break;
7696 case 4: /* dsb */
7697 case 5: /* dmb */
7698 case 6: /* isb */
7699 /* These execute as NOPs. */
7700 ARCH(7);
7701 break;
7702 default:
7703 goto illegal_op;
7705 break;
7706 case 4: /* bxj */
7707 /* Trivial implementation equivalent to bx. */
7708 tmp = load_reg(s, rn);
7709 gen_bx(s, tmp);
7710 break;
7711 case 5: /* Exception return. */
7712 /* Unpredictable in user mode. */
7713 goto illegal_op;
7714 case 6: /* mrs cpsr. */
7715 tmp = new_tmp();
7716 if (IS_M(env)) {
7717 addr = tcg_const_i32(insn & 0xff);
7718 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7719 } else {
7720 gen_helper_cpsr_read(tmp);
7722 store_reg(s, rd, tmp);
7723 break;
7724 case 7: /* mrs spsr. */
7725 /* Not accessible in user mode. */
7726 if (IS_USER(s) || IS_M(env))
7727 goto illegal_op;
7728 tmp = load_cpu_field(spsr);
7729 store_reg(s, rd, tmp);
7730 break;
7733 } else {
7734 /* Conditional branch. */
7735 op = (insn >> 22) & 0xf;
7737 /* Generate a conditional jump to the next instruction. */
7737 s->condlabel = gen_new_label();
7738 gen_test_cc(op ^ 1, s->condlabel);
7739 s->condjmp = 1;
7741 /* offset[11:1] = insn[10:0] */
7742 offset = (insn & 0x7ff) << 1;
7743 /* offset[17:12] = insn[21:16]. */
7744 offset |= (insn & 0x003f0000) >> 4;
7745 /* offset[31:20] = insn[26]. */
7746 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7747 /* offset[18] = insn[13]. */
7748 offset |= (insn & (1 << 13)) << 5;
7749 /* offset[19] = insn[11]. */
7750 offset |= (insn & (1 << 11)) << 8;
7752 /* jump to the offset */
7753 gen_jmp(s, s->pc + offset);
7755 } else {
7756 /* Data processing immediate. */
7757 if (insn & (1 << 25)) {
7758 if (insn & (1 << 24)) {
7759 if (insn & (1 << 20))
7760 goto illegal_op;
7761 /* Bitfield/Saturate. */
7762 op = (insn >> 21) & 7;
7763 imm = insn & 0x1f;
7764 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7765 if (rn == 15) {
7766 tmp = new_tmp();
7767 tcg_gen_movi_i32(tmp, 0);
7768 } else {
7769 tmp = load_reg(s, rn);
7771 switch (op) {
7772 case 2: /* Signed bitfield extract. */
7773 imm++;
7774 if (shift + imm > 32)
7775 goto illegal_op;
7776 if (imm < 32)
7777 gen_sbfx(tmp, shift, imm);
7778 break;
7779 case 6: /* Unsigned bitfield extract. */
7780 imm++;
7781 if (shift + imm > 32)
7782 goto illegal_op;
7783 if (imm < 32)
7784 gen_ubfx(tmp, shift, (1u << imm) - 1);
7785 break;
7786 case 3: /* Bitfield insert/clear. */
7787 if (imm < shift)
7788 goto illegal_op;
7789 imm = imm + 1 - shift;
7790 if (imm != 32) {
7791 tmp2 = load_reg(s, rd);
7792 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7793 dead_tmp(tmp2);
7795 break;
7796 case 7:
7797 goto illegal_op;
7798 default: /* Saturate. */
7799 if (shift) {
7800 if (op & 1)
7801 tcg_gen_sari_i32(tmp, tmp, shift);
7802 else
7803 tcg_gen_shli_i32(tmp, tmp, shift);
7805 tmp2 = tcg_const_i32(imm);
7806 if (op & 4) {
7807 /* Unsigned. */
7808 if ((op & 1) && shift == 0)
7809 gen_helper_usat16(tmp, tmp, tmp2);
7810 else
7811 gen_helper_usat(tmp, tmp, tmp2);
7812 } else {
7813 /* Signed. */
7814 if ((op & 1) && shift == 0)
7815 gen_helper_ssat16(tmp, tmp, tmp2);
7816 else
7817 gen_helper_ssat(tmp, tmp, tmp2);
7819 break;
7821 store_reg(s, rd, tmp);
7822 } else {
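/* imm is built from i:imm3:imm8 (insn bits 26, 14:12 and 7:0). */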
7823 imm = ((insn & 0x04000000) >> 15)
7824 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7825 if (insn & (1 << 22)) {
7826 /* 16-bit immediate. */
7827 imm |= (insn >> 4) & 0xf000;
7828 if (insn & (1 << 23)) {
7829 /* movt */
7830 tmp = load_reg(s, rd);
7831 tcg_gen_ext16u_i32(tmp, tmp);
7832 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7833 } else {
7834 /* movw */
7835 tmp = new_tmp();
7836 tcg_gen_movi_i32(tmp, imm);
7838 } else {
7839 /* Add/sub 12-bit immediate. */
7840 if (rn == 15) {
7841 offset = s->pc & ~(uint32_t)3;
7842 if (insn & (1 << 23))
7843 offset -= imm;
7844 else
7845 offset += imm;
7846 tmp = new_tmp();
7847 tcg_gen_movi_i32(tmp, offset);
7848 } else {
7849 tmp = load_reg(s, rn);
7850 if (insn & (1 << 23))
7851 tcg_gen_subi_i32(tmp, tmp, imm);
7852 else
7853 tcg_gen_addi_i32(tmp, tmp, imm);
7856 store_reg(s, rd, tmp);
7858 } else {
7859 int shifter_out = 0;
7860 /* modified 12-bit immediate. */
7861 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7862 imm = (insn & 0xff);
7863 switch (shift) {
7864 case 0: /* XY */
7865 /* Nothing to do. */
7866 break;
7867 case 1: /* 00XY00XY */
7868 imm |= imm << 16;
7869 break;
7870 case 2: /* XY00XY00 */
7871 imm |= imm << 16;
7872 imm <<= 8;
7873 break;
7874 case 3: /* XYXYXYXY */
7875 imm |= imm << 16;
7876 imm |= imm << 8;
7877 break;
7878 default: /* Rotated constant. */
7879 shift = (shift << 1) | (imm >> 7);
7880 imm |= 0x80;
7881 imm = imm << (32 - shift);
7882 shifter_out = 1;
7883 break;
7885 gen_op_movl_T1_im(imm);
7886 rn = (insn >> 16) & 0xf;
7887 if (rn == 15)
7888 gen_op_movl_T0_im(0);
7889 else
7890 gen_movl_T0_reg(s, rn);
7891 op = (insn >> 21) & 0xf;
7892 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7893 shifter_out))
7894 goto illegal_op;
7895 rd = (insn >> 8) & 0xf;
7896 if (rd != 15) {
7897 gen_movl_reg_T0(s, rd);
7901 break;
7902 case 12: /* Load/store single data item. */
7904 int postinc = 0;
7905 int writeback = 0;
7906 int user;
7907 if ((insn & 0x01100000) == 0x01000000) {
7908 if (disas_neon_ls_insn(env, s, insn))
7909 goto illegal_op;
7910 break;
7912 user = IS_USER(s);
7913 if (rn == 15) {
7914 addr = new_tmp();
7915 /* PC relative. */
7916 /* s->pc has already been incremented by 4. */
7917 imm = s->pc & 0xfffffffc;
7918 if (insn & (1 << 23))
7919 imm += insn & 0xfff;
7920 else
7921 imm -= insn & 0xfff;
7922 tcg_gen_movi_i32(addr, imm);
7923 } else {
7924 addr = load_reg(s, rn);
7925 if (insn & (1 << 23)) {
7926 /* Positive offset. */
7927 imm = insn & 0xfff;
7928 tcg_gen_addi_i32(addr, addr, imm);
7929 } else {
7930 op = (insn >> 8) & 7;
7931 imm = insn & 0xff;
7932 switch (op) {
7933 case 0: case 8: /* Shifted Register. */
7934 shift = (insn >> 4) & 0xf;
7935 if (shift > 3)
7936 goto illegal_op;
7937 tmp = load_reg(s, rm);
7938 if (shift)
7939 tcg_gen_shli_i32(tmp, tmp, shift);
7940 tcg_gen_add_i32(addr, addr, tmp);
7941 dead_tmp(tmp);
7942 break;
7943 case 4: /* Negative offset. */
7944 tcg_gen_addi_i32(addr, addr, -imm);
7945 break;
7946 case 6: /* User privilege. */
7947 tcg_gen_addi_i32(addr, addr, imm);
7948 user = 1;
7949 break;
7950 case 1: /* Post-decrement. */
7951 imm = -imm;
7952 /* Fall through. */
7953 case 3: /* Post-increment. */
7954 postinc = 1;
7955 writeback = 1;
7956 break;
7957 case 5: /* Pre-decrement. */
7958 imm = -imm;
7959 /* Fall through. */
7960 case 7: /* Pre-increment. */
7961 tcg_gen_addi_i32(addr, addr, imm);
7962 writeback = 1;
7963 break;
7964 default:
7965 goto illegal_op;
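/* op[1:0] is the access size (insn[22:21]); op[2] (insn[24]) selects sign extension on loads. */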
7969 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7970 if (insn & (1 << 20)) {
7971 /* Load. */
7972 if (rs == 15 && op != 2) {
7973 if (op & 2)
7974 goto illegal_op;
7975 /* Memory hint. Implemented as NOP. */
7976 } else {
7977 switch (op) {
7978 case 0: tmp = gen_ld8u(addr, user); break;
7979 case 4: tmp = gen_ld8s(addr, user); break;
7980 case 1: tmp = gen_ld16u(addr, user); break;
7981 case 5: tmp = gen_ld16s(addr, user); break;
7982 case 2: tmp = gen_ld32(addr, user); break;
7983 default: goto illegal_op;
7985 if (rs == 15) {
7986 gen_bx(s, tmp);
7987 } else {
7988 store_reg(s, rs, tmp);
7991 } else {
7992 /* Store. */
7993 if (rs == 15)
7994 goto illegal_op;
7995 tmp = load_reg(s, rs);
7996 switch (op) {
7997 case 0: gen_st8(tmp, addr, user); break;
7998 case 1: gen_st16(tmp, addr, user); break;
7999 case 2: gen_st32(tmp, addr, user); break;
8000 default: goto illegal_op;
8003 if (postinc)
8004 tcg_gen_addi_i32(addr, addr, imm);
8005 if (writeback) {
8006 store_reg(s, rn, addr);
8007 } else {
8008 dead_tmp(addr);
8011 break;
8012 default:
8013 goto illegal_op;
8015 return 0;
8016 illegal_op:
8017 return 1;
8020 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8022 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8023 int32_t offset;
8024 int i;
8025 TCGv tmp;
8026 TCGv tmp2;
8027 TCGv addr;
8029 if (s->condexec_mask) {
8030 cond = s->condexec_cond;
8031 s->condlabel = gen_new_label();
8032 gen_test_cc(cond ^ 1, s->condlabel);
8033 s->condjmp = 1;
8036 insn = lduw_code(s->pc);
8037 s->pc += 2;
8039 switch (insn >> 12) {
8040 case 0: case 1:
8041 rd = insn & 7;
8042 op = (insn >> 11) & 3;
8043 if (op == 3) {
8044 /* add/subtract */
8045 rn = (insn >> 3) & 7;
8046 gen_movl_T0_reg(s, rn);
8047 if (insn & (1 << 10)) {
8048 /* immediate */
8049 gen_op_movl_T1_im((insn >> 6) & 7);
8050 } else {
8051 /* reg */
8052 rm = (insn >> 6) & 7;
8053 gen_movl_T1_reg(s, rm);
8055 if (insn & (1 << 9)) {
8056 if (s->condexec_mask)
8057 gen_op_subl_T0_T1();
8058 else
8059 gen_op_subl_T0_T1_cc();
8060 } else {
8061 if (s->condexec_mask)
8062 gen_op_addl_T0_T1();
8063 else
8064 gen_op_addl_T0_T1_cc();
8066 gen_movl_reg_T0(s, rd);
8067 } else {
8068 /* shift immediate */
8069 rm = (insn >> 3) & 7;
8070 shift = (insn >> 6) & 0x1f;
8071 tmp = load_reg(s, rm);
8072 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8073 if (!s->condexec_mask)
8074 gen_logic_CC(tmp);
8075 store_reg(s, rd, tmp);
8077 break;
8078 case 2: case 3:
8079 /* arithmetic large immediate */
8080 op = (insn >> 11) & 3;
8081 rd = (insn >> 8) & 0x7;
8082 if (op == 0) {
8083 gen_op_movl_T0_im(insn & 0xff);
8084 } else {
8085 gen_movl_T0_reg(s, rd);
8086 gen_op_movl_T1_im(insn & 0xff);
8088 switch (op) {
8089 case 0: /* mov */
8090 if (!s->condexec_mask)
8091 gen_op_logic_T0_cc();
8092 break;
8093 case 1: /* cmp */
8094 gen_op_subl_T0_T1_cc();
8095 break;
8096 case 2: /* add */
8097 if (s->condexec_mask)
8098 gen_op_addl_T0_T1();
8099 else
8100 gen_op_addl_T0_T1_cc();
8101 break;
8102 case 3: /* sub */
8103 if (s->condexec_mask)
8104 gen_op_subl_T0_T1();
8105 else
8106 gen_op_subl_T0_T1_cc();
8107 break;
8109 if (op != 1)
8110 gen_movl_reg_T0(s, rd);
8111 break;
8112 case 4:
8113 if (insn & (1 << 11)) {
8114 rd = (insn >> 8) & 7;
8115 /* load pc-relative. Bit 1 of PC is ignored. */
8116 val = s->pc + 2 + ((insn & 0xff) * 4);
8117 val &= ~(uint32_t)2;
8118 addr = new_tmp();
8119 tcg_gen_movi_i32(addr, val);
8120 tmp = gen_ld32(addr, IS_USER(s));
8121 dead_tmp(addr);
8122 store_reg(s, rd, tmp);
8123 break;
8125 if (insn & (1 << 10)) {
8126 /* data processing extended or blx */
8127 rd = (insn & 7) | ((insn >> 4) & 8);
8128 rm = (insn >> 3) & 0xf;
8129 op = (insn >> 8) & 3;
8130 switch (op) {
8131 case 0: /* add */
8132 gen_movl_T0_reg(s, rd);
8133 gen_movl_T1_reg(s, rm);
8134 gen_op_addl_T0_T1();
8135 gen_movl_reg_T0(s, rd);
8136 break;
8137 case 1: /* cmp */
8138 gen_movl_T0_reg(s, rd);
8139 gen_movl_T1_reg(s, rm);
8140 gen_op_subl_T0_T1_cc();
8141 break;
8142 case 2: /* mov/cpy */
8143 gen_movl_T0_reg(s, rm);
8144 gen_movl_reg_T0(s, rd);
8145 break;
8146 case 3:/* branch [and link] exchange thumb register */
8147 tmp = load_reg(s, rm);
8148 if (insn & (1 << 7)) {
8149 val = (uint32_t)s->pc | 1;
8150 tmp2 = new_tmp();
8151 tcg_gen_movi_i32(tmp2, val);
8152 store_reg(s, 14, tmp2);
8154 gen_bx(s, tmp);
8155 break;
8157 break;
8160 /* data processing register */
8161 rd = insn & 7;
8162 rm = (insn >> 3) & 7;
8163 op = (insn >> 6) & 0xf;
8164 if (op == 2 || op == 3 || op == 4 || op == 7) {
8165 /* the shift/rotate ops want the operands backwards */
8166 val = rm;
8167 rm = rd;
8168 rd = val;
8169 val = 1;
8170 } else {
8171 val = 0;
8174 if (op == 9) /* neg */
8175 gen_op_movl_T0_im(0);
8176 else if (op != 0xf) /* mvn doesn't read its first operand */
8177 gen_movl_T0_reg(s, rd);
8179 gen_movl_T1_reg(s, rm);
8180 switch (op) {
8181 case 0x0: /* and */
8182 gen_op_andl_T0_T1();
8183 if (!s->condexec_mask)
8184 gen_op_logic_T0_cc();
8185 break;
8186 case 0x1: /* eor */
8187 gen_op_xorl_T0_T1();
8188 if (!s->condexec_mask)
8189 gen_op_logic_T0_cc();
8190 break;
8191 case 0x2: /* lsl */
8192 if (s->condexec_mask) {
8193 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8194 } else {
8195 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8196 gen_op_logic_T1_cc();
8198 break;
8199 case 0x3: /* lsr */
8200 if (s->condexec_mask) {
8201 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8202 } else {
8203 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8204 gen_op_logic_T1_cc();
8206 break;
8207 case 0x4: /* asr */
8208 if (s->condexec_mask) {
8209 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8210 } else {
8211 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8212 gen_op_logic_T1_cc();
8214 break;
8215 case 0x5: /* adc */
8216 if (s->condexec_mask)
8217 gen_adc_T0_T1();
8218 else
8219 gen_op_adcl_T0_T1_cc();
8220 break;
8221 case 0x6: /* sbc */
8222 if (s->condexec_mask)
8223 gen_sbc_T0_T1();
8224 else
8225 gen_op_sbcl_T0_T1_cc();
8226 break;
8227 case 0x7: /* ror */
8228 if (s->condexec_mask) {
8229 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8230 } else {
8231 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8232 gen_op_logic_T1_cc();
8234 break;
8235 case 0x8: /* tst */
8236 gen_op_andl_T0_T1();
8237 gen_op_logic_T0_cc();
8238 rd = 16;
8239 break;
8240 case 0x9: /* neg */
8241 if (s->condexec_mask)
8242 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8243 else
8244 gen_op_subl_T0_T1_cc();
8245 break;
8246 case 0xa: /* cmp */
8247 gen_op_subl_T0_T1_cc();
8248 rd = 16;
8249 break;
8250 case 0xb: /* cmn */
8251 gen_op_addl_T0_T1_cc();
8252 rd = 16;
8253 break;
8254 case 0xc: /* orr */
8255 gen_op_orl_T0_T1();
8256 if (!s->condexec_mask)
8257 gen_op_logic_T0_cc();
8258 break;
8259 case 0xd: /* mul */
8260 gen_op_mull_T0_T1();
8261 if (!s->condexec_mask)
8262 gen_op_logic_T0_cc();
8263 break;
8264 case 0xe: /* bic */
8265 gen_op_bicl_T0_T1();
8266 if (!s->condexec_mask)
8267 gen_op_logic_T0_cc();
8268 break;
8269 case 0xf: /* mvn */
8270 gen_op_notl_T1();
8271 if (!s->condexec_mask)
8272 gen_op_logic_T1_cc();
8273 val = 1;
8274 rm = rd;
8275 break;
8277 if (rd != 16) {
8278 if (val)
8279 gen_movl_reg_T1(s, rm);
8280 else
8281 gen_movl_reg_T0(s, rd);
8283 break;
8285 case 5:
8286 /* load/store register offset. */
8287 rd = insn & 7;
8288 rn = (insn >> 3) & 7;
8289 rm = (insn >> 6) & 7;
8290 op = (insn >> 9) & 7;
8291 addr = load_reg(s, rn);
8292 tmp = load_reg(s, rm);
8293 tcg_gen_add_i32(addr, addr, tmp);
8294 dead_tmp(tmp);
8296 if (op < 3) /* store */
8297 tmp = load_reg(s, rd);
8299 switch (op) {
8300 case 0: /* str */
8301 gen_st32(tmp, addr, IS_USER(s));
8302 break;
8303 case 1: /* strh */
8304 gen_st16(tmp, addr, IS_USER(s));
8305 break;
8306 case 2: /* strb */
8307 gen_st8(tmp, addr, IS_USER(s));
8308 break;
8309 case 3: /* ldrsb */
8310 tmp = gen_ld8s(addr, IS_USER(s));
8311 break;
8312 case 4: /* ldr */
8313 tmp = gen_ld32(addr, IS_USER(s));
8314 break;
8315 case 5: /* ldrh */
8316 tmp = gen_ld16u(addr, IS_USER(s));
8317 break;
8318 case 6: /* ldrb */
8319 tmp = gen_ld8u(addr, IS_USER(s));
8320 break;
8321 case 7: /* ldrsh */
8322 tmp = gen_ld16s(addr, IS_USER(s));
8323 break;
8325 if (op >= 3) /* load */
8326 store_reg(s, rd, tmp);
8327 dead_tmp(addr);
8328 break;
8330 case 6:
8331 /* load/store word immediate offset */
8332 rd = insn & 7;
8333 rn = (insn >> 3) & 7;
8334 addr = load_reg(s, rn);
8335 val = (insn >> 4) & 0x7c;
8336 tcg_gen_addi_i32(addr, addr, val);
8338 if (insn & (1 << 11)) {
8339 /* load */
8340 tmp = gen_ld32(addr, IS_USER(s));
8341 store_reg(s, rd, tmp);
8342 } else {
8343 /* store */
8344 tmp = load_reg(s, rd);
8345 gen_st32(tmp, addr, IS_USER(s));
8347 dead_tmp(addr);
8348 break;
8350 case 7:
8351 /* load/store byte immediate offset */
8352 rd = insn & 7;
8353 rn = (insn >> 3) & 7;
8354 addr = load_reg(s, rn);
8355 val = (insn >> 6) & 0x1f;
8356 tcg_gen_addi_i32(addr, addr, val);
8358 if (insn & (1 << 11)) {
8359 /* load */
8360 tmp = gen_ld8u(addr, IS_USER(s));
8361 store_reg(s, rd, tmp);
8362 } else {
8363 /* store */
8364 tmp = load_reg(s, rd);
8365 gen_st8(tmp, addr, IS_USER(s));
8367 dead_tmp(addr);
8368 break;
8370 case 8:
8371 /* load/store halfword immediate offset */
8372 rd = insn & 7;
8373 rn = (insn >> 3) & 7;
8374 addr = load_reg(s, rn);
8375 val = (insn >> 5) & 0x3e;
8376 tcg_gen_addi_i32(addr, addr, val);
8378 if (insn & (1 << 11)) {
8379 /* load */
8380 tmp = gen_ld16u(addr, IS_USER(s));
8381 store_reg(s, rd, tmp);
8382 } else {
8383 /* store */
8384 tmp = load_reg(s, rd);
8385 gen_st16(tmp, addr, IS_USER(s));
8387 dead_tmp(addr);
8388 break;
8390 case 9:
8391 /* load/store from stack */
8392 rd = (insn >> 8) & 7;
8393 addr = load_reg(s, 13);
8394 val = (insn & 0xff) * 4;
8395 tcg_gen_addi_i32(addr, addr, val);
8397 if (insn & (1 << 11)) {
8398 /* load */
8399 tmp = gen_ld32(addr, IS_USER(s));
8400 store_reg(s, rd, tmp);
8401 } else {
8402 /* store */
8403 tmp = load_reg(s, rd);
8404 gen_st32(tmp, addr, IS_USER(s));
8406 dead_tmp(addr);
8407 break;
8409 case 10:
8410 /* add to high reg */
8411 rd = (insn >> 8) & 7;
8412 if (insn & (1 << 11)) {
8413 /* SP */
8414 tmp = load_reg(s, 13);
8415 } else {
8416 /* PC. bit 1 is ignored. */
8417 tmp = new_tmp();
8418 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8420 val = (insn & 0xff) * 4;
8421 tcg_gen_addi_i32(tmp, tmp, val);
8422 store_reg(s, rd, tmp);
8423 break;
8425 case 11:
8426 /* misc */
8427 op = (insn >> 8) & 0xf;
8428 switch (op) {
8429 case 0:
8430 /* adjust stack pointer */
8431 tmp = load_reg(s, 13);
8432 val = (insn & 0x7f) * 4;
8433 if (insn & (1 << 7))
8434 val = -(int32_t)val;
8435 tcg_gen_addi_i32(tmp, tmp, val);
8436 store_reg(s, 13, tmp);
8437 break;
8439 case 2: /* sign/zero extend. */
8440 ARCH(6);
8441 rd = insn & 7;
8442 rm = (insn >> 3) & 7;
8443 tmp = load_reg(s, rm);
8444 switch ((insn >> 6) & 3) {
8445 case 0: gen_sxth(tmp); break;
8446 case 1: gen_sxtb(tmp); break;
8447 case 2: gen_uxth(tmp); break;
8448 case 3: gen_uxtb(tmp); break;
8450 store_reg(s, rd, tmp);
8451 break;
8452 case 4: case 5: case 0xc: case 0xd:
8453 /* push/pop */
8454 addr = load_reg(s, 13);
8455 if (insn & (1 << 8))
8456 offset = 4;
8457 else
8458 offset = 0;
8459 for (i = 0; i < 8; i++) {
8460 if (insn & (1 << i))
8461 offset += 4;
8463 if ((insn & (1 << 11)) == 0) {
8464 tcg_gen_addi_i32(addr, addr, -offset);
8466 for (i = 0; i < 8; i++) {
8467 if (insn & (1 << i)) {
8468 if (insn & (1 << 11)) {
8469 /* pop */
8470 tmp = gen_ld32(addr, IS_USER(s));
8471 store_reg(s, i, tmp);
8472 } else {
8473 /* push */
8474 tmp = load_reg(s, i);
8475 gen_st32(tmp, addr, IS_USER(s));
8477 /* advance to the next address. */
8478 tcg_gen_addi_i32(addr, addr, 4);
8481 TCGV_UNUSED(tmp);
8482 if (insn & (1 << 8)) {
8483 if (insn & (1 << 11)) {
8484 /* pop pc */
8485 tmp = gen_ld32(addr, IS_USER(s));
8486 /* don't set the pc until the rest of the instruction
8487 has completed */
8488 } else {
8489 /* push lr */
8490 tmp = load_reg(s, 14);
8491 gen_st32(tmp, addr, IS_USER(s));
8493 tcg_gen_addi_i32(addr, addr, 4);
8495 if ((insn & (1 << 11)) == 0) {
8496 tcg_gen_addi_i32(addr, addr, -offset);
8498 /* write back the new stack pointer */
8499 store_reg(s, 13, addr);
8500 /* set the new PC value */
8501 if ((insn & 0x0900) == 0x0900)
8502 gen_bx(s, tmp);
8503 break;
8505 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8506 rm = insn & 7;
8507 tmp = load_reg(s, rm);
8508 s->condlabel = gen_new_label();
8509 s->condjmp = 1;
8510 if (insn & (1 << 11))
8511 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8512 else
8513 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8514 dead_tmp(tmp);
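/* offset[6:1] = insn[9]:insn[7:3]; the low bit is always zero. */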
8515 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8516 val = (uint32_t)s->pc + 2;
8517 val += offset;
8518 gen_jmp(s, val);
8519 break;
8521 case 15: /* IT, nop-hint. */
8522 if ((insn & 0xf) == 0) {
8523 gen_nop_hint(s, (insn >> 4) & 0xf);
8524 break;
8526 /* If Then. */
8527 s->condexec_cond = (insn >> 4) & 0xe;
8528 s->condexec_mask = insn & 0x1f;
8529 /* No actual code generated for this insn, just set up state. */
8530 break;
8532 case 0xe: /* bkpt */
8533 gen_set_condexec(s);
8534 gen_set_pc_im(s->pc - 2);
8535 gen_exception(EXCP_BKPT);
8536 s->is_jmp = DISAS_JUMP;
8537 break;
8539 case 0xa: /* rev */
8540 ARCH(6);
8541 rn = (insn >> 3) & 0x7;
8542 rd = insn & 0x7;
8543 tmp = load_reg(s, rn);
8544 switch ((insn >> 6) & 3) {
8545 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8546 case 1: gen_rev16(tmp); break;
8547 case 3: gen_revsh(tmp); break;
8548 default: goto illegal_op;
8550 store_reg(s, rd, tmp);
8551 break;
8553 case 6: /* cps */
8554 ARCH(6);
8555 if (IS_USER(s))
8556 break;
8557 if (IS_M(env)) {
8558 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8559 /* PRIMASK */
8560 if (insn & 1) {
8561 addr = tcg_const_i32(16);
8562 gen_helper_v7m_msr(cpu_env, addr, tmp);
8564 /* FAULTMASK */
8565 if (insn & 2) {
8566 addr = tcg_const_i32(17);
8567 gen_helper_v7m_msr(cpu_env, addr, tmp);
8569 gen_lookup_tb(s);
8570 } else {
8571 if (insn & (1 << 4))
8572 shift = CPSR_A | CPSR_I | CPSR_F;
8573 else
8574 shift = 0;
8575 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
8577 break;
8579 default:
8580 goto undef;
8582 break;
8584 case 12:
8585 /* load/store multiple */
8586 rn = (insn >> 8) & 0x7;
8587 addr = load_reg(s, rn);
8588 for (i = 0; i < 8; i++) {
8589 if (insn & (1 << i)) {
8590 if (insn & (1 << 11)) {
8591 /* load */
8592 tmp = gen_ld32(addr, IS_USER(s));
8593 store_reg(s, i, tmp);
8594 } else {
8595 /* store */
8596 tmp = load_reg(s, i);
8597 gen_st32(tmp, addr, IS_USER(s));
8599 /* advance to the next address */
8600 tcg_gen_addi_i32(addr, addr, 4);
8603 /* Base register writeback. */
8604 if ((insn & (1 << rn)) == 0) {
8605 store_reg(s, rn, addr);
8606 } else {
8607 dead_tmp(addr);
8609 break;
8611 case 13:
8612 /* conditional branch or swi */
8613 cond = (insn >> 8) & 0xf;
8614 if (cond == 0xe)
8615 goto undef;
8617 if (cond == 0xf) {
8618 /* swi */
8619 gen_set_condexec(s);
8620 gen_set_pc_im(s->pc);
8621 s->is_jmp = DISAS_SWI;
8622 break;
8624 /* generate a conditional jump to the next instruction */
8625 s->condlabel = gen_new_label();
8626 gen_test_cc(cond ^ 1, s->condlabel);
8627 s->condjmp = 1;
8629 /* jump to the offset */
8630 val = (uint32_t)s->pc + 2;
8631 offset = ((int32_t)insn << 24) >> 24;
8632 val += offset << 1;
8633 gen_jmp(s, val);
8634 break;
8636 case 14:
8637 if (insn & (1 << 11)) {
8638 if (disas_thumb2_insn(env, s, insn))
8639 goto undef32;
8640 break;
8642 /* unconditional branch */
8643 val = (uint32_t)s->pc;
8644 offset = ((int32_t)insn << 21) >> 21;
8645 val += (offset << 1) + 2;
8646 gen_jmp(s, val);
8647 break;
8649 case 15:
8650 if (disas_thumb2_insn(env, s, insn))
8651 goto undef32;
8652 break;
8654 return;
8655 undef32:
8656 gen_set_condexec(s);
8657 gen_set_pc_im(s->pc - 4);
8658 gen_exception(EXCP_UDEF);
8659 s->is_jmp = DISAS_JUMP;
8660 return;
8661 illegal_op:
8662 undef:
8663 gen_set_condexec(s);
8664 gen_set_pc_im(s->pc - 2);
8665 gen_exception(EXCP_UDEF);
8666 s->is_jmp = DISAS_JUMP;
8669 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8670 basic block 'tb'. If search_pc is TRUE, also generate PC
8671 information for each intermediate instruction. */
8672 static inline void gen_intermediate_code_internal(CPUState *env,
8673 TranslationBlock *tb,
8674 int search_pc)
8676 DisasContext dc1, *dc = &dc1;
8677 CPUBreakpoint *bp;
8678 uint16_t *gen_opc_end;
8679 int j, lj;
8680 target_ulong pc_start;
8681 uint32_t next_page_start;
8682 int num_insns;
8683 int max_insns;
8685 /* generate intermediate code */
8686 num_temps = 0;
8688 pc_start = tb->pc;
8690 dc->tb = tb;
8692 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8694 dc->is_jmp = DISAS_NEXT;
8695 dc->pc = pc_start;
8696 dc->singlestep_enabled = env->singlestep_enabled;
8697 dc->condjmp = 0;
8698 dc->thumb = env->thumb;
8699 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8700 dc->condexec_cond = env->condexec_bits >> 4;
8701 #if !defined(CONFIG_USER_ONLY)
8702 if (IS_M(env)) {
8703 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8704 } else {
8705 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8707 #endif
8708 cpu_F0s = tcg_temp_new_i32();
8709 cpu_F1s = tcg_temp_new_i32();
8710 cpu_F0d = tcg_temp_new_i64();
8711 cpu_F1d = tcg_temp_new_i64();
8712 cpu_V0 = cpu_F0d;
8713 cpu_V1 = cpu_F1d;
8714 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8715 cpu_M0 = tcg_temp_new_i64();
8716 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8717 lj = -1;
8718 num_insns = 0;
8719 max_insns = tb->cflags & CF_COUNT_MASK;
8720 if (max_insns == 0)
8721 max_insns = CF_COUNT_MASK;
8723 gen_icount_start();
8724 /* Reset the conditional execution bits immediately. This avoids
8725 complications trying to do it at the end of the block. */
8726 if (env->condexec_bits)
8728 TCGv tmp = new_tmp();
8729 tcg_gen_movi_i32(tmp, 0);
8730 store_cpu_field(tmp, condexec_bits);
8732 do {
8733 #ifdef CONFIG_USER_ONLY
8734 /* Intercept jump to the magic kernel page. */
8735 if (dc->pc >= 0xffff0000) {
8736 /* We always get here via a jump, so we know we are not in a
8737 conditional execution block. */
8738 gen_exception(EXCP_KERNEL_TRAP);
8739 dc->is_jmp = DISAS_UPDATE;
8740 break;
8742 #else
8743 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8744 /* We always get here via a jump, so we know we are not in a
8745 conditional execution block. */
8746 gen_exception(EXCP_EXCEPTION_EXIT);
8747 dc->is_jmp = DISAS_UPDATE;
8748 break;
8750 #endif
8752 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
8753 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
8754 if (bp->pc == dc->pc) {
8755 gen_set_condexec(dc);
8756 gen_set_pc_im(dc->pc);
8757 gen_exception(EXCP_DEBUG);
8758 dc->is_jmp = DISAS_JUMP;
8759 /* Advance PC so that clearing the breakpoint will
8760 invalidate this TB. */
8761 dc->pc += 2;
8762 goto done_generating;
8763 break;
8767 if (search_pc) {
8768 j = gen_opc_ptr - gen_opc_buf;
8769 if (lj < j) {
8770 lj++;
8771 while (lj < j)
8772 gen_opc_instr_start[lj++] = 0;
8774 gen_opc_pc[lj] = dc->pc;
8775 gen_opc_instr_start[lj] = 1;
8776 gen_opc_icount[lj] = num_insns;
8779 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8780 gen_io_start();
8782 if (env->thumb) {
8783 disas_thumb_insn(env, dc);
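/* Advance the IT state: the top bit of the 5-bit mask supplies the next condition bit. */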
8784 if (dc->condexec_mask) {
8785 dc->condexec_cond = (dc->condexec_cond & 0xe)
8786 | ((dc->condexec_mask >> 4) & 1);
8787 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8788 if (dc->condexec_mask == 0) {
8789 dc->condexec_cond = 0;
8792 } else {
8793 disas_arm_insn(env, dc);
8795 if (num_temps) {
8796 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8797 num_temps = 0;
8800 if (dc->condjmp && !dc->is_jmp) {
8801 gen_set_label(dc->condlabel);
8802 dc->condjmp = 0;
8804 /* Translation stops when a conditional branch is encountered.
8805 * Otherwise the subsequent code could get translated several times.
8806 * Also stop translation when a page boundary is reached. This
8807 * ensures prefetch aborts occur at the right place. */
8808 num_insns ++;
8809 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8810 !env->singlestep_enabled &&
8811 !singlestep &&
8812 dc->pc < next_page_start &&
8813 num_insns < max_insns);
8815 if (tb->cflags & CF_LAST_IO) {
8816 if (dc->condjmp) {
8817 /* FIXME: This can theoretically happen with self-modifying
8818 code. */
8819 cpu_abort(env, "IO on conditional branch instruction");
8821 gen_io_end();
8824 /* At this stage dc->condjmp will only be set when the skipped
8825 instruction was a conditional branch or trap, and the PC has
8826 already been written. */
8827 if (unlikely(env->singlestep_enabled)) {
8828 /* Make sure the pc is updated, and raise a debug exception. */
8829 if (dc->condjmp) {
8830 gen_set_condexec(dc);
8831 if (dc->is_jmp == DISAS_SWI) {
8832 gen_exception(EXCP_SWI);
8833 } else {
8834 gen_exception(EXCP_DEBUG);
8836 gen_set_label(dc->condlabel);
8838 if (dc->condjmp || !dc->is_jmp) {
8839 gen_set_pc_im(dc->pc);
8840 dc->condjmp = 0;
8842 gen_set_condexec(dc);
8843 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8844 gen_exception(EXCP_SWI);
8845 } else {
8846 /* FIXME: Single stepping a WFI insn will not halt
8847 the CPU. */
8848 gen_exception(EXCP_DEBUG);
8850 } else {
8851 /* While branches must always occur at the end of an IT block,
8852 there are a few other things that can cause us to terminate
8853 the TB in the middle of an IT block:
8854 - Exception generating instructions (bkpt, swi, undefined).
8855 - Page boundaries.
8856 - Hardware watchpoints.
8857 Hardware breakpoints have already been handled and skip this code.
8859 gen_set_condexec(dc);
8860 switch(dc->is_jmp) {
8861 case DISAS_NEXT:
8862 gen_goto_tb(dc, 1, dc->pc);
8863 break;
8864 default:
8865 case DISAS_JUMP:
8866 case DISAS_UPDATE:
8867 /* indicate that the hash table must be used to find the next TB */
8868 tcg_gen_exit_tb(0);
8869 break;
8870 case DISAS_TB_JUMP:
8871 /* nothing more to generate */
8872 break;
8873 case DISAS_WFI:
8874 gen_helper_wfi();
8875 break;
8876 case DISAS_SWI:
8877 gen_exception(EXCP_SWI);
8878 break;
8880 if (dc->condjmp) {
8881 gen_set_label(dc->condlabel);
8882 gen_set_condexec(dc);
8883 gen_goto_tb(dc, 1, dc->pc);
8884 dc->condjmp = 0;
8888 done_generating:
8889 gen_icount_end(tb, num_insns);
8890 *gen_opc_ptr = INDEX_op_end;
8892 #ifdef DEBUG_DISAS
8893 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8894 qemu_log("----------------\n");
8895 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8896 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8897 qemu_log("\n");
8899 #endif
8900 if (search_pc) {
8901 j = gen_opc_ptr - gen_opc_buf;
8902 lj++;
8903 while (lj <= j)
8904 gen_opc_instr_start[lj++] = 0;
8905 } else {
8906 tb->size = dc->pc - pc_start;
8907 tb->icount = num_insns;
8911 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8913 gen_intermediate_code_internal(env, tb, 0);
8916 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8918 gen_intermediate_code_internal(env, tb, 1);
8921 static const char *cpu_mode_names[16] = {
8922 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8923 "???", "???", "???", "und", "???", "???", "???", "sys"
8926 void cpu_dump_state(CPUState *env, FILE *f,
8927 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8928 int flags)
8930 int i;
8931 #if 0
8932 union {
8933 uint32_t i;
8934 float s;
8935 } s0, s1;
8936 CPU_DoubleU d;
8937 /* ??? This assumes float64 and double have the same layout.
8938 Oh well, it's only debug dumps. */
8939 union {
8940 float64 f64;
8941 double d;
8942 } d0;
8943 #endif
8944 uint32_t psr;
8946 for(i=0;i<16;i++) {
8947 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8948 if ((i % 4) == 3)
8949 cpu_fprintf(f, "\n");
8950 else
8951 cpu_fprintf(f, " ");
8953 psr = cpsr_read(env);
8954 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8955 psr,
8956 psr & (1 << 31) ? 'N' : '-',
8957 psr & (1 << 30) ? 'Z' : '-',
8958 psr & (1 << 29) ? 'C' : '-',
8959 psr & (1 << 28) ? 'V' : '-',
8960 psr & CPSR_T ? 'T' : 'A',
8961 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8963 #if 0
8964 for (i = 0; i < 16; i++) {
8965 d.d = env->vfp.regs[i];
8966 s0.i = d.l.lower;
8967 s1.i = d.l.upper;
8968 d0.f64 = d.d;
8969 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8970 i * 2, (int)s0.i, s0.s,
8971 i * 2 + 1, (int)s1.i, s1.s,
8972 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8973 d0.d);
8975 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8976 #endif
8979 void gen_pc_load(CPUState *env, TranslationBlock *tb,
8980 unsigned long searched_pc, int pc_pos, void *puc)
8982 env->regs[15] = gen_opc_pc[pc_pos];