/* qemu-kvm/fedora.git: target-arm/translate.c */
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;

/* FIXME:  These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
}

#define GEN_HELPER 2
#include "helpers.h"
/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function.  */
#define MAX_TEMPS 8
static int num_temps;
static TCGv temps[MAX_TEMPS];

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
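/* Illustrative sketch of the allocation discipline above (not from the
 * original source): every new_tmp() must be balanced by exactly one
 * dead_tmp(), although frees may arrive out of order.
 *
 *     TCGv tmp = new_tmp();           // take a cached i32 temporary
 *     tcg_gen_movi_i32(tmp, 0);
 *     ...
 *     dead_tmp(tmp);                  // give it back; out-of-order
 *                                     // frees are shuffled to the end
 *
 * A leaked temporary eventually trips the abort() in new_tmp() once
 * MAX_TEMPS temporaries are live at once.
 */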
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
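/* Note: a write to r15 clears bit 0 (the PC must stay halfword
 * aligned) and sets is_jmp = DISAS_JUMP so translation of the current
 * block stops after this instruction.  Writes that may also switch to
 * Thumb state go through gen_bx()/store_reg_bx() further below.
 */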
/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
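/* Worked example: for var = 0xAABBCCDD the masked shifts above give
 * tmp = 0x00AA00CC and var = 0xBB00DD00, so the final OR yields
 * 0xBBAADDCC -- each 16-bit halfword byte-swapped independently.
 */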
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
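/* Worked example: extracting a 4-bit signed field whose raw bits are
 * 0b1010.  After the mask, var = 0xA and signbit = 0x8; then
 * var ^ 0x8 = 0x2 and 0x2 - 0x8 = -6, which is 0b1010 read as a
 * two's-complement 4-bit value.  The XOR/SUB pair sign-extends
 * without needing a variable-width shift pair.
 */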
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
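/* Worked example: clearing bit 15 of both operands stops a carry out
 * of the low halfword from rippling into the high one; the final XOR
 * restores the discarded sum bit.  With t0 = t1 = 0x00018000:
 * tmp = 0, the masked sum is 0x00020000, matching the per-halfword
 * results 0x8000 + 0x8000 = 0x0000 (carry dropped) and
 * 0x0001 + 0x0001 = 0x0002.
 */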
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
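/* Worked example: the rotate is composed from two shifts as
 * (t1 >> i) | (t1 << (32 - i)).  For t1 = 0x80000001, i = 1 this is
 * 0x40000000 | 0x80000000 = 0xC0000000.  The i == 0 early return also
 * avoids the undefined behaviour of a 32-bit shift by 32.
 */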
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
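/* Background: in the ARM immediate shift encoding a shift amount of 0
 * is special-cased -- LSR #0 and ASR #0 actually encode LSR #32 and
 * ASR #32, and ROR #0 encodes RRX (rotate right by one bit through
 * the carry flag).  That is why gen_arm_shift_im() below treats
 * shift == 0 differently for every shiftop except LSL.
 */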
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
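/* Expansion example: with op1 == 1 and op2 == 0, PAS_OP(s) together
 * with the first gen_pas_helper definition expands to
 *
 *     gen_helper_sadd16(a, a, b, tmp);
 *
 * i.e. the signed SADD16 helper, which also receives a pointer to the
 * GE flags so it can record per-lane results.  The saturating and
 * halving variants (q/sh/uq/uh) do not update GE, hence the second,
 * three-argument helper definition.
 */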
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
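/* Note: the table is indexed by the data-processing opcode field,
 * insn bits [24:21]; entries 8 and 9 ("andl"/"xorl") correspond to
 * TST and TEQ.  A 1 marks a logical operation whose N and Z flags are
 * derived directly from the result via gen_logic_CC(); the arithmetic
 * operations compute their flags in the *_cc helpers instead.
 */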
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    store_cpu_field(tmp, regs[15]);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
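/* Note: these are the standard ARM load/store addressing mode fields:
 * bit 25 (I) selects a 12-bit immediate versus a shifted register
 * offset, and bit 23 (U) selects whether the offset is added to or
 * subtracted from the base.
 */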
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
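/* Note: the single-precision registers alias the double-precision
 * bank, so s(2n) is the low 32 bits of d(n) and s(2n+1) the high 32
 * bits; CPU_DoubleU's l.lower/l.upper fields hide the host byte
 * order.  For example, vfp_reg_offset(0, 3) yields the offset of s3,
 * i.e. the upper word of vfp.regs[1] (d1).
 */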
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* FIXME: Remove these.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}

static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
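/* Note: this decodes the coprocessor load/store addressing mode --
 * bit 24 (P) selects pre- versus post-indexing, bit 21 (W) requests
 * base writeback, and bit 23 (U) gives the offset direction.  The
 * 8-bit immediate is scaled by 4 when insn bit 8 is set, via the
 * "<< ((insn >> 7) & 2)" trick (a shift of either 0 or 2).
 */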
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    } else {
        gen_iwmmxt_movl_T0_T1_wRn(rd);
    }

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else { /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_reg_T0(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
            }
            break;
        case 2:
            gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_set_nzcv(cpu_T[1]);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f))
                return 1;
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f))
                return 1;
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2209 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2210 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 if (insn & (1 << 21))
2218 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2219 else
2220 gen_op_iwmmxt_minub_M0_wRn(rd1);
2221 break;
2222 case 1:
2223 if (insn & (1 << 21))
2224 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2225 else
2226 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2227 break;
2228 case 2:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minul_M0_wRn(rd1);
2233 break;
2234 case 3:
2235 return 1;
2237 gen_op_iwmmxt_movq_wRn_M0(wrd);
2238 gen_op_iwmmxt_set_mup();
2239 break;
2240 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2241 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 rd1 = (insn >> 0) & 0xf;
2245 gen_op_iwmmxt_movq_M0_wRn(rd0);
2246 switch ((insn >> 22) & 3) {
2247 case 0:
2248 if (insn & (1 << 21))
2249 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2250 else
2251 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2252 break;
2253 case 1:
2254 if (insn & (1 << 21))
2255 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2256 else
2257 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2258 break;
2259 case 2:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2264 break;
2265 case 3:
2266 return 1;
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2272 case 0x402: case 0x502: case 0x602: case 0x702:
2273 wrd = (insn >> 12) & 0xf;
2274 rd0 = (insn >> 16) & 0xf;
2275 rd1 = (insn >> 0) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
2277 gen_op_movl_T0_im((insn >> 20) & 3);
2278 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2279 gen_op_iwmmxt_movq_wRn_M0(wrd);
2280 gen_op_iwmmxt_set_mup();
2281 break;
2282 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2283 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2284 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2285 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
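/* The opcode nibble tested below packs the element size in its top two
   bits (0 = byte, 1 = halfword, 2 = word) and the saturation mode in its
   low two bits (0 = modulo, 1 = unsigned saturate, 3 = signed saturate);
   the remaining encodings appear to be undefined and are rejected. */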
2290 switch ((insn >> 20) & 0xf) {
2291 case 0x0:
2292 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2293 break;
2294 case 0x1:
2295 gen_op_iwmmxt_subub_M0_wRn(rd1);
2296 break;
2297 case 0x3:
2298 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2299 break;
2300 case 0x4:
2301 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2302 break;
2303 case 0x5:
2304 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2305 break;
2306 case 0x7:
2307 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2308 break;
2309 case 0x8:
2310 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2311 break;
2312 case 0x9:
2313 gen_op_iwmmxt_subul_M0_wRn(rd1);
2314 break;
2315 case 0xb:
2316 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2317 break;
2318 default:
2319 return 1;
2321 gen_op_iwmmxt_movq_wRn_M0(wrd);
2322 gen_op_iwmmxt_set_mup();
2323 gen_op_iwmmxt_set_cup();
2324 break;
2325 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2326 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2327 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2328 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2329 wrd = (insn >> 12) & 0xf;
2330 rd0 = (insn >> 16) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2333 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2334 gen_op_iwmmxt_movq_wRn_M0(wrd);
2335 gen_op_iwmmxt_set_mup();
2336 gen_op_iwmmxt_set_cup();
2337 break;
2338 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2339 case 0x418: case 0x518: case 0x618: case 0x718:
2340 case 0x818: case 0x918: case 0xa18: case 0xb18:
2341 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2342 wrd = (insn >> 12) & 0xf;
2343 rd0 = (insn >> 16) & 0xf;
2344 rd1 = (insn >> 0) & 0xf;
2345 gen_op_iwmmxt_movq_M0_wRn(rd0);
2346 switch ((insn >> 20) & 0xf) {
2347 case 0x0:
2348 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2349 break;
2350 case 0x1:
2351 gen_op_iwmmxt_addub_M0_wRn(rd1);
2352 break;
2353 case 0x3:
2354 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2355 break;
2356 case 0x4:
2357 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2358 break;
2359 case 0x5:
2360 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2361 break;
2362 case 0x7:
2363 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2364 break;
2365 case 0x8:
2366 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2367 break;
2368 case 0x9:
2369 gen_op_iwmmxt_addul_M0_wRn(rd1);
2370 break;
2371 case 0xb:
2372 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2373 break;
2374 default:
2375 return 1;
2377 gen_op_iwmmxt_movq_wRn_M0(wrd);
2378 gen_op_iwmmxt_set_mup();
2379 gen_op_iwmmxt_set_cup();
2380 break;
2381 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2382 case 0x408: case 0x508: case 0x608: case 0x708:
2383 case 0x808: case 0x908: case 0xa08: case 0xb08:
2384 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2385 wrd = (insn >> 12) & 0xf;
2386 rd0 = (insn >> 16) & 0xf;
2387 rd1 = (insn >> 0) & 0xf;
2388 gen_op_iwmmxt_movq_M0_wRn(rd0);
2389 if (!(insn & (1 << 20)))
2390 return 1;
2391 switch ((insn >> 22) & 3) {
2392 case 0:
2393 return 1;
2394 case 1:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2399 break;
2400 case 2:
2401 if (insn & (1 << 21))
2402 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2403 else
2404 gen_op_iwmmxt_packul_M0_wRn(rd1);
2405 break;
2406 case 3:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2411 break;
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x201: case 0x203: case 0x205: case 0x207:
2418 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2419 case 0x211: case 0x213: case 0x215: case 0x217:
2420 case 0x219: case 0x21b: case 0x21d: case 0x21f:
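/* TMIA and friends multiply two core registers and accumulate into a
   wMMX register: TMIA is a full 32x32 MAC, TMIAPH does two parallel
   16x16 MACs, and TMIAxy multiplies one selected halfword of each
   operand (bits 16 and 17 pick top or bottom). */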
2421 wrd = (insn >> 5) & 0xf;
2422 rd0 = (insn >> 12) & 0xf;
2423 rd1 = (insn >> 0) & 0xf;
2424 if (rd0 == 0xf || rd1 == 0xf)
2425 return 1;
2426 gen_op_iwmmxt_movq_M0_wRn(wrd);
2427 switch ((insn >> 16) & 0xf) {
2428 case 0x0: /* TMIA */
2429 gen_movl_T0_reg(s, rd0);
2430 gen_movl_T1_reg(s, rd1);
2431 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2432 break;
2433 case 0x8: /* TMIAPH */
2434 gen_movl_T0_reg(s, rd0);
2435 gen_movl_T1_reg(s, rd1);
2436 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2437 break;
2438 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2439 gen_movl_T1_reg(s, rd0);
2440 if (insn & (1 << 16))
2441 gen_op_shrl_T1_im(16);
2442 gen_op_movl_T0_T1();
2443 gen_movl_T1_reg(s, rd1);
2444 if (insn & (1 << 17))
2445 gen_op_shrl_T1_im(16);
2446 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2447 break;
2448 default:
2449 return 1;
2451 gen_op_iwmmxt_movq_wRn_M0(wrd);
2452 gen_op_iwmmxt_set_mup();
2453 break;
2454 default:
2455 return 1;
2458 return 0;
2461 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2462 (i.e. an undefined instruction). */
2463 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2465 int acc, rd0, rd1, rdhi, rdlo;
2467 if ((insn & 0x0ff00f10) == 0x0e200010) {
2468 /* Multiply with Internal Accumulate Format */
2469 rd0 = (insn >> 12) & 0xf;
2470 rd1 = insn & 0xf;
2471 acc = (insn >> 5) & 7;
2473 if (acc != 0)
2474 return 1;
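/* Only acc0 is implemented (other accumulator numbers were rejected
   above), so the MIA family can reuse the iwMMXt multiply-accumulate
   ops with the accumulator held in the corresponding 64-bit register. */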
2476 switch ((insn >> 16) & 0xf) {
2477 case 0x0: /* MIA */
2478 gen_movl_T0_reg(s, rd0);
2479 gen_movl_T1_reg(s, rd1);
2480 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2481 break;
2482 case 0x8: /* MIAPH */
2483 gen_movl_T0_reg(s, rd0);
2484 gen_movl_T1_reg(s, rd1);
2485 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2486 break;
2487 case 0xc: /* MIABB */
2488 case 0xd: /* MIABT */
2489 case 0xe: /* MIATB */
2490 case 0xf: /* MIATT */
2491 gen_movl_T1_reg(s, rd0);
2492 if (insn & (1 << 16))
2493 gen_op_shrl_T1_im(16);
2494 gen_op_movl_T0_T1();
2495 gen_movl_T1_reg(s, rd1);
2496 if (insn & (1 << 17))
2497 gen_op_shrl_T1_im(16);
2498 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2499 break;
2500 default:
2501 return 1;
2504 gen_op_iwmmxt_movq_wRn_M0(acc);
2505 return 0;
2508 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2509 /* Internal Accumulator Access Format */
2510 rdhi = (insn >> 16) & 0xf;
2511 rdlo = (insn >> 12) & 0xf;
2512 acc = insn & 7;
2514 if (acc != 0)
2515 return 1;
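/* The XScale accumulator is 40 bits wide, so MRA returns only the low
   8 bits of the high word; hence the (1 << (40 - 32)) - 1 mask applied
   before writing rdhi below. */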
2517 if (insn & ARM_CP_RW_BIT) { /* MRA */
2518 gen_iwmmxt_movl_T0_T1_wRn(acc);
2519 gen_movl_reg_T0(s, rdlo);
2520 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2521 gen_op_andl_T0_T1();
2522 gen_movl_reg_T0(s, rdhi);
2523 } else { /* MAR */
2524 gen_movl_T0_reg(s, rdlo);
2525 gen_movl_T1_reg(s, rdhi);
2526 gen_iwmmxt_movl_wRn_T0_T1(acc);
2528 return 0;
2531 return 1;
2534 /* Disassemble a system coprocessor instruction. Return nonzero if the
2535 instruction is not defined. */
2536 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2538 TCGv tmp;
2539 uint32_t rd = (insn >> 12) & 0xf;
2540 uint32_t cp = (insn >> 8) & 0xf;
2541 if (IS_USER(s)) {
2542 return 1;
2545 if (insn & ARM_CP_RW_BIT) {
2546 if (!env->cp[cp].cp_read)
2547 return 1;
2548 gen_set_pc_im(s->pc);
2549 tmp = new_tmp();
2550 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2551 store_reg(s, rd, tmp);
2552 } else {
2553 if (!env->cp[cp].cp_write)
2554 return 1;
2555 gen_set_pc_im(s->pc);
2556 tmp = load_reg(s, rd);
2557 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2558 dead_tmp(tmp);
2560 return 0;
2563 static int cp15_user_ok(uint32_t insn)
2565 int cpn = (insn >> 16) & 0xf;
2566 int cpm = insn & 0xf;
2567 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
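/* op packs opc2 in its low three bits and opc1 above them, giving a
   single value to compare against the user-accessible cp15 operations
   checked below. */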
2569 if (cpn == 13 && cpm == 0) {
2570 /* TLS register. */
2571 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2572 return 1;
2574 if (cpn == 7) {
2575 /* ISB, DSB, DMB. */
2576 if ((cpm == 5 && op == 4)
2577 || (cpm == 10 && (op == 4 || op == 5)))
2578 return 1;
2580 return 0;
2583 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if
2584 the instruction is not defined. */
2585 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2587 uint32_t rd;
2588 TCGv tmp;
2590 /* M profile cores use memory mapped registers instead of cp15. */
2591 if (arm_feature(env, ARM_FEATURE_M))
2592 return 1;
2594 if ((insn & (1 << 25)) == 0) {
2595 if (insn & (1 << 20)) {
2596 /* mrrc */
2597 return 1;
2599 /* mcrr. Used for block cache operations, so implement as no-op. */
2600 return 0;
2602 if ((insn & (1 << 4)) == 0) {
2603 /* cdp */
2604 return 1;
2606 if (IS_USER(s) && !cp15_user_ok(insn)) {
2607 return 1;
2609 if ((insn & 0x0fff0fff) == 0x0e070f90
2610 || (insn & 0x0fff0fff) == 0x0e070f58) {
2611 /* Wait for interrupt. */
2612 gen_set_pc_im(s->pc);
2613 s->is_jmp = DISAS_WFI;
2614 return 0;
2616 rd = (insn >> 12) & 0xf;
2617 if (insn & ARM_CP_RW_BIT) {
2618 tmp = new_tmp();
2619 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2620 /* If the destination register is r15 then set the condition codes. */
2621 if (rd != 15)
2622 store_reg(s, rd, tmp);
2623 else
2624 dead_tmp(tmp);
2625 } else {
2626 tmp = load_reg(s, rd);
2627 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2628 dead_tmp(tmp);
2629 /* Normally we would always end the TB here, but Linux
2630 * arch/arm/mach-pxa/sleep.S expects two instructions following
2631 * an MMU enable to execute from cache. Imitate this behaviour. */
2632 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2633 (insn & 0x0fff0fff) != 0x0e010f10)
2634 gen_lookup_tb(s);
2636 return 0;
2639 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2640 #define VFP_SREG(insn, bigbit, smallbit) \
2641 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2642 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2643 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2644 reg = (((insn) >> (bigbit)) & 0x0f) \
2645 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2646 } else { \
2647 if (insn & (1 << (smallbit))) \
2648 return 1; \
2649 reg = ((insn) >> (bigbit)) & 0x0f; \
2650 }} while (0)
2652 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2653 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2654 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2655 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2656 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2657 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
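/* A VFP single-precision register number is a 4-bit field plus one low
   bit, giving S0..S31; for doubles the extra bit becomes the high bit,
   and only VFP3 implements D16..D31 (pre-VFP3 cores must have it clear).
   For example, with insn bits [15:12] = 3 and bit 22 set, VFP_SREG_D
   yields S7 while VFP_DREG_D yields D19 on a VFP3 core. */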
2659 /* Move between integer and VFP cores. */
2660 static TCGv gen_vfp_mrs(void)
2662 TCGv tmp = new_tmp();
2663 tcg_gen_mov_i32(tmp, cpu_F0s);
2664 return tmp;
2667 static void gen_vfp_msr(TCGv tmp)
2669 tcg_gen_mov_i32(cpu_F0s, tmp);
2670 dead_tmp(tmp);
2673 static inline int
2674 vfp_enabled(CPUState * env)
2676 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2679 static void gen_neon_dup_u8(TCGv var, int shift)
2681 TCGv tmp = new_tmp();
2682 if (shift)
2683 tcg_gen_shri_i32(var, var, shift);
2684 tcg_gen_ext8u_i32(var, var);
2685 tcg_gen_shli_i32(tmp, var, 8);
2686 tcg_gen_or_i32(var, var, tmp);
2687 tcg_gen_shli_i32(tmp, var, 16);
2688 tcg_gen_or_i32(var, var, tmp);
2689 dead_tmp(tmp);
2692 static void gen_neon_dup_low16(TCGv var)
2694 TCGv tmp = new_tmp();
2695 tcg_gen_ext16u_i32(var, var);
2696 tcg_gen_shli_i32(tmp, var, 16);
2697 tcg_gen_or_i32(var, var, tmp);
2698 dead_tmp(tmp);
2701 static void gen_neon_dup_high16(TCGv var)
2703 TCGv tmp = new_tmp();
2704 tcg_gen_andi_i32(var, var, 0xffff0000);
2705 tcg_gen_shri_i32(tmp, var, 16);
2706 tcg_gen_or_i32(var, var, tmp);
2707 dead_tmp(tmp);
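/* The dup helpers above replicate a narrow value across a 32-bit word:
   gen_neon_dup_u8 turns 0x000000ab into 0xabababab, and the 16-bit
   variants turn 0x0000abcd or 0xabcd0000 into 0xabcdabcd. */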
2710 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2711 (i.e. an undefined instruction). */
2712 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2714 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2715 int dp, veclen;
2716 TCGv tmp;
2717 TCGv tmp2;
2719 if (!arm_feature(env, ARM_FEATURE_VFP))
2720 return 1;
2722 if (!vfp_enabled(env)) {
2723 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2724 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2725 return 1;
2726 rn = (insn >> 16) & 0xf;
2727 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2728 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2729 return 1;
2731 dp = ((insn & 0xf00) == 0xb00);
2732 switch ((insn >> 24) & 0xf) {
2733 case 0xe:
2734 if (insn & (1 << 4)) {
2735 /* single register transfer */
2736 rd = (insn >> 12) & 0xf;
2737 if (dp) {
2738 int size;
2739 int pass;
2741 VFP_DREG_N(rn, insn);
2742 if (insn & 0xf)
2743 return 1;
2744 if (insn & 0x00c00060
2745 && !arm_feature(env, ARM_FEATURE_NEON))
2746 return 1;
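/* Decode the scalar lane for VMOV/VDUP between a core register and a
   NEON element: bit 22 selects byte elements, bit 5 halfwords, otherwise
   a whole word; "pass" picks which 32-bit half of the D register. */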
2748 pass = (insn >> 21) & 1;
2749 if (insn & (1 << 22)) {
2750 size = 0;
2751 offset = ((insn >> 5) & 3) * 8;
2752 } else if (insn & (1 << 5)) {
2753 size = 1;
2754 offset = (insn & (1 << 6)) ? 16 : 0;
2755 } else {
2756 size = 2;
2757 offset = 0;
2759 if (insn & ARM_CP_RW_BIT) {
2760 /* vfp->arm */
2761 tmp = neon_load_reg(rn, pass);
2762 switch (size) {
2763 case 0:
2764 if (offset)
2765 tcg_gen_shri_i32(tmp, tmp, offset);
2766 if (insn & (1 << 23))
2767 gen_uxtb(tmp);
2768 else
2769 gen_sxtb(tmp);
2770 break;
2771 case 1:
2772 if (insn & (1 << 23)) {
2773 if (offset) {
2774 tcg_gen_shri_i32(tmp, tmp, 16);
2775 } else {
2776 gen_uxth(tmp);
2778 } else {
2779 if (offset) {
2780 tcg_gen_sari_i32(tmp, tmp, 16);
2781 } else {
2782 gen_sxth(tmp);
2785 break;
2786 case 2:
2787 break;
2789 store_reg(s, rd, tmp);
2790 } else {
2791 /* arm->vfp */
2792 tmp = load_reg(s, rd);
2793 if (insn & (1 << 23)) {
2794 /* VDUP */
2795 if (size == 0) {
2796 gen_neon_dup_u8(tmp, 0);
2797 } else if (size == 1) {
2798 gen_neon_dup_low16(tmp);
2800 for (n = 0; n <= pass * 2; n++) {
2801 tmp2 = new_tmp();
2802 tcg_gen_mov_i32(tmp2, tmp);
2803 neon_store_reg(rn, n, tmp2);
2805 neon_store_reg(rn, n, tmp);
2806 } else {
2807 /* VMOV */
2808 switch (size) {
2809 case 0:
2810 tmp2 = neon_load_reg(rn, pass);
2811 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2812 dead_tmp(tmp2);
2813 break;
2814 case 1:
2815 tmp2 = neon_load_reg(rn, pass);
2816 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2817 dead_tmp(tmp2);
2818 break;
2819 case 2:
2820 break;
2822 neon_store_reg(rn, pass, tmp);
2825 } else { /* !dp */
2826 if ((insn & 0x6f) != 0x00)
2827 return 1;
2828 rn = VFP_SREG_N(insn);
2829 if (insn & ARM_CP_RW_BIT) {
2830 /* vfp->arm */
2831 if (insn & (1 << 21)) {
2832 /* system register */
2833 rn >>= 1;
2835 switch (rn) {
2836 case ARM_VFP_FPSID:
2837 /* VFP2 allows access to FPSID from userspace.
2838 VFP3 restricts all id registers to privileged
2839 accesses. */
2840 if (IS_USER(s)
2841 && arm_feature(env, ARM_FEATURE_VFP3))
2842 return 1;
2843 tmp = load_cpu_field(vfp.xregs[rn]);
2844 break;
2845 case ARM_VFP_FPEXC:
2846 if (IS_USER(s))
2847 return 1;
2848 tmp = load_cpu_field(vfp.xregs[rn]);
2849 break;
2850 case ARM_VFP_FPINST:
2851 case ARM_VFP_FPINST2:
2852 /* Not present in VFP3. */
2853 if (IS_USER(s)
2854 || arm_feature(env, ARM_FEATURE_VFP3))
2855 return 1;
2856 tmp = load_cpu_field(vfp.xregs[rn]);
2857 break;
2858 case ARM_VFP_FPSCR:
2859 if (rd == 15) {
2860 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2861 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2862 } else {
2863 tmp = new_tmp();
2864 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2866 break;
2867 case ARM_VFP_MVFR0:
2868 case ARM_VFP_MVFR1:
2869 if (IS_USER(s)
2870 || !arm_feature(env, ARM_FEATURE_VFP3))
2871 return 1;
2872 tmp = load_cpu_field(vfp.xregs[rn]);
2873 break;
2874 default:
2875 return 1;
2877 } else {
2878 gen_mov_F0_vreg(0, rn);
2879 tmp = gen_vfp_mrs();
2881 if (rd == 15) {
2882 /* Set the 4 flag bits in the CPSR. */
2883 gen_set_nzcv(tmp);
2884 dead_tmp(tmp);
2885 } else {
2886 store_reg(s, rd, tmp);
2888 } else {
2889 /* arm->vfp */
2890 tmp = load_reg(s, rd);
2891 if (insn & (1 << 21)) {
2892 rn >>= 1;
2893 /* system register */
2894 switch (rn) {
2895 case ARM_VFP_FPSID:
2896 case ARM_VFP_MVFR0:
2897 case ARM_VFP_MVFR1:
2898 /* Writes are ignored. */
2899 break;
2900 case ARM_VFP_FPSCR:
2901 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2902 dead_tmp(tmp);
2903 gen_lookup_tb(s);
2904 break;
2905 case ARM_VFP_FPEXC:
2906 if (IS_USER(s))
2907 return 1;
2908 store_cpu_field(tmp, vfp.xregs[rn]);
2909 gen_lookup_tb(s);
2910 break;
2911 case ARM_VFP_FPINST:
2912 case ARM_VFP_FPINST2:
2913 store_cpu_field(tmp, vfp.xregs[rn]);
2914 break;
2915 default:
2916 return 1;
2918 } else {
2919 gen_vfp_msr(tmp);
2920 gen_mov_vreg_F0(0, rn);
2924 } else {
2925 /* data processing */
2926 /* The opcode is in bits 23, 21, 20 and 6. */
2927 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2928 if (dp) {
2929 if (op == 15) {
2930 /* rn is opcode */
2931 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2932 } else {
2933 /* rn is register number */
2934 VFP_DREG_N(rn, insn);
2937 if (op == 15 && (rn == 15 || rn > 17)) {
2938 /* Integer or single precision destination. */
2939 rd = VFP_SREG_D(insn);
2940 } else {
2941 VFP_DREG_D(rd, insn);
2944 if (op == 15 && (rn == 16 || rn == 17)) {
2945 /* Integer source. */
2946 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2947 } else {
2948 VFP_DREG_M(rm, insn);
2950 } else {
2951 rn = VFP_SREG_N(insn);
2952 if (op == 15 && rn == 15) {
2953 /* Double precision destination. */
2954 VFP_DREG_D(rd, insn);
2955 } else {
2956 rd = VFP_SREG_D(insn);
2958 rm = VFP_SREG_M(insn);
2961 veclen = env->vfp.vec_len;
2962 if (op == 15 && rn > 3)
2963 veclen = 0;
2965 /* Shut up compiler warnings. */
2966 delta_m = 0;
2967 delta_d = 0;
2968 bank_mask = 0;
2970 if (veclen > 0) {
2971 if (dp)
2972 bank_mask = 0xc;
2973 else
2974 bank_mask = 0x18;
2976 /* Figure out what type of vector operation this is. */
2977 if ((rd & bank_mask) == 0) {
2978 /* scalar */
2979 veclen = 0;
2980 } else {
2981 if (dp)
2982 delta_d = (env->vfp.vec_stride >> 1) + 1;
2983 else
2984 delta_d = env->vfp.vec_stride + 1;
2986 if ((rm & bank_mask) == 0) {
2987 /* mixed scalar/vector */
2988 delta_m = 0;
2989 } else {
2990 /* vector */
2991 delta_m = delta_d;
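/* VFP short vectors: the register file is split into banks (four
   doubles or eight singles each); an operand in bank 0 is scalar, and
   vector operands step through their bank by the FPSCR stride, wrapping
   within the bank. */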
2996 /* Load the initial operands. */
2997 if (op == 15) {
2998 switch (rn) {
2999 case 16:
3000 case 17:
3001 /* Integer source */
3002 gen_mov_F0_vreg(0, rm);
3003 break;
3004 case 8:
3005 case 9:
3006 /* Compare */
3007 gen_mov_F0_vreg(dp, rd);
3008 gen_mov_F1_vreg(dp, rm);
3009 break;
3010 case 10:
3011 case 11:
3012 /* Compare with zero */
3013 gen_mov_F0_vreg(dp, rd);
3014 gen_vfp_F1_ld0(dp);
3015 break;
3016 case 20:
3017 case 21:
3018 case 22:
3019 case 23:
3020 case 28:
3021 case 29:
3022 case 30:
3023 case 31:
3024 /* Source and destination the same. */
3025 gen_mov_F0_vreg(dp, rd);
3026 break;
3027 default:
3028 /* One source operand. */
3029 gen_mov_F0_vreg(dp, rm);
3030 break;
3032 } else {
3033 /* Two source operands. */
3034 gen_mov_F0_vreg(dp, rn);
3035 gen_mov_F1_vreg(dp, rm);
3038 for (;;) {
3039 /* Perform the calculation. */
3040 switch (op) {
3041 case 0: /* mac: fd + (fn * fm) */
3042 gen_vfp_mul(dp);
3043 gen_mov_F1_vreg(dp, rd);
3044 gen_vfp_add(dp);
3045 break;
3046 case 1: /* nmac: fd - (fn * fm) */
3047 gen_vfp_mul(dp);
3048 gen_vfp_neg(dp);
3049 gen_mov_F1_vreg(dp, rd);
3050 gen_vfp_add(dp);
3051 break;
3052 case 2: /* msc: -fd + (fn * fm) */
3053 gen_vfp_mul(dp);
3054 gen_mov_F1_vreg(dp, rd);
3055 gen_vfp_sub(dp);
3056 break;
3057 case 3: /* nmsc: -fd - (fn * fm) */
3058 gen_vfp_mul(dp);
3059 gen_vfp_neg(dp);
3060 gen_mov_F1_vreg(dp, rd);
3061 gen_vfp_sub(dp);
3062 break;
3063 case 4: /* mul: fn * fm */
3064 gen_vfp_mul(dp);
3065 break;
3066 case 5: /* nmul: -(fn * fm) */
3067 gen_vfp_mul(dp);
3068 gen_vfp_neg(dp);
3069 break;
3070 case 6: /* add: fn + fm */
3071 gen_vfp_add(dp);
3072 break;
3073 case 7: /* sub: fn - fm */
3074 gen_vfp_sub(dp);
3075 break;
3076 case 8: /* div: fn / fm */
3077 gen_vfp_div(dp);
3078 break;
3079 case 14: /* fconst */
3080 if (!arm_feature(env, ARM_FEATURE_VFP3))
3081 return 1;
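/* VFP3 VMOV immediate: the 8-bit constant abcdefgh expands to
   sign = a, exponent = NOT(b):b..b:cd and fraction = efgh followed by
   zeros, matching the VFPExpandImm() pseudocode in the ARM ARM. */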
3083 n = (insn << 12) & 0x80000000;
3084 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3085 if (dp) {
3086 if (i & 0x40)
3087 i |= 0x3f80;
3088 else
3089 i |= 0x4000;
3090 n |= i << 16;
3091 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3092 } else {
3093 if (i & 0x40)
3094 i |= 0x780;
3095 else
3096 i |= 0x800;
3097 n |= i << 19;
3098 tcg_gen_movi_i32(cpu_F0s, n);
3100 break;
3101 case 15: /* extension space */
3102 switch (rn) {
3103 case 0: /* cpy */
3104 /* no-op */
3105 break;
3106 case 1: /* abs */
3107 gen_vfp_abs(dp);
3108 break;
3109 case 2: /* neg */
3110 gen_vfp_neg(dp);
3111 break;
3112 case 3: /* sqrt */
3113 gen_vfp_sqrt(dp);
3114 break;
3115 case 8: /* cmp */
3116 gen_vfp_cmp(dp);
3117 break;
3118 case 9: /* cmpe */
3119 gen_vfp_cmpe(dp);
3120 break;
3121 case 10: /* cmpz */
3122 gen_vfp_cmp(dp);
3123 break;
3124 case 11: /* cmpez */
3125 gen_vfp_F1_ld0(dp);
3126 gen_vfp_cmpe(dp);
3127 break;
3128 case 15: /* single<->double conversion */
3129 if (dp)
3130 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3131 else
3132 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3133 break;
3134 case 16: /* fuito */
3135 gen_vfp_uito(dp);
3136 break;
3137 case 17: /* fsito */
3138 gen_vfp_sito(dp);
3139 break;
3140 case 20: /* fshto */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
3143 gen_vfp_shto(dp, 16 - rm);
3144 break;
3145 case 21: /* fslto */
3146 if (!arm_feature(env, ARM_FEATURE_VFP3))
3147 return 1;
3148 gen_vfp_slto(dp, 32 - rm);
3149 break;
3150 case 22: /* fuhto */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
3153 gen_vfp_uhto(dp, 16 - rm);
3154 break;
3155 case 23: /* fulto */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
3158 gen_vfp_ulto(dp, 32 - rm);
3159 break;
3160 case 24: /* ftoui */
3161 gen_vfp_toui(dp);
3162 break;
3163 case 25: /* ftouiz */
3164 gen_vfp_touiz(dp);
3165 break;
3166 case 26: /* ftosi */
3167 gen_vfp_tosi(dp);
3168 break;
3169 case 27: /* ftosiz */
3170 gen_vfp_tosiz(dp);
3171 break;
3172 case 28: /* ftosh */
3173 if (!arm_feature(env, ARM_FEATURE_VFP3))
3174 return 1;
3175 gen_vfp_tosh(dp, 16 - rm);
3176 break;
3177 case 29: /* ftosl */
3178 if (!arm_feature(env, ARM_FEATURE_VFP3))
3179 return 1;
3180 gen_vfp_tosl(dp, 32 - rm);
3181 break;
3182 case 30: /* ftouh */
3183 if (!arm_feature(env, ARM_FEATURE_VFP3))
3184 return 1;
3185 gen_vfp_touh(dp, 16 - rm);
3186 break;
3187 case 31: /* ftoul */
3188 if (!arm_feature(env, ARM_FEATURE_VFP3))
3189 return 1;
3190 gen_vfp_toul(dp, 32 - rm);
3191 break;
3192 default: /* undefined */
3193 printf ("rn:%d\n", rn);
3194 return 1;
3196 break;
3197 default: /* undefined */
3198 printf ("op:%d\n", op);
3199 return 1;
3202 /* Write back the result. */
3203 if (op == 15 && (rn >= 8 && rn <= 11))
3204 ; /* Comparison, do nothing. */
3205 else if (op == 15 && rn > 17)
3206 /* Integer result. */
3207 gen_mov_vreg_F0(0, rd);
3208 else if (op == 15 && rn == 15)
3209 /* conversion */
3210 gen_mov_vreg_F0(!dp, rd);
3211 else
3212 gen_mov_vreg_F0(dp, rd);
3214 /* break out of the loop if we have finished */
3215 if (veclen == 0)
3216 break;
3218 if (op == 15 && delta_m == 0) {
3219 /* single source one-many */
3220 while (veclen--) {
3221 rd = ((rd + delta_d) & (bank_mask - 1))
3222 | (rd & bank_mask);
3223 gen_mov_vreg_F0(dp, rd);
3225 break;
3227 /* Setup the next operands. */
3228 veclen--;
3229 rd = ((rd + delta_d) & (bank_mask - 1))
3230 | (rd & bank_mask);
3232 if (op == 15) {
3233 /* One source operand. */
3234 rm = ((rm + delta_m) & (bank_mask - 1))
3235 | (rm & bank_mask);
3236 gen_mov_F0_vreg(dp, rm);
3237 } else {
3238 /* Two source operands. */
3239 rn = ((rn + delta_d) & (bank_mask - 1))
3240 | (rn & bank_mask);
3241 gen_mov_F0_vreg(dp, rn);
3242 if (delta_m) {
3243 rm = ((rm + delta_m) & (bank_mask - 1))
3244 | (rm & bank_mask);
3245 gen_mov_F1_vreg(dp, rm);
3250 break;
3251 case 0xc:
3252 case 0xd:
3253 if (dp && (insn & 0x03e00000) == 0x00400000) {
3254 /* two-register transfer */
3255 rn = (insn >> 16) & 0xf;
3256 rd = (insn >> 12) & 0xf;
3257 if (dp) {
3258 VFP_DREG_M(rm, insn);
3259 } else {
3260 rm = VFP_SREG_M(insn);
3263 if (insn & ARM_CP_RW_BIT) {
3264 /* vfp->arm */
3265 if (dp) {
3266 gen_mov_F0_vreg(0, rm * 2);
3267 tmp = gen_vfp_mrs();
3268 store_reg(s, rd, tmp);
3269 gen_mov_F0_vreg(0, rm * 2 + 1);
3270 tmp = gen_vfp_mrs();
3271 store_reg(s, rn, tmp);
3272 } else {
3273 gen_mov_F0_vreg(0, rm);
3274 tmp = gen_vfp_mrs();
3275 store_reg(s, rn, tmp);
3276 gen_mov_F0_vreg(0, rm + 1);
3277 tmp = gen_vfp_mrs();
3278 store_reg(s, rd, tmp);
3280 } else {
3281 /* arm->vfp */
3282 if (dp) {
3283 tmp = load_reg(s, rd);
3284 gen_vfp_msr(tmp);
3285 gen_mov_vreg_F0(0, rm * 2);
3286 tmp = load_reg(s, rn);
3287 gen_vfp_msr(tmp);
3288 gen_mov_vreg_F0(0, rm * 2 + 1);
3289 } else {
3290 tmp = load_reg(s, rn);
3291 gen_vfp_msr(tmp);
3292 gen_mov_vreg_F0(0, rm);
3293 tmp = load_reg(s, rd);
3294 gen_vfp_msr(tmp);
3295 gen_mov_vreg_F0(0, rm + 1);
3298 } else {
3299 /* Load/store */
3300 rn = (insn >> 16) & 0xf;
3301 if (dp)
3302 VFP_DREG_D(rd, insn);
3303 else
3304 rd = VFP_SREG_D(insn);
3305 if (s->thumb && rn == 15) {
3306 gen_op_movl_T1_im(s->pc & ~2);
3307 } else {
3308 gen_movl_T1_reg(s, rn);
3310 if ((insn & 0x01200000) == 0x01000000) {
3311 /* Single load/store */
3312 offset = (insn & 0xff) << 2;
3313 if ((insn & (1 << 23)) == 0)
3314 offset = -offset;
3315 gen_op_addl_T1_im(offset);
3316 if (insn & (1 << 20)) {
3317 gen_vfp_ld(s, dp);
3318 gen_mov_vreg_F0(dp, rd);
3319 } else {
3320 gen_mov_F0_vreg(dp, rd);
3321 gen_vfp_st(s, dp);
3323 } else {
3324 /* load/store multiple */
3325 if (dp)
3326 n = (insn >> 1) & 0x7f;
3327 else
3328 n = insn & 0xff;
3330 if (insn & (1 << 24)) /* pre-decrement */
3331 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3333 if (dp)
3334 offset = 8;
3335 else
3336 offset = 4;
3337 for (i = 0; i < n; i++) {
3338 if (insn & ARM_CP_RW_BIT) {
3339 /* load */
3340 gen_vfp_ld(s, dp);
3341 gen_mov_vreg_F0(dp, rd + i);
3342 } else {
3343 /* store */
3344 gen_mov_F0_vreg(dp, rd + i);
3345 gen_vfp_st(s, dp);
3347 gen_op_addl_T1_im(offset);
3349 if (insn & (1 << 21)) {
3350 /* writeback */
3351 if (insn & (1 << 24))
3352 offset = -offset * n;
3353 else if (dp && (insn & 1))
3354 offset = 4;
3355 else
3356 offset = 0;
3358 if (offset != 0)
3359 gen_op_addl_T1_im(offset);
3360 gen_movl_reg_T1(s, rn);
3364 break;
3365 default:
3366 /* Should never happen. */
3367 return 1;
3369 return 0;
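/* Direct block chaining is only valid when the destination lies on the
   same guest page as the TB being translated, since other pages may be
   remapped; otherwise set the PC and take an unchained exit. */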
3372 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3374 TranslationBlock *tb;
3376 tb = s->tb;
3377 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3378 tcg_gen_goto_tb(n);
3379 gen_set_pc_im(dest);
3380 tcg_gen_exit_tb((long)tb + n);
3381 } else {
3382 gen_set_pc_im(dest);
3383 tcg_gen_exit_tb(0);
3387 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3389 if (unlikely(s->singlestep_enabled)) {
3390 /* An indirect jump so that we still trigger the debug exception. */
3391 if (s->thumb)
3392 dest |= 1;
3393 gen_bx_im(s, dest);
3394 } else {
3395 gen_goto_tb(s, 0, dest);
3396 s->is_jmp = DISAS_TB_JUMP;
3400 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3402 if (x)
3403 tcg_gen_sari_i32(t0, t0, 16);
3404 else
3405 gen_sxth(t0);
3406 if (y)
3407 tcg_gen_sari_i32(t1, t1, 16);
3408 else
3409 gen_sxth(t1);
3410 tcg_gen_mul_i32(t0, t0, t1);
3413 /* Return the mask of PSR bits set by a MSR instruction. */
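/* The flags argument mirrors the MSR field mask: bit 0 = c (PSR[7:0]),
   bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]). */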
3414 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3415 uint32_t mask;
3417 mask = 0;
3418 if (flags & (1 << 0))
3419 mask |= 0xff;
3420 if (flags & (1 << 1))
3421 mask |= 0xff00;
3422 if (flags & (1 << 2))
3423 mask |= 0xff0000;
3424 if (flags & (1 << 3))
3425 mask |= 0xff000000;
3427 /* Mask out undefined bits. */
3428 mask &= ~CPSR_RESERVED;
3429 if (!arm_feature(env, ARM_FEATURE_V6))
3430 mask &= ~(CPSR_E | CPSR_GE);
3431 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3432 mask &= ~CPSR_IT;
3433 /* Mask out execution state bits. */
3434 if (!spsr)
3435 mask &= ~CPSR_EXEC;
3436 /* Mask out privileged bits. */
3437 if (IS_USER(s))
3438 mask &= CPSR_USER;
3439 return mask;
3442 /* Returns nonzero if access to the PSR is not permitted. */
3443 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3445 TCGv tmp;
3446 if (spsr) {
3447 /* ??? This is also undefined in system mode. */
3448 if (IS_USER(s))
3449 return 1;
3451 tmp = load_cpu_field(spsr);
3452 tcg_gen_andi_i32(tmp, tmp, ~mask);
3453 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3454 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3455 store_cpu_field(tmp, spsr);
3456 } else {
3457 gen_set_cpsr(cpu_T[0], mask);
3459 gen_lookup_tb(s);
3460 return 0;
3463 /* Generate an old-style exception return. Marks pc as dead. */
3464 static void gen_exception_return(DisasContext *s, TCGv pc)
3466 TCGv tmp;
3467 store_reg(s, 15, pc);
3468 tmp = load_cpu_field(spsr);
3469 gen_set_cpsr(tmp, 0xffffffff);
3470 dead_tmp(tmp);
3471 s->is_jmp = DISAS_UPDATE;
3474 /* Generate a v6 exception return. Marks both values as dead. */
3475 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3477 gen_set_cpsr(cpsr, 0xffffffff);
3478 dead_tmp(cpsr);
3479 store_reg(s, 15, pc);
3480 s->is_jmp = DISAS_UPDATE;
3483 static inline void
3484 gen_set_condexec (DisasContext *s)
3486 if (s->condexec_mask) {
3487 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3488 TCGv tmp = new_tmp();
3489 tcg_gen_movi_i32(tmp, val);
3490 store_cpu_field(tmp, condexec_bits);
3494 static void gen_nop_hint(DisasContext *s, int val)
3496 switch (val) {
3497 case 3: /* wfi */
3498 gen_set_pc_im(s->pc);
3499 s->is_jmp = DISAS_WFI;
3500 break;
3501 case 2: /* wfe */
3502 case 4: /* sev */
3503 /* TODO: Implement SEV and WFE. May help SMP performance. */
3504 default: /* nop */
3505 break;
3509 /* These macros help make the code more readable when migrating from the
3510 old dyngen helpers. They should probably be removed when
3511 T0/T1 are removed. */
3512 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3513 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3515 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3517 static inline int gen_neon_add(int size)
3519 switch (size) {
3520 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3521 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3522 case 2: gen_op_addl_T0_T1(); break;
3523 default: return 1;
3525 return 0;
3528 static inline void gen_neon_rsb(int size)
3530 switch (size) {
3531 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3532 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3533 case 2: gen_op_rsbl_T0_T1(); break;
3534 default: return;
3538 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3539 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3540 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3541 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3542 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3544 /* FIXME: This is wrong. They set the wrong overflow bit. */
3545 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3546 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3547 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3548 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3550 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3551 switch ((size << 1) | u) { \
3552 case 0: \
3553 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3554 break; \
3555 case 1: \
3556 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3557 break; \
3558 case 2: \
3559 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3560 break; \
3561 case 3: \
3562 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3563 break; \
3564 case 4: \
3565 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3566 break; \
3567 case 5: \
3568 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3569 break; \
3570 default: return 1; \
3571 }} while (0)
3573 #define GEN_NEON_INTEGER_OP(name) do { \
3574 switch ((size << 1) | u) { \
3575 case 0: \
3576 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3577 break; \
3578 case 1: \
3579 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3580 break; \
3581 case 2: \
3582 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3583 break; \
3584 case 3: \
3585 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3586 break; \
3587 case 4: \
3588 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3589 break; \
3590 case 5: \
3591 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3592 break; \
3593 default: return 1; \
3594 }} while (0)
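/* In both macros the case index is (size << 1) | u, so even cases are
   the signed 8/16/32-bit element helpers and odd cases the unsigned
   ones; size 3 (64-bit elements) is handled separately by the callers. */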
3596 static inline void
3597 gen_neon_movl_scratch_T0(int scratch)
3599 uint32_t offset;
3601 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3602 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3605 static inline void
3606 gen_neon_movl_scratch_T1(int scratch)
3608 uint32_t offset;
3610 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3611 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3614 static inline void
3615 gen_neon_movl_T0_scratch(int scratch)
3617 uint32_t offset;
3619 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3620 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3623 static inline void
3624 gen_neon_movl_T1_scratch(int scratch)
3626 uint32_t offset;
3628 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3629 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3632 static inline void gen_neon_get_scalar(int size, int reg)
3634 if (size == 1) {
3635 NEON_GET_REG(T0, reg >> 1, reg & 1);
3636 } else {
3637 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3638 if (reg & 1)
3639 gen_neon_dup_low16(cpu_T[0]);
3640 else
3641 gen_neon_dup_high16(cpu_T[0]);
3645 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3647 int n;
3649 for (n = 0; n < q + 1; n += 2) {
3650 NEON_GET_REG(T0, reg, n);
3651 NEON_GET_REG(T1, reg, n + 1);
3652 switch (size) {
3653 case 0: gen_helper_neon_unzip_u8(); break;
3654 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3655 case 2: /* no-op */; break;
3656 default: abort();
3658 gen_neon_movl_scratch_T0(tmp + n);
3659 gen_neon_movl_scratch_T1(tmp + n + 1);
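/* Per-op layout for the VLDn/VSTn "all elements" forms, indexed by the
   op field: nregs is the number of D registers transferred, interleave
   the element interleave factor, and spacing the distance between the
   registers of one structure (1 = consecutive, 2 = every other). */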
3663 static struct {
3664 int nregs;
3665 int interleave;
3666 int spacing;
3667 } neon_ls_element_type[11] = {
3668 {4, 4, 1},
3669 {4, 4, 2},
3670 {4, 1, 1},
3671 {4, 2, 1},
3672 {3, 3, 1},
3673 {3, 3, 2},
3674 {3, 1, 1},
3675 {1, 1, 1},
3676 {2, 2, 1},
3677 {2, 2, 2},
3678 {2, 1, 1}
3681 /* Translate a NEON load/store element instruction. Return nonzero if the
3682 instruction is invalid. */
3683 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3685 int rd, rn, rm;
3686 int op;
3687 int nregs;
3688 int interleave;
3689 int stride;
3690 int size;
3691 int reg;
3692 int pass;
3693 int load;
3694 int shift;
3695 int n;
3696 TCGv tmp;
3697 TCGv tmp2;
3699 if (!vfp_enabled(env))
3700 return 1;
3701 VFP_DREG_D(rd, insn);
3702 rn = (insn >> 16) & 0xf;
3703 rm = insn & 0xf;
3704 load = (insn & (1 << 21)) != 0;
3705 if ((insn & (1 << 23)) == 0) {
3706 /* Load store all elements. */
3707 op = (insn >> 8) & 0xf;
3708 size = (insn >> 6) & 3;
3709 if (op > 10 || size == 3)
3710 return 1;
3711 nregs = neon_ls_element_type[op].nregs;
3712 interleave = neon_ls_element_type[op].interleave;
3713 gen_movl_T1_reg(s, rn);
3714 stride = (1 << size) * interleave;
3715 for (reg = 0; reg < nregs; reg++) {
3716 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3717 gen_movl_T1_reg(s, rn);
3718 gen_op_addl_T1_im((1 << size) * reg);
3719 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3720 gen_movl_T1_reg(s, rn);
3721 gen_op_addl_T1_im(1 << size);
3723 for (pass = 0; pass < 2; pass++) {
3724 if (size == 2) {
3725 if (load) {
3726 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3727 neon_store_reg(rd, pass, tmp);
3728 } else {
3729 tmp = neon_load_reg(rd, pass);
3730 gen_st32(tmp, cpu_T[1], IS_USER(s));
3732 gen_op_addl_T1_im(stride);
3733 } else if (size == 1) {
3734 if (load) {
3735 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3736 gen_op_addl_T1_im(stride);
3737 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3738 gen_op_addl_T1_im(stride);
3739 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3740 dead_tmp(tmp2);
3741 neon_store_reg(rd, pass, tmp);
3742 } else {
3743 tmp = neon_load_reg(rd, pass);
3744 tmp2 = new_tmp();
3745 tcg_gen_shri_i32(tmp2, tmp, 16);
3746 gen_st16(tmp, cpu_T[1], IS_USER(s));
3747 gen_op_addl_T1_im(stride);
3748 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3749 gen_op_addl_T1_im(stride);
3751 } else /* size == 0 */ {
3752 if (load) {
3753 TCGV_UNUSED(tmp2);
3754 for (n = 0; n < 4; n++) {
3755 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3756 gen_op_addl_T1_im(stride);
3757 if (n == 0) {
3758 tmp2 = tmp;
3759 } else {
3760 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3761 dead_tmp(tmp);
3764 neon_store_reg(rd, pass, tmp2);
3765 } else {
3766 tmp2 = neon_load_reg(rd, pass);
3767 for (n = 0; n < 4; n++) {
3768 tmp = new_tmp();
3769 if (n == 0) {
3770 tcg_gen_mov_i32(tmp, tmp2);
3771 } else {
3772 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3774 gen_st8(tmp, cpu_T[1], IS_USER(s));
3775 gen_op_addl_T1_im(stride);
3777 dead_tmp(tmp2);
3781 rd += neon_ls_element_type[op].spacing;
3783 stride = nregs * 8;
3784 } else {
3785 size = (insn >> 10) & 3;
3786 if (size == 3) {
3787 /* Load single element to all lanes. */
3788 if (!load)
3789 return 1;
3790 size = (insn >> 6) & 3;
3791 nregs = ((insn >> 8) & 3) + 1;
3792 stride = (insn & (1 << 5)) ? 2 : 1;
3793 gen_movl_T1_reg(s, rn);
3794 for (reg = 0; reg < nregs; reg++) {
3795 switch (size) {
3796 case 0:
3797 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3798 gen_neon_dup_u8(tmp, 0);
3799 break;
3800 case 1:
3801 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3802 gen_neon_dup_low16(tmp);
3803 break;
3804 case 2:
3805 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3806 break;
3807 case 3:
3808 return 1;
3809 default: /* Avoid compiler warnings. */
3810 abort();
3812 gen_op_addl_T1_im(1 << size);
3813 tmp2 = new_tmp();
3814 tcg_gen_mov_i32(tmp2, tmp);
3815 neon_store_reg(rd, 0, tmp2);
3816 neon_store_reg(rd, 1, tmp);
3817 rd += stride;
3819 stride = (1 << size) * nregs;
3820 } else {
3821 /* Single element. */
3822 pass = (insn >> 7) & 1;
3823 switch (size) {
3824 case 0:
3825 shift = ((insn >> 5) & 3) * 8;
3826 stride = 1;
3827 break;
3828 case 1:
3829 shift = ((insn >> 6) & 1) * 16;
3830 stride = (insn & (1 << 5)) ? 2 : 1;
3831 break;
3832 case 2:
3833 shift = 0;
3834 stride = (insn & (1 << 6)) ? 2 : 1;
3835 break;
3836 default:
3837 abort();
3839 nregs = ((insn >> 8) & 3) + 1;
3840 gen_movl_T1_reg(s, rn);
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (load) {
3843 switch (size) {
3844 case 0:
3845 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3846 break;
3847 case 1:
3848 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3849 break;
3850 case 2:
3851 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3852 break;
3853 default: /* Avoid compiler warnings. */
3854 abort();
3856 if (size != 2) {
3857 tmp2 = neon_load_reg(rd, pass);
3858 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3859 dead_tmp(tmp2);
3861 neon_store_reg(rd, pass, tmp);
3862 } else { /* Store */
3863 tmp = neon_load_reg(rd, pass);
3864 if (shift)
3865 tcg_gen_shri_i32(tmp, tmp, shift);
3866 switch (size) {
3867 case 0:
3868 gen_st8(tmp, cpu_T[1], IS_USER(s));
3869 break;
3870 case 1:
3871 gen_st16(tmp, cpu_T[1], IS_USER(s));
3872 break;
3873 case 2:
3874 gen_st32(tmp, cpu_T[1], IS_USER(s));
3875 break;
3878 rd += stride;
3879 gen_op_addl_T1_im(1 << size);
3881 stride = nregs * (1 << size);
3884 if (rm != 15) {
3885 TCGv base;
3887 base = load_reg(s, rn);
3888 if (rm == 13) {
3889 tcg_gen_addi_i32(base, base, stride);
3890 } else {
3891 TCGv index;
3892 index = load_reg(s, rm);
3893 tcg_gen_add_i32(base, base, index);
3894 dead_tmp(index);
3896 store_reg(s, rn, base);
3898 return 0;
3901 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3902 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3904 tcg_gen_and_i32(t, t, c);
3905 tcg_gen_bic_i32(f, f, c);
3906 tcg_gen_or_i32(dest, t, f);
3909 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3911 switch (size) {
3912 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3913 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3914 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3915 default: abort();
3919 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3921 switch (size) {
3922 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3923 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3924 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3925 default: abort();
3929 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3931 switch (size) {
3932 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3933 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3934 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3935 default: abort();
3939 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3940 int q, int u)
3942 if (q) {
3943 if (u) {
3944 switch (size) {
3945 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3946 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3947 default: abort();
3949 } else {
3950 switch (size) {
3951 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3952 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3953 default: abort();
3956 } else {
3957 if (u) {
3958 switch (size) {
3959 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3960 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3961 default: abort();
3963 } else {
3964 switch (size) {
3965 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3966 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3967 default: abort();
3973 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3975 if (u) {
3976 switch (size) {
3977 case 0: gen_helper_neon_widen_u8(dest, src); break;
3978 case 1: gen_helper_neon_widen_u16(dest, src); break;
3979 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3980 default: abort();
3982 } else {
3983 switch (size) {
3984 case 0: gen_helper_neon_widen_s8(dest, src); break;
3985 case 1: gen_helper_neon_widen_s16(dest, src); break;
3986 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3987 default: abort();
3990 dead_tmp(src);
3993 static inline void gen_neon_addl(int size)
3995 switch (size) {
3996 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3997 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3998 case 2: tcg_gen_add_i64(CPU_V001); break;
3999 default: abort();
4003 static inline void gen_neon_subl(int size)
4005 switch (size) {
4006 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4007 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4008 case 2: tcg_gen_sub_i64(CPU_V001); break;
4009 default: abort();
4013 static inline void gen_neon_negl(TCGv_i64 var, int size)
4015 switch (size) {
4016 case 0: gen_helper_neon_negl_u16(var, var); break;
4017 case 1: gen_helper_neon_negl_u32(var, var); break;
4018 case 2: gen_helper_neon_negl_u64(var, var); break;
4019 default: abort();
4023 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4025 switch (size) {
4026 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4027 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4028 default: abort();
4032 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4034 TCGv_i64 tmp;
4036 switch ((size << 1) | u) {
4037 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4038 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4039 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4040 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4041 case 4:
4042 tmp = gen_muls_i64_i32(a, b);
4043 tcg_gen_mov_i64(dest, tmp);
4044 break;
4045 case 5:
4046 tmp = gen_mulu_i64_i32(a, b);
4047 tcg_gen_mov_i64(dest, tmp);
4048 break;
4049 default: abort();
4051 if (size < 2) {
4052 dead_tmp(b);
4053 dead_tmp(a);
4057 /* Translate a NEON data processing instruction. Return nonzero if the
4058 instruction is invalid.
4059 We process data in a mixture of 32-bit and 64-bit chunks.
4060 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4062 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4064 int op;
4065 int q;
4066 int rd, rn, rm;
4067 int size;
4068 int shift;
4069 int pass;
4070 int count;
4071 int pairwise;
4072 int u;
4073 int n;
4074 uint32_t imm;
4075 TCGv tmp;
4076 TCGv tmp2;
4077 TCGv tmp3;
4078 TCGv_i64 tmp64;
4080 if (!vfp_enabled(env))
4081 return 1;
4082 q = (insn & (1 << 6)) != 0;
4083 u = (insn >> 24) & 1;
4084 VFP_DREG_D(rd, insn);
4085 VFP_DREG_N(rn, insn);
4086 VFP_DREG_M(rm, insn);
4087 size = (insn >> 20) & 3;
4088 if ((insn & (1 << 23)) == 0) {
4089 /* Three register same length. */
4090 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4091 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4092 || op == 10 || op == 11 || op == 16)) {
4093 /* 64-bit element instructions. */
4094 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4095 neon_load_reg64(cpu_V0, rn + pass);
4096 neon_load_reg64(cpu_V1, rm + pass);
4097 switch (op) {
4098 case 1: /* VQADD */
4099 if (u) {
4100 gen_helper_neon_add_saturate_u64(CPU_V001);
4101 } else {
4102 gen_helper_neon_add_saturate_s64(CPU_V001);
4104 break;
4105 case 5: /* VQSUB */
4106 if (u) {
4107 gen_helper_neon_sub_saturate_u64(CPU_V001);
4108 } else {
4109 gen_helper_neon_sub_saturate_s64(CPU_V001);
4111 break;
4112 case 8: /* VSHL */
4113 if (u) {
4114 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4115 } else {
4116 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4118 break;
4119 case 9: /* VQSHL */
4120 if (u) {
4121 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4122 cpu_V1, cpu_V0);
4123 } else {
4124 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4125 cpu_V1, cpu_V0);
4127 break;
4128 case 10: /* VRSHL */
4129 if (u) {
4130 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4131 } else {
4132 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4134 break;
4135 case 11: /* VQRSHL */
4136 if (u) {
4137 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4138 cpu_V1, cpu_V0);
4139 } else {
4140 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4141 cpu_V1, cpu_V0);
4143 break;
4144 case 16:
4145 if (u) {
4146 tcg_gen_sub_i64(CPU_V001);
4147 } else {
4148 tcg_gen_add_i64(CPU_V001);
4150 break;
4151 default:
4152 abort();
4154 neon_store_reg64(cpu_V0, rd + pass);
4156 return 0;
4158 switch (op) {
4159 case 8: /* VSHL */
4160 case 9: /* VQSHL */
4161 case 10: /* VRSHL */
4162 case 11: /* VQRSHL */
4164 int rtmp;
4165 /* Shift instruction operands are reversed. */
4166 rtmp = rn;
4167 rn = rm;
4168 rm = rtmp;
4169 pairwise = 0;
4171 break;
4172 case 20: /* VPMAX */
4173 case 21: /* VPMIN */
4174 case 23: /* VPADD */
4175 pairwise = 1;
4176 break;
4177 case 26: /* VPADD (float) */
4178 pairwise = (u && size < 2);
4179 break;
4180 case 30: /* VPMIN/VPMAX (float) */
4181 pairwise = u;
4182 break;
4183 default:
4184 pairwise = 0;
4185 break;
4187 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4189 if (pairwise) {
4190 /* Pairwise. */
4191 if (q)
4192 n = (pass & 1) * 2;
4193 else
4194 n = 0;
4195 if (pass < q + 1) {
4196 NEON_GET_REG(T0, rn, n);
4197 NEON_GET_REG(T1, rn, n + 1);
4198 } else {
4199 NEON_GET_REG(T0, rm, n);
4200 NEON_GET_REG(T1, rm, n + 1);
4202 } else {
4203 /* Elementwise. */
4204 NEON_GET_REG(T0, rn, pass);
4205 NEON_GET_REG(T1, rm, pass);
4207 switch (op) {
4208 case 0: /* VHADD */
4209 GEN_NEON_INTEGER_OP(hadd);
4210 break;
4211 case 1: /* VQADD */
4212 GEN_NEON_INTEGER_OP_ENV(qadd);
4213 break;
4214 case 2: /* VRHADD */
4215 GEN_NEON_INTEGER_OP(rhadd);
4216 break;
4217 case 3: /* Logic ops. */
4218 switch ((u << 2) | size) {
4219 case 0: /* VAND */
4220 gen_op_andl_T0_T1();
4221 break;
4222 case 1: /* BIC */
4223 gen_op_bicl_T0_T1();
4224 break;
4225 case 2: /* VORR */
4226 gen_op_orl_T0_T1();
4227 break;
4228 case 3: /* VORN */
4229 gen_op_notl_T1();
4230 gen_op_orl_T0_T1();
4231 break;
4232 case 4: /* VEOR */
4233 gen_op_xorl_T0_T1();
4234 break;
4235 case 5: /* VBSL */
4236 tmp = neon_load_reg(rd, pass);
4237 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4238 dead_tmp(tmp);
4239 break;
4240 case 6: /* VBIT */
4241 tmp = neon_load_reg(rd, pass);
4242 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4243 dead_tmp(tmp);
4244 break;
4245 case 7: /* VBIF */
4246 tmp = neon_load_reg(rd, pass);
4247 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4248 dead_tmp(tmp);
4249 break;
4251 break;
4252 case 4: /* VHSUB */
4253 GEN_NEON_INTEGER_OP(hsub);
4254 break;
4255 case 5: /* VQSUB */
4256 GEN_NEON_INTEGER_OP_ENV(qsub);
4257 break;
4258 case 6: /* VCGT */
4259 GEN_NEON_INTEGER_OP(cgt);
4260 break;
4261 case 7: /* VCGE */
4262 GEN_NEON_INTEGER_OP(cge);
4263 break;
4264 case 8: /* VSHL */
4265 GEN_NEON_INTEGER_OP(shl);
4266 break;
4267 case 9: /* VQSHL */
4268 GEN_NEON_INTEGER_OP_ENV(qshl);
4269 break;
4270 case 10: /* VRSHL */
4271 GEN_NEON_INTEGER_OP(rshl);
4272 break;
4273 case 11: /* VQRSHL */
4274 GEN_NEON_INTEGER_OP_ENV(qrshl);
4275 break;
4276 case 12: /* VMAX */
4277 GEN_NEON_INTEGER_OP(max);
4278 break;
4279 case 13: /* VMIN */
4280 GEN_NEON_INTEGER_OP(min);
4281 break;
4282 case 14: /* VABD */
4283 GEN_NEON_INTEGER_OP(abd);
4284 break;
4285 case 15: /* VABA */
4286 GEN_NEON_INTEGER_OP(abd);
4287 NEON_GET_REG(T1, rd, pass);
4288 gen_neon_add(size);
4289 break;
4290 case 16:
4291 if (!u) { /* VADD */
4292 if (gen_neon_add(size))
4293 return 1;
4294 } else { /* VSUB */
4295 switch (size) {
4296 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4297 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4298 case 2: gen_op_subl_T0_T1(); break;
4299 default: return 1;
4302 break;
4303 case 17:
4304 if (!u) { /* VTST */
4305 switch (size) {
4306 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4308 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4309 default: return 1;
4311 } else { /* VCEQ */
4312 switch (size) {
4313 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4314 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4315 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4316 default: return 1;
4319 break;
4320 case 18: /* Multiply. */
4321 switch (size) {
4322 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4323 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4324 case 2: gen_op_mul_T0_T1(); break;
4325 default: return 1;
4327 NEON_GET_REG(T1, rd, pass);
4328 if (u) { /* VMLS */
4329 gen_neon_rsb(size);
4330 } else { /* VMLA */
4331 gen_neon_add(size);
4333 break;
4334 case 19: /* VMUL */
4335 if (u) { /* polynomial */
4336 gen_helper_neon_mul_p8(CPU_T001);
4337 } else { /* Integer */
4338 switch (size) {
4339 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4340 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4341 case 2: gen_op_mul_T0_T1(); break;
4342 default: return 1;
4345 break;
4346 case 20: /* VPMAX */
4347 GEN_NEON_INTEGER_OP(pmax);
4348 break;
4349 case 21: /* VPMIN */
4350 GEN_NEON_INTEGER_OP(pmin);
4351 break;
4352 case 22: /* Multiply high. */
4353 if (!u) { /* VQDMULH */
4354 switch (size) {
4355 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4356 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4357 default: return 1;
4359 } else { /* VQRDMULH */
4360 switch (size) {
4361 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4363 default: return 1;
4366 break;
4367 case 23: /* VPADD */
4368 if (u)
4369 return 1;
4370 switch (size) {
4371 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4372 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4373 case 2: gen_op_addl_T0_T1(); break;
4374 default: return 1;
4376 break;
4377 case 26: /* Floating point arithmetic. */
4378 switch ((u << 2) | size) {
4379 case 0: /* VADD */
4380 gen_helper_neon_add_f32(CPU_T001);
4381 break;
4382 case 2: /* VSUB */
4383 gen_helper_neon_sub_f32(CPU_T001);
4384 break;
4385 case 4: /* VPADD */
4386 gen_helper_neon_add_f32(CPU_T001);
4387 break;
4388 case 6: /* VABD */
4389 gen_helper_neon_abd_f32(CPU_T001);
4390 break;
4391 default:
4392 return 1;
4394 break;
4395 case 27: /* Float multiply. */
4396 gen_helper_neon_mul_f32(CPU_T001);
4397 if (!u) {
4398 NEON_GET_REG(T1, rd, pass);
4399 if (size == 0) {
4400 gen_helper_neon_add_f32(CPU_T001);
4401 } else {
4402 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4405 break;
4406 case 28: /* Float compare. */
4407 if (!u) {
4408 gen_helper_neon_ceq_f32(CPU_T001);
4409 } else {
4410 if (size == 0)
4411 gen_helper_neon_cge_f32(CPU_T001);
4412 else
4413 gen_helper_neon_cgt_f32(CPU_T001);
4415 break;
4416 case 29: /* Float compare absolute. */
4417 if (!u)
4418 return 1;
4419 if (size == 0)
4420 gen_helper_neon_acge_f32(CPU_T001);
4421 else
4422 gen_helper_neon_acgt_f32(CPU_T001);
4423 break;
4424 case 30: /* Float min/max. */
4425 if (size == 0)
4426 gen_helper_neon_max_f32(CPU_T001);
4427 else
4428 gen_helper_neon_min_f32(CPU_T001);
4429 break;
4430 case 31:
4431 if (size == 0)
4432 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4433 else
4434 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4435 break;
4436 default:
4437 abort();
4439 /* Save the result. For elementwise operations we can put it
4440 straight into the destination register. For pairwise operations
4441 we have to be careful to avoid clobbering the source operands. */
4442 if (pairwise && rd == rm) {
4443 gen_neon_movl_scratch_T0(pass);
4444 } else {
4445 NEON_SET_REG(T0, rd, pass);
4448 } /* for pass */
4449 if (pairwise && rd == rm) {
4450 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4451 gen_neon_movl_T0_scratch(pass);
4452 NEON_SET_REG(T0, rd, pass);
4455 /* End of 3 register same size operations. */
4456 } else if (insn & (1 << 4)) {
4457 if ((insn & 0x00380080) != 0) {
4458 /* Two registers and shift. */
4459 op = (insn >> 8) & 0xf;
4460 if (insn & (1 << 7)) {
4461 /* 64-bit shift. */
4462 size = 3;
4463 } else {
4464 size = 2;
4465 while ((insn & (1 << (size + 19))) == 0)
4466 size--;
4468 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
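                /* Illustrative note (not in the original source): the element
                   size is recovered from the position of the leading one in
                   insn bits <21:19>. E.g. for a 16-bit VSHR, bit 20 is set, so
                   size == 1 and the shift amount comes from bits <19:16>
                   (mask (1 << 4) - 1 == 0xf). */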
4469 /* To avoid excessive duplication of ops we implement shift
4470 by immediate using the variable shift operations. */
4471 if (op < 8) {
4472 /* Shift by immediate:
4473 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4474 /* Right shifts are encoded as N - shift, where N is the
4475 element size in bits. */
4476 if (op <= 4)
4477 shift = shift - (1 << (size + 3));
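                    /* Worked example (illustrative): for size == 2 (32-bit
                       elements) an encoded immediate of 29 yields
                       shift = 29 - 32 = -3, i.e. a right shift by 3 expressed
                       as a variable shift with a negative count. */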
4478 if (size == 3) {
4479 count = q + 1;
4480 } else {
4481 count = q ? 4: 2;
4483 switch (size) {
4484 case 0:
4485 imm = (uint8_t) shift;
4486 imm |= imm << 8;
4487 imm |= imm << 16;
4488 break;
4489 case 1:
4490 imm = (uint16_t) shift;
4491 imm |= imm << 16;
4492 break;
4493 case 2:
4494 case 3:
4495 imm = shift;
4496 break;
4497 default:
4498 abort();
4501 for (pass = 0; pass < count; pass++) {
4502 if (size == 3) {
4503 neon_load_reg64(cpu_V0, rm + pass);
4504 tcg_gen_movi_i64(cpu_V1, imm);
4505 switch (op) {
4506 case 0: /* VSHR */
4507 case 1: /* VSRA */
4508 if (u)
4509 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4510 else
4511 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4512 break;
4513 case 2: /* VRSHR */
4514 case 3: /* VRSRA */
4515 if (u)
4516 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4517 else
4518 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4519 break;
4520 case 4: /* VSRI */
4521 if (!u)
4522 return 1;
4523 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4524 break;
4525 case 5: /* VSHL, VSLI */
4526 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4527 break;
4528 case 6: /* VQSHL */
4529 if (u)
4530 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4531 else
4532 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4533 break;
4534 case 7: /* VQSHLU */
4535 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4536 break;
4538 if (op == 1 || op == 3) {
4539 /* Accumulate. */
4540 neon_load_reg64(cpu_V1, rd + pass);
4541 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4542 } else if (op == 4 || (op == 5 && u)) {
4543 /* Insert */
4544 cpu_abort(env, "VS[LR]I.64 not implemented");
4546 neon_store_reg64(cpu_V0, rd + pass);
4547 } else { /* size < 3 */
4548 /* Operands in T0 and T1. */
4549 gen_op_movl_T1_im(imm);
4550 NEON_GET_REG(T0, rm, pass);
4551 switch (op) {
4552 case 0: /* VSHR */
4553 case 1: /* VSRA */
4554 GEN_NEON_INTEGER_OP(shl);
4555 break;
4556 case 2: /* VRSHR */
4557 case 3: /* VRSRA */
4558 GEN_NEON_INTEGER_OP(rshl);
4559 break;
4560 case 4: /* VSRI */
4561 if (!u)
4562 return 1;
4563 GEN_NEON_INTEGER_OP(shl);
4564 break;
4565 case 5: /* VSHL, VSLI */
4566 switch (size) {
4567 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4568 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4569 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4570 default: return 1;
4572 break;
4573 case 6: /* VQSHL */
4574 GEN_NEON_INTEGER_OP_ENV(qshl);
4575 break;
4576 case 7: /* VQSHLU */
4577 switch (size) {
4578 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4579 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4580 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4581 default: return 1;
4583 break;
4586 if (op == 1 || op == 3) {
4587 /* Accumulate. */
4588 NEON_GET_REG(T1, rd, pass);
4589 gen_neon_add(size);
4590 } else if (op == 4 || (op == 5 && u)) {
4591 /* Insert */
4592 switch (size) {
4593 case 0:
4594 if (op == 4)
4595 imm = 0xff >> -shift;
4596 else
4597 imm = (uint8_t)(0xff << shift);
4598 imm |= imm << 8;
4599 imm |= imm << 16;
4600 break;
4601 case 1:
4602 if (op == 4)
4603 imm = 0xffff >> -shift;
4604 else
4605 imm = (uint16_t)(0xffff << shift);
4606 imm |= imm << 16;
4607 break;
4608 case 2:
4609 if (op == 4)
4610 imm = 0xffffffffu >> -shift;
4611 else
4612 imm = 0xffffffffu << shift;
4613 break;
4614 default:
4615 abort();
4617 tmp = neon_load_reg(rd, pass);
4618 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4619 tcg_gen_andi_i32(tmp, tmp, ~imm);
4620 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
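                            /* Illustrative: this is the usual bit-select
                               idiom, result = (shifted & imm) | (old & ~imm).
                               For VSLI.8 #3, imm is 0xf8 replicated across
                               bytes, so the low 3 bits of each destination
                               byte are preserved. */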
4622 NEON_SET_REG(T0, rd, pass);
4624 } /* for pass */
4625 } else if (op < 10) {
4626 /* Shift by immediate and narrow:
4627 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4628 shift = shift - (1 << (size + 3));
4629 size++;
4630 switch (size) {
4631 case 1:
4632 imm = (uint16_t)shift;
4633 imm |= imm << 16;
4634 tmp2 = tcg_const_i32(imm);
4635 TCGV_UNUSED_I64(tmp64);
4636 break;
4637 case 2:
4638 imm = (uint32_t)shift;
4639 tmp2 = tcg_const_i32(imm);
4640 TCGV_UNUSED_I64(tmp64);
4641 break;
4642 case 3:
4643 tmp64 = tcg_const_i64(shift);
4644 TCGV_UNUSED(tmp2);
4645 break;
4646 default:
4647 abort();
4650 for (pass = 0; pass < 2; pass++) {
4651 if (size == 3) {
4652 neon_load_reg64(cpu_V0, rm + pass);
4653 if (q) {
4654 if (u)
4655 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4656 else
4657 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4658 } else {
4659 if (u)
4660 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4661 else
4662 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4664 } else {
4665 tmp = neon_load_reg(rm + pass, 0);
4666 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4667 tmp3 = neon_load_reg(rm + pass, 1);
4668 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4669 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4670 dead_tmp(tmp);
4671 dead_tmp(tmp3);
4673 tmp = new_tmp();
4674 if (op == 8 && !u) {
4675 gen_neon_narrow(size - 1, tmp, cpu_V0);
4676 } else {
4677 if (op == 8)
4678 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4679 else
4680 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4682 if (pass == 0) {
4683 tmp2 = tmp;
4684 } else {
4685 neon_store_reg(rd, 0, tmp2);
4686 neon_store_reg(rd, 1, tmp);
4688 } /* for pass */
4689 } else if (op == 10) {
4690 /* VSHLL */
4691 if (q || size == 3)
4692 return 1;
4693 tmp = neon_load_reg(rm, 0);
4694 tmp2 = neon_load_reg(rm, 1);
4695 for (pass = 0; pass < 2; pass++) {
4696 if (pass == 1)
4697 tmp = tmp2;
4699 gen_neon_widen(cpu_V0, tmp, size, u);
4701 if (shift != 0) {
4702 /* The shift is less than the width of the source
4703 type, so we can just shift the whole register. */
4704 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4705 if (size < 2 || !u) {
4706 uint64_t imm64;
4707 if (size == 0) {
4708 imm = (0xffu >> (8 - shift));
4709 imm |= imm << 16;
4710 } else {
4711 imm = 0xffff >> (16 - shift);
4713 imm64 = imm | (((uint64_t)imm) << 32);
4714 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
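                        /* Illustrative note: shifting the whole 64-bit
                           register lets the top bits of each lane spill into
                           the low bits of the lane above, so those leaked
                           bits are cleared here. E.g. VSHLL.S8 #3 clears the
                           low 3 bits of each 16-bit result lane
                           (imm = (1 << 3) - 1 replicated). */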
4717 neon_store_reg64(cpu_V0, rd + pass);
4719 } else if (op == 15 || op == 16) {
4720 /* VCVT fixed-point. */
4721 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4722 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4723 if (op & 1) {
4724 if (u)
4725 gen_vfp_ulto(0, shift);
4726 else
4727 gen_vfp_slto(0, shift);
4728 } else {
4729 if (u)
4730 gen_vfp_toul(0, shift);
4731 else
4732 gen_vfp_tosl(0, shift);
4734 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4736 } else {
4737 return 1;
4739 } else { /* (insn & 0x00380080) == 0 */
4740 int invert;
4742 op = (insn >> 8) & 0xf;
4743 /* One register and immediate. */
4744 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4745 invert = (insn & (1 << 5)) != 0;
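            /* Illustrative note: imm reassembles the 8-bit "abcdefgh"
               modified-immediate field: a = bit 24 (u), bcd = insn<18:16>,
               efgh = insn<3:0>. The switch below expands it according to the
               cmode field (held in 'op' here). */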
4746 switch (op) {
4747 case 0: case 1:
4748 /* no-op */
4749 break;
4750 case 2: case 3:
4751 imm <<= 8;
4752 break;
4753 case 4: case 5:
4754 imm <<= 16;
4755 break;
4756 case 6: case 7:
4757 imm <<= 24;
4758 break;
4759 case 8: case 9:
4760 imm |= imm << 16;
4761 break;
4762 case 10: case 11:
4763 imm = (imm << 8) | (imm << 24);
4764 break;
4765 case 12:
4766 imm = (imm << 8) | 0xff;
4767 break;
4768 case 13:
4769 imm = (imm << 16) | 0xffff;
4770 break;
4771 case 14:
4772 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4773 if (invert)
4774 imm = ~imm;
4775 break;
4776 case 15:
4777 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4778 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4779 break;
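            /* Worked example (illustrative): case 15 expands an 8-bit float
               immediate. imm8 = 0x70 gives sign 0, exponent 0x7f, mantissa 0,
               i.e. 0x3f800000 == 1.0f. */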
4781 if (invert)
4782 imm = ~imm;
4784 if (op != 14 || !invert)
4785 gen_op_movl_T1_im(imm);
4787 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4788 if (op & 1 && op < 12) {
4789 tmp = neon_load_reg(rd, pass);
4790 if (invert) {
4791 /* The immediate value has already been inverted, so
4792 BIC becomes AND. */
4793 tcg_gen_andi_i32(tmp, tmp, imm);
4794 } else {
4795 tcg_gen_ori_i32(tmp, tmp, imm);
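                    /* Illustrative: since x BIC imm == x AND ~imm, the
                       pre-inverted immediate lets VBIC and VORR share the
                       andi/ori paths above. */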
4797 } else {
4798 /* VMOV, VMVN. */
4799 tmp = new_tmp();
4800 if (op == 14 && invert) {
4801 uint32_t val;
4802 val = 0;
4803 for (n = 0; n < 4; n++) {
4804 if (imm & (1 << (n + (pass & 1) * 4)))
4805 val |= 0xff << (n * 8);
4807 tcg_gen_movi_i32(tmp, val);
4808 } else {
4809 tcg_gen_movi_i32(tmp, imm);
4812 neon_store_reg(rd, pass, tmp);
4815 } else { /* (insn & 0x00800010 == 0x00800000) */
4816 if (size != 3) {
4817 op = (insn >> 8) & 0xf;
4818 if ((insn & (1 << 6)) == 0) {
4819 /* Three registers of different lengths. */
4820 int src1_wide;
4821 int src2_wide;
4822 int prewiden;
4823 /* prewiden, src1_wide, src2_wide */
4824 static const int neon_3reg_wide[16][3] = {
4825 {1, 0, 0}, /* VADDL */
4826 {1, 1, 0}, /* VADDW */
4827 {1, 0, 0}, /* VSUBL */
4828 {1, 1, 0}, /* VSUBW */
4829 {0, 1, 1}, /* VADDHN */
4830 {0, 0, 0}, /* VABAL */
4831 {0, 1, 1}, /* VSUBHN */
4832 {0, 0, 0}, /* VABDL */
4833 {0, 0, 0}, /* VMLAL */
4834 {0, 0, 0}, /* VQDMLAL */
4835 {0, 0, 0}, /* VMLSL */
4836 {0, 0, 0}, /* VQDMLSL */
4837 {0, 0, 0}, /* Integer VMULL */
4838 {0, 0, 0}, /* VQDMULL */
4839 {0, 0, 0} /* Polynomial VMULL */
4842 prewiden = neon_3reg_wide[op][0];
4843 src1_wide = neon_3reg_wide[op][1];
4844 src2_wide = neon_3reg_wide[op][2];
4846 if (size == 0 && (op == 9 || op == 11 || op == 13))
4847 return 1;
4849 /* Avoid overlapping operands. Wide source operands are
4850 always aligned so will never overlap with wide
4851 destinations in problematic ways. */
4852 if (rd == rm && !src2_wide) {
4853 NEON_GET_REG(T0, rm, 1);
4854 gen_neon_movl_scratch_T0(2);
4855 } else if (rd == rn && !src1_wide) {
4856 NEON_GET_REG(T0, rn, 1);
4857 gen_neon_movl_scratch_T0(2);
4859 TCGV_UNUSED(tmp3);
4860 for (pass = 0; pass < 2; pass++) {
4861 if (src1_wide) {
4862 neon_load_reg64(cpu_V0, rn + pass);
4863 TCGV_UNUSED(tmp);
4864 } else {
4865 if (pass == 1 && rd == rn) {
4866 gen_neon_movl_T0_scratch(2);
4867 tmp = new_tmp();
4868 tcg_gen_mov_i32(tmp, cpu_T[0]);
4869 } else {
4870 tmp = neon_load_reg(rn, pass);
4872 if (prewiden) {
4873 gen_neon_widen(cpu_V0, tmp, size, u);
4876 if (src2_wide) {
4877 neon_load_reg64(cpu_V1, rm + pass);
4878 TCGV_UNUSED(tmp2);
4879 } else {
4880 if (pass == 1 && rd == rm) {
4881 gen_neon_movl_T0_scratch(2);
4882 tmp2 = new_tmp();
4883 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4884 } else {
4885 tmp2 = neon_load_reg(rm, pass);
4887 if (prewiden) {
4888 gen_neon_widen(cpu_V1, tmp2, size, u);
4891 switch (op) {
4892 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4893 gen_neon_addl(size);
4894 break;
4895 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4896 gen_neon_subl(size);
4897 break;
4898 case 5: case 7: /* VABAL, VABDL */
4899 switch ((size << 1) | u) {
4900 case 0:
4901 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4902 break;
4903 case 1:
4904 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4905 break;
4906 case 2:
4907 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4908 break;
4909 case 3:
4910 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4911 break;
4912 case 4:
4913 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4914 break;
4915 case 5:
4916 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4917 break;
4918 default: abort();
4920 dead_tmp(tmp2);
4921 dead_tmp(tmp);
4922 break;
4923 case 8: case 9: case 10: case 11: case 12: case 13:
4924 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4925 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4926 break;
4927 case 14: /* Polynomial VMULL */
4928 cpu_abort(env, "Polynomial VMULL not implemented");
4930 default: /* 15 is RESERVED. */
4931 return 1;
4933 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4934 /* Accumulate. */
4935 if (op == 10 || op == 11) {
4936 gen_neon_negl(cpu_V0, size);
4939 if (op != 13) {
4940 neon_load_reg64(cpu_V1, rd + pass);
4943 switch (op) {
4944 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4945 gen_neon_addl(size);
4946 break;
4947 case 9: case 11: /* VQDMLAL, VQDMLSL */
4948 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4949 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4950 break;
4952 case 13: /* VQDMULL */
4953 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4954 break;
4955 default:
4956 abort();
4958 neon_store_reg64(cpu_V0, rd + pass);
4959 } else if (op == 4 || op == 6) {
4960 /* Narrowing operation. */
4961 tmp = new_tmp();
4962 if (u) {
4963 switch (size) {
4964 case 0:
4965 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4966 break;
4967 case 1:
4968 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4969 break;
4970 case 2:
4971 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4972 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4973 break;
4974 default: abort();
4976 } else {
4977 switch (size) {
4978 case 0:
4979 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4980 break;
4981 case 1:
4982 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4983 break;
4984 case 2:
4985 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4986 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4987 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4988 break;
4989 default: abort();
4992 if (pass == 0) {
4993 tmp3 = tmp;
4994 } else {
4995 neon_store_reg(rd, 0, tmp3);
4996 neon_store_reg(rd, 1, tmp);
4998 } else {
4999 /* Write back the result. */
5000 neon_store_reg64(cpu_V0, rd + pass);
5003 } else {
5004 /* Two registers and a scalar. */
5005 switch (op) {
5006 case 0: /* Integer VMLA scalar */
5007 case 1: /* Float VMLA scalar */
5008 case 4: /* Integer VMLS scalar */
5009 case 5: /* Floating point VMLS scalar */
5010 case 8: /* Integer VMUL scalar */
5011 case 9: /* Floating point VMUL scalar */
5012 case 12: /* VQDMULH scalar */
5013 case 13: /* VQRDMULH scalar */
5014 gen_neon_get_scalar(size, rm);
5015 gen_neon_movl_scratch_T0(0);
5016 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5017 if (pass != 0)
5018 gen_neon_movl_T0_scratch(0);
5019 NEON_GET_REG(T1, rn, pass);
5020 if (op == 12) {
5021 if (size == 1) {
5022 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5023 } else {
5024 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5026 } else if (op == 13) {
5027 if (size == 1) {
5028 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5029 } else {
5030 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5032 } else if (op & 1) {
5033 gen_helper_neon_mul_f32(CPU_T001);
5034 } else {
5035 switch (size) {
5036 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5037 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5038 case 2: gen_op_mul_T0_T1(); break;
5039 default: return 1;
5042 if (op < 8) {
5043 /* Accumulate. */
5044 NEON_GET_REG(T1, rd, pass);
5045 switch (op) {
5046 case 0:
5047 gen_neon_add(size);
5048 break;
5049 case 1:
5050 gen_helper_neon_add_f32(CPU_T001);
5051 break;
5052 case 4:
5053 gen_neon_rsb(size);
5054 break;
5055 case 5:
5056 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5057 break;
5058 default:
5059 abort();
5062 NEON_SET_REG(T0, rd, pass);
5064 break;
5065 case 2: /* VMLAL scalar */
5066 case 3: /* VQDMLAL scalar */
5067 case 6: /* VMLSL scalar */
5068 case 7: /* VQDMLSL scalar */
5069 case 10: /* VMULL scalar */
5070 case 11: /* VQDMULL scalar */
5071 if (size == 0 && (op == 3 || op == 7 || op == 11))
5072 return 1;
5074 gen_neon_get_scalar(size, rm);
5075 NEON_GET_REG(T1, rn, 1);
5077 for (pass = 0; pass < 2; pass++) {
5078 if (pass == 0) {
5079 tmp = neon_load_reg(rn, 0);
5080 } else {
5081 tmp = new_tmp();
5082 tcg_gen_mov_i32(tmp, cpu_T[1]);
5084 tmp2 = new_tmp();
5085 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5086 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5087 if (op == 6 || op == 7) {
5088 gen_neon_negl(cpu_V0, size);
5090 if (op != 11) {
5091 neon_load_reg64(cpu_V1, rd + pass);
5093 switch (op) {
5094 case 2: case 6:
5095 gen_neon_addl(size);
5096 break;
5097 case 3: case 7:
5098 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5099 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5100 break;
5101 case 10:
5102 /* no-op */
5103 break;
5104 case 11:
5105 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5106 break;
5107 default:
5108 abort();
5110 neon_store_reg64(cpu_V0, rd + pass);
5112 break;
5113 default: /* 14 and 15 are RESERVED */
5114 return 1;
5117 } else { /* size == 3 */
5118 if (!u) {
5119 /* Extract. */
5120 imm = (insn >> 8) & 0xf;
5121 count = q + 1;
5123 if (imm > 7 && !q)
5124 return 1;
5126 if (imm == 0) {
5127 neon_load_reg64(cpu_V0, rn);
5128 if (q) {
5129 neon_load_reg64(cpu_V1, rn + 1);
5131 } else if (imm == 8) {
5132 neon_load_reg64(cpu_V0, rn + 1);
5133 if (q) {
5134 neon_load_reg64(cpu_V1, rm);
5136 } else if (q) {
5137 tmp64 = tcg_temp_new_i64();
5138 if (imm < 8) {
5139 neon_load_reg64(cpu_V0, rn);
5140 neon_load_reg64(tmp64, rn + 1);
5141 } else {
5142 neon_load_reg64(cpu_V0, rn + 1);
5143 neon_load_reg64(tmp64, rm);
5145 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5146 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5147 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5148 if (imm < 8) {
5149 neon_load_reg64(cpu_V1, rm);
5150 } else {
5151 neon_load_reg64(cpu_V1, rm + 1);
5152 imm -= 8;
5154 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5155 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5156 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5157 } else {
5158 /* BUGFIX */
5159 neon_load_reg64(cpu_V0, rn);
5160 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5161 neon_load_reg64(cpu_V1, rm);
5162 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5163 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5165 neon_store_reg64(cpu_V0, rd);
5166 if (q) {
5167 neon_store_reg64(cpu_V1, rd + 1);
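            /* Illustrative: VEXT concatenates Vm:Vn and extracts a
               byte-offset window; e.g. VEXT.8 d0, d1, d2, #3 computes
               d0 = (d1 >> 24) | (d2 << 40). */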
5169 } else if ((insn & (1 << 11)) == 0) {
5170 /* Two register misc. */
5171 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5172 size = (insn >> 18) & 3;
5173 switch (op) {
5174 case 0: /* VREV64 */
5175 if (size == 3)
5176 return 1;
5177 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5178 NEON_GET_REG(T0, rm, pass * 2);
5179 NEON_GET_REG(T1, rm, pass * 2 + 1);
5180 switch (size) {
5181 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5182 case 1: gen_swap_half(cpu_T[0]); break;
5183 case 2: /* no-op */ break;
5184 default: abort();
5186 NEON_SET_REG(T0, rd, pass * 2 + 1);
5187 if (size == 2) {
5188 NEON_SET_REG(T1, rd, pass * 2);
5189 } else {
5190 gen_op_movl_T0_T1();
5191 switch (size) {
5192 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5193 case 1: gen_swap_half(cpu_T[0]); break;
5194 default: abort();
5196 NEON_SET_REG(T0, rd, pass * 2);
5199 break;
5200 case 4: case 5: /* VPADDL */
5201 case 12: case 13: /* VPADAL */
5202 if (size == 3)
5203 return 1;
5204 for (pass = 0; pass < q + 1; pass++) {
5205 tmp = neon_load_reg(rm, pass * 2);
5206 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5207 tmp = neon_load_reg(rm, pass * 2 + 1);
5208 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5209 switch (size) {
5210 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5211 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5212 case 2: tcg_gen_add_i64(CPU_V001); break;
5213 default: abort();
5215 if (op >= 12) {
5216 /* Accumulate. */
5217 neon_load_reg64(cpu_V1, rd + pass);
5218 gen_neon_addl(size);
5220 neon_store_reg64(cpu_V0, rd + pass);
5222 break;
5223 case 33: /* VTRN */
5224 if (size == 2) {
5225 for (n = 0; n < (q ? 4 : 2); n += 2) {
5226 NEON_GET_REG(T0, rm, n);
5227 NEON_GET_REG(T1, rd, n + 1);
5228 NEON_SET_REG(T1, rm, n);
5229 NEON_SET_REG(T0, rd, n + 1);
5231 } else {
5232 goto elementwise;
5234 break;
5235 case 34: /* VUZP */
5236 /* Reg Before After
5237 Rd A3 A2 A1 A0 B2 B0 A2 A0
5238 Rm B3 B2 B1 B0 B3 B1 A3 A1 */
5240 if (size == 3)
5241 return 1;
5242 gen_neon_unzip(rd, q, 0, size);
5243 gen_neon_unzip(rm, q, 4, size);
5244 if (q) {
5245 static int unzip_order_q[8] =
5246 {0, 2, 4, 6, 1, 3, 5, 7};
5247 for (n = 0; n < 8; n++) {
5248 int reg = (n < 4) ? rd : rm;
5249 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5250 NEON_SET_REG(T0, reg, n % 4);
5252 } else {
5253 static int unzip_order[4] =
5254 {0, 4, 1, 5};
5255 for (n = 0; n < 4; n++) {
5256 int reg = (n < 2) ? rd : rm;
5257 gen_neon_movl_T0_scratch(unzip_order[n]);
5258 NEON_SET_REG(T0, reg, n % 2);
5261 break;
5262 case 35: /* VZIP */
5263 /* Reg Before After
5264 Rd A3 A2 A1 A0 B1 A1 B0 A0
5265 Rm B3 B2 B1 B0 B3 A3 B2 A2 */
5267 if (size == 3)
5268 return 1;
5269 count = (q ? 4 : 2);
5270 for (n = 0; n < count; n++) {
5271 NEON_GET_REG(T0, rd, n);
5272 NEON_GET_REG(T1, rm, n);
5273 switch (size) {
5274 case 0: gen_helper_neon_zip_u8(); break;
5275 case 1: gen_helper_neon_zip_u16(); break;
5276 case 2: /* no-op */; break;
5277 default: abort();
5279 gen_neon_movl_scratch_T0(n * 2);
5280 gen_neon_movl_scratch_T1(n * 2 + 1);
5282 for (n = 0; n < count * 2; n++) {
5283 int reg = (n < count) ? rd : rm;
5284 gen_neon_movl_T0_scratch(n);
5285 NEON_SET_REG(T0, reg, n % count);
5287 break;
5288 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5289 if (size == 3)
5290 return 1;
5291 TCGV_UNUSED(tmp2);
5292 for (pass = 0; pass < 2; pass++) {
5293 neon_load_reg64(cpu_V0, rm + pass);
5294 tmp = new_tmp();
5295 if (op == 36 && q == 0) {
5296 gen_neon_narrow(size, tmp, cpu_V0);
5297 } else if (q) {
5298 gen_neon_narrow_satu(size, tmp, cpu_V0);
5299 } else {
5300 gen_neon_narrow_sats(size, tmp, cpu_V0);
5302 if (pass == 0) {
5303 tmp2 = tmp;
5304 } else {
5305 neon_store_reg(rd, 0, tmp2);
5306 neon_store_reg(rd, 1, tmp);
5309 break;
5310 case 38: /* VSHLL */
5311 if (q || size == 3)
5312 return 1;
5313 tmp = neon_load_reg(rm, 0);
5314 tmp2 = neon_load_reg(rm, 1);
5315 for (pass = 0; pass < 2; pass++) {
5316 if (pass == 1)
5317 tmp = tmp2;
5318 gen_neon_widen(cpu_V0, tmp, size, 1);
5319 neon_store_reg64(cpu_V0, rd + pass);
5321 break;
5322 default:
5323 elementwise:
5324 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5325 if (op == 30 || op == 31 || op >= 58) {
5326 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5327 neon_reg_offset(rm, pass));
5328 } else {
5329 NEON_GET_REG(T0, rm, pass);
5331 switch (op) {
5332 case 1: /* VREV32 */
5333 switch (size) {
5334 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5335 case 1: gen_swap_half(cpu_T[0]); break;
5336 default: return 1;
5338 break;
5339 case 2: /* VREV16 */
5340 if (size != 0)
5341 return 1;
5342 gen_rev16(cpu_T[0]);
5343 break;
5344 case 8: /* CLS */
5345 switch (size) {
5346 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5347 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5348 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5349 default: return 1;
5351 break;
5352 case 9: /* CLZ */
5353 switch (size) {
5354 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5355 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5356 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5357 default: return 1;
5359 break;
5360 case 10: /* CNT */
5361 if (size != 0)
5362 return 1;
5363 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5364 break;
5365 case 11: /* VNOT */
5366 if (size != 0)
5367 return 1;
5368 gen_op_notl_T0();
5369 break;
5370 case 14: /* VQABS */
5371 switch (size) {
5372 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5373 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5374 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5375 default: return 1;
5377 break;
5378 case 15: /* VQNEG */
5379 switch (size) {
5380 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5381 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5382 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5383 default: return 1;
5385 break;
5386 case 16: case 19: /* VCGT #0, VCLE #0 */
5387 gen_op_movl_T1_im(0);
5388 switch(size) {
5389 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5390 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5391 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5392 default: return 1;
5394 if (op == 19)
5395 gen_op_notl_T0();
5396 break;
5397 case 17: case 20: /* VCGE #0, VCLT #0 */
5398 gen_op_movl_T1_im(0);
5399 switch(size) {
5400 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5401 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5402 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5403 default: return 1;
5405 if (op == 20)
5406 gen_op_notl_T0();
5407 break;
5408 case 18: /* VCEQ #0 */
5409 gen_op_movl_T1_im(0);
5410 switch(size) {
5411 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5412 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5413 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5414 default: return 1;
5416 break;
5417 case 22: /* VABS */
5418 switch(size) {
5419 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5420 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5421 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5422 default: return 1;
5424 break;
5425 case 23: /* VNEG */
5426 gen_op_movl_T1_im(0);
5427 if (size == 3)
5428 return 1;
5429 gen_neon_rsb(size);
5430 break;
5431 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5432 gen_op_movl_T1_im(0);
5433 gen_helper_neon_cgt_f32(CPU_T001);
5434 if (op == 27)
5435 gen_op_notl_T0();
5436 break;
5437 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5438 gen_op_movl_T1_im(0);
5439 gen_helper_neon_cge_f32(CPU_T001);
5440 if (op == 28)
5441 gen_op_notl_T0();
5442 break;
5443 case 26: /* Float VCEQ #0 */
5444 gen_op_movl_T1_im(0);
5445 gen_helper_neon_ceq_f32(CPU_T001);
5446 break;
5447 case 30: /* Float VABS */
5448 gen_vfp_abs(0);
5449 break;
5450 case 31: /* Float VNEG */
5451 gen_vfp_neg(0);
5452 break;
5453 case 32: /* VSWP */
5454 NEON_GET_REG(T1, rd, pass);
5455 NEON_SET_REG(T1, rm, pass);
5456 break;
5457 case 33: /* VTRN */
5458 NEON_GET_REG(T1, rd, pass);
5459 switch (size) {
5460 case 0: gen_helper_neon_trn_u8(); break;
5461 case 1: gen_helper_neon_trn_u16(); break;
5462 case 2: abort();
5463 default: return 1;
5465 NEON_SET_REG(T1, rm, pass);
5466 break;
5467 case 56: /* Integer VRECPE */
5468 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5469 break;
5470 case 57: /* Integer VRSQRTE */
5471 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5472 break;
5473 case 58: /* Float VRECPE */
5474 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5475 break;
5476 case 59: /* Float VRSQRTE */
5477 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5478 break;
5479 case 60: /* VCVT.F32.S32 */
5480 gen_vfp_tosiz(0);
5481 break;
5482 case 61: /* VCVT.F32.U32 */
5483 gen_vfp_touiz(0);
5484 break;
5485 case 62: /* VCVT.S32.F32 */
5486 gen_vfp_sito(0);
5487 break;
5488 case 63: /* VCVT.U32.F32 */
5489 gen_vfp_uito(0);
5490 break;
5491 default:
5492 /* Reserved: 21, 29, 39-56 */
5493 return 1;
5495 if (op == 30 || op == 31 || op >= 58) {
5496 tcg_gen_st_f32(cpu_F0s, cpu_env,
5497 neon_reg_offset(rd, pass));
5498 } else {
5499 NEON_SET_REG(T0, rd, pass);
5502 break;
5504 } else if ((insn & (1 << 10)) == 0) {
5505 /* VTBL, VTBX. */
5506 n = ((insn >> 5) & 0x18) + 8;
5507 if (insn & (1 << 6)) {
5508 tmp = neon_load_reg(rd, 0);
5509 } else {
5510 tmp = new_tmp();
5511 tcg_gen_movi_i32(tmp, 0);
5513 tmp2 = neon_load_reg(rm, 0);
5514 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5515 tcg_const_i32(n));
5516 dead_tmp(tmp);
5517 if (insn & (1 << 6)) {
5518 tmp = neon_load_reg(rd, 1);
5519 } else {
5520 tmp = new_tmp();
5521 tcg_gen_movi_i32(tmp, 0);
5523 tmp3 = neon_load_reg(rm, 1);
5524 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5525 tcg_const_i32(n));
5526 neon_store_reg(rd, 0, tmp2);
5527 neon_store_reg(rd, 1, tmp3);
5528 dead_tmp(tmp);
5529 } else if ((insn & 0x380) == 0) {
5530 /* VDUP */
5531 if (insn & (1 << 19)) {
5532 NEON_GET_REG(T0, rm, 1);
5533 } else {
5534 NEON_GET_REG(T0, rm, 0);
5536 if (insn & (1 << 16)) {
5537 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5538 } else if (insn & (1 << 17)) {
5539 if ((insn >> 18) & 1)
5540 gen_neon_dup_high16(cpu_T[0]);
5541 else
5542 gen_neon_dup_low16(cpu_T[0]);
5544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5545 NEON_SET_REG(T0, rd, pass);
5547 } else {
5548 return 1;
5552 return 0;
5555 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5557 int crn = (insn >> 16) & 0xf;
5558 int crm = insn & 0xf;
5559 int op1 = (insn >> 21) & 7;
5560 int op2 = (insn >> 5) & 7;
5561 int rt = (insn >> 12) & 0xf;
5562 TCGv tmp;
5564 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5565 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5566 /* TEECR */
5567 if (IS_USER(s))
5568 return 1;
5569 tmp = load_cpu_field(teecr);
5570 store_reg(s, rt, tmp);
5571 return 0;
5573 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5574 /* TEEHBR */
5575 if (IS_USER(s) && (env->teecr & 1))
5576 return 1;
5577 tmp = load_cpu_field(teehbr);
5578 store_reg(s, rt, tmp);
5579 return 0;
5582 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5583 op1, crn, crm, op2);
5584 return 1;
5587 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5589 int crn = (insn >> 16) & 0xf;
5590 int crm = insn & 0xf;
5591 int op1 = (insn >> 21) & 7;
5592 int op2 = (insn >> 5) & 7;
5593 int rt = (insn >> 12) & 0xf;
5594 TCGv tmp;
5596 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5597 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5598 /* TEECR */
5599 if (IS_USER(s))
5600 return 1;
5601 tmp = load_reg(s, rt);
5602 gen_helper_set_teecr(cpu_env, tmp);
5603 dead_tmp(tmp);
5604 return 0;
5606 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5607 /* TEEHBR */
5608 if (IS_USER(s) && (env->teecr & 1))
5609 return 1;
5610 tmp = load_reg(s, rt);
5611 store_cpu_field(tmp, teehbr);
5612 return 0;
5615 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5616 op1, crn, crm, op2);
5617 return 1;
5620 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5622 int cpnum;
5624 cpnum = (insn >> 8) & 0xf;
5625 if (arm_feature(env, ARM_FEATURE_XSCALE)
5626 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5627 return 1;
5629 switch (cpnum) {
5630 case 0:
5631 case 1:
5632 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5633 return disas_iwmmxt_insn(env, s, insn);
5634 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5635 return disas_dsp_insn(env, s, insn);
5637 return 1;
5638 case 10:
5639 case 11:
5640 return disas_vfp_insn (env, s, insn);
5641 case 14:
5642 /* Coprocessors 7-15 are architecturally reserved by ARM.
5643 Unfortunately Intel decided to ignore this. */
5644 if (arm_feature(env, ARM_FEATURE_XSCALE))
5645 goto board;
5646 if (insn & (1 << 20))
5647 return disas_cp14_read(env, s, insn);
5648 else
5649 return disas_cp14_write(env, s, insn);
5650 case 15:
5651 return disas_cp15_insn (env, s, insn);
5652 default:
5653 board:
5654 /* Unknown coprocessor. See if the board has hooked it. */
5655 return disas_cp_insn (env, s, insn);
5660 /* Store a 64-bit value to a register pair. Clobbers val. */
5661 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5663 TCGv tmp;
5664 tmp = new_tmp();
5665 tcg_gen_trunc_i64_i32(tmp, val);
5666 store_reg(s, rlow, tmp);
5667 tmp = new_tmp();
5668 tcg_gen_shri_i64(val, val, 32);
5669 tcg_gen_trunc_i64_i32(tmp, val);
5670 store_reg(s, rhigh, tmp);
5673 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5674 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5676 TCGv_i64 tmp;
5677 TCGv tmp2;
5679 /* Load value and extend to 64 bits. */
5680 tmp = tcg_temp_new_i64();
5681 tmp2 = load_reg(s, rlow);
5682 tcg_gen_extu_i32_i64(tmp, tmp2);
5683 dead_tmp(tmp2);
5684 tcg_gen_add_i64(val, val, tmp);
5687 /* load and add a 64-bit value from a register pair. */
5688 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5690 TCGv_i64 tmp;
5691 TCGv tmpl;
5692 TCGv tmph;
5694 /* Load 64-bit value rd:rn. */
5695 tmpl = load_reg(s, rlow);
5696 tmph = load_reg(s, rhigh);
5697 tmp = tcg_temp_new_i64();
5698 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5699 dead_tmp(tmpl);
5700 dead_tmp(tmph);
5701 tcg_gen_add_i64(val, val, tmp);
5704 /* Set N and Z flags from a 64-bit value. */
5705 static void gen_logicq_cc(TCGv_i64 val)
5707 TCGv tmp = new_tmp();
5708 gen_helper_logicq_cc(tmp, val);
5709 gen_logic_CC(tmp);
5710 dead_tmp(tmp);
5713 static void disas_arm_insn(CPUState * env, DisasContext *s)
5715 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5716 TCGv tmp;
5717 TCGv tmp2;
5718 TCGv tmp3;
5719 TCGv addr;
5720 TCGv_i64 tmp64;
5722 insn = ldl_code(s->pc);
5723 s->pc += 4;
5725 /* M variants do not implement ARM mode. */
5726 if (IS_M(env))
5727 goto illegal_op;
5728 cond = insn >> 28;
5729 if (cond == 0xf){
5730 /* Unconditional instructions. */
5731 if (((insn >> 25) & 7) == 1) {
5732 /* NEON Data processing. */
5733 if (!arm_feature(env, ARM_FEATURE_NEON))
5734 goto illegal_op;
5736 if (disas_neon_data_insn(env, s, insn))
5737 goto illegal_op;
5738 return;
5740 if ((insn & 0x0f100000) == 0x04000000) {
5741 /* NEON load/store. */
5742 if (!arm_feature(env, ARM_FEATURE_NEON))
5743 goto illegal_op;
5745 if (disas_neon_ls_insn(env, s, insn))
5746 goto illegal_op;
5747 return;
5749 if ((insn & 0x0d70f000) == 0x0550f000)
5750 return; /* PLD */
5751 else if ((insn & 0x0ffffdff) == 0x01010000) {
5752 ARCH(6);
5753 /* setend */
5754 if (insn & (1 << 9)) {
5755 /* BE8 mode not implemented. */
5756 goto illegal_op;
5758 return;
5759 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5760 switch ((insn >> 4) & 0xf) {
5761 case 1: /* clrex */
5762 ARCH(6K);
5763 gen_helper_clrex(cpu_env);
5764 return;
5765 case 4: /* dsb */
5766 case 5: /* dmb */
5767 case 6: /* isb */
5768 ARCH(7);
5769 /* We don't emulate caches so these are a no-op. */
5770 return;
5771 default:
5772 goto illegal_op;
5774 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5775 /* srs */
5776 uint32_t offset;
5777 if (IS_USER(s))
5778 goto illegal_op;
5779 ARCH(6);
5780 op1 = (insn & 0x1f);
5781 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5782 addr = load_reg(s, 13);
5783 } else {
5784 addr = new_tmp();
5785 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5787 i = (insn >> 23) & 3;
5788 switch (i) {
5789 case 0: offset = -4; break; /* DA */
5790 case 1: offset = -8; break; /* DB */
5791 case 2: offset = 0; break; /* IA */
5792 case 3: offset = 4; break; /* IB */
5793 default: abort();
5795 if (offset)
5796 tcg_gen_addi_i32(addr, addr, offset);
5797 tmp = load_reg(s, 14);
5798 gen_st32(tmp, addr, 0);
5799 tmp = new_tmp();
5800 gen_helper_cpsr_read(tmp);
5801 tcg_gen_addi_i32(addr, addr, 4);
5802 gen_st32(tmp, addr, 0);
5803 if (insn & (1 << 21)) {
5804 /* Base writeback. */
5805 switch (i) {
5806 case 0: offset = -8; break;
5807 case 1: offset = -4; break;
5808 case 2: offset = 4; break;
5809 case 3: offset = 0; break;
5810 default: abort();
5812 if (offset)
5813 tcg_gen_addi_i32(addr, tmp, offset);
5814 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5815 gen_movl_reg_T1(s, 13);
5816 } else {
5817 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
5819 } else {
5820 dead_tmp(addr);
5822 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5823 /* rfe */
5824 uint32_t offset;
5825 if (IS_USER(s))
5826 goto illegal_op;
5827 ARCH(6);
5828 rn = (insn >> 16) & 0xf;
5829 addr = load_reg(s, rn);
5830 i = (insn >> 23) & 3;
5831 switch (i) {
5832 case 0: offset = -4; break; /* DA */
5833 case 1: offset = -8; break; /* DB */
5834 case 2: offset = 0; break; /* IA */
5835 case 3: offset = 4; break; /* IB */
5836 default: abort();
5838 if (offset)
5839 tcg_gen_addi_i32(addr, addr, offset);
5840 /* Load PC into tmp and CPSR into tmp2. */
5841 tmp = gen_ld32(addr, 0);
5842 tcg_gen_addi_i32(addr, addr, 4);
5843 tmp2 = gen_ld32(addr, 0);
5844 if (insn & (1 << 21)) {
5845 /* Base writeback. */
5846 switch (i) {
5847 case 0: offset = -8; break;
5848 case 1: offset = -4; break;
5849 case 2: offset = 4; break;
5850 case 3: offset = 0; break;
5851 default: abort();
5853 if (offset)
5854 tcg_gen_addi_i32(addr, addr, offset);
5855 store_reg(s, rn, addr);
5856 } else {
5857 dead_tmp(addr);
5859 gen_rfe(s, tmp, tmp2);
5860 } else if ((insn & 0x0e000000) == 0x0a000000) {
5861 /* branch link and change to thumb (blx <offset>) */
5862 int32_t offset;
5864 val = (uint32_t)s->pc;
5865 tmp = new_tmp();
5866 tcg_gen_movi_i32(tmp, val);
5867 store_reg(s, 14, tmp);
5868 /* Sign-extend the 24-bit offset */
5869 offset = (((int32_t)insn) << 8) >> 8;
5870 /* offset * 4 + bit24 * 2 + (thumb bit) */
5871 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5872 /* pipeline offset */
5873 val += 4;
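            /* Worked example (illustrative): the branch target is
               insn_address + 8 + (signed24 * 4) + (H bit * 2), with bit 0
               set to enter Thumb mode; the extra 4 here models the ARM
               pipeline offset. */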
5874 gen_bx_im(s, val);
5875 return;
5876 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5877 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5878 /* iWMMXt register transfer. */
5879 if (env->cp15.c15_cpar & (1 << 1))
5880 if (!disas_iwmmxt_insn(env, s, insn))
5881 return;
5883 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5884 /* Coprocessor double register transfer. */
5885 } else if ((insn & 0x0f000010) == 0x0e000010) {
5886 /* Additional coprocessor register transfer. */
5887 } else if ((insn & 0x0ff10020) == 0x01000000) {
5888 uint32_t mask;
5889 uint32_t val;
5890 /* cps (privileged) */
5891 if (IS_USER(s))
5892 return;
5893 mask = val = 0;
5894 if (insn & (1 << 19)) {
5895 if (insn & (1 << 8))
5896 mask |= CPSR_A;
5897 if (insn & (1 << 7))
5898 mask |= CPSR_I;
5899 if (insn & (1 << 6))
5900 mask |= CPSR_F;
5901 if (insn & (1 << 18))
5902 val |= mask;
5904 if (insn & (1 << 17)) {
5905 mask |= CPSR_M;
5906 val |= (insn & 0x1f);
5908 if (mask) {
5909 gen_op_movl_T0_im(val);
5910 gen_set_psr_T0(s, mask, 0);
5912 return;
5914 goto illegal_op;
5916 if (cond != 0xe) {
5917 /* if not always execute, we generate a conditional jump to
5918 next instruction */
5919 s->condlabel = gen_new_label();
5920 gen_test_cc(cond ^ 1, s->condlabel);
5921 s->condjmp = 1;
5923 if ((insn & 0x0f900000) == 0x03000000) {
5924 if ((insn & (1 << 21)) == 0) {
5925 ARCH(6T2);
5926 rd = (insn >> 12) & 0xf;
5927 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5928 if ((insn & (1 << 22)) == 0) {
5929 /* MOVW */
5930 tmp = new_tmp();
5931 tcg_gen_movi_i32(tmp, val);
5932 } else {
5933 /* MOVT */
5934 tmp = load_reg(s, rd);
5935 tcg_gen_ext16u_i32(tmp, tmp);
5936 tcg_gen_ori_i32(tmp, tmp, val << 16);
5938 store_reg(s, rd, tmp);
5939 } else {
5940 if (((insn >> 12) & 0xf) != 0xf)
5941 goto illegal_op;
5942 if (((insn >> 16) & 0xf) == 0) {
5943 gen_nop_hint(s, insn & 0xff);
5944 } else {
5945 /* CPSR = immediate */
5946 val = insn & 0xff;
5947 shift = ((insn >> 8) & 0xf) * 2;
5948 if (shift)
5949 val = (val >> shift) | (val << (32 - shift));
5950 gen_op_movl_T0_im(val);
5951 i = ((insn & (1 << 22)) != 0);
5952 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5953 goto illegal_op;
5956 } else if ((insn & 0x0f900000) == 0x01000000
5957 && (insn & 0x00000090) != 0x00000090) {
5958 /* miscellaneous instructions */
5959 op1 = (insn >> 21) & 3;
5960 sh = (insn >> 4) & 0xf;
5961 rm = insn & 0xf;
5962 switch (sh) {
5963 case 0x0: /* move program status register */
5964 if (op1 & 1) {
5965 /* PSR = reg */
5966 gen_movl_T0_reg(s, rm);
5967 i = ((op1 & 2) != 0);
5968 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5969 goto illegal_op;
5970 } else {
5971 /* reg = PSR */
5972 rd = (insn >> 12) & 0xf;
5973 if (op1 & 2) {
5974 if (IS_USER(s))
5975 goto illegal_op;
5976 tmp = load_cpu_field(spsr);
5977 } else {
5978 tmp = new_tmp();
5979 gen_helper_cpsr_read(tmp);
5981 store_reg(s, rd, tmp);
5983 break;
5984 case 0x1:
5985 if (op1 == 1) {
5986 /* branch/exchange thumb (bx). */
5987 tmp = load_reg(s, rm);
5988 gen_bx(s, tmp);
5989 } else if (op1 == 3) {
5990 /* clz */
5991 rd = (insn >> 12) & 0xf;
5992 tmp = load_reg(s, rm);
5993 gen_helper_clz(tmp, tmp);
5994 store_reg(s, rd, tmp);
5995 } else {
5996 goto illegal_op;
5998 break;
5999 case 0x2:
6000 if (op1 == 1) {
6001 ARCH(5J); /* bxj */
6002 /* Trivial implementation equivalent to bx. */
6003 tmp = load_reg(s, rm);
6004 gen_bx(s, tmp);
6005 } else {
6006 goto illegal_op;
6008 break;
6009 case 0x3:
6010 if (op1 != 1)
6011 goto illegal_op;
6013 /* branch link/exchange thumb (blx) */
6014 tmp = load_reg(s, rm);
6015 tmp2 = new_tmp();
6016 tcg_gen_movi_i32(tmp2, s->pc);
6017 store_reg(s, 14, tmp2);
6018 gen_bx(s, tmp);
6019 break;
6020 case 0x5: /* saturating add/subtract */
6021 rd = (insn >> 12) & 0xf;
6022 rn = (insn >> 16) & 0xf;
6023 tmp = load_reg(s, rm);
6024 tmp2 = load_reg(s, rn);
6025 if (op1 & 2)
6026 gen_helper_double_saturate(tmp2, tmp2);
6027 if (op1 & 1)
6028 gen_helper_sub_saturate(tmp, tmp, tmp2);
6029 else
6030 gen_helper_add_saturate(tmp, tmp, tmp2);
6031 dead_tmp(tmp2);
6032 store_reg(s, rd, tmp);
6033 break;
6034 case 7: /* bkpt */
6035 gen_set_condexec(s);
6036 gen_set_pc_im(s->pc - 4);
6037 gen_exception(EXCP_BKPT);
6038 s->is_jmp = DISAS_JUMP;
6039 break;
6040 case 0x8: /* signed multiply */
6041 case 0xa:
6042 case 0xc:
6043 case 0xe:
6044 rs = (insn >> 8) & 0xf;
6045 rn = (insn >> 12) & 0xf;
6046 rd = (insn >> 16) & 0xf;
6047 if (op1 == 1) {
6048 /* (32 * 16) >> 16 */
6049 tmp = load_reg(s, rm);
6050 tmp2 = load_reg(s, rs);
6051 if (sh & 4)
6052 tcg_gen_sari_i32(tmp2, tmp2, 16);
6053 else
6054 gen_sxth(tmp2);
6055 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6056 tcg_gen_shri_i64(tmp64, tmp64, 16);
6057 tmp = new_tmp();
6058 tcg_gen_trunc_i64_i32(tmp, tmp64);
6059 if ((sh & 2) == 0) {
6060 tmp2 = load_reg(s, rn);
6061 gen_helper_add_setq(tmp, tmp, tmp2);
6062 dead_tmp(tmp2);
6064 store_reg(s, rd, tmp);
6065 } else {
6066 /* 16 * 16 */
6067 tmp = load_reg(s, rm);
6068 tmp2 = load_reg(s, rs);
6069 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6070 dead_tmp(tmp2);
6071 if (op1 == 2) {
6072 tmp64 = tcg_temp_new_i64();
6073 tcg_gen_ext_i32_i64(tmp64, tmp);
6074 dead_tmp(tmp);
6075 gen_addq(s, tmp64, rn, rd);
6076 gen_storeq_reg(s, rn, rd, tmp64);
6077 } else {
6078 if (op1 == 0) {
6079 tmp2 = load_reg(s, rn);
6080 gen_helper_add_setq(tmp, tmp, tmp2);
6081 dead_tmp(tmp2);
6083 store_reg(s, rd, tmp);
6086 break;
6087 default:
6088 goto illegal_op;
6090 } else if (((insn & 0x0e000000) == 0 &&
6091 (insn & 0x00000090) != 0x90) ||
6092 ((insn & 0x0e000000) == (1 << 25))) {
6093 int set_cc, logic_cc, shiftop;
6095 op1 = (insn >> 21) & 0xf;
6096 set_cc = (insn >> 20) & 1;
6097 logic_cc = table_logic_cc[op1] & set_cc;
6099 /* data processing instruction */
6100 if (insn & (1 << 25)) {
6101 /* immediate operand */
6102 val = insn & 0xff;
6103 shift = ((insn >> 8) & 0xf) * 2;
6104 if (shift) {
6105 val = (val >> shift) | (val << (32 - shift));
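            /* Illustrative: this is ARM's rotate-right immediate expansion.
               E.g. imm8 = 0xff with rotate field 4 gives shift = 8 and
               val = 0xff000000. */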
6107 tmp2 = new_tmp();
6108 tcg_gen_movi_i32(tmp2, val);
6109 if (logic_cc && shift) {
6110 gen_set_CF_bit31(tmp2);
6112 } else {
6113 /* register */
6114 rm = (insn) & 0xf;
6115 tmp2 = load_reg(s, rm);
6116 shiftop = (insn >> 5) & 3;
6117 if (!(insn & (1 << 4))) {
6118 shift = (insn >> 7) & 0x1f;
6119 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6120 } else {
6121 rs = (insn >> 8) & 0xf;
6122 tmp = load_reg(s, rs);
6123 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6126 if (op1 != 0x0f && op1 != 0x0d) {
6127 rn = (insn >> 16) & 0xf;
6128 tmp = load_reg(s, rn);
6129 } else {
6130 TCGV_UNUSED(tmp);
6132 rd = (insn >> 12) & 0xf;
6133 switch(op1) {
6134 case 0x00:
6135 tcg_gen_and_i32(tmp, tmp, tmp2);
6136 if (logic_cc) {
6137 gen_logic_CC(tmp);
6139 store_reg_bx(env, s, rd, tmp);
6140 break;
6141 case 0x01:
6142 tcg_gen_xor_i32(tmp, tmp, tmp2);
6143 if (logic_cc) {
6144 gen_logic_CC(tmp);
6146 store_reg_bx(env, s, rd, tmp);
6147 break;
6148 case 0x02:
6149 if (set_cc && rd == 15) {
6150 /* SUBS r15, ... is used for exception return. */
6151 if (IS_USER(s)) {
6152 goto illegal_op;
6154 gen_helper_sub_cc(tmp, tmp, tmp2);
6155 gen_exception_return(s, tmp);
6156 } else {
6157 if (set_cc) {
6158 gen_helper_sub_cc(tmp, tmp, tmp2);
6159 } else {
6160 tcg_gen_sub_i32(tmp, tmp, tmp2);
6162 store_reg_bx(env, s, rd, tmp);
6164 break;
6165 case 0x03:
6166 if (set_cc) {
6167 gen_helper_sub_cc(tmp, tmp2, tmp);
6168 } else {
6169 tcg_gen_sub_i32(tmp, tmp2, tmp);
6171 store_reg_bx(env, s, rd, tmp);
6172 break;
6173 case 0x04:
6174 if (set_cc) {
6175 gen_helper_add_cc(tmp, tmp, tmp2);
6176 } else {
6177 tcg_gen_add_i32(tmp, tmp, tmp2);
6179 store_reg_bx(env, s, rd, tmp);
6180 break;
6181 case 0x05:
6182 if (set_cc) {
6183 gen_helper_adc_cc(tmp, tmp, tmp2);
6184 } else {
6185 gen_add_carry(tmp, tmp, tmp2);
6187 store_reg_bx(env, s, rd, tmp);
6188 break;
6189 case 0x06:
6190 if (set_cc) {
6191 gen_helper_sbc_cc(tmp, tmp, tmp2);
6192 } else {
6193 gen_sub_carry(tmp, tmp, tmp2);
6195 store_reg_bx(env, s, rd, tmp);
6196 break;
6197 case 0x07:
6198 if (set_cc) {
6199 gen_helper_sbc_cc(tmp, tmp2, tmp);
6200 } else {
6201 gen_sub_carry(tmp, tmp2, tmp);
6203 store_reg_bx(env, s, rd, tmp);
6204 break;
6205 case 0x08:
6206 if (set_cc) {
6207 tcg_gen_and_i32(tmp, tmp, tmp2);
6208 gen_logic_CC(tmp);
6210 dead_tmp(tmp);
6211 break;
6212 case 0x09:
6213 if (set_cc) {
6214 tcg_gen_xor_i32(tmp, tmp, tmp2);
6215 gen_logic_CC(tmp);
6217 dead_tmp(tmp);
6218 break;
6219 case 0x0a:
6220 if (set_cc) {
6221 gen_helper_sub_cc(tmp, tmp, tmp2);
6223 dead_tmp(tmp);
6224 break;
6225 case 0x0b:
6226 if (set_cc) {
6227 gen_helper_add_cc(tmp, tmp, tmp2);
6229 dead_tmp(tmp);
6230 break;
6231 case 0x0c:
6232 tcg_gen_or_i32(tmp, tmp, tmp2);
6233 if (logic_cc) {
6234 gen_logic_CC(tmp);
6236 store_reg_bx(env, s, rd, tmp);
6237 break;
6238 case 0x0d:
6239 if (logic_cc && rd == 15) {
6240 /* MOVS r15, ... is used for exception return. */
6241 if (IS_USER(s)) {
6242 goto illegal_op;
6244 gen_exception_return(s, tmp2);
6245 } else {
6246 if (logic_cc) {
6247 gen_logic_CC(tmp2);
6249 store_reg_bx(env, s, rd, tmp2);
6251 break;
6252 case 0x0e:
6253 tcg_gen_bic_i32(tmp, tmp, tmp2);
6254 if (logic_cc) {
6255 gen_logic_CC(tmp);
6257 store_reg_bx(env, s, rd, tmp);
6258 break;
6259 default:
6260 case 0x0f:
6261 tcg_gen_not_i32(tmp2, tmp2);
6262 if (logic_cc) {
6263 gen_logic_CC(tmp2);
6265 store_reg_bx(env, s, rd, tmp2);
6266 break;
6268 if (op1 != 0x0f && op1 != 0x0d) {
6269 dead_tmp(tmp2);
6271 } else {
6272 /* other instructions */
6273 op1 = (insn >> 24) & 0xf;
6274 switch(op1) {
6275 case 0x0:
6276 case 0x1:
6277 /* multiplies, extra load/stores */
6278 sh = (insn >> 5) & 3;
6279 if (sh == 0) {
6280 if (op1 == 0x0) {
6281 rd = (insn >> 16) & 0xf;
6282 rn = (insn >> 12) & 0xf;
6283 rs = (insn >> 8) & 0xf;
6284 rm = (insn) & 0xf;
6285 op1 = (insn >> 20) & 0xf;
6286 switch (op1) {
6287 case 0: case 1: case 2: case 3: case 6:
6288 /* 32 bit mul */
6289 tmp = load_reg(s, rs);
6290 tmp2 = load_reg(s, rm);
6291 tcg_gen_mul_i32(tmp, tmp, tmp2);
6292 dead_tmp(tmp2);
6293 if (insn & (1 << 22)) {
6294 /* Subtract (mls) */
6295 ARCH(6T2);
6296 tmp2 = load_reg(s, rn);
6297 tcg_gen_sub_i32(tmp, tmp2, tmp);
6298 dead_tmp(tmp2);
6299 } else if (insn & (1 << 21)) {
6300 /* Add */
6301 tmp2 = load_reg(s, rn);
6302 tcg_gen_add_i32(tmp, tmp, tmp2);
6303 dead_tmp(tmp2);
6305 if (insn & (1 << 20))
6306 gen_logic_CC(tmp);
6307 store_reg(s, rd, tmp);
6308 break;
6309 default:
6310 /* 64 bit mul */
6311 tmp = load_reg(s, rs);
6312 tmp2 = load_reg(s, rm);
6313 if (insn & (1 << 22))
6314 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6315 else
6316 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6317 if (insn & (1 << 21)) /* mult accumulate */
6318 gen_addq(s, tmp64, rn, rd);
6319 if (!(insn & (1 << 23))) { /* double accumulate */
6320 ARCH(6);
6321 gen_addq_lo(s, tmp64, rn);
6322 gen_addq_lo(s, tmp64, rd);
6324 if (insn & (1 << 20))
6325 gen_logicq_cc(tmp64);
6326 gen_storeq_reg(s, rn, rd, tmp64);
6327 break;
6329 } else {
6330 rn = (insn >> 16) & 0xf;
6331 rd = (insn >> 12) & 0xf;
6332 if (insn & (1 << 23)) {
6333 /* load/store exclusive */
6334 op1 = (insn >> 21) & 0x3;
6335 if (op1)
6336 ARCH(6K);
6337 else
6338 ARCH(6);
6339 gen_movl_T1_reg(s, rn);
6340 addr = cpu_T[1];
6341 if (insn & (1 << 20)) {
6342 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6343 switch (op1) {
6344 case 0: /* ldrex */
6345 tmp = gen_ld32(addr, IS_USER(s));
6346 break;
6347 case 1: /* ldrexd */
6348 tmp = gen_ld32(addr, IS_USER(s));
6349 store_reg(s, rd, tmp);
6350 tcg_gen_addi_i32(addr, addr, 4);
6351 tmp = gen_ld32(addr, IS_USER(s));
6352 rd++;
6353 break;
6354 case 2: /* ldrexb */
6355 tmp = gen_ld8u(addr, IS_USER(s));
6356 break;
6357 case 3: /* ldrexh */
6358 tmp = gen_ld16u(addr, IS_USER(s));
6359 break;
6360 default:
6361 abort();
6363 store_reg(s, rd, tmp);
6364 } else {
6365 int label = gen_new_label();
6366 rm = insn & 0xf;
6367 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6368 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6369 0, label);
6370 tmp = load_reg(s,rm);
6371 switch (op1) {
6372 case 0: /* strex */
6373 gen_st32(tmp, addr, IS_USER(s));
6374 break;
6375 case 1: /* strexd */
6376 gen_st32(tmp, addr, IS_USER(s));
6377 tcg_gen_addi_i32(addr, addr, 4);
6378 tmp = load_reg(s, rm + 1);
6379 gen_st32(tmp, addr, IS_USER(s));
6380 break;
6381 case 2: /* strexb */
6382 gen_st8(tmp, addr, IS_USER(s));
6383 break;
6384 case 3: /* strexh */
6385 gen_st16(tmp, addr, IS_USER(s));
6386 break;
6387 default:
6388 abort();
6390 gen_set_label(label);
6391 gen_movl_reg_T0(s, rd);
6393 } else {
6394 /* SWP instruction */
6395 rm = (insn) & 0xf;
6397 /* ??? This is not really atomic. However we know
6398 we never have multiple CPUs running in parallel,
6399 so it is good enough. */
6400 addr = load_reg(s, rn);
6401 tmp = load_reg(s, rm);
6402 if (insn & (1 << 22)) {
6403 tmp2 = gen_ld8u(addr, IS_USER(s));
6404 gen_st8(tmp, addr, IS_USER(s));
6405 } else {
6406 tmp2 = gen_ld32(addr, IS_USER(s));
6407 gen_st32(tmp, addr, IS_USER(s));
6409 dead_tmp(addr);
6410 store_reg(s, rd, tmp2);
6413 } else {
6414 int address_offset;
6415 int load;
6416 /* Misc load/store */
6417 rn = (insn >> 16) & 0xf;
6418 rd = (insn >> 12) & 0xf;
6419 addr = load_reg(s, rn);
6420 if (insn & (1 << 24))
6421 gen_add_datah_offset(s, insn, 0, addr);
6422 address_offset = 0;
6423 if (insn & (1 << 20)) {
6424 /* load */
6425 switch(sh) {
6426 case 1:
6427 tmp = gen_ld16u(addr, IS_USER(s));
6428 break;
6429 case 2:
6430 tmp = gen_ld8s(addr, IS_USER(s));
6431 break;
6432 default:
6433 case 3:
6434 tmp = gen_ld16s(addr, IS_USER(s));
6435 break;
6437 load = 1;
6438 } else if (sh & 2) {
6439 /* doubleword */
6440 if (sh & 1) {
6441 /* store */
6442 tmp = load_reg(s, rd);
6443 gen_st32(tmp, addr, IS_USER(s));
6444 tcg_gen_addi_i32(addr, addr, 4);
6445 tmp = load_reg(s, rd + 1);
6446 gen_st32(tmp, addr, IS_USER(s));
6447 load = 0;
6448 } else {
6449 /* load */
6450 tmp = gen_ld32(addr, IS_USER(s));
6451 store_reg(s, rd, tmp);
6452 tcg_gen_addi_i32(addr, addr, 4);
6453 tmp = gen_ld32(addr, IS_USER(s));
6454 rd++;
6455 load = 1;
6457 address_offset = -4;
6458 } else {
6459 /* store */
6460 tmp = load_reg(s, rd);
6461 gen_st16(tmp, addr, IS_USER(s));
6462 load = 0;
6464 /* Perform base writeback before the loaded value to
6465 ensure correct behavior with overlapping index registers.
6466 ldrd with base writeback is undefined if the
6467 destination and index registers overlap. */
6468 if (!(insn & (1 << 24))) {
6469 gen_add_datah_offset(s, insn, address_offset, addr);
6470 store_reg(s, rn, addr);
6471 } else if (insn & (1 << 21)) {
6472 if (address_offset)
6473 tcg_gen_addi_i32(addr, addr, address_offset);
6474 store_reg(s, rn, addr);
6475 } else {
6476 dead_tmp(addr);
6478 if (load) {
6479 /* Complete the load. */
6480 store_reg(s, rd, tmp);
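            /* Illustrative: completing the load after base writeback means
               that for an overlapping case such as ldrh r0, [r0], #2 the
               loaded value, not the written-back base, ends up in r0 under
               this implementation. */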
6483 break;
6484 case 0x4:
6485 case 0x5:
6486 goto do_ldst;
6487 case 0x6:
6488 case 0x7:
6489 if (insn & (1 << 4)) {
6490 ARCH(6);
6491 /* Armv6 Media instructions. */
6492 rm = insn & 0xf;
6493 rn = (insn >> 16) & 0xf;
6494 rd = (insn >> 12) & 0xf;
6495 rs = (insn >> 8) & 0xf;
6496 switch ((insn >> 23) & 3) {
6497 case 0: /* Parallel add/subtract. */
6498 op1 = (insn >> 20) & 7;
6499 tmp = load_reg(s, rn);
6500 tmp2 = load_reg(s, rm);
6501 sh = (insn >> 5) & 7;
6502 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6503 goto illegal_op;
6504 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6505 dead_tmp(tmp2);
6506 store_reg(s, rd, tmp);
6507 break;
6508 case 1:
6509 if ((insn & 0x00700020) == 0) {
6510 /* Halfword pack. */
6511 tmp = load_reg(s, rn);
6512 tmp2 = load_reg(s, rm);
6513 shift = (insn >> 7) & 0x1f;
6514 if (insn & (1 << 6)) {
6515 /* pkhtb */
6516 if (shift == 0)
6517 shift = 31;
6518 tcg_gen_sari_i32(tmp2, tmp2, shift);
6519 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6520 tcg_gen_ext16u_i32(tmp2, tmp2);
6521 } else {
6522 /* pkhbt */
6523 if (shift)
6524 tcg_gen_shli_i32(tmp2, tmp2, shift);
6525 tcg_gen_ext16u_i32(tmp, tmp);
6526 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6528 tcg_gen_or_i32(tmp, tmp, tmp2);
6529 dead_tmp(tmp2);
6530 store_reg(s, rd, tmp);
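                    /* Illustrative: PKHBT computes
                       rd = (rn & 0xffff) | ((rm << shift) & 0xffff0000);
                       PKHTB computes
                       rd = (rn & 0xffff0000) | ((rm >> shift) & 0xffff)
                       using an arithmetic right shift (shift 0 meaning 31). */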
6531 } else if ((insn & 0x00200020) == 0x00200000) {
6532 /* [us]sat */
6533 tmp = load_reg(s, rm);
6534 shift = (insn >> 7) & 0x1f;
6535 if (insn & (1 << 6)) {
6536 if (shift == 0)
6537 shift = 31;
6538 tcg_gen_sari_i32(tmp, tmp, shift);
6539 } else {
6540 tcg_gen_shli_i32(tmp, tmp, shift);
6542 sh = (insn >> 16) & 0x1f;
6543 if (sh != 0) {
6544 if (insn & (1 << 22))
6545 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6546 else
6547 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6549 store_reg(s, rd, tmp);
6550 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6551 /* [us]sat16 */
6552 tmp = load_reg(s, rm);
6553 sh = (insn >> 16) & 0x1f;
6554 if (sh != 0) {
6555 if (insn & (1 << 22))
6556 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6557 else
6558 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6560 store_reg(s, rd, tmp);
6561 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6562 /* Select bytes. */
6563 tmp = load_reg(s, rn);
6564 tmp2 = load_reg(s, rm);
6565 tmp3 = new_tmp();
6566 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6567 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6568 dead_tmp(tmp3);
6569 dead_tmp(tmp2);
6570 store_reg(s, rd, tmp);
6571 } else if ((insn & 0x000003e0) == 0x00000060) {
6572 tmp = load_reg(s, rm);
6573 shift = (insn >> 10) & 3;
6574 /* ??? In many cases it's not necessary to do a
6575 rotate, a shift is sufficient. */
6576 if (shift != 0)
6577 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6578 op1 = (insn >> 20) & 7;
6579 switch (op1) {
6580 case 0: gen_sxtb16(tmp); break;
6581 case 2: gen_sxtb(tmp); break;
6582 case 3: gen_sxth(tmp); break;
6583 case 4: gen_uxtb16(tmp); break;
6584 case 6: gen_uxtb(tmp); break;
6585 case 7: gen_uxth(tmp); break;
6586 default: goto illegal_op;
6588 if (rn != 15) {
6589 tmp2 = load_reg(s, rn);
6590 if ((op1 & 3) == 0) {
6591 gen_add16(tmp, tmp2);
6592 } else {
6593 tcg_gen_add_i32(tmp, tmp, tmp2);
6594 dead_tmp(tmp2);
6597 store_reg(s, rd, tmp);
6598 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6599 /* rev */
6600 tmp = load_reg(s, rm);
6601 if (insn & (1 << 22)) {
6602 if (insn & (1 << 7)) {
6603 gen_revsh(tmp);
6604 } else {
6605 ARCH(6T2);
6606 gen_helper_rbit(tmp, tmp);
6608 } else {
6609 if (insn & (1 << 7))
6610 gen_rev16(tmp);
6611 else
6612 tcg_gen_bswap32_i32(tmp, tmp);
6614 store_reg(s, rd, tmp);
6615 } else {
6616 goto illegal_op;
6618 break;
6619 case 2: /* Multiplies (Type 3). */
6620 tmp = load_reg(s, rm);
6621 tmp2 = load_reg(s, rs);
6622 if (insn & (1 << 20)) {
6623 /* Signed multiply most significant [accumulate]. */
6624 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6625 if (insn & (1 << 5))
6626 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6627 tcg_gen_shri_i64(tmp64, tmp64, 32);
6628 tmp = new_tmp();
6629 tcg_gen_trunc_i64_i32(tmp, tmp64);
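                    /* Illustrative: SMMUL/SMMLA/SMMLS keep the top 32 bits
                       of the 64-bit product; the optional 0x80000000 addend
                       (bit 5 set, the "R" forms) rounds instead of
                       truncating. */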
6630 if (rd != 15) {
6631 tmp2 = load_reg(s, rd);
6632 if (insn & (1 << 6)) {
6633 tcg_gen_sub_i32(tmp, tmp, tmp2);
6634 } else {
6635 tcg_gen_add_i32(tmp, tmp, tmp2);
6637 dead_tmp(tmp2);
6639 store_reg(s, rn, tmp);
6640 } else {
6641 if (insn & (1 << 5))
6642 gen_swap_half(tmp2);
6643 gen_smul_dual(tmp, tmp2);
6644 /* This addition cannot overflow. */
6645 if (insn & (1 << 6)) {
6646 tcg_gen_sub_i32(tmp, tmp, tmp2);
6647 } else {
6648 tcg_gen_add_i32(tmp, tmp, tmp2);
6650 dead_tmp(tmp2);
6651 if (insn & (1 << 22)) {
6652 /* smlald, smlsld */
6653 tmp64 = tcg_temp_new_i64();
6654 tcg_gen_ext_i32_i64(tmp64, tmp);
6655 dead_tmp(tmp);
6656 gen_addq(s, tmp64, rd, rn);
6657 gen_storeq_reg(s, rd, rn, tmp64);
6658 } else {
6659 /* smuad, smusd, smlad, smlsd */
6660 if (rd != 15)
6662 tmp2 = load_reg(s, rd);
6663 gen_helper_add_setq(tmp, tmp, tmp2);
6664 dead_tmp(tmp2);
6666 store_reg(s, rn, tmp);
6669 break;
6670 case 3:
6671 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6672 switch (op1) {
6673 case 0: /* Unsigned sum of absolute differences. */
6674 ARCH(6);
6675 tmp = load_reg(s, rm);
6676 tmp2 = load_reg(s, rs);
6677 gen_helper_usad8(tmp, tmp, tmp2);
6678 dead_tmp(tmp2);
6679 if (rd != 15) {
6680 tmp2 = load_reg(s, rd);
6681 tcg_gen_add_i32(tmp, tmp, tmp2);
6682 dead_tmp(tmp2);
6684 store_reg(s, rn, tmp);
6685 break;
6686 case 0x20: case 0x24: case 0x28: case 0x2c:
6687 /* Bitfield insert/clear. */
6688 ARCH(6T2);
6689 shift = (insn >> 7) & 0x1f;
6690 i = (insn >> 16) & 0x1f;
6691 i = i + 1 - shift;
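/* Field width for bfi/bfc: insn[20:16] is the msb and insn[11:7]
 * (shift) the lsb, so i = msb + 1 - lsb.  E.g. msb=7, lsb=4 gives a
 * 4-bit field with mask (1u << 4) - 1 = 0xf. */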
6692 if (rm == 15) {
6693 tmp = new_tmp();
6694 tcg_gen_movi_i32(tmp, 0);
6695 } else {
6696 tmp = load_reg(s, rm);
6698 if (i != 32) {
6699 tmp2 = load_reg(s, rd);
6700 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6701 dead_tmp(tmp2);
6703 store_reg(s, rd, tmp);
6704 break;
6705 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6706 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6707 ARCH(6T2);
6708 tmp = load_reg(s, rm);
6709 shift = (insn >> 7) & 0x1f;
6710 i = ((insn >> 16) & 0x1f) + 1;
6711 if (shift + i > 32)
6712 goto illegal_op;
6713 if (i < 32) {
6714 if (op1 & 0x20) {
6715 gen_ubfx(tmp, shift, (1u << i) - 1);
6716 } else {
6717 gen_sbfx(tmp, shift, i);
6720 store_reg(s, rd, tmp);
6721 break;
6722 default:
6723 goto illegal_op;
6725 break;
6727 break;
6729 do_ldst:
6730 /* Check for undefined extension instructions
6731 * per the ARM Bible, i.e.:
6732 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6733 */
6734 sh = (0xf << 20) | (0xf << 4);
6735 if (op1 == 0x7 && ((insn & sh) == sh))
6736 {
6737 goto illegal_op;
6738 }
6739 /* load/store byte/word */
6740 rn = (insn >> 16) & 0xf;
6741 rd = (insn >> 12) & 0xf;
6742 tmp2 = load_reg(s, rn);
6743 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
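/* i is the MMU "user" flag for this access: it is forced on for the
 * ldrt/strt family, i.e. post-indexed (bit 24 clear) with W set
 * (bit 21), which always use the unprivileged mapping. */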
6744 if (insn & (1 << 24))
6745 gen_add_data_offset(s, insn, tmp2);
6746 if (insn & (1 << 20)) {
6747 /* load */
6748 if (insn & (1 << 22)) {
6749 tmp = gen_ld8u(tmp2, i);
6750 } else {
6751 tmp = gen_ld32(tmp2, i);
6753 } else {
6754 /* store */
6755 tmp = load_reg(s, rd);
6756 if (insn & (1 << 22))
6757 gen_st8(tmp, tmp2, i);
6758 else
6759 gen_st32(tmp, tmp2, i);
6761 if (!(insn & (1 << 24))) {
6762 gen_add_data_offset(s, insn, tmp2);
6763 store_reg(s, rn, tmp2);
6764 } else if (insn & (1 << 21)) {
6765 store_reg(s, rn, tmp2);
6766 } else {
6767 dead_tmp(tmp2);
6769 if (insn & (1 << 20)) {
6770 /* Complete the load. */
6771 if (rd == 15)
6772 gen_bx(s, tmp);
6773 else
6774 store_reg(s, rd, tmp);
6776 break;
6777 case 0x08:
6778 case 0x09:
6780 int j, n, user, loaded_base;
6781 TCGv loaded_var;
6782 /* load/store multiple words */
6783 /* XXX: store correct base if write back */
6784 user = 0;
6785 if (insn & (1 << 22)) {
6786 if (IS_USER(s))
6787 goto illegal_op; /* only usable in supervisor mode */
6789 if ((insn & (1 << 15)) == 0)
6790 user = 1;
6792 rn = (insn >> 16) & 0xf;
6793 addr = load_reg(s, rn);
6795 /* compute total size */
6796 loaded_base = 0;
6797 TCGV_UNUSED(loaded_var);
6798 n = 0;
6799 for(i=0;i<16;i++) {
6800 if (insn & (1 << i))
6801 n++;
6803 /* XXX: test invalid n == 0 case ? */
6804 if (insn & (1 << 23)) {
6805 if (insn & (1 << 24)) {
6806 /* pre increment */
6807 tcg_gen_addi_i32(addr, addr, 4);
6808 } else {
6809 /* post increment */
6811 } else {
6812 if (insn & (1 << 24)) {
6813 /* pre decrement */
6814 tcg_gen_addi_i32(addr, addr, -(n * 4));
6815 } else {
6816 /* post decrement */
6817 if (n != 1)
6818 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6819 }
6820 }
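/* First-transfer address computed above, for n registers (roughly):
 * increment-before base+4, increment-after base, decrement-before
 * base-4n, decrement-after base-4(n-1); the loop below then steps
 * upward by 4 after each transfer. */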
6821 j = 0;
6822 for(i=0;i<16;i++) {
6823 if (insn & (1 << i)) {
6824 if (insn & (1 << 20)) {
6825 /* load */
6826 tmp = gen_ld32(addr, IS_USER(s));
6827 if (i == 15) {
6828 gen_bx(s, tmp);
6829 } else if (user) {
6830 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6831 dead_tmp(tmp);
6832 } else if (i == rn) {
6833 loaded_var = tmp;
6834 loaded_base = 1;
6835 } else {
6836 store_reg(s, i, tmp);
6838 } else {
6839 /* store */
6840 if (i == 15) {
6841 /* special case: r15 = PC + 8 */
6842 val = (long)s->pc + 4;
6843 tmp = new_tmp();
6844 tcg_gen_movi_i32(tmp, val);
6845 } else if (user) {
6846 tmp = new_tmp();
6847 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6848 } else {
6849 tmp = load_reg(s, i);
6851 gen_st32(tmp, addr, IS_USER(s));
6853 j++;
6854 /* no need to add after the last transfer */
6855 if (j != n)
6856 tcg_gen_addi_i32(addr, addr, 4);
6859 if (insn & (1 << 21)) {
6860 /* write back */
6861 if (insn & (1 << 23)) {
6862 if (insn & (1 << 24)) {
6863 /* pre increment */
6864 } else {
6865 /* post increment */
6866 tcg_gen_addi_i32(addr, addr, 4);
6868 } else {
6869 if (insn & (1 << 24)) {
6870 /* pre decrement */
6871 if (n != 1)
6872 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6873 } else {
6874 /* post decrement */
6875 tcg_gen_addi_i32(addr, addr, -(n * 4));
6878 store_reg(s, rn, addr);
6879 } else {
6880 dead_tmp(addr);
6882 if (loaded_base) {
6883 store_reg(s, rn, loaded_var);
6885 if ((insn & (1 << 22)) && !user) {
6886 /* Restore CPSR from SPSR. */
6887 tmp = load_cpu_field(spsr);
6888 gen_set_cpsr(tmp, 0xffffffff);
6889 dead_tmp(tmp);
6890 s->is_jmp = DISAS_UPDATE;
6893 break;
6894 case 0xa:
6895 case 0xb:
6897 int32_t offset;
6899 /* branch (and link) */
6900 val = (int32_t)s->pc;
6901 if (insn & (1 << 24)) {
6902 tmp = new_tmp();
6903 tcg_gen_movi_i32(tmp, val);
6904 store_reg(s, 14, tmp);
6906 offset = (((int32_t)insn << 8) >> 8);
6907 val += (offset << 2) + 4;
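/* Branch target example: the 24-bit immediate is sign-extended and
 * scaled by 4; since s->pc is already insn address + 4, the extra +4
 * yields the architectural base of insn address + 8, so b with
 * imm24 == 0 branches to its own address + 8. */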
6908 gen_jmp(s, val);
6910 break;
6911 case 0xc:
6912 case 0xd:
6913 case 0xe:
6914 /* Coprocessor. */
6915 if (disas_coproc_insn(env, s, insn))
6916 goto illegal_op;
6917 break;
6918 case 0xf:
6919 /* swi */
6920 gen_set_pc_im(s->pc);
6921 s->is_jmp = DISAS_SWI;
6922 break;
6923 default:
6924 illegal_op:
6925 gen_set_condexec(s);
6926 gen_set_pc_im(s->pc - 4);
6927 gen_exception(EXCP_UDEF);
6928 s->is_jmp = DISAS_JUMP;
6929 break;
6930 }
6931 }
6934 /* Return true if this is a Thumb-2 logical op. */
6935 static int
6936 thumb2_logic_op(int op)
6937 {
6938 return (op < 8);
6939 }
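/* Informally: ops 0-7 of the Thumb-2 data-processing encoding form
 * the logical group (the valid ones here being and, bic, orr, orn
 * and eor), whose carry comes from the shifter rather than from an
 * arithmetic result. */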
6941 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6942 then set condition code flags based on the result of the operation.
6943 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6944 to the high bit of T1.
6945 Returns zero if the opcode is valid. */
6947 static int
6948 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6949 {
6950 int logic_cc;
6952 logic_cc = 0;
6953 switch (op) {
6954 case 0: /* and */
6955 gen_op_andl_T0_T1();
6956 logic_cc = conds;
6957 break;
6958 case 1: /* bic */
6959 gen_op_bicl_T0_T1();
6960 logic_cc = conds;
6961 break;
6962 case 2: /* orr */
6963 gen_op_orl_T0_T1();
6964 logic_cc = conds;
6965 break;
6966 case 3: /* orn */
6967 gen_op_notl_T1();
6968 gen_op_orl_T0_T1();
6969 logic_cc = conds;
6970 break;
6971 case 4: /* eor */
6972 gen_op_xorl_T0_T1();
6973 logic_cc = conds;
6974 break;
6975 case 8: /* add */
6976 if (conds)
6977 gen_op_addl_T0_T1_cc();
6978 else
6979 gen_op_addl_T0_T1();
6980 break;
6981 case 10: /* adc */
6982 if (conds)
6983 gen_op_adcl_T0_T1_cc();
6984 else
6985 gen_adc_T0_T1();
6986 break;
6987 case 11: /* sbc */
6988 if (conds)
6989 gen_op_sbcl_T0_T1_cc();
6990 else
6991 gen_sbc_T0_T1();
6992 break;
6993 case 13: /* sub */
6994 if (conds)
6995 gen_op_subl_T0_T1_cc();
6996 else
6997 gen_op_subl_T0_T1();
6998 break;
6999 case 14: /* rsb */
7000 if (conds)
7001 gen_op_rsbl_T0_T1_cc();
7002 else
7003 gen_op_rsbl_T0_T1();
7004 break;
7005 default: /* 5, 6, 7, 9, 12, 15. */
7006 return 1;
7008 if (logic_cc) {
7009 gen_op_logic_T0_cc();
7010 if (shifter_out)
7011 gen_set_CF_bit31(cpu_T[1]);
7012 }
7013 return 0;
7014 }
7016 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7017 is not legal. */
7018 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7019 {
7020 uint32_t insn, imm, shift, offset;
7021 uint32_t rd, rn, rm, rs;
7022 TCGv tmp;
7023 TCGv tmp2;
7024 TCGv tmp3;
7025 TCGv addr;
7026 TCGv_i64 tmp64;
7027 int op;
7028 int shiftop;
7029 int conds;
7030 int logic_cc;
7032 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7033 || arm_feature (env, ARM_FEATURE_M))) {
7034 /* Thumb-1 cores may need to treat bl and blx as a pair of
7035 16-bit instructions to get correct prefetch abort behavior. */
7036 insn = insn_hw1;
7037 if ((insn & (1 << 12)) == 0) {
7038 /* Second half of blx. */
7039 offset = ((insn & 0x7ff) << 1);
7040 tmp = load_reg(s, 14);
7041 tcg_gen_addi_i32(tmp, tmp, offset);
7042 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7044 tmp2 = new_tmp();
7045 tcg_gen_movi_i32(tmp2, s->pc | 1);
7046 store_reg(s, 14, tmp2);
7047 gen_bx(s, tmp);
7048 return 0;
7050 if (insn & (1 << 11)) {
7051 /* Second half of bl. */
7052 offset = ((insn & 0x7ff) << 1) | 1;
7053 tmp = load_reg(s, 14);
7054 tcg_gen_addi_i32(tmp, tmp, offset);
7056 tmp2 = new_tmp();
7057 tcg_gen_movi_i32(tmp2, s->pc | 1);
7058 store_reg(s, 14, tmp2);
7059 gen_bx(s, tmp);
7060 return 0;
7062 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7063 /* Instruction spans a page boundary. Implement it as two
7064 16-bit instructions in case the second half causes a
7065 prefetch abort. */
7066 offset = ((int32_t)insn << 21) >> 9;
7067 gen_op_movl_T0_im(s->pc + 2 + offset);
7068 gen_movl_reg_T0(s, 14);
7069 return 0;
7071 /* Fall through to 32-bit decode. */
7074 insn = lduw_code(s->pc);
7075 s->pc += 2;
7076 insn |= (uint32_t)insn_hw1 << 16;
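/* From here on the 32-bit instruction is held with the first
 * halfword (hw1) in bits [31:16] and the second (hw2) in bits
 * [15:0], which is the layout assumed by the field extractions
 * below. */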
7078 if ((insn & 0xf800e800) != 0xf000e800) {
7079 ARCH(6T2);
7082 rn = (insn >> 16) & 0xf;
7083 rs = (insn >> 12) & 0xf;
7084 rd = (insn >> 8) & 0xf;
7085 rm = insn & 0xf;
7086 switch ((insn >> 25) & 0xf) {
7087 case 0: case 1: case 2: case 3:
7088 /* 16-bit instructions. Should never happen. */
7089 abort();
7090 case 4:
7091 if (insn & (1 << 22)) {
7092 /* Other load/store, table branch. */
7093 if (insn & 0x01200000) {
7094 /* Load/store doubleword. */
7095 if (rn == 15) {
7096 addr = new_tmp();
7097 tcg_gen_movi_i32(addr, s->pc & ~3);
7098 } else {
7099 addr = load_reg(s, rn);
7101 offset = (insn & 0xff) * 4;
7102 if ((insn & (1 << 23)) == 0)
7103 offset = -offset;
7104 if (insn & (1 << 24)) {
7105 tcg_gen_addi_i32(addr, addr, offset);
7106 offset = 0;
7108 if (insn & (1 << 20)) {
7109 /* ldrd */
7110 tmp = gen_ld32(addr, IS_USER(s));
7111 store_reg(s, rs, tmp);
7112 tcg_gen_addi_i32(addr, addr, 4);
7113 tmp = gen_ld32(addr, IS_USER(s));
7114 store_reg(s, rd, tmp);
7115 } else {
7116 /* strd */
7117 tmp = load_reg(s, rs);
7118 gen_st32(tmp, addr, IS_USER(s));
7119 tcg_gen_addi_i32(addr, addr, 4);
7120 tmp = load_reg(s, rd);
7121 gen_st32(tmp, addr, IS_USER(s));
7123 if (insn & (1 << 21)) {
7124 /* Base writeback. */
7125 if (rn == 15)
7126 goto illegal_op;
7127 tcg_gen_addi_i32(addr, addr, offset - 4);
7128 store_reg(s, rn, addr);
7129 } else {
7130 dead_tmp(addr);
7132 } else if ((insn & (1 << 23)) == 0) {
7133 /* Load/store exclusive word. */
7134 gen_movl_T1_reg(s, rn);
7135 addr = cpu_T[1];
7136 if (insn & (1 << 20)) {
7137 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7138 tmp = gen_ld32(addr, IS_USER(s));
7139 store_reg(s, rd, tmp);
7140 } else {
7141 int label = gen_new_label();
7142 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7143 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7144 0, label);
7145 tmp = load_reg(s, rs);
7146 gen_st32(tmp, cpu_T[1], IS_USER(s));
7147 gen_set_label(label);
7148 gen_movl_reg_T0(s, rd);
7150 } else if ((insn & (1 << 6)) == 0) {
7151 /* Table Branch. */
7152 if (rn == 15) {
7153 addr = new_tmp();
7154 tcg_gen_movi_i32(addr, s->pc);
7155 } else {
7156 addr = load_reg(s, rn);
7158 tmp = load_reg(s, rm);
7159 tcg_gen_add_i32(addr, addr, tmp);
7160 if (insn & (1 << 4)) {
7161 /* tbh */
7162 tcg_gen_add_i32(addr, addr, tmp);
7163 dead_tmp(tmp);
7164 tmp = gen_ld16u(addr, IS_USER(s));
7165 } else { /* tbb */
7166 dead_tmp(tmp);
7167 tmp = gen_ld8u(addr, IS_USER(s));
7169 dead_tmp(addr);
7170 tcg_gen_shli_i32(tmp, tmp, 1);
7171 tcg_gen_addi_i32(tmp, tmp, s->pc);
7172 store_reg(s, 15, tmp);
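/* tbb/tbh example: the table entry (a byte for tbb, a halfword for
 * tbh) is a count of halfwords, so the target is pc + 2 * entry;
 * the shift above doubles the entry and s->pc supplies the base. */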
7173 } else {
7174 /* Load/store exclusive byte/halfword/doubleword. */
7175 /* ??? These are not really atomic. However we know
7176 we never have multiple CPUs running in parallel,
7177 so it is good enough. */
7178 op = (insn >> 4) & 0x3;
7179 /* Must use a global reg for the address because we have
7180 a conditional branch in the store instruction. */
7181 gen_movl_T1_reg(s, rn);
7182 addr = cpu_T[1];
7183 if (insn & (1 << 20)) {
7184 gen_helper_mark_exclusive(cpu_env, addr);
7185 switch (op) {
7186 case 0:
7187 tmp = gen_ld8u(addr, IS_USER(s));
7188 break;
7189 case 1:
7190 tmp = gen_ld16u(addr, IS_USER(s));
7191 break;
7192 case 3:
7193 tmp = gen_ld32(addr, IS_USER(s));
7194 tcg_gen_addi_i32(addr, addr, 4);
7195 tmp2 = gen_ld32(addr, IS_USER(s));
7196 store_reg(s, rd, tmp2);
7197 break;
7198 default:
7199 goto illegal_op;
7201 store_reg(s, rs, tmp);
7202 } else {
7203 int label = gen_new_label();
7204 /* Must use a global that is not killed by the branch. */
7205 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7206 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7207 tmp = load_reg(s, rs);
7208 switch (op) {
7209 case 0:
7210 gen_st8(tmp, addr, IS_USER(s));
7211 break;
7212 case 1:
7213 gen_st16(tmp, addr, IS_USER(s));
7214 break;
7215 case 3:
7216 gen_st32(tmp, addr, IS_USER(s));
7217 tcg_gen_addi_i32(addr, addr, 4);
7218 tmp = load_reg(s, rd);
7219 gen_st32(tmp, addr, IS_USER(s));
7220 break;
7221 default:
7222 goto illegal_op;
7224 gen_set_label(label);
7225 gen_movl_reg_T0(s, rm);
7228 } else {
7229 /* Load/store multiple, RFE, SRS. */
7230 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7231 /* Not available in user mode. */
7232 if (IS_USER(s))
7233 goto illegal_op;
7234 if (insn & (1 << 20)) {
7235 /* rfe */
7236 addr = load_reg(s, rn);
7237 if ((insn & (1 << 24)) == 0)
7238 tcg_gen_addi_i32(addr, addr, -8);
7239 /* Load PC into tmp and CPSR into tmp2. */
7240 tmp = gen_ld32(addr, 0);
7241 tcg_gen_addi_i32(addr, addr, 4);
7242 tmp2 = gen_ld32(addr, 0);
7243 if (insn & (1 << 21)) {
7244 /* Base writeback. */
7245 if (insn & (1 << 24)) {
7246 tcg_gen_addi_i32(addr, addr, 4);
7247 } else {
7248 tcg_gen_addi_i32(addr, addr, -4);
7250 store_reg(s, rn, addr);
7251 } else {
7252 dead_tmp(addr);
7254 gen_rfe(s, tmp, tmp2);
7255 } else {
7256 /* srs */
7257 op = (insn & 0x1f);
7258 if (op == (env->uncached_cpsr & CPSR_M)) {
7259 addr = load_reg(s, 13);
7260 } else {
7261 addr = new_tmp();
7262 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7264 if ((insn & (1 << 24)) == 0) {
7265 tcg_gen_addi_i32(addr, addr, -8);
7267 tmp = load_reg(s, 14);
7268 gen_st32(tmp, addr, 0);
7269 tcg_gen_addi_i32(addr, addr, 4);
7270 tmp = new_tmp();
7271 gen_helper_cpsr_read(tmp);
7272 gen_st32(tmp, addr, 0);
7273 if (insn & (1 << 21)) {
7274 if ((insn & (1 << 24)) == 0) {
7275 tcg_gen_addi_i32(addr, addr, -4);
7276 } else {
7277 tcg_gen_addi_i32(addr, addr, 4);
7279 if (op == (env->uncached_cpsr & CPSR_M)) {
7280 store_reg(s, 13, addr);
7281 } else {
7282 gen_helper_set_r13_banked(cpu_env,
7283 tcg_const_i32(op), addr);
7285 } else {
7286 dead_tmp(addr);
7289 } else {
7290 int i;
7291 /* Load/store multiple. */
7292 addr = load_reg(s, rn);
7293 offset = 0;
7294 for (i = 0; i < 16; i++) {
7295 if (insn & (1 << i))
7296 offset += 4;
7298 if (insn & (1 << 24)) {
7299 tcg_gen_addi_i32(addr, addr, -offset);
7302 for (i = 0; i < 16; i++) {
7303 if ((insn & (1 << i)) == 0)
7304 continue;
7305 if (insn & (1 << 20)) {
7306 /* Load. */
7307 tmp = gen_ld32(addr, IS_USER(s));
7308 if (i == 15) {
7309 gen_bx(s, tmp);
7310 } else {
7311 store_reg(s, i, tmp);
7313 } else {
7314 /* Store. */
7315 tmp = load_reg(s, i);
7316 gen_st32(tmp, addr, IS_USER(s));
7318 tcg_gen_addi_i32(addr, addr, 4);
7320 if (insn & (1 << 21)) {
7321 /* Base register writeback. */
7322 if (insn & (1 << 24)) {
7323 tcg_gen_addi_i32(addr, addr, -offset);
7325 /* Fault if writeback register is in register list. */
7326 if (insn & (1 << rn))
7327 goto illegal_op;
7328 store_reg(s, rn, addr);
7329 } else {
7330 dead_tmp(addr);
7334 break;
7335 case 5: /* Data processing register constant shift. */
7336 if (rn == 15)
7337 gen_op_movl_T0_im(0);
7338 else
7339 gen_movl_T0_reg(s, rn);
7340 gen_movl_T1_reg(s, rm);
7341 op = (insn >> 21) & 0xf;
7342 shiftop = (insn >> 4) & 3;
7343 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7344 conds = (insn & (1 << 20)) != 0;
7345 logic_cc = (conds && thumb2_logic_op(op));
7346 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7347 if (gen_thumb2_data_op(s, op, conds, 0))
7348 goto illegal_op;
7349 if (rd != 15)
7350 gen_movl_reg_T0(s, rd);
7351 break;
7352 case 13: /* Misc data processing. */
7353 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7354 if (op < 4 && (insn & 0xf000) != 0xf000)
7355 goto illegal_op;
7356 switch (op) {
7357 case 0: /* Register controlled shift. */
7358 tmp = load_reg(s, rn);
7359 tmp2 = load_reg(s, rm);
7360 if ((insn & 0x70) != 0)
7361 goto illegal_op;
7362 op = (insn >> 21) & 3;
7363 logic_cc = (insn & (1 << 20)) != 0;
7364 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7365 if (logic_cc)
7366 gen_logic_CC(tmp);
7367 store_reg_bx(env, s, rd, tmp);
7368 break;
7369 case 1: /* Sign/zero extend. */
7370 tmp = load_reg(s, rm);
7371 shift = (insn >> 4) & 3;
7372 /* ??? In many cases it's not necessary to do a
7373 rotate, a shift is sufficient. */
7374 if (shift != 0)
7375 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7376 op = (insn >> 20) & 7;
7377 switch (op) {
7378 case 0: gen_sxth(tmp); break;
7379 case 1: gen_uxth(tmp); break;
7380 case 2: gen_sxtb16(tmp); break;
7381 case 3: gen_uxtb16(tmp); break;
7382 case 4: gen_sxtb(tmp); break;
7383 case 5: gen_uxtb(tmp); break;
7384 default: goto illegal_op;
7386 if (rn != 15) {
7387 tmp2 = load_reg(s, rn);
7388 if ((op >> 1) == 1) {
7389 gen_add16(tmp, tmp2);
7390 } else {
7391 tcg_gen_add_i32(tmp, tmp, tmp2);
7392 dead_tmp(tmp2);
7395 store_reg(s, rd, tmp);
7396 break;
7397 case 2: /* SIMD add/subtract. */
7398 op = (insn >> 20) & 7;
7399 shift = (insn >> 4) & 7;
7400 if ((op & 3) == 3 || (shift & 3) == 3)
7401 goto illegal_op;
7402 tmp = load_reg(s, rn);
7403 tmp2 = load_reg(s, rm);
7404 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7405 dead_tmp(tmp2);
7406 store_reg(s, rd, tmp);
7407 break;
7408 case 3: /* Other data processing. */
7409 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7410 if (op < 4) {
7411 /* Saturating add/subtract. */
7412 tmp = load_reg(s, rn);
7413 tmp2 = load_reg(s, rm);
7414 if (op & 2)
7415 gen_helper_double_saturate(tmp, tmp);
7416 if (op & 1)
7417 gen_helper_sub_saturate(tmp, tmp2, tmp);
7418 else
7419 gen_helper_add_saturate(tmp, tmp, tmp2);
7420 dead_tmp(tmp2);
7421 } else {
7422 tmp = load_reg(s, rn);
7423 switch (op) {
7424 case 0x0a: /* rbit */
7425 gen_helper_rbit(tmp, tmp);
7426 break;
7427 case 0x08: /* rev */
7428 tcg_gen_bswap32_i32(tmp, tmp);
7429 break;
7430 case 0x09: /* rev16 */
7431 gen_rev16(tmp);
7432 break;
7433 case 0x0b: /* revsh */
7434 gen_revsh(tmp);
7435 break;
7436 case 0x10: /* sel */
7437 tmp2 = load_reg(s, rm);
7438 tmp3 = new_tmp();
7439 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7440 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7441 dead_tmp(tmp3);
7442 dead_tmp(tmp2);
7443 break;
7444 case 0x18: /* clz */
7445 gen_helper_clz(tmp, tmp);
7446 break;
7447 default:
7448 goto illegal_op;
7451 store_reg(s, rd, tmp);
7452 break;
7453 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7454 op = (insn >> 4) & 0xf;
7455 tmp = load_reg(s, rn);
7456 tmp2 = load_reg(s, rm);
7457 switch ((insn >> 20) & 7) {
7458 case 0: /* 32 x 32 -> 32 */
7459 tcg_gen_mul_i32(tmp, tmp, tmp2);
7460 dead_tmp(tmp2);
7461 if (rs != 15) {
7462 tmp2 = load_reg(s, rs);
7463 if (op)
7464 tcg_gen_sub_i32(tmp, tmp2, tmp);
7465 else
7466 tcg_gen_add_i32(tmp, tmp, tmp2);
7467 dead_tmp(tmp2);
7469 break;
7470 case 1: /* 16 x 16 -> 32 */
7471 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7472 dead_tmp(tmp2);
7473 if (rs != 15) {
7474 tmp2 = load_reg(s, rs);
7475 gen_helper_add_setq(tmp, tmp, tmp2);
7476 dead_tmp(tmp2);
7478 break;
7479 case 2: /* Dual multiply add. */
7480 case 4: /* Dual multiply subtract. */
7481 if (op)
7482 gen_swap_half(tmp2);
7483 gen_smul_dual(tmp, tmp2);
7484 /* This addition cannot overflow. */
7485 if (insn & (1 << 22)) {
7486 tcg_gen_sub_i32(tmp, tmp, tmp2);
7487 } else {
7488 tcg_gen_add_i32(tmp, tmp, tmp2);
7490 dead_tmp(tmp2);
7491 if (rs != 15)
7493 tmp2 = load_reg(s, rs);
7494 gen_helper_add_setq(tmp, tmp, tmp2);
7495 dead_tmp(tmp2);
7497 break;
7498 case 3: /* 32 * 16 -> 32msb */
7499 if (op)
7500 tcg_gen_sari_i32(tmp2, tmp2, 16);
7501 else
7502 gen_sxth(tmp2);
7503 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7504 tcg_gen_shri_i64(tmp64, tmp64, 16);
7505 tmp = new_tmp();
7506 tcg_gen_trunc_i64_i32(tmp, tmp64);
7507 if (rs != 15)
7509 tmp2 = load_reg(s, rs);
7510 gen_helper_add_setq(tmp, tmp, tmp2);
7511 dead_tmp(tmp2);
7513 break;
7514 case 5: case 6: /* 32 * 32 -> 32msb */
7515 gen_imull(tmp, tmp2);
7516 if (insn & (1 << 5)) {
7517 gen_roundqd(tmp, tmp2);
7518 dead_tmp(tmp2);
7519 } else {
7520 dead_tmp(tmp);
7521 tmp = tmp2;
7523 if (rs != 15) {
7524 tmp2 = load_reg(s, rs);
7525 if (insn & (1 << 21)) {
7526 tcg_gen_add_i32(tmp, tmp, tmp2);
7527 } else {
7528 tcg_gen_sub_i32(tmp, tmp2, tmp);
7530 dead_tmp(tmp2);
7532 break;
7533 case 7: /* Unsigned sum of absolute differences. */
7534 gen_helper_usad8(tmp, tmp, tmp2);
7535 dead_tmp(tmp2);
7536 if (rs != 15) {
7537 tmp2 = load_reg(s, rs);
7538 tcg_gen_add_i32(tmp, tmp, tmp2);
7539 dead_tmp(tmp2);
7541 break;
7543 store_reg(s, rd, tmp);
7544 break;
7545 case 6: case 7: /* 64-bit multiply, Divide. */
7546 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7547 tmp = load_reg(s, rn);
7548 tmp2 = load_reg(s, rm);
7549 if ((op & 0x50) == 0x10) {
7550 /* sdiv, udiv */
7551 if (!arm_feature(env, ARM_FEATURE_DIV))
7552 goto illegal_op;
7553 if (op & 0x20)
7554 gen_helper_udiv(tmp, tmp, tmp2);
7555 else
7556 gen_helper_sdiv(tmp, tmp, tmp2);
7557 dead_tmp(tmp2);
7558 store_reg(s, rd, tmp);
7559 } else if ((op & 0xe) == 0xc) {
7560 /* Dual multiply accumulate long. */
7561 if (op & 1)
7562 gen_swap_half(tmp2);
7563 gen_smul_dual(tmp, tmp2);
7564 if (op & 0x10) {
7565 tcg_gen_sub_i32(tmp, tmp, tmp2);
7566 } else {
7567 tcg_gen_add_i32(tmp, tmp, tmp2);
7569 dead_tmp(tmp2);
7570 /* BUGFIX */
7571 tmp64 = tcg_temp_new_i64();
7572 tcg_gen_ext_i32_i64(tmp64, tmp);
7573 dead_tmp(tmp);
7574 gen_addq(s, tmp64, rs, rd);
7575 gen_storeq_reg(s, rs, rd, tmp64);
7576 } else {
7577 if (op & 0x20) {
7578 /* Unsigned 64-bit multiply */
7579 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7580 } else {
7581 if (op & 8) {
7582 /* smlalxy */
7583 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7584 dead_tmp(tmp2);
7585 tmp64 = tcg_temp_new_i64();
7586 tcg_gen_ext_i32_i64(tmp64, tmp);
7587 dead_tmp(tmp);
7588 } else {
7589 /* Signed 64-bit multiply */
7590 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7593 if (op & 4) {
7594 /* umaal */
7595 gen_addq_lo(s, tmp64, rs);
7596 gen_addq_lo(s, tmp64, rd);
7597 } else if (op & 0x40) {
7598 /* 64-bit accumulate. */
7599 gen_addq(s, tmp64, rs, rd);
7601 gen_storeq_reg(s, rs, rd, tmp64);
7603 break;
7605 break;
7606 case 6: case 7: case 14: case 15:
7607 /* Coprocessor. */
7608 if (((insn >> 24) & 3) == 3) {
7609 /* Translate into the equivalent ARM encoding. */
7610 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
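/* Rough note on the remap above: the Thumb-2 encoding keeps the
 * NEON U bit at bit 28, while the equivalent ARM encoding expects
 * it at bit 24, hence the mask-and-shift before handing the insn to
 * disas_neon_data_insn. */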
7611 if (disas_neon_data_insn(env, s, insn))
7612 goto illegal_op;
7613 } else {
7614 if (insn & (1 << 28))
7615 goto illegal_op;
7616 if (disas_coproc_insn (env, s, insn))
7617 goto illegal_op;
7619 break;
7620 case 8: case 9: case 10: case 11:
7621 if (insn & (1 << 15)) {
7622 /* Branches, misc control. */
7623 if (insn & 0x5000) {
7624 /* Unconditional branch. */
7625 /* signextend(hw1[10:0]) -> offset[31:12]. */
7626 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7627 /* hw1[10:0] -> offset[11:1]. */
7628 offset |= (insn & 0x7ff) << 1;
7629 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7630 offset[24:22] already have the same value because of the
7631 sign extension above. */
7632 offset ^= ((~insn) & (1 << 13)) << 10;
7633 offset ^= ((~insn) & (1 << 11)) << 11;
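/* Example of the I1/I2 munging above: architecturally
 * I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).  offset[23:22] were
 * filled with S by the sign extension, so XORing with ~J1/~J2 flips
 * them exactly where J disagrees with the sign bit. */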
7635 if (insn & (1 << 14)) {
7636 /* Branch and link. */
7637 gen_op_movl_T1_im(s->pc | 1);
7638 gen_movl_reg_T1(s, 14);
7641 offset += s->pc;
7642 if (insn & (1 << 12)) {
7643 /* b/bl */
7644 gen_jmp(s, offset);
7645 } else {
7646 /* blx */
7647 offset &= ~(uint32_t)2;
7648 gen_bx_im(s, offset);
7650 } else if (((insn >> 23) & 7) == 7) {
7651 /* Misc control */
7652 if (insn & (1 << 13))
7653 goto illegal_op;
7655 if (insn & (1 << 26)) {
7656 /* Secure monitor call (v6Z) */
7657 goto illegal_op; /* not implemented. */
7658 } else {
7659 op = (insn >> 20) & 7;
7660 switch (op) {
7661 case 0: /* msr cpsr. */
7662 if (IS_M(env)) {
7663 tmp = load_reg(s, rn);
7664 addr = tcg_const_i32(insn & 0xff);
7665 gen_helper_v7m_msr(cpu_env, addr, tmp);
7666 gen_lookup_tb(s);
7667 break;
7669 /* fall through */
7670 case 1: /* msr spsr. */
7671 if (IS_M(env))
7672 goto illegal_op;
7673 gen_movl_T0_reg(s, rn);
7674 if (gen_set_psr_T0(s,
7675 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7676 op == 1))
7677 goto illegal_op;
7678 break;
7679 case 2: /* cps, nop-hint. */
7680 if (((insn >> 8) & 7) == 0) {
7681 gen_nop_hint(s, insn & 0xff);
7683 /* Implemented as NOP in user mode. */
7684 if (IS_USER(s))
7685 break;
7686 offset = 0;
7687 imm = 0;
7688 if (insn & (1 << 10)) {
7689 if (insn & (1 << 7))
7690 offset |= CPSR_A;
7691 if (insn & (1 << 6))
7692 offset |= CPSR_I;
7693 if (insn & (1 << 5))
7694 offset |= CPSR_F;
7695 if (insn & (1 << 9))
7696 imm = CPSR_A | CPSR_I | CPSR_F;
7698 if (insn & (1 << 8)) {
7699 offset |= 0x1f;
7700 imm |= (insn & 0x1f);
7702 if (offset) {
7703 gen_op_movl_T0_im(imm);
7704 gen_set_psr_T0(s, offset, 0);
7706 break;
7707 case 3: /* Special control operations. */
7708 op = (insn >> 4) & 0xf;
7709 switch (op) {
7710 case 2: /* clrex */
7711 gen_helper_clrex(cpu_env);
7712 break;
7713 case 4: /* dsb */
7714 case 5: /* dmb */
7715 case 6: /* isb */
7716 /* These execute as NOPs. */
7717 ARCH(7);
7718 break;
7719 default:
7720 goto illegal_op;
7722 break;
7723 case 4: /* bxj */
7724 /* Trivial implementation equivalent to bx. */
7725 tmp = load_reg(s, rn);
7726 gen_bx(s, tmp);
7727 break;
7728 case 5: /* Exception return. */
7729 /* Unpredictable in user mode. */
7730 goto illegal_op;
7731 case 6: /* mrs cpsr. */
7732 tmp = new_tmp();
7733 if (IS_M(env)) {
7734 addr = tcg_const_i32(insn & 0xff);
7735 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7736 } else {
7737 gen_helper_cpsr_read(tmp);
7739 store_reg(s, rd, tmp);
7740 break;
7741 case 7: /* mrs spsr. */
7742 /* Not accessible in user mode. */
7743 if (IS_USER(s) || IS_M(env))
7744 goto illegal_op;
7745 tmp = load_cpu_field(spsr);
7746 store_reg(s, rd, tmp);
7747 break;
7750 } else {
7751 /* Conditional branch. */
7752 op = (insn >> 22) & 0xf;
7753 /* Generate a conditional jump to next instruction. */
7754 s->condlabel = gen_new_label();
7755 gen_test_cc(op ^ 1, s->condlabel);
7756 s->condjmp = 1;
7758 /* offset[11:1] = insn[10:0] */
7759 offset = (insn & 0x7ff) << 1;
7760 /* offset[17:12] = insn[21:16]. */
7761 offset |= (insn & 0x003f0000) >> 4;
7762 /* offset[31:20] = insn[26]. */
7763 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7764 /* offset[18] = insn[13]. */
7765 offset |= (insn & (1 << 13)) << 5;
7766 /* offset[19] = insn[11]. */
7767 offset |= (insn & (1 << 11)) << 8;
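/* Unlike the unconditional form above, the conditional branch uses
 * J1 (insn[13]) and J2 (insn[11]) directly as offset[18] and
 * offset[19]; no XOR against the sign bit is involved. */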
7769 /* jump to the offset */
7770 gen_jmp(s, s->pc + offset);
7772 } else {
7773 /* Data processing immediate. */
7774 if (insn & (1 << 25)) {
7775 if (insn & (1 << 24)) {
7776 if (insn & (1 << 20))
7777 goto illegal_op;
7778 /* Bitfield/Saturate. */
7779 op = (insn >> 21) & 7;
7780 imm = insn & 0x1f;
7781 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7782 if (rn == 15) {
7783 tmp = new_tmp();
7784 tcg_gen_movi_i32(tmp, 0);
7785 } else {
7786 tmp = load_reg(s, rn);
7788 switch (op) {
7789 case 2: /* Signed bitfield extract. */
7790 imm++;
7791 if (shift + imm > 32)
7792 goto illegal_op;
7793 if (imm < 32)
7794 gen_sbfx(tmp, shift, imm);
7795 break;
7796 case 6: /* Unsigned bitfield extract. */
7797 imm++;
7798 if (shift + imm > 32)
7799 goto illegal_op;
7800 if (imm < 32)
7801 gen_ubfx(tmp, shift, (1u << imm) - 1);
7802 break;
7803 case 3: /* Bitfield insert/clear. */
7804 if (imm < shift)
7805 goto illegal_op;
7806 imm = imm + 1 - shift;
7807 if (imm != 32) {
7808 tmp2 = load_reg(s, rd);
7809 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7810 dead_tmp(tmp2);
7812 break;
7813 case 7:
7814 goto illegal_op;
7815 default: /* Saturate. */
7816 if (shift) {
7817 if (op & 1)
7818 tcg_gen_sari_i32(tmp, tmp, shift);
7819 else
7820 tcg_gen_shli_i32(tmp, tmp, shift);
7822 tmp2 = tcg_const_i32(imm);
7823 if (op & 4) {
7824 /* Unsigned. */
7825 if ((op & 1) && shift == 0)
7826 gen_helper_usat16(tmp, tmp, tmp2);
7827 else
7828 gen_helper_usat(tmp, tmp, tmp2);
7829 } else {
7830 /* Signed. */
7831 if ((op & 1) && shift == 0)
7832 gen_helper_ssat16(tmp, tmp, tmp2);
7833 else
7834 gen_helper_ssat(tmp, tmp, tmp2);
7836 break;
7838 store_reg(s, rd, tmp);
7839 } else {
7840 imm = ((insn & 0x04000000) >> 15)
7841 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7842 if (insn & (1 << 22)) {
7843 /* 16-bit immediate. */
7844 imm |= (insn >> 4) & 0xf000;
7845 if (insn & (1 << 23)) {
7846 /* movt */
7847 tmp = load_reg(s, rd);
7848 tcg_gen_ext16u_i32(tmp, tmp);
7849 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7850 } else {
7851 /* movw */
7852 tmp = new_tmp();
7853 tcg_gen_movi_i32(tmp, imm);
7855 } else {
7856 /* Add/sub 12-bit immediate. */
7857 if (rn == 15) {
7858 offset = s->pc & ~(uint32_t)3;
7859 if (insn & (1 << 23))
7860 offset -= imm;
7861 else
7862 offset += imm;
7863 tmp = new_tmp();
7864 tcg_gen_movi_i32(tmp, offset);
7865 } else {
7866 tmp = load_reg(s, rn);
7867 if (insn & (1 << 23))
7868 tcg_gen_subi_i32(tmp, tmp, imm);
7869 else
7870 tcg_gen_addi_i32(tmp, tmp, imm);
7873 store_reg(s, rd, tmp);
7875 } else {
7876 int shifter_out = 0;
7877 /* modified 12-bit immediate. */
7878 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7879 imm = (insn & 0xff);
7880 switch (shift) {
7881 case 0: /* XY */
7882 /* Nothing to do. */
7883 break;
7884 case 1: /* 00XY00XY */
7885 imm |= imm << 16;
7886 break;
7887 case 2: /* XY00XY00 */
7888 imm |= imm << 16;
7889 imm <<= 8;
7890 break;
7891 case 3: /* XYXYXYXY */
7892 imm |= imm << 16;
7893 imm |= imm << 8;
7894 break;
7895 default: /* Rotated constant. */
7896 shift = (shift << 1) | (imm >> 7);
7897 imm |= 0x80;
7898 imm = imm << (32 - shift);
7899 shifter_out = 1;
7900 break;
7901 }
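/* Modified immediate examples for imm8 = 0xab: shift 0 gives
 * 0x000000ab, shift 1 gives 0x00ab00ab, shift 2 gives 0xab00ab00,
 * shift 3 gives 0xabababab.  Larger encodings place 0x80 | imm[6:0]
 * rotated right by shift (8..31), done here as a left shift by
 * 32 - shift. */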
7902 gen_op_movl_T1_im(imm);
7903 rn = (insn >> 16) & 0xf;
7904 if (rn == 15)
7905 gen_op_movl_T0_im(0);
7906 else
7907 gen_movl_T0_reg(s, rn);
7908 op = (insn >> 21) & 0xf;
7909 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7910 shifter_out))
7911 goto illegal_op;
7912 rd = (insn >> 8) & 0xf;
7913 if (rd != 15) {
7914 gen_movl_reg_T0(s, rd);
7918 break;
7919 case 12: /* Load/store single data item. */
7921 int postinc = 0;
7922 int writeback = 0;
7923 int user;
7924 if ((insn & 0x01100000) == 0x01000000) {
7925 if (disas_neon_ls_insn(env, s, insn))
7926 goto illegal_op;
7927 break;
7929 user = IS_USER(s);
7930 if (rn == 15) {
7931 addr = new_tmp();
7932 /* PC relative. */
7933 /* s->pc has already been incremented by 4. */
7934 imm = s->pc & 0xfffffffc;
7935 if (insn & (1 << 23))
7936 imm += insn & 0xfff;
7937 else
7938 imm -= insn & 0xfff;
7939 tcg_gen_movi_i32(addr, imm);
7940 } else {
7941 addr = load_reg(s, rn);
7942 if (insn & (1 << 23)) {
7943 /* Positive offset. */
7944 imm = insn & 0xfff;
7945 tcg_gen_addi_i32(addr, addr, imm);
7946 } else {
7947 op = (insn >> 8) & 7;
7948 imm = insn & 0xff;
7949 switch (op) {
7950 case 0: case 8: /* Shifted Register. */
7951 shift = (insn >> 4) & 0xf;
7952 if (shift > 3)
7953 goto illegal_op;
7954 tmp = load_reg(s, rm);
7955 if (shift)
7956 tcg_gen_shli_i32(tmp, tmp, shift);
7957 tcg_gen_add_i32(addr, addr, tmp);
7958 dead_tmp(tmp);
7959 break;
7960 case 4: /* Negative offset. */
7961 tcg_gen_addi_i32(addr, addr, -imm);
7962 break;
7963 case 6: /* User privilege. */
7964 tcg_gen_addi_i32(addr, addr, imm);
7965 user = 1;
7966 break;
7967 case 1: /* Post-decrement. */
7968 imm = -imm;
7969 /* Fall through. */
7970 case 3: /* Post-increment. */
7971 postinc = 1;
7972 writeback = 1;
7973 break;
7974 case 5: /* Pre-decrement. */
7975 imm = -imm;
7976 /* Fall through. */
7977 case 7: /* Pre-increment. */
7978 tcg_gen_addi_i32(addr, addr, imm);
7979 writeback = 1;
7980 break;
7981 default:
7982 goto illegal_op;
7986 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
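/* op now encodes size and signedness: 0 byte, 1 halfword, 2 word,
 * 4 signed byte, 5 signed halfword, matching the gen_ld / gen_st
 * cases below. */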
7987 if (insn & (1 << 20)) {
7988 /* Load. */
7989 if (rs == 15 && op != 2) {
7990 if (op & 2)
7991 goto illegal_op;
7992 /* Memory hint. Implemented as NOP. */
7993 } else {
7994 switch (op) {
7995 case 0: tmp = gen_ld8u(addr, user); break;
7996 case 4: tmp = gen_ld8s(addr, user); break;
7997 case 1: tmp = gen_ld16u(addr, user); break;
7998 case 5: tmp = gen_ld16s(addr, user); break;
7999 case 2: tmp = gen_ld32(addr, user); break;
8000 default: goto illegal_op;
8002 if (rs == 15) {
8003 gen_bx(s, tmp);
8004 } else {
8005 store_reg(s, rs, tmp);
8008 } else {
8009 /* Store. */
8010 if (rs == 15)
8011 goto illegal_op;
8012 tmp = load_reg(s, rs);
8013 switch (op) {
8014 case 0: gen_st8(tmp, addr, user); break;
8015 case 1: gen_st16(tmp, addr, user); break;
8016 case 2: gen_st32(tmp, addr, user); break;
8017 default: goto illegal_op;
8020 if (postinc)
8021 tcg_gen_addi_i32(addr, addr, imm);
8022 if (writeback) {
8023 store_reg(s, rn, addr);
8024 } else {
8025 dead_tmp(addr);
8028 break;
8029 default:
8030 goto illegal_op;
8032 return 0;
8033 illegal_op:
8034 return 1;
8035 }
8037 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8038 {
8039 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8040 int32_t offset;
8041 int i;
8042 TCGv tmp;
8043 TCGv tmp2;
8044 TCGv addr;
8046 if (s->condexec_mask) {
8047 cond = s->condexec_cond;
8048 s->condlabel = gen_new_label();
8049 gen_test_cc(cond ^ 1, s->condlabel);
8050 s->condjmp = 1;
8053 insn = lduw_code(s->pc);
8054 s->pc += 2;
8056 switch (insn >> 12) {
8057 case 0: case 1:
8058 rd = insn & 7;
8059 op = (insn >> 11) & 3;
8060 if (op == 3) {
8061 /* add/subtract */
8062 rn = (insn >> 3) & 7;
8063 gen_movl_T0_reg(s, rn);
8064 if (insn & (1 << 10)) {
8065 /* immediate */
8066 gen_op_movl_T1_im((insn >> 6) & 7);
8067 } else {
8068 /* reg */
8069 rm = (insn >> 6) & 7;
8070 gen_movl_T1_reg(s, rm);
8072 if (insn & (1 << 9)) {
8073 if (s->condexec_mask)
8074 gen_op_subl_T0_T1();
8075 else
8076 gen_op_subl_T0_T1_cc();
8077 } else {
8078 if (s->condexec_mask)
8079 gen_op_addl_T0_T1();
8080 else
8081 gen_op_addl_T0_T1_cc();
8083 gen_movl_reg_T0(s, rd);
8084 } else {
8085 /* shift immediate */
8086 rm = (insn >> 3) & 7;
8087 shift = (insn >> 6) & 0x1f;
8088 tmp = load_reg(s, rm);
8089 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8090 if (!s->condexec_mask)
8091 gen_logic_CC(tmp);
8092 store_reg(s, rd, tmp);
8094 break;
8095 case 2: case 3:
8096 /* arithmetic large immediate */
8097 op = (insn >> 11) & 3;
8098 rd = (insn >> 8) & 0x7;
8099 if (op == 0) {
8100 gen_op_movl_T0_im(insn & 0xff);
8101 } else {
8102 gen_movl_T0_reg(s, rd);
8103 gen_op_movl_T1_im(insn & 0xff);
8105 switch (op) {
8106 case 0: /* mov */
8107 if (!s->condexec_mask)
8108 gen_op_logic_T0_cc();
8109 break;
8110 case 1: /* cmp */
8111 gen_op_subl_T0_T1_cc();
8112 break;
8113 case 2: /* add */
8114 if (s->condexec_mask)
8115 gen_op_addl_T0_T1();
8116 else
8117 gen_op_addl_T0_T1_cc();
8118 break;
8119 case 3: /* sub */
8120 if (s->condexec_mask)
8121 gen_op_subl_T0_T1();
8122 else
8123 gen_op_subl_T0_T1_cc();
8124 break;
8126 if (op != 1)
8127 gen_movl_reg_T0(s, rd);
8128 break;
8129 case 4:
8130 if (insn & (1 << 11)) {
8131 rd = (insn >> 8) & 7;
8132 /* load pc-relative. Bit 1 of PC is ignored. */
8133 val = s->pc + 2 + ((insn & 0xff) * 4);
8134 val &= ~(uint32_t)2;
8135 addr = new_tmp();
8136 tcg_gen_movi_i32(addr, val);
8137 tmp = gen_ld32(addr, IS_USER(s));
8138 dead_tmp(addr);
8139 store_reg(s, rd, tmp);
8140 break;
8142 if (insn & (1 << 10)) {
8143 /* data processing extended or blx */
8144 rd = (insn & 7) | ((insn >> 4) & 8);
8145 rm = (insn >> 3) & 0xf;
8146 op = (insn >> 8) & 3;
8147 switch (op) {
8148 case 0: /* add */
8149 gen_movl_T0_reg(s, rd);
8150 gen_movl_T1_reg(s, rm);
8151 gen_op_addl_T0_T1();
8152 gen_movl_reg_T0(s, rd);
8153 break;
8154 case 1: /* cmp */
8155 gen_movl_T0_reg(s, rd);
8156 gen_movl_T1_reg(s, rm);
8157 gen_op_subl_T0_T1_cc();
8158 break;
8159 case 2: /* mov/cpy */
8160 gen_movl_T0_reg(s, rm);
8161 gen_movl_reg_T0(s, rd);
8162 break;
8163 case 3:/* branch [and link] exchange thumb register */
8164 tmp = load_reg(s, rm);
8165 if (insn & (1 << 7)) {
8166 val = (uint32_t)s->pc | 1;
8167 tmp2 = new_tmp();
8168 tcg_gen_movi_i32(tmp2, val);
8169 store_reg(s, 14, tmp2);
8171 gen_bx(s, tmp);
8172 break;
8174 break;
8177 /* data processing register */
8178 rd = insn & 7;
8179 rm = (insn >> 3) & 7;
8180 op = (insn >> 6) & 0xf;
8181 if (op == 2 || op == 3 || op == 4 || op == 7) {
8182 /* the shift/rotate ops want the operands backwards */
8183 val = rm;
8184 rm = rd;
8185 rd = val;
8186 val = 1;
8187 } else {
8188 val = 0;
8191 if (op == 9) /* neg */
8192 gen_op_movl_T0_im(0);
8193 else if (op != 0xf) /* mvn doesn't read its first operand */
8194 gen_movl_T0_reg(s, rd);
8196 gen_movl_T1_reg(s, rm);
8197 switch (op) {
8198 case 0x0: /* and */
8199 gen_op_andl_T0_T1();
8200 if (!s->condexec_mask)
8201 gen_op_logic_T0_cc();
8202 break;
8203 case 0x1: /* eor */
8204 gen_op_xorl_T0_T1();
8205 if (!s->condexec_mask)
8206 gen_op_logic_T0_cc();
8207 break;
8208 case 0x2: /* lsl */
8209 if (s->condexec_mask) {
8210 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8211 } else {
8212 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8213 gen_op_logic_T1_cc();
8215 break;
8216 case 0x3: /* lsr */
8217 if (s->condexec_mask) {
8218 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8219 } else {
8220 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8221 gen_op_logic_T1_cc();
8223 break;
8224 case 0x4: /* asr */
8225 if (s->condexec_mask) {
8226 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8227 } else {
8228 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8229 gen_op_logic_T1_cc();
8231 break;
8232 case 0x5: /* adc */
8233 if (s->condexec_mask)
8234 gen_adc_T0_T1();
8235 else
8236 gen_op_adcl_T0_T1_cc();
8237 break;
8238 case 0x6: /* sbc */
8239 if (s->condexec_mask)
8240 gen_sbc_T0_T1();
8241 else
8242 gen_op_sbcl_T0_T1_cc();
8243 break;
8244 case 0x7: /* ror */
8245 if (s->condexec_mask) {
8246 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8247 } else {
8248 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8249 gen_op_logic_T1_cc();
8251 break;
8252 case 0x8: /* tst */
8253 gen_op_andl_T0_T1();
8254 gen_op_logic_T0_cc();
8255 rd = 16;
8256 break;
8257 case 0x9: /* neg */
8258 if (s->condexec_mask)
8259 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8260 else
8261 gen_op_subl_T0_T1_cc();
8262 break;
8263 case 0xa: /* cmp */
8264 gen_op_subl_T0_T1_cc();
8265 rd = 16;
8266 break;
8267 case 0xb: /* cmn */
8268 gen_op_addl_T0_T1_cc();
8269 rd = 16;
8270 break;
8271 case 0xc: /* orr */
8272 gen_op_orl_T0_T1();
8273 if (!s->condexec_mask)
8274 gen_op_logic_T0_cc();
8275 break;
8276 case 0xd: /* mul */
8277 gen_op_mull_T0_T1();
8278 if (!s->condexec_mask)
8279 gen_op_logic_T0_cc();
8280 break;
8281 case 0xe: /* bic */
8282 gen_op_bicl_T0_T1();
8283 if (!s->condexec_mask)
8284 gen_op_logic_T0_cc();
8285 break;
8286 case 0xf: /* mvn */
8287 gen_op_notl_T1();
8288 if (!s->condexec_mask)
8289 gen_op_logic_T1_cc();
8290 val = 1;
8291 rm = rd;
8292 break;
8294 if (rd != 16) {
8295 if (val)
8296 gen_movl_reg_T1(s, rm);
8297 else
8298 gen_movl_reg_T0(s, rd);
8300 break;
8302 case 5:
8303 /* load/store register offset. */
8304 rd = insn & 7;
8305 rn = (insn >> 3) & 7;
8306 rm = (insn >> 6) & 7;
8307 op = (insn >> 9) & 7;
8308 addr = load_reg(s, rn);
8309 tmp = load_reg(s, rm);
8310 tcg_gen_add_i32(addr, addr, tmp);
8311 dead_tmp(tmp);
8313 if (op < 3) /* store */
8314 tmp = load_reg(s, rd);
8316 switch (op) {
8317 case 0: /* str */
8318 gen_st32(tmp, addr, IS_USER(s));
8319 break;
8320 case 1: /* strh */
8321 gen_st16(tmp, addr, IS_USER(s));
8322 break;
8323 case 2: /* strb */
8324 gen_st8(tmp, addr, IS_USER(s));
8325 break;
8326 case 3: /* ldrsb */
8327 tmp = gen_ld8s(addr, IS_USER(s));
8328 break;
8329 case 4: /* ldr */
8330 tmp = gen_ld32(addr, IS_USER(s));
8331 break;
8332 case 5: /* ldrh */
8333 tmp = gen_ld16u(addr, IS_USER(s));
8334 break;
8335 case 6: /* ldrb */
8336 tmp = gen_ld8u(addr, IS_USER(s));
8337 break;
8338 case 7: /* ldrsh */
8339 tmp = gen_ld16s(addr, IS_USER(s));
8340 break;
8342 if (op >= 3) /* load */
8343 store_reg(s, rd, tmp);
8344 dead_tmp(addr);
8345 break;
8347 case 6:
8348 /* load/store word immediate offset */
8349 rd = insn & 7;
8350 rn = (insn >> 3) & 7;
8351 addr = load_reg(s, rn);
8352 val = (insn >> 4) & 0x7c;
8353 tcg_gen_addi_i32(addr, addr, val);
8355 if (insn & (1 << 11)) {
8356 /* load */
8357 tmp = gen_ld32(addr, IS_USER(s));
8358 store_reg(s, rd, tmp);
8359 } else {
8360 /* store */
8361 tmp = load_reg(s, rd);
8362 gen_st32(tmp, addr, IS_USER(s));
8364 dead_tmp(addr);
8365 break;
8367 case 7:
8368 /* load/store byte immediate offset */
8369 rd = insn & 7;
8370 rn = (insn >> 3) & 7;
8371 addr = load_reg(s, rn);
8372 val = (insn >> 6) & 0x1f;
8373 tcg_gen_addi_i32(addr, addr, val);
8375 if (insn & (1 << 11)) {
8376 /* load */
8377 tmp = gen_ld8u(addr, IS_USER(s));
8378 store_reg(s, rd, tmp);
8379 } else {
8380 /* store */
8381 tmp = load_reg(s, rd);
8382 gen_st8(tmp, addr, IS_USER(s));
8384 dead_tmp(addr);
8385 break;
8387 case 8:
8388 /* load/store halfword immediate offset */
8389 rd = insn & 7;
8390 rn = (insn >> 3) & 7;
8391 addr = load_reg(s, rn);
8392 val = (insn >> 5) & 0x3e;
8393 tcg_gen_addi_i32(addr, addr, val);
8395 if (insn & (1 << 11)) {
8396 /* load */
8397 tmp = gen_ld16u(addr, IS_USER(s));
8398 store_reg(s, rd, tmp);
8399 } else {
8400 /* store */
8401 tmp = load_reg(s, rd);
8402 gen_st16(tmp, addr, IS_USER(s));
8404 dead_tmp(addr);
8405 break;
8407 case 9:
8408 /* load/store from stack */
8409 rd = (insn >> 8) & 7;
8410 addr = load_reg(s, 13);
8411 val = (insn & 0xff) * 4;
8412 tcg_gen_addi_i32(addr, addr, val);
8414 if (insn & (1 << 11)) {
8415 /* load */
8416 tmp = gen_ld32(addr, IS_USER(s));
8417 store_reg(s, rd, tmp);
8418 } else {
8419 /* store */
8420 tmp = load_reg(s, rd);
8421 gen_st32(tmp, addr, IS_USER(s));
8423 dead_tmp(addr);
8424 break;
8426 case 10:
8427 /* add to high reg */
8428 rd = (insn >> 8) & 7;
8429 if (insn & (1 << 11)) {
8430 /* SP */
8431 tmp = load_reg(s, 13);
8432 } else {
8433 /* PC. bit 1 is ignored. */
8434 tmp = new_tmp();
8435 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8437 val = (insn & 0xff) * 4;
8438 tcg_gen_addi_i32(tmp, tmp, val);
8439 store_reg(s, rd, tmp);
8440 break;
8442 case 11:
8443 /* misc */
8444 op = (insn >> 8) & 0xf;
8445 switch (op) {
8446 case 0:
8447 /* adjust stack pointer */
8448 tmp = load_reg(s, 13);
8449 val = (insn & 0x7f) * 4;
8450 if (insn & (1 << 7))
8451 val = -(int32_t)val;
8452 tcg_gen_addi_i32(tmp, tmp, val);
8453 store_reg(s, 13, tmp);
8454 break;
8456 case 2: /* sign/zero extend. */
8457 ARCH(6);
8458 rd = insn & 7;
8459 rm = (insn >> 3) & 7;
8460 tmp = load_reg(s, rm);
8461 switch ((insn >> 6) & 3) {
8462 case 0: gen_sxth(tmp); break;
8463 case 1: gen_sxtb(tmp); break;
8464 case 2: gen_uxth(tmp); break;
8465 case 3: gen_uxtb(tmp); break;
8467 store_reg(s, rd, tmp);
8468 break;
8469 case 4: case 5: case 0xc: case 0xd:
8470 /* push/pop */
8471 addr = load_reg(s, 13);
8472 if (insn & (1 << 8))
8473 offset = 4;
8474 else
8475 offset = 0;
8476 for (i = 0; i < 8; i++) {
8477 if (insn & (1 << i))
8478 offset += 4;
8480 if ((insn & (1 << 11)) == 0) {
8481 tcg_gen_addi_i32(addr, addr, -offset);
8483 for (i = 0; i < 8; i++) {
8484 if (insn & (1 << i)) {
8485 if (insn & (1 << 11)) {
8486 /* pop */
8487 tmp = gen_ld32(addr, IS_USER(s));
8488 store_reg(s, i, tmp);
8489 } else {
8490 /* push */
8491 tmp = load_reg(s, i);
8492 gen_st32(tmp, addr, IS_USER(s));
8494 /* advance to the next address. */
8495 tcg_gen_addi_i32(addr, addr, 4);
8498 TCGV_UNUSED(tmp);
8499 if (insn & (1 << 8)) {
8500 if (insn & (1 << 11)) {
8501 /* pop pc */
8502 tmp = gen_ld32(addr, IS_USER(s));
8503 /* don't set the pc until the rest of the instruction
8504 has completed */
8505 } else {
8506 /* push lr */
8507 tmp = load_reg(s, 14);
8508 gen_st32(tmp, addr, IS_USER(s));
8510 tcg_gen_addi_i32(addr, addr, 4);
8512 if ((insn & (1 << 11)) == 0) {
8513 tcg_gen_addi_i32(addr, addr, -offset);
8515 /* write back the new stack pointer */
8516 store_reg(s, 13, addr);
8517 /* set the new PC value */
8518 if ((insn & 0x0900) == 0x0900)
8519 gen_bx(s, tmp);
8520 break;
8522 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8523 rm = insn & 7;
8524 tmp = load_reg(s, rm);
8525 s->condlabel = gen_new_label();
8526 s->condjmp = 1;
8527 if (insn & (1 << 11))
8528 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8529 else
8530 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8531 dead_tmp(tmp);
8532 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
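/* cbz/cbnz offset assembly: imm5 (insn[7:3]) becomes offset[5:1]
 * and the i bit (insn[9]) becomes offset[6]; val below is s->pc + 2,
 * i.e. the architectural PC of the cbz instruction plus 4. */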
8533 val = (uint32_t)s->pc + 2;
8534 val += offset;
8535 gen_jmp(s, val);
8536 break;
8538 case 15: /* IT, nop-hint. */
8539 if ((insn & 0xf) == 0) {
8540 gen_nop_hint(s, (insn >> 4) & 0xf);
8541 break;
8543 /* If Then. */
8544 s->condexec_cond = (insn >> 4) & 0xe;
8545 s->condexec_mask = insn & 0x1f;
8546 /* No actual code generated for this insn, just setup state. */
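/* Example: "itte eq" carries cond=eq in insn[7:4] and a 4-bit mask
 * in insn[3:0]; each following insn consumes one mask bit (then
 * vs. else).  The per-insn advance happens in
 * gen_intermediate_code_internal. */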
8547 break;
8549 case 0xe: /* bkpt */
8550 gen_set_condexec(s);
8551 gen_set_pc_im(s->pc - 2);
8552 gen_exception(EXCP_BKPT);
8553 s->is_jmp = DISAS_JUMP;
8554 break;
8556 case 0xa: /* rev */
8557 ARCH(6);
8558 rn = (insn >> 3) & 0x7;
8559 rd = insn & 0x7;
8560 tmp = load_reg(s, rn);
8561 switch ((insn >> 6) & 3) {
8562 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8563 case 1: gen_rev16(tmp); break;
8564 case 3: gen_revsh(tmp); break;
8565 default: goto illegal_op;
8567 store_reg(s, rd, tmp);
8568 break;
8570 case 6: /* cps */
8571 ARCH(6);
8572 if (IS_USER(s))
8573 break;
8574 if (IS_M(env)) {
8575 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8576 /* PRIMASK */
8577 if (insn & 1) {
8578 addr = tcg_const_i32(16);
8579 gen_helper_v7m_msr(cpu_env, addr, tmp);
8581 /* FAULTMASK */
8582 if (insn & 2) {
8583 addr = tcg_const_i32(17);
8584 gen_helper_v7m_msr(cpu_env, addr, tmp);
8586 gen_lookup_tb(s);
8587 } else {
8588 if (insn & (1 << 4))
8589 shift = CPSR_A | CPSR_I | CPSR_F;
8590 else
8591 shift = 0;
8593 val = ((insn & 7) << 6) & shift;
8594 gen_op_movl_T0_im(val);
8595 gen_set_psr_T0(s, shift, 0);
8597 break;
8599 default:
8600 goto undef;
8602 break;
8604 case 12:
8605 /* load/store multiple */
8606 rn = (insn >> 8) & 0x7;
8607 addr = load_reg(s, rn);
8608 for (i = 0; i < 8; i++) {
8609 if (insn & (1 << i)) {
8610 if (insn & (1 << 11)) {
8611 /* load */
8612 tmp = gen_ld32(addr, IS_USER(s));
8613 store_reg(s, i, tmp);
8614 } else {
8615 /* store */
8616 tmp = load_reg(s, i);
8617 gen_st32(tmp, addr, IS_USER(s));
8619 /* advance to the next address */
8620 tcg_gen_addi_i32(addr, addr, 4);
8623 /* Base register writeback. */
8624 if ((insn & (1 << rn)) == 0) {
8625 store_reg(s, rn, addr);
8626 } else {
8627 dead_tmp(addr);
8629 break;
8631 case 13:
8632 /* conditional branch or swi */
8633 cond = (insn >> 8) & 0xf;
8634 if (cond == 0xe)
8635 goto undef;
8637 if (cond == 0xf) {
8638 /* swi */
8639 gen_set_condexec(s);
8640 gen_set_pc_im(s->pc);
8641 s->is_jmp = DISAS_SWI;
8642 break;
8644 /* generate a conditional jump to next instruction */
8645 s->condlabel = gen_new_label();
8646 gen_test_cc(cond ^ 1, s->condlabel);
8647 s->condjmp = 1;
8648 gen_movl_T1_reg(s, 15);
8650 /* jump to the offset */
8651 val = (uint32_t)s->pc + 2;
8652 offset = ((int32_t)insn << 24) >> 24;
8653 val += offset << 1;
8654 gen_jmp(s, val);
8655 break;
8657 case 14:
8658 if (insn & (1 << 11)) {
8659 if (disas_thumb2_insn(env, s, insn))
8660 goto undef32;
8661 break;
8663 /* unconditional branch */
8664 val = (uint32_t)s->pc;
8665 offset = ((int32_t)insn << 21) >> 21;
8666 val += (offset << 1) + 2;
8667 gen_jmp(s, val);
8668 break;
8670 case 15:
8671 if (disas_thumb2_insn(env, s, insn))
8672 goto undef32;
8673 break;
8675 return;
8676 undef32:
8677 gen_set_condexec(s);
8678 gen_set_pc_im(s->pc - 4);
8679 gen_exception(EXCP_UDEF);
8680 s->is_jmp = DISAS_JUMP;
8681 return;
8682 illegal_op:
8683 undef:
8684 gen_set_condexec(s);
8685 gen_set_pc_im(s->pc - 2);
8686 gen_exception(EXCP_UDEF);
8687 s->is_jmp = DISAS_JUMP;
8688 }
8690 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8691 basic block 'tb'. If search_pc is TRUE, also generate PC
8692 information for each intermediate instruction. */
8693 static inline void gen_intermediate_code_internal(CPUState *env,
8694 TranslationBlock *tb,
8695 int search_pc)
8696 {
8697 DisasContext dc1, *dc = &dc1;
8698 CPUBreakpoint *bp;
8699 uint16_t *gen_opc_end;
8700 int j, lj;
8701 target_ulong pc_start;
8702 uint32_t next_page_start;
8703 int num_insns;
8704 int max_insns;
8706 /* generate intermediate code */
8707 num_temps = 0;
8708 memset(temps, 0, sizeof(temps));
8710 pc_start = tb->pc;
8712 dc->tb = tb;
8714 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8716 dc->is_jmp = DISAS_NEXT;
8717 dc->pc = pc_start;
8718 dc->singlestep_enabled = env->singlestep_enabled;
8719 dc->condjmp = 0;
8720 dc->thumb = env->thumb;
8721 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8722 dc->condexec_cond = env->condexec_bits >> 4;
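/* A rough note on the packing: env->condexec_bits keeps the IT
 * condition in its high nibble and the mask in its low nibble; the
 * mask is held pre-shifted by one in the DisasContext so the
 * per-insn advance below can just keep shifting it. */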
8723 #if !defined(CONFIG_USER_ONLY)
8724 if (IS_M(env)) {
8725 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8726 } else {
8727 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8729 #endif
8730 cpu_F0s = tcg_temp_new_i32();
8731 cpu_F1s = tcg_temp_new_i32();
8732 cpu_F0d = tcg_temp_new_i64();
8733 cpu_F1d = tcg_temp_new_i64();
8734 cpu_V0 = cpu_F0d;
8735 cpu_V1 = cpu_F1d;
8736 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8737 cpu_M0 = tcg_temp_new_i64();
8738 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8739 lj = -1;
8740 num_insns = 0;
8741 max_insns = tb->cflags & CF_COUNT_MASK;
8742 if (max_insns == 0)
8743 max_insns = CF_COUNT_MASK;
8745 gen_icount_start();
8746 /* Reset the conditional execution bits immediately. This avoids
8747 complications trying to do it at the end of the block. */
8748 if (env->condexec_bits)
8750 TCGv tmp = new_tmp();
8751 tcg_gen_movi_i32(tmp, 0);
8752 store_cpu_field(tmp, condexec_bits);
8754 do {
8755 #ifdef CONFIG_USER_ONLY
8756 /* Intercept jump to the magic kernel page. */
8757 if (dc->pc >= 0xffff0000) {
8758 /* We always get here via a jump, so we know we are not in a
8759 conditional execution block. */
8760 gen_exception(EXCP_KERNEL_TRAP);
8761 dc->is_jmp = DISAS_UPDATE;
8762 break;
8764 #else
8765 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8766 /* We always get here via a jump, so we know we are not in a
8767 conditional execution block. */
8768 gen_exception(EXCP_EXCEPTION_EXIT);
8769 dc->is_jmp = DISAS_UPDATE;
8770 break;
8772 #endif
8774 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8775 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
8776 if (bp->pc == dc->pc) {
8777 gen_set_condexec(dc);
8778 gen_set_pc_im(dc->pc);
8779 gen_exception(EXCP_DEBUG);
8780 dc->is_jmp = DISAS_JUMP;
8781 /* Advance PC so that clearing the breakpoint will
8782 invalidate this TB. */
8783 dc->pc += 2;
8784 goto done_generating;
8785 break;
8789 if (search_pc) {
8790 j = gen_opc_ptr - gen_opc_buf;
8791 if (lj < j) {
8792 lj++;
8793 while (lj < j)
8794 gen_opc_instr_start[lj++] = 0;
8796 gen_opc_pc[lj] = dc->pc;
8797 gen_opc_instr_start[lj] = 1;
8798 gen_opc_icount[lj] = num_insns;
8801 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8802 gen_io_start();
8804 if (env->thumb) {
8805 disas_thumb_insn(env, dc);
8806 if (dc->condexec_mask) {
8807 dc->condexec_cond = (dc->condexec_cond & 0xe)
8808 | ((dc->condexec_mask >> 4) & 1);
8809 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
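/* IT advance example: bit 4 of the mask supplies the low bit of the
 * condition (then vs. else) before the shift above; once the mask
 * shifts down to zero, the IT block has ended. */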
8810 if (dc->condexec_mask == 0) {
8811 dc->condexec_cond = 0;
8814 } else {
8815 disas_arm_insn(env, dc);
8817 if (num_temps) {
8818 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8819 num_temps = 0;
8822 if (dc->condjmp && !dc->is_jmp) {
8823 gen_set_label(dc->condlabel);
8824 dc->condjmp = 0;
8826 /* Translation stops when a conditional branch is encountered.
8827 * Otherwise the subsequent code could get translated several times.
8828 * Also stop translation when a page boundary is reached. This
8829 * ensures prefetch aborts occur at the right place. */
8830 num_insns ++;
8831 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8832 !env->singlestep_enabled &&
8833 !singlestep &&
8834 dc->pc < next_page_start &&
8835 num_insns < max_insns);
8837 if (tb->cflags & CF_LAST_IO) {
8838 if (dc->condjmp) {
8839 /* FIXME: This can theoretically happen with self-modifying
8840 code. */
8841 cpu_abort(env, "IO on conditional branch instruction");
8843 gen_io_end();
8846 /* At this stage dc->condjmp will only be set when the skipped
8847 instruction was a conditional branch or trap, and the PC has
8848 already been written. */
8849 if (unlikely(env->singlestep_enabled)) {
8850 /* Make sure the pc is updated, and raise a debug exception. */
8851 if (dc->condjmp) {
8852 gen_set_condexec(dc);
8853 if (dc->is_jmp == DISAS_SWI) {
8854 gen_exception(EXCP_SWI);
8855 } else {
8856 gen_exception(EXCP_DEBUG);
8858 gen_set_label(dc->condlabel);
8860 if (dc->condjmp || !dc->is_jmp) {
8861 gen_set_pc_im(dc->pc);
8862 dc->condjmp = 0;
8864 gen_set_condexec(dc);
8865 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8866 gen_exception(EXCP_SWI);
8867 } else {
8868 /* FIXME: Single stepping a WFI insn will not halt
8869 the CPU. */
8870 gen_exception(EXCP_DEBUG);
8872 } else {
8873 /* While branches must always occur at the end of an IT block,
8874 there are a few other things that can cause us to terminate
8875 the TB in the middle of an IT block:
8876 - Exception generating instructions (bkpt, swi, undefined).
8877 - Page boundaries.
8878 - Hardware watchpoints.
8879 Hardware breakpoints have already been handled and skip this code.
8880 */
8881 gen_set_condexec(dc);
8882 switch(dc->is_jmp) {
8883 case DISAS_NEXT:
8884 gen_goto_tb(dc, 1, dc->pc);
8885 break;
8886 default:
8887 case DISAS_JUMP:
8888 case DISAS_UPDATE:
8889 /* indicate that the hash table must be used to find the next TB */
8890 tcg_gen_exit_tb(0);
8891 break;
8892 case DISAS_TB_JUMP:
8893 /* nothing more to generate */
8894 break;
8895 case DISAS_WFI:
8896 gen_helper_wfi();
8897 break;
8898 case DISAS_SWI:
8899 gen_exception(EXCP_SWI);
8900 break;
8902 if (dc->condjmp) {
8903 gen_set_label(dc->condlabel);
8904 gen_set_condexec(dc);
8905 gen_goto_tb(dc, 1, dc->pc);
8906 dc->condjmp = 0;
8910 done_generating:
8911 gen_icount_end(tb, num_insns);
8912 *gen_opc_ptr = INDEX_op_end;
8914 #ifdef DEBUG_DISAS
8915 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8916 qemu_log("----------------\n");
8917 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8918 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8919 qemu_log("\n");
8921 #endif
8922 if (search_pc) {
8923 j = gen_opc_ptr - gen_opc_buf;
8924 lj++;
8925 while (lj <= j)
8926 gen_opc_instr_start[lj++] = 0;
8927 } else {
8928 tb->size = dc->pc - pc_start;
8929 tb->icount = num_insns;
8930 }
8931 }
8933 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8934 {
8935 gen_intermediate_code_internal(env, tb, 0);
8936 }
8938 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8939 {
8940 gen_intermediate_code_internal(env, tb, 1);
8941 }
8943 static const char *cpu_mode_names[16] = {
8944 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8945 "???", "???", "???", "und", "???", "???", "???", "sys"
8948 void cpu_dump_state(CPUState *env, FILE *f,
8949 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8950 int flags)
8951 {
8952 int i;
8953 #if 0
8954 union {
8955 uint32_t i;
8956 float s;
8957 } s0, s1;
8958 CPU_DoubleU d;
8959 /* ??? This assumes float64 and double have the same layout.
8960 Oh well, it's only debug dumps. */
8961 union {
8962 float64 f64;
8963 double d;
8964 } d0;
8965 #endif
8966 uint32_t psr;
8968 for(i=0;i<16;i++) {
8969 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8970 if ((i % 4) == 3)
8971 cpu_fprintf(f, "\n");
8972 else
8973 cpu_fprintf(f, " ");
8975 psr = cpsr_read(env);
8976 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8977 psr,
8978 psr & (1 << 31) ? 'N' : '-',
8979 psr & (1 << 30) ? 'Z' : '-',
8980 psr & (1 << 29) ? 'C' : '-',
8981 psr & (1 << 28) ? 'V' : '-',
8982 psr & CPSR_T ? 'T' : 'A',
8983 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8985 #if 0
8986 for (i = 0; i < 16; i++) {
8987 d.d = env->vfp.regs[i];
8988 s0.i = d.l.lower;
8989 s1.i = d.l.upper;
8990 d0.f64 = d.d;
8991 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8992 i * 2, (int)s0.i, s0.s,
8993 i * 2 + 1, (int)s1.i, s1.s,
8994 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8995 d0.d);
8997 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8998 #endif
8999 }
9001 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9002 unsigned long searched_pc, int pc_pos, void *puc)
9003 {
9004 env->regs[15] = gen_opc_pc[pc_pos];
9005 }