/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;

/* FIXME: These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
}
#define GEN_HELPER 2
#include "helpers.h"

/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function.  */
#define MAX_TEMPS 8
static int num_temps;
static TCGv temps[MAX_TEMPS];

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}

/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])

/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME: Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME: Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

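/* Set the shifter carry-out: CF = bit `shift' of var (callers pass the
   index of the last bit shifted out).  */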
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

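/* Generate a branch to `label' taken when ARM condition code `cc' holds,
   testing the NZCV flag values cached in CPUState.  */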
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

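/* Nonzero entries mark the data-processing ops whose flags are set via
   gen_logic_CC (the logical ops); arithmetic ops use dedicated CC helpers.  */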
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}

/* TODO: This should be removed.  Use gen_bx instead.  */
static inline void gen_bx_T0(DisasContext *s)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_T[0]);
    gen_bx(s, tmp);
}

static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    store_cpu_field(tmp, regs[15]);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}

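/* Add the addressing-mode-2 offset of a load/store (12-bit immediate or
   immediate-shifted register) to `var', honouring the U (add/subtract) bit.  */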
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

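/* As above, but for addressing mode 3 (halfword/doubleword transfers):
   a split 8-bit immediate or a plain register offset, plus `extra'.  */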
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}

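/* VFP register offsets: a double-precision register is an element of
   vfp.regs[]; a single-precision register is the lower or upper word of
   vfp.regs[reg >> 1], selected by the low bit of reg.  */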
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* FIXME: Remove these.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}

static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}

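/* Compute the address for an iwMMXt load/store, leaving it in T1 and
   writing back the base register for pre/post-indexed forms.  Returns
   nonzero for an invalid addressing mode.  */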
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(-offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

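/* Load an iwMMXt shift amount into T0, either from a wCGR control
   register or from the low half of a wRn register, then mask it.  */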
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8))
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    else
        gen_iwmmxt_movl_T0_T1_wRn(rd);

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else { /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_reg_T0(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
            }
            break;
        case 2:
            gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_set_nzcv(cpu_T[1]);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f))
                return 1;
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f))
                return 1;
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 rd1 = (insn >> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
2201 switch ((insn >> 22) & 3) {
2202 case 0:
2203 if (insn & (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2205 else
2206 gen_op_iwmmxt_minub_M0_wRn(rd1);
2207 break;
2208 case 1:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2211 else
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2213 break;
2214 case 2:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2217 else
2218 gen_op_iwmmxt_minul_M0_wRn(rd1);
2219 break;
2220 case 3:
2221 return 1;
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 break;
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 switch ((insn >> 22) & 3) {
2233 case 0:
2234 if (insn & (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2236 else
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2238 break;
2239 case 1:
2240 if (insn & (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2242 else
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2244 break;
2245 case 2:
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2248 else
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2250 break;
2251 case 3:
2252 return 1;
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 break;
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 rd1 = (insn >> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
2263 gen_op_movl_T0_im((insn >> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd);
2266 gen_op_iwmmxt_set_mup();
2267 break;
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 20) & 0xf) {
2277 case 0x0:
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2279 break;
2280 case 0x1:
2281 gen_op_iwmmxt_subub_M0_wRn(rd1);
2282 break;
2283 case 0x3:
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2285 break;
2286 case 0x4:
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2288 break;
2289 case 0x5:
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2291 break;
2292 case 0x7:
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2294 break;
2295 case 0x8:
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2297 break;
2298 case 0x9:
2299 gen_op_iwmmxt_subul_M0_wRn(rd1);
2300 break;
2301 case 0xb:
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2303 break;
2304 default:
2305 return 1;
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2310 break;
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2319 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2333 case 0x0:
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2335 break;
2336 case 0x1:
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2338 break;
2339 case 0x3:
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2341 break;
2342 case 0x4:
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2344 break;
2345 case 0x5:
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2347 break;
2348 case 0x7:
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2350 break;
2351 case 0x8:
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2353 break;
2354 case 0x9:
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2356 break;
2357 case 0xb:
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2359 break;
2360 default:
2361 return 1;
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 if (!(insn & (1 << 20)))
2376 return 1;
2377 switch ((insn >> 22) & 3) {
2378 case 0:
2379 return 1;
2380 case 1:
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2383 else
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2385 break;
2386 case 2:
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2389 else
2390 gen_op_iwmmxt_packul_M0_wRn(rd1);
2391 break;
2392 case 3:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2395 else
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2397 break;
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2402 break;
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd = (insn >> 5) & 0xf;
2408 rd0 = (insn >> 12) & 0xf;
2409 rd1 = (insn >> 0) & 0xf;
2410 if (rd0 == 0xf || rd1 == 0xf)
2411 return 1;
2412 gen_op_iwmmxt_movq_M0_wRn(wrd);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_movl_T0_reg(s, rd0);
2416 gen_movl_T1_reg(s, rd1);
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2418 break;
2419 case 0x8: /* TMIAPH */
2420 gen_movl_T0_reg(s, rd0);
2421 gen_movl_T1_reg(s, rd1);
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2423 break;
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
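/* Bits 16 and 17 select the top (set) or bottom (clear) halfword of rd0 and rd1 respectively. */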
2425 gen_movl_T1_reg(s, rd0);
2426 if (insn & (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
2429 gen_movl_T1_reg(s, rd1);
2430 if (insn & (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2433 break;
2434 default:
2435 return 1;
2437 gen_op_iwmmxt_movq_wRn_M0(wrd);
2438 gen_op_iwmmxt_set_mup();
2439 break;
2440 default:
2441 return 1;
2444 return 0;
2447 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2448 (i.e. an undefined instruction). */
2449 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2451 int acc, rd0, rd1, rdhi, rdlo;
2453 if ((insn & 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0 = (insn >> 12) & 0xf;
2456 rd1 = insn & 0xf;
2457 acc = (insn >> 5) & 7;
2459 if (acc != 0)
2460 return 1;
2462 switch ((insn >> 16) & 0xf) {
2463 case 0x0: /* MIA */
2464 gen_movl_T0_reg(s, rd0);
2465 gen_movl_T1_reg(s, rd1);
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2467 break;
2468 case 0x8: /* MIAPH */
2469 gen_movl_T0_reg(s, rd0);
2470 gen_movl_T1_reg(s, rd1);
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2472 break;
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
2477 gen_movl_T1_reg(s, rd0);
2478 if (insn & (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
2481 gen_movl_T1_reg(s, rd1);
2482 if (insn & (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2485 break;
2486 default:
2487 return 1;
2490 gen_op_iwmmxt_movq_wRn_M0(acc);
2491 return 0;
2494 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi = (insn >> 16) & 0xf;
2497 rdlo = (insn >> 12) & 0xf;
2498 acc = insn & 7;
2500 if (acc != 0)
2501 return 1;
2503 if (insn & ARM_CP_RW_BIT) { /* MRA */
2504 gen_iwmmxt_movl_T0_T1_wRn(acc);
2505 gen_movl_reg_T0(s, rdlo);
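/* The XScale accumulator is 40 bits wide, so only bits 39:32 of the high word are architecturally visible in rdhi. */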
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
2508 gen_movl_reg_T0(s, rdhi);
2509 } else { /* MAR */
2510 gen_movl_T0_reg(s, rdlo);
2511 gen_movl_T1_reg(s, rdhi);
2512 gen_iwmmxt_movl_wRn_T0_T1(acc);
2514 return 0;
2517 return 1;
2520 /* Disassemble a system coprocessor instruction. Return nonzero if the
2521 instruction is not defined. */
2522 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2524 TCGv tmp;
2525 uint32_t rd = (insn >> 12) & 0xf;
2526 uint32_t cp = (insn >> 8) & 0xf;
2527 if (IS_USER(s)) {
2528 return 1;
2531 if (insn & ARM_CP_RW_BIT) {
2532 if (!env->cp[cp].cp_read)
2533 return 1;
2534 gen_set_pc_im(s->pc);
2535 tmp = new_tmp();
2536 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2537 store_reg(s, rd, tmp);
2538 } else {
2539 if (!env->cp[cp].cp_write)
2540 return 1;
2541 gen_set_pc_im(s->pc);
2542 tmp = load_reg(s, rd);
2543 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2544 dead_tmp(tmp);
2546 return 0;
2549 static int cp15_user_ok(uint32_t insn)
2551 int cpn = (insn >> 16) & 0xf;
2552 int cpm = insn & 0xf;
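/* Pack opc2 (insn bits 7:5) into bits 2:0 and opc1 (bits 23:21) into bits 5:3. */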
2553 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2555 if (cpn == 13 && cpm == 0) {
2556 /* TLS register. */
2557 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2558 return 1;
2560 if (cpn == 7) {
2561 /* ISB, DSB, DMB. */
2562 if ((cpm == 5 && op == 4)
2563 || (cpm == 10 && (op == 4 || op == 5)))
2564 return 1;
2566 return 0;
2569 /* Disassemble a system coprocessor (cp15) instruction. Return nonzero if the
2570 instruction is not defined. */
2571 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2573 uint32_t rd;
2574 TCGv tmp;
2576 /* M profile cores use memory mapped registers instead of cp15. */
2577 if (arm_feature(env, ARM_FEATURE_M))
2578 return 1;
2580 if ((insn & (1 << 25)) == 0) {
2581 if (insn & (1 << 20)) {
2582 /* mrrc */
2583 return 1;
2585 /* mcrr. Used for block cache operations, so implement as no-op. */
2586 return 0;
2588 if ((insn & (1 << 4)) == 0) {
2589 /* cdp */
2590 return 1;
2592 if (IS_USER(s) && !cp15_user_ok(insn)) {
2593 return 1;
2595 if ((insn & 0x0fff0fff) == 0x0e070f90
2596 || (insn & 0x0fff0fff) == 0x0e070f58) {
2597 /* Wait for interrupt. */
2598 gen_set_pc_im(s->pc);
2599 s->is_jmp = DISAS_WFI;
2600 return 0;
2602 rd = (insn >> 12) & 0xf;
2603 if (insn & ARM_CP_RW_BIT) {
2604 tmp = new_tmp();
2605 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2606 /* If the destination register is r15 then the condition codes are set. */
2607 if (rd != 15)
2608 store_reg(s, rd, tmp);
2609 else
2610 dead_tmp(tmp);
2611 } else {
2612 tmp = load_reg(s, rd);
2613 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2614 dead_tmp(tmp);
2615 /* Normally we would always end the TB here, but Linux
2616 * arch/arm/mach-pxa/sleep.S expects two instructions following
2617 * an MMU enable to execute from cache. Imitate this behaviour. */
2618 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2619 (insn & 0x0fff0fff) != 0x0e010f10)
2620 gen_lookup_tb(s);
2622 return 0;
2625 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2626 #define VFP_SREG(insn, bigbit, smallbit) \
2627 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2628 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2629 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2630 reg = (((insn) >> (bigbit)) & 0x0f) \
2631 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2632 } else { \
2633 if (insn & (1 << (smallbit))) \
2634 return 1; \
2635 reg = ((insn) >> (bigbit)) & 0x0f; \
2636 }} while (0)
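/* A single-precision register number appends the "small" bit as the LSB of the 4-bit field; for double precision the small bit becomes bit 4, which is only valid on VFP3 (earlier VFP has no D16-D31). */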
2638 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2639 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2640 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2641 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2642 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2643 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2645 /* Move between integer and VFP cores. */
2646 static TCGv gen_vfp_mrs(void)
2648 TCGv tmp = new_tmp();
2649 tcg_gen_mov_i32(tmp, cpu_F0s);
2650 return tmp;
2653 static void gen_vfp_msr(TCGv tmp)
2655 tcg_gen_mov_i32(cpu_F0s, tmp);
2656 dead_tmp(tmp);
2659 static inline int
2660 vfp_enabled(CPUState * env)
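/* FPEXC bit 30 is the EN (global VFP enable) bit. */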
2662 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
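/* Duplicate the byte at (var >> shift) & 0xff across all four byte lanes of var. */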
2665 static void gen_neon_dup_u8(TCGv var, int shift)
2667 TCGv tmp = new_tmp();
2668 if (shift)
2669 tcg_gen_shri_i32(var, var, shift);
2670 tcg_gen_ext8u_i32(var, var);
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
2675 dead_tmp(tmp);
2678 static void gen_neon_dup_low16(TCGv var)
2680 TCGv tmp = new_tmp();
2681 tcg_gen_ext16u_i32(var, var);
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
2684 dead_tmp(tmp);
2687 static void gen_neon_dup_high16(TCGv var)
2689 TCGv tmp = new_tmp();
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
2693 dead_tmp(tmp);
2696 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2697 (i.e. an undefined instruction). */
2698 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2700 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2701 int dp, veclen;
2702 TCGv tmp;
2703 TCGv tmp2;
2705 if (!arm_feature(env, ARM_FEATURE_VFP))
2706 return 1;
2708 if (!vfp_enabled(env)) {
2709 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2710 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2711 return 1;
2712 rn = (insn >> 16) & 0xf;
2713 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2714 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2715 return 1;
2717 dp = ((insn & 0xf00) == 0xb00);
2718 switch ((insn >> 24) & 0xf) {
2719 case 0xe:
2720 if (insn & (1 << 4)) {
2721 /* single register transfer */
2722 rd = (insn >> 12) & 0xf;
2723 if (dp) {
2724 int size;
2725 int pass;
2727 VFP_DREG_N(rn, insn);
2728 if (insn & 0xf)
2729 return 1;
2730 if (insn & 0x00c00060
2731 && !arm_feature(env, ARM_FEATURE_NEON))
2732 return 1;
2734 pass = (insn >> 21) & 1;
2735 if (insn & (1 << 22)) {
2736 size = 0;
2737 offset = ((insn >> 5) & 3) * 8;
2738 } else if (insn & (1 << 5)) {
2739 size = 1;
2740 offset = (insn & (1 << 6)) ? 16 : 0;
2741 } else {
2742 size = 2;
2743 offset = 0;
2745 if (insn & ARM_CP_RW_BIT) {
2746 /* vfp->arm */
2747 tmp = neon_load_reg(rn, pass);
2748 switch (size) {
2749 case 0:
2750 if (offset)
2751 tcg_gen_shri_i32(tmp, tmp, offset);
2752 if (insn & (1 << 23))
2753 gen_uxtb(tmp);
2754 else
2755 gen_sxtb(tmp);
2756 break;
2757 case 1:
2758 if (insn & (1 << 23)) {
2759 if (offset) {
2760 tcg_gen_shri_i32(tmp, tmp, 16);
2761 } else {
2762 gen_uxth(tmp);
2764 } else {
2765 if (offset) {
2766 tcg_gen_sari_i32(tmp, tmp, 16);
2767 } else {
2768 gen_sxth(tmp);
2771 break;
2772 case 2:
2773 break;
2775 store_reg(s, rd, tmp);
2776 } else {
2777 /* arm->vfp */
2778 tmp = load_reg(s, rd);
2779 if (insn & (1 << 23)) {
2780 /* VDUP */
2781 if (size == 0) {
2782 gen_neon_dup_u8(tmp, 0);
2783 } else if (size == 1) {
2784 gen_neon_dup_low16(tmp);
2786 for (n = 0; n <= pass * 2; n++) {
2787 tmp2 = new_tmp();
2788 tcg_gen_mov_i32(tmp2, tmp);
2789 neon_store_reg(rn, n, tmp2);
2791 neon_store_reg(rn, n, tmp);
2792 } else {
2793 /* VMOV */
2794 switch (size) {
2795 case 0:
2796 tmp2 = neon_load_reg(rn, pass);
2797 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2798 dead_tmp(tmp2);
2799 break;
2800 case 1:
2801 tmp2 = neon_load_reg(rn, pass);
2802 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2803 dead_tmp(tmp2);
2804 break;
2805 case 2:
2806 break;
2808 neon_store_reg(rn, pass, tmp);
2811 } else { /* !dp */
2812 if ((insn & 0x6f) != 0x00)
2813 return 1;
2814 rn = VFP_SREG_N(insn);
2815 if (insn & ARM_CP_RW_BIT) {
2816 /* vfp->arm */
2817 if (insn & (1 << 21)) {
2818 /* system register */
2819 rn >>= 1;
2821 switch (rn) {
2822 case ARM_VFP_FPSID:
2823 /* VFP2 allows access to FPSID from userspace.
2824 VFP3 restricts all id registers to privileged
2825 accesses. */
2826 if (IS_USER(s)
2827 && arm_feature(env, ARM_FEATURE_VFP3))
2828 return 1;
2829 tmp = load_cpu_field(vfp.xregs[rn]);
2830 break;
2831 case ARM_VFP_FPEXC:
2832 if (IS_USER(s))
2833 return 1;
2834 tmp = load_cpu_field(vfp.xregs[rn]);
2835 break;
2836 case ARM_VFP_FPINST:
2837 case ARM_VFP_FPINST2:
2838 /* Not present in VFP3. */
2839 if (IS_USER(s)
2840 || arm_feature(env, ARM_FEATURE_VFP3))
2841 return 1;
2842 tmp = load_cpu_field(vfp.xregs[rn]);
2843 break;
2844 case ARM_VFP_FPSCR:
2845 if (rd == 15) {
2846 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2847 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2848 } else {
2849 tmp = new_tmp();
2850 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2852 break;
2853 case ARM_VFP_MVFR0:
2854 case ARM_VFP_MVFR1:
2855 if (IS_USER(s)
2856 || !arm_feature(env, ARM_FEATURE_VFP3))
2857 return 1;
2858 tmp = load_cpu_field(vfp.xregs[rn]);
2859 break;
2860 default:
2861 return 1;
2863 } else {
2864 gen_mov_F0_vreg(0, rn);
2865 tmp = gen_vfp_mrs();
2867 if (rd == 15) {
2868 /* Set the 4 flag bits in the CPSR. */
2869 gen_set_nzcv(tmp);
2870 dead_tmp(tmp);
2871 } else {
2872 store_reg(s, rd, tmp);
2874 } else {
2875 /* arm->vfp */
2876 tmp = load_reg(s, rd);
2877 if (insn & (1 << 21)) {
2878 rn >>= 1;
2879 /* system register */
2880 switch (rn) {
2881 case ARM_VFP_FPSID:
2882 case ARM_VFP_MVFR0:
2883 case ARM_VFP_MVFR1:
2884 /* Writes are ignored. */
2885 break;
2886 case ARM_VFP_FPSCR:
2887 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2888 dead_tmp(tmp);
2889 gen_lookup_tb(s);
2890 break;
2891 case ARM_VFP_FPEXC:
2892 if (IS_USER(s))
2893 return 1;
2894 store_cpu_field(tmp, vfp.xregs[rn]);
2895 gen_lookup_tb(s);
2896 break;
2897 case ARM_VFP_FPINST:
2898 case ARM_VFP_FPINST2:
2899 store_cpu_field(tmp, vfp.xregs[rn]);
2900 break;
2901 default:
2902 return 1;
2904 } else {
2905 gen_vfp_msr(tmp);
2906 gen_mov_vreg_F0(0, rn);
2910 } else {
2911 /* data processing */
2912 /* The opcode is in bits 23, 21, 20 and 6. */
2913 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2914 if (dp) {
2915 if (op == 15) {
2916 /* rn is opcode */
2917 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2918 } else {
2919 /* rn is register number */
2920 VFP_DREG_N(rn, insn);
2923 if (op == 15 && (rn == 15 || rn > 17)) {
2924 /* Integer or single precision destination. */
2925 rd = VFP_SREG_D(insn);
2926 } else {
2927 VFP_DREG_D(rd, insn);
2930 if (op == 15 && (rn == 16 || rn == 17)) {
2931 /* Integer source. */
2932 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2933 } else {
2934 VFP_DREG_M(rm, insn);
2936 } else {
2937 rn = VFP_SREG_N(insn);
2938 if (op == 15 && rn == 15) {
2939 /* Double precision destination. */
2940 VFP_DREG_D(rd, insn);
2941 } else {
2942 rd = VFP_SREG_D(insn);
2944 rm = VFP_SREG_M(insn);
2947 veclen = env->vfp.vec_len;
2948 if (op == 15 && rn > 3)
2949 veclen = 0;
2951 /* Shut up compiler warnings. */
2952 delta_m = 0;
2953 delta_d = 0;
2954 bank_mask = 0;
2956 if (veclen > 0) {
2957 if (dp)
2958 bank_mask = 0xc;
2959 else
2960 bank_mask = 0x18;
2962 /* Figure out what type of vector operation this is. */
2963 if ((rd & bank_mask) == 0) {
2964 /* scalar */
2965 veclen = 0;
2966 } else {
2967 if (dp)
2968 delta_d = (env->vfp.vec_stride >> 1) + 1;
2969 else
2970 delta_d = env->vfp.vec_stride + 1;
2972 if ((rm & bank_mask) == 0) {
2973 /* mixed scalar/vector */
2974 delta_m = 0;
2975 } else {
2976 /* vector */
2977 delta_m = delta_d;
2982 /* Load the initial operands. */
2983 if (op == 15) {
2984 switch (rn) {
2985 case 16:
2986 case 17:
2987 /* Integer source */
2988 gen_mov_F0_vreg(0, rm);
2989 break;
2990 case 8:
2991 case 9:
2992 /* Compare */
2993 gen_mov_F0_vreg(dp, rd);
2994 gen_mov_F1_vreg(dp, rm);
2995 break;
2996 case 10:
2997 case 11:
2998 /* Compare with zero */
2999 gen_mov_F0_vreg(dp, rd);
3000 gen_vfp_F1_ld0(dp);
3001 break;
3002 case 20:
3003 case 21:
3004 case 22:
3005 case 23:
3006 case 28:
3007 case 29:
3008 case 30:
3009 case 31:
3010 /* Source and destination the same. */
3011 gen_mov_F0_vreg(dp, rd);
3012 break;
3013 default:
3014 /* One source operand. */
3015 gen_mov_F0_vreg(dp, rm);
3016 break;
3018 } else {
3019 /* Two source operands. */
3020 gen_mov_F0_vreg(dp, rn);
3021 gen_mov_F1_vreg(dp, rm);
3024 for (;;) {
3025 /* Perform the calculation. */
3026 switch (op) {
3027 case 0: /* mac: fd + (fn * fm) */
3028 gen_vfp_mul(dp);
3029 gen_mov_F1_vreg(dp, rd);
3030 gen_vfp_add(dp);
3031 break;
3032 case 1: /* nmac: fd - (fn * fm) */
3033 gen_vfp_mul(dp);
3034 gen_vfp_neg(dp);
3035 gen_mov_F1_vreg(dp, rd);
3036 gen_vfp_add(dp);
3037 break;
3038 case 2: /* msc: -fd + (fn * fm) */
3039 gen_vfp_mul(dp);
3040 gen_mov_F1_vreg(dp, rd);
3041 gen_vfp_sub(dp);
3042 break;
3043 case 3: /* nmsc: -fd - (fn * fm) */
3044 gen_vfp_mul(dp);
3045 gen_vfp_neg(dp);
3046 gen_mov_F1_vreg(dp, rd);
3047 gen_vfp_sub(dp);
3048 break;
3049 case 4: /* mul: fn * fm */
3050 gen_vfp_mul(dp);
3051 break;
3052 case 5: /* nmul: -(fn * fm) */
3053 gen_vfp_mul(dp);
3054 gen_vfp_neg(dp);
3055 break;
3056 case 6: /* add: fn + fm */
3057 gen_vfp_add(dp);
3058 break;
3059 case 7: /* sub: fn - fm */
3060 gen_vfp_sub(dp);
3061 break;
3062 case 8: /* div: fn / fm */
3063 gen_vfp_div(dp);
3064 break;
3065 case 14: /* fconst */
3066 if (!arm_feature(env, ARM_FEATURE_VFP3))
3067 return 1;
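/* Expand the VFP3 8-bit immediate: n takes the sign bit, bit 6 of i selects the exponent pattern and the low four bits form the fraction. */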
3069 n = (insn << 12) & 0x80000000;
3070 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3071 if (dp) {
3072 if (i & 0x40)
3073 i |= 0x3f80;
3074 else
3075 i |= 0x4000;
3076 n |= i << 16;
3077 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3078 } else {
3079 if (i & 0x40)
3080 i |= 0x780;
3081 else
3082 i |= 0x800;
3083 n |= i << 19;
3084 tcg_gen_movi_i32(cpu_F0s, n);
3086 break;
3087 case 15: /* extension space */
3088 switch (rn) {
3089 case 0: /* cpy */
3090 /* no-op */
3091 break;
3092 case 1: /* abs */
3093 gen_vfp_abs(dp);
3094 break;
3095 case 2: /* neg */
3096 gen_vfp_neg(dp);
3097 break;
3098 case 3: /* sqrt */
3099 gen_vfp_sqrt(dp);
3100 break;
3101 case 8: /* cmp */
3102 gen_vfp_cmp(dp);
3103 break;
3104 case 9: /* cmpe */
3105 gen_vfp_cmpe(dp);
3106 break;
3107 case 10: /* cmpz */
3108 gen_vfp_cmp(dp);
3109 break;
3110 case 11: /* cmpez */
3111 gen_vfp_F1_ld0(dp);
3112 gen_vfp_cmpe(dp);
3113 break;
3114 case 15: /* single<->double conversion */
3115 if (dp)
3116 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3117 else
3118 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3119 break;
3120 case 16: /* fuito */
3121 gen_vfp_uito(dp);
3122 break;
3123 case 17: /* fsito */
3124 gen_vfp_sito(dp);
3125 break;
3126 case 20: /* fshto */
3127 if (!arm_feature(env, ARM_FEATURE_VFP3))
3128 return 1;
3129 gen_vfp_shto(dp, 16 - rm);
3130 break;
3131 case 21: /* fslto */
3132 if (!arm_feature(env, ARM_FEATURE_VFP3))
3133 return 1;
3134 gen_vfp_slto(dp, 32 - rm);
3135 break;
3136 case 22: /* fuhto */
3137 if (!arm_feature(env, ARM_FEATURE_VFP3))
3138 return 1;
3139 gen_vfp_uhto(dp, 16 - rm);
3140 break;
3141 case 23: /* fulto */
3142 if (!arm_feature(env, ARM_FEATURE_VFP3))
3143 return 1;
3144 gen_vfp_ulto(dp, 32 - rm);
3145 break;
3146 case 24: /* ftoui */
3147 gen_vfp_toui(dp);
3148 break;
3149 case 25: /* ftouiz */
3150 gen_vfp_touiz(dp);
3151 break;
3152 case 26: /* ftosi */
3153 gen_vfp_tosi(dp);
3154 break;
3155 case 27: /* ftosiz */
3156 gen_vfp_tosiz(dp);
3157 break;
3158 case 28: /* ftosh */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
3161 gen_vfp_tosh(dp, 16 - rm);
3162 break;
3163 case 29: /* ftosl */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
3166 gen_vfp_tosl(dp, 32 - rm);
3167 break;
3168 case 30: /* ftouh */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
3171 gen_vfp_touh(dp, 16 - rm);
3172 break;
3173 case 31: /* ftoul */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3175 return 1;
3176 gen_vfp_toul(dp, 32 - rm);
3177 break;
3178 default: /* undefined */
3179 printf ("rn:%d\n", rn);
3180 return 1;
3182 break;
3183 default: /* undefined */
3184 printf ("op:%d\n", op);
3185 return 1;
3188 /* Write back the result. */
3189 if (op == 15 && (rn >= 8 && rn <= 11))
3190 ; /* Comparison, do nothing. */
3191 else if (op == 15 && rn > 17)
3192 /* Integer result. */
3193 gen_mov_vreg_F0(0, rd);
3194 else if (op == 15 && rn == 15)
3195 /* conversion */
3196 gen_mov_vreg_F0(!dp, rd);
3197 else
3198 gen_mov_vreg_F0(dp, rd);
3200 /* break out of the loop if we have finished */
3201 if (veclen == 0)
3202 break;
3204 if (op == 15 && delta_m == 0) {
3205 /* single source one-many */
3206 while (veclen--) {
3207 rd = ((rd + delta_d) & (bank_mask - 1))
3208 | (rd & bank_mask);
3209 gen_mov_vreg_F0(dp, rd);
3211 break;
3213 /* Set up the next operands. */
3214 veclen--;
3215 rd = ((rd + delta_d) & (bank_mask - 1))
3216 | (rd & bank_mask);
3218 if (op == 15) {
3219 /* One source operand. */
3220 rm = ((rm + delta_m) & (bank_mask - 1))
3221 | (rm & bank_mask);
3222 gen_mov_F0_vreg(dp, rm);
3223 } else {
3224 /* Two source operands. */
3225 rn = ((rn + delta_d) & (bank_mask - 1))
3226 | (rn & bank_mask);
3227 gen_mov_F0_vreg(dp, rn);
3228 if (delta_m) {
3229 rm = ((rm + delta_m) & (bank_mask - 1))
3230 | (rm & bank_mask);
3231 gen_mov_F1_vreg(dp, rm);
3236 break;
3237 case 0xc:
3238 case 0xd:
3239 if (dp && (insn & 0x03e00000) == 0x00400000) {
3240 /* two-register transfer */
3241 rn = (insn >> 16) & 0xf;
3242 rd = (insn >> 12) & 0xf;
3243 if (dp) {
3244 VFP_DREG_M(rm, insn);
3245 } else {
3246 rm = VFP_SREG_M(insn);
3249 if (insn & ARM_CP_RW_BIT) {
3250 /* vfp->arm */
3251 if (dp) {
3252 gen_mov_F0_vreg(0, rm * 2);
3253 tmp = gen_vfp_mrs();
3254 store_reg(s, rd, tmp);
3255 gen_mov_F0_vreg(0, rm * 2 + 1);
3256 tmp = gen_vfp_mrs();
3257 store_reg(s, rn, tmp);
3258 } else {
3259 gen_mov_F0_vreg(0, rm);
3260 tmp = gen_vfp_mrs();
3261 store_reg(s, rn, tmp);
3262 gen_mov_F0_vreg(0, rm + 1);
3263 tmp = gen_vfp_mrs();
3264 store_reg(s, rd, tmp);
3266 } else {
3267 /* arm->vfp */
3268 if (dp) {
3269 tmp = load_reg(s, rd);
3270 gen_vfp_msr(tmp);
3271 gen_mov_vreg_F0(0, rm * 2);
3272 tmp = load_reg(s, rn);
3273 gen_vfp_msr(tmp);
3274 gen_mov_vreg_F0(0, rm * 2 + 1);
3275 } else {
3276 tmp = load_reg(s, rn);
3277 gen_vfp_msr(tmp);
3278 gen_mov_vreg_F0(0, rm);
3279 tmp = load_reg(s, rd);
3280 gen_vfp_msr(tmp);
3281 gen_mov_vreg_F0(0, rm + 1);
3284 } else {
3285 /* Load/store */
3286 rn = (insn >> 16) & 0xf;
3287 if (dp)
3288 VFP_DREG_D(rd, insn);
3289 else
3290 rd = VFP_SREG_D(insn);
3291 if (s->thumb && rn == 15) {
3292 gen_op_movl_T1_im(s->pc & ~2);
3293 } else {
3294 gen_movl_T1_reg(s, rn);
3296 if ((insn & 0x01200000) == 0x01000000) {
3297 /* Single load/store */
3298 offset = (insn & 0xff) << 2;
3299 if ((insn & (1 << 23)) == 0)
3300 offset = -offset;
3301 gen_op_addl_T1_im(offset);
3302 if (insn & (1 << 20)) {
3303 gen_vfp_ld(s, dp);
3304 gen_mov_vreg_F0(dp, rd);
3305 } else {
3306 gen_mov_F0_vreg(dp, rd);
3307 gen_vfp_st(s, dp);
3309 } else {
3310 /* load/store multiple */
3311 if (dp)
3312 n = (insn >> 1) & 0x7f;
3313 else
3314 n = insn & 0xff;
3316 if (insn & (1 << 24)) /* pre-decrement */
3317 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3319 if (dp)
3320 offset = 8;
3321 else
3322 offset = 4;
3323 for (i = 0; i < n; i++) {
3324 if (insn & ARM_CP_RW_BIT) {
3325 /* load */
3326 gen_vfp_ld(s, dp);
3327 gen_mov_vreg_F0(dp, rd + i);
3328 } else {
3329 /* store */
3330 gen_mov_F0_vreg(dp, rd + i);
3331 gen_vfp_st(s, dp);
3333 gen_op_addl_T1_im(offset);
3335 if (insn & (1 << 21)) {
3336 /* writeback */
3337 if (insn & (1 << 24))
3338 offset = -offset * n;
3339 else if (dp && (insn & 1))
3340 offset = 4;
3341 else
3342 offset = 0;
3344 if (offset != 0)
3345 gen_op_addl_T1_im(offset);
3346 gen_movl_reg_T1(s, rn);
3350 break;
3351 default:
3352 /* Should never happen. */
3353 return 1;
3355 return 0;
3358 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3360 TranslationBlock *tb;
3362 tb = s->tb;
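/* Chain directly to the target TB only when it lies in the same guest page; cross-page jumps exit to the main loop so the mapping is re-resolved. */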
3363 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3364 tcg_gen_goto_tb(n);
3365 gen_set_pc_im(dest);
3366 tcg_gen_exit_tb((long)tb + n);
3367 } else {
3368 gen_set_pc_im(dest);
3369 tcg_gen_exit_tb(0);
3373 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3375 if (unlikely(s->singlestep_enabled)) {
3376 /* An indirect jump so that we still trigger the debug exception. */
3377 if (s->thumb)
3378 dest |= 1;
3379 gen_bx_im(s, dest);
3380 } else {
3381 gen_goto_tb(s, 0, dest);
3382 s->is_jmp = DISAS_TB_JUMP;
3386 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
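/* Signed 16x16->32 multiply as used by SMULxy/SMLAxy: x and y select the top (1) or bottom (0) halfword of each operand. */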
3388 if (x)
3389 tcg_gen_sari_i32(t0, t0, 16);
3390 else
3391 gen_sxth(t0);
3392 if (y)
3393 tcg_gen_sari_i32(t1, t1, 16);
3394 else
3395 gen_sxth(t1);
3396 tcg_gen_mul_i32(t0, t0, t1);
3399 /* Return the mask of PSR bits set by a MSR instruction. */
3400 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3401 uint32_t mask;
3403 mask = 0;
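/* Flag bits 0-3 select the c, x, s and f fields of the PSR, one byte each. */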
3404 if (flags & (1 << 0))
3405 mask |= 0xff;
3406 if (flags & (1 << 1))
3407 mask |= 0xff00;
3408 if (flags & (1 << 2))
3409 mask |= 0xff0000;
3410 if (flags & (1 << 3))
3411 mask |= 0xff000000;
3413 /* Mask out undefined bits. */
3414 mask &= ~CPSR_RESERVED;
3415 if (!arm_feature(env, ARM_FEATURE_V6))
3416 mask &= ~(CPSR_E | CPSR_GE);
3417 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3418 mask &= ~CPSR_IT;
3419 /* Mask out execution state bits. */
3420 if (!spsr)
3421 mask &= ~CPSR_EXEC;
3422 /* Mask out privileged bits. */
3423 if (IS_USER(s))
3424 mask &= CPSR_USER;
3425 return mask;
3428 /* Returns nonzero if access to the PSR is not permitted. */
3429 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3431 TCGv tmp;
3432 if (spsr) {
3433 /* ??? This is also undefined in system mode. */
3434 if (IS_USER(s))
3435 return 1;
3437 tmp = load_cpu_field(spsr);
3438 tcg_gen_andi_i32(tmp, tmp, ~mask);
3439 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3440 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3441 store_cpu_field(tmp, spsr);
3442 } else {
3443 gen_set_cpsr(cpu_T[0], mask);
3445 gen_lookup_tb(s);
3446 return 0;
3449 /* Generate an old-style exception return. */
3450 static void gen_exception_return(DisasContext *s)
3452 TCGv tmp;
3453 gen_movl_reg_T0(s, 15);
3454 tmp = load_cpu_field(spsr);
3455 gen_set_cpsr(tmp, 0xffffffff);
3456 dead_tmp(tmp);
3457 s->is_jmp = DISAS_UPDATE;
3460 /* Generate a v6 exception return. Marks both values as dead. */
3461 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3463 gen_set_cpsr(cpsr, 0xffffffff);
3464 dead_tmp(cpsr);
3465 store_reg(s, 15, pc);
3466 s->is_jmp = DISAS_UPDATE;
3469 static inline void
3470 gen_set_condexec (DisasContext *s)
3472 if (s->condexec_mask) {
3473 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3474 TCGv tmp = new_tmp();
3475 tcg_gen_movi_i32(tmp, val);
3476 store_cpu_field(tmp, condexec_bits);
3480 static void gen_nop_hint(DisasContext *s, int val)
3482 switch (val) {
3483 case 3: /* wfi */
3484 gen_set_pc_im(s->pc);
3485 s->is_jmp = DISAS_WFI;
3486 break;
3487 case 2: /* wfe */
3488 case 4: /* sev */
3489 /* TODO: Implement SEV and WFE. May help SMP performance. */
3490 default: /* nop */
3491 break;
3495 /* These macros help make the code more readable when migrating from the
3496 old dyngen helpers. They should probably be removed when
3497 T0/T1 are removed. */
3498 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3499 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3501 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3503 static inline int gen_neon_add(int size)
3505 switch (size) {
3506 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3507 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3508 case 2: gen_op_addl_T0_T1(); break;
3509 default: return 1;
3511 return 0;
3514 static inline void gen_neon_rsb(int size)
3516 switch (size) {
3517 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3518 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3519 case 2: gen_op_rsbl_T0_T1(); break;
3520 default: return;
3524 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3525 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3526 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3527 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3528 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3530 /* FIXME: This is wrong. They set the wrong overflow bit. */
3531 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3532 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3533 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3534 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
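/* These macros dispatch on ((size << 1) | u): even cases are signed, odd are unsigned, for 8-, 16- and 32-bit elements. The _ENV variant passes cpu_env for helpers that need the saturation flag. */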
3536 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3537 switch ((size << 1) | u) { \
3538 case 0: \
3539 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3540 break; \
3541 case 1: \
3542 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3543 break; \
3544 case 2: \
3545 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3546 break; \
3547 case 3: \
3548 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3549 break; \
3550 case 4: \
3551 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3552 break; \
3553 case 5: \
3554 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3555 break; \
3556 default: return 1; \
3557 }} while (0)
3559 #define GEN_NEON_INTEGER_OP(name) do { \
3560 switch ((size << 1) | u) { \
3561 case 0: \
3562 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3563 break; \
3564 case 1: \
3565 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3566 break; \
3567 case 2: \
3568 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3569 break; \
3570 case 3: \
3571 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3572 break; \
3573 case 4: \
3574 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3575 break; \
3576 case 5: \
3577 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3578 break; \
3579 default: return 1; \
3580 }} while (0)
3582 static inline void
3583 gen_neon_movl_scratch_T0(int scratch)
3585 uint32_t offset;
3587 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3588 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3591 static inline void
3592 gen_neon_movl_scratch_T1(int scratch)
3594 uint32_t offset;
3596 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3597 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3600 static inline void
3601 gen_neon_movl_T0_scratch(int scratch)
3603 uint32_t offset;
3605 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3606 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3609 static inline void
3610 gen_neon_movl_T1_scratch(int scratch)
3612 uint32_t offset;
3614 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3615 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3618 static inline void gen_neon_get_scalar(int size, int reg)
3620 if (size == 1) {
3621 NEON_GET_REG(T0, reg >> 1, reg & 1);
3622 } else {
3623 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3624 if (reg & 1)
3625 gen_neon_dup_low16(cpu_T[0]);
3626 else
3627 gen_neon_dup_high16(cpu_T[0]);
3631 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3633 int n;
3635 for (n = 0; n < q + 1; n += 2) {
3636 NEON_GET_REG(T0, reg, n);
3637 NEON_GET_REG(T1, reg, n + 1);
3638 switch (size) {
3639 case 0: gen_helper_neon_unzip_u8(); break;
3640 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3641 case 2: /* no-op */; break;
3642 default: abort();
3644 gen_neon_movl_scratch_T0(tmp + n);
3645 gen_neon_movl_scratch_T1(tmp + n + 1);
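/* Layout of a VLDn/VSTn "all elements" access, indexed by opcode: registers transferred, element interleave factor, and register spacing. */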
3649 static struct {
3650 int nregs;
3651 int interleave;
3652 int spacing;
3653 } neon_ls_element_type[11] = {
3654 {4, 4, 1},
3655 {4, 4, 2},
3656 {4, 1, 1},
3657 {4, 2, 1},
3658 {3, 3, 1},
3659 {3, 3, 2},
3660 {3, 1, 1},
3661 {1, 1, 1},
3662 {2, 2, 1},
3663 {2, 2, 2},
3664 {2, 1, 1}
3667 /* Translate a NEON load/store element instruction. Return nonzero if the
3668 instruction is invalid. */
3669 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3671 int rd, rn, rm;
3672 int op;
3673 int nregs;
3674 int interleave;
3675 int stride;
3676 int size;
3677 int reg;
3678 int pass;
3679 int load;
3680 int shift;
3681 int n;
3682 TCGv tmp;
3683 TCGv tmp2;
3685 if (!vfp_enabled(env))
3686 return 1;
3687 VFP_DREG_D(rd, insn);
3688 rn = (insn >> 16) & 0xf;
3689 rm = insn & 0xf;
3690 load = (insn & (1 << 21)) != 0;
3691 if ((insn & (1 << 23)) == 0) {
3692 /* Load store all elements. */
3693 op = (insn >> 8) & 0xf;
3694 size = (insn >> 6) & 3;
3695 if (op > 10 || size == 3)
3696 return 1;
3697 nregs = neon_ls_element_type[op].nregs;
3698 interleave = neon_ls_element_type[op].interleave;
3699 gen_movl_T1_reg(s, rn);
3700 stride = (1 << size) * interleave;
3701 for (reg = 0; reg < nregs; reg++) {
3702 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3703 gen_movl_T1_reg(s, rn);
3704 gen_op_addl_T1_im((1 << size) * reg);
3705 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3706 gen_movl_T1_reg(s, rn);
3707 gen_op_addl_T1_im(1 << size);
3709 for (pass = 0; pass < 2; pass++) {
3710 if (size == 2) {
3711 if (load) {
3712 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3713 neon_store_reg(rd, pass, tmp);
3714 } else {
3715 tmp = neon_load_reg(rd, pass);
3716 gen_st32(tmp, cpu_T[1], IS_USER(s));
3718 gen_op_addl_T1_im(stride);
3719 } else if (size == 1) {
3720 if (load) {
3721 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3722 gen_op_addl_T1_im(stride);
3723 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3724 gen_op_addl_T1_im(stride);
3725 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3726 dead_tmp(tmp2);
3727 neon_store_reg(rd, pass, tmp);
3728 } else {
3729 tmp = neon_load_reg(rd, pass);
3730 tmp2 = new_tmp();
3731 tcg_gen_shri_i32(tmp2, tmp, 16);
3732 gen_st16(tmp, cpu_T[1], IS_USER(s));
3733 gen_op_addl_T1_im(stride);
3734 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3735 gen_op_addl_T1_im(stride);
3737 } else /* size == 0 */ {
3738 if (load) {
3739 TCGV_UNUSED(tmp2);
3740 for (n = 0; n < 4; n++) {
3741 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3742 gen_op_addl_T1_im(stride);
3743 if (n == 0) {
3744 tmp2 = tmp;
3745 } else {
3746 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3747 dead_tmp(tmp);
3750 neon_store_reg(rd, pass, tmp2);
3751 } else {
3752 tmp2 = neon_load_reg(rd, pass);
3753 for (n = 0; n < 4; n++) {
3754 tmp = new_tmp();
3755 if (n == 0) {
3756 tcg_gen_mov_i32(tmp, tmp2);
3757 } else {
3758 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3760 gen_st8(tmp, cpu_T[1], IS_USER(s));
3761 gen_op_addl_T1_im(stride);
3763 dead_tmp(tmp2);
3767 rd += neon_ls_element_type[op].spacing;
3769 stride = nregs * 8;
3770 } else {
3771 size = (insn >> 10) & 3;
3772 if (size == 3) {
3773 /* Load single element to all lanes. */
3774 if (!load)
3775 return 1;
3776 size = (insn >> 6) & 3;
3777 nregs = ((insn >> 8) & 3) + 1;
3778 stride = (insn & (1 << 5)) ? 2 : 1;
3779 gen_movl_T1_reg(s, rn);
3780 for (reg = 0; reg < nregs; reg++) {
3781 switch (size) {
3782 case 0:
3783 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3784 gen_neon_dup_u8(tmp, 0);
3785 break;
3786 case 1:
3787 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3788 gen_neon_dup_low16(tmp);
3789 break;
3790 case 2:
3791 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3792 break;
3793 case 3:
3794 return 1;
3795 default: /* Avoid compiler warnings. */
3796 abort();
3798 gen_op_addl_T1_im(1 << size);
3799 tmp2 = new_tmp();
3800 tcg_gen_mov_i32(tmp2, tmp);
3801 neon_store_reg(rd, 0, tmp2);
3802 neon_store_reg(rd, 1, tmp);
3803 rd += stride;
3805 stride = (1 << size) * nregs;
3806 } else {
3807 /* Single element. */
3808 pass = (insn >> 7) & 1;
3809 switch (size) {
3810 case 0:
3811 shift = ((insn >> 5) & 3) * 8;
3812 stride = 1;
3813 break;
3814 case 1:
3815 shift = ((insn >> 6) & 1) * 16;
3816 stride = (insn & (1 << 5)) ? 2 : 1;
3817 break;
3818 case 2:
3819 shift = 0;
3820 stride = (insn & (1 << 6)) ? 2 : 1;
3821 break;
3822 default:
3823 abort();
3825 nregs = ((insn >> 8) & 3) + 1;
3826 gen_movl_T1_reg(s, rn);
3827 for (reg = 0; reg < nregs; reg++) {
3828 if (load) {
3829 switch (size) {
3830 case 0:
3831 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3832 break;
3833 case 1:
3834 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3835 break;
3836 case 2:
3837 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3838 break;
3839 default: /* Avoid compiler warnings. */
3840 abort();
3842 if (size != 2) {
3843 tmp2 = neon_load_reg(rd, pass);
3844 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3845 dead_tmp(tmp2);
3847 neon_store_reg(rd, pass, tmp);
3848 } else { /* Store */
3849 tmp = neon_load_reg(rd, pass);
3850 if (shift)
3851 tcg_gen_shri_i32(tmp, tmp, shift);
3852 switch (size) {
3853 case 0:
3854 gen_st8(tmp, cpu_T[1], IS_USER(s));
3855 break;
3856 case 1:
3857 gen_st16(tmp, cpu_T[1], IS_USER(s));
3858 break;
3859 case 2:
3860 gen_st32(tmp, cpu_T[1], IS_USER(s));
3861 break;
3864 rd += stride;
3865 gen_op_addl_T1_im(1 << size);
3867 stride = nregs * (1 << size);
3870 if (rm != 15) {
3871 TCGv base;
3873 base = load_reg(s, rn);
3874 if (rm == 13) {
3875 tcg_gen_addi_i32(base, base, stride);
3876 } else {
3877 TCGv index;
3878 index = load_reg(s, rm);
3879 tcg_gen_add_i32(base, base, index);
3880 dead_tmp(index);
3882 store_reg(s, rn, base);
3884 return 0;
3887 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3888 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3890 tcg_gen_and_i32(t, t, c);
3891 tcg_gen_bic_i32(f, f, c);
3892 tcg_gen_or_i32(dest, t, f);
3895 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3897 switch (size) {
3898 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3899 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3900 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3901 default: abort();
3905 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3907 switch (size) {
3908 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3909 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3910 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3911 default: abort();
3915 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3917 switch (size) {
3918 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3919 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3920 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3921 default: abort();
3925 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3926 int q, int u)
3928 if (q) {
3929 if (u) {
3930 switch (size) {
3931 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3932 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3933 default: abort();
3935 } else {
3936 switch (size) {
3937 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3938 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3939 default: abort();
3942 } else {
3943 if (u) {
3944 switch (size) {
3945 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3946 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3947 default: abort();
3949 } else {
3950 switch (size) {
3951 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3952 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3953 default: abort();
3959 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3961 if (u) {
3962 switch (size) {
3963 case 0: gen_helper_neon_widen_u8(dest, src); break;
3964 case 1: gen_helper_neon_widen_u16(dest, src); break;
3965 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3966 default: abort();
3968 } else {
3969 switch (size) {
3970 case 0: gen_helper_neon_widen_s8(dest, src); break;
3971 case 1: gen_helper_neon_widen_s16(dest, src); break;
3972 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3973 default: abort();
3976 dead_tmp(src);
3979 static inline void gen_neon_addl(int size)
3981 switch (size) {
3982 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3983 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3984 case 2: tcg_gen_add_i64(CPU_V001); break;
3985 default: abort();
3989 static inline void gen_neon_subl(int size)
3991 switch (size) {
3992 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3993 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3994 case 2: tcg_gen_sub_i64(CPU_V001); break;
3995 default: abort();
3999 static inline void gen_neon_negl(TCGv_i64 var, int size)
4001 switch (size) {
4002 case 0: gen_helper_neon_negl_u16(var, var); break;
4003 case 1: gen_helper_neon_negl_u32(var, var); break;
4004 case 2: gen_helper_neon_negl_u64(var, var); break;
4005 default: abort();
4009 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4011 switch (size) {
4012 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4013 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4014 default: abort();
4018 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4020 TCGv_i64 tmp;
4022 switch ((size << 1) | u) {
4023 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4024 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4025 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4026 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4027 case 4:
4028 tmp = gen_muls_i64_i32(a, b);
4029 tcg_gen_mov_i64(dest, tmp);
4030 break;
4031 case 5:
4032 tmp = gen_mulu_i64_i32(a, b);
4033 tcg_gen_mov_i64(dest, tmp);
4034 break;
4035 default: abort();
4037 if (size < 2) {
4038 dead_tmp(b);
4039 dead_tmp(a);
4043 /* Translate a NEON data processing instruction. Return nonzero if the
4044 instruction is invalid.
4045 We process data in a mixture of 32-bit and 64-bit chunks.
4046 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4048 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4050 int op;
4051 int q;
4052 int rd, rn, rm;
4053 int size;
4054 int shift;
4055 int pass;
4056 int count;
4057 int pairwise;
4058 int u;
4059 int n;
4060 uint32_t imm;
4061 TCGv tmp;
4062 TCGv tmp2;
4063 TCGv tmp3;
4064 TCGv_i64 tmp64;
4066 if (!vfp_enabled(env))
4067 return 1;
4068 q = (insn & (1 << 6)) != 0;
4069 u = (insn >> 24) & 1;
4070 VFP_DREG_D(rd, insn);
4071 VFP_DREG_N(rn, insn);
4072 VFP_DREG_M(rm, insn);
4073 size = (insn >> 20) & 3;
4074 if ((insn & (1 << 23)) == 0) {
4075 /* Three register same length. */
4076 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4077 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4078 || op == 10 || op == 11 || op == 16)) {
4079 /* 64-bit element instructions. */
4080 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4081 neon_load_reg64(cpu_V0, rn + pass);
4082 neon_load_reg64(cpu_V1, rm + pass);
4083 switch (op) {
4084 case 1: /* VQADD */
4085 if (u) {
4086 gen_helper_neon_add_saturate_u64(CPU_V001);
4087 } else {
4088 gen_helper_neon_add_saturate_s64(CPU_V001);
4090 break;
4091 case 5: /* VQSUB */
4092 if (u) {
4093 gen_helper_neon_sub_saturate_u64(CPU_V001);
4094 } else {
4095 gen_helper_neon_sub_saturate_s64(CPU_V001);
4097 break;
4098 case 8: /* VSHL */
4099 if (u) {
4100 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4101 } else {
4102 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4104 break;
4105 case 9: /* VQSHL */
4106 if (u) {
4107 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4108 cpu_V1, cpu_V0);
4109 } else {
4110 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4111 cpu_V1, cpu_V0);
4113 break;
4114 case 10: /* VRSHL */
4115 if (u) {
4116 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4117 } else {
4118 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4120 break;
4121 case 11: /* VQRSHL */
4122 if (u) {
4123 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4124 cpu_V1, cpu_V0);
4125 } else {
4126 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4127 cpu_V1, cpu_V0);
4129 break;
4130 case 16:
4131 if (u) {
4132 tcg_gen_sub_i64(CPU_V001);
4133 } else {
4134 tcg_gen_add_i64(CPU_V001);
4136 break;
4137 default:
4138 abort();
4140 neon_store_reg64(cpu_V0, rd + pass);
4142 return 0;
4144 switch (op) {
4145 case 8: /* VSHL */
4146 case 9: /* VQSHL */
4147 case 10: /* VRSHL */
4148 case 11: /* VQRSHL */
4150 int rtmp;
4151 /* Shift instruction operands are reversed. */
4152 rtmp = rn;
4153 rn = rm;
4154 rm = rtmp;
4155 pairwise = 0;
4157 break;
4158 case 20: /* VPMAX */
4159 case 21: /* VPMIN */
4160 case 23: /* VPADD */
4161 pairwise = 1;
4162 break;
4163 case 26: /* VPADD (float) */
4164 pairwise = (u && size < 2);
4165 break;
4166 case 30: /* VPMIN/VPMAX (float) */
4167 pairwise = u;
4168 break;
4169 default:
4170 pairwise = 0;
4171 break;
4173 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4175 if (pairwise) {
4176 /* Pairwise. */
4177 if (q)
4178 n = (pass & 1) * 2;
4179 else
4180 n = 0;
4181 if (pass < q + 1) {
4182 NEON_GET_REG(T0, rn, n);
4183 NEON_GET_REG(T1, rn, n + 1);
4184 } else {
4185 NEON_GET_REG(T0, rm, n);
4186 NEON_GET_REG(T1, rm, n + 1);
4188 } else {
4189 /* Elementwise. */
4190 NEON_GET_REG(T0, rn, pass);
4191 NEON_GET_REG(T1, rm, pass);
4193 switch (op) {
4194 case 0: /* VHADD */
4195 GEN_NEON_INTEGER_OP(hadd);
4196 break;
4197 case 1: /* VQADD */
4198 GEN_NEON_INTEGER_OP_ENV(qadd);
4199 break;
4200 case 2: /* VRHADD */
4201 GEN_NEON_INTEGER_OP(rhadd);
4202 break;
4203 case 3: /* Logic ops. */
4204 switch ((u << 2) | size) {
4205 case 0: /* VAND */
4206 gen_op_andl_T0_T1();
4207 break;
4208 case 1: /* BIC */
4209 gen_op_bicl_T0_T1();
4210 break;
4211 case 2: /* VORR */
4212 gen_op_orl_T0_T1();
4213 break;
4214 case 3: /* VORN */
4215 gen_op_notl_T1();
4216 gen_op_orl_T0_T1();
4217 break;
4218 case 4: /* VEOR */
4219 gen_op_xorl_T0_T1();
4220 break;
4221 case 5: /* VBSL */
4222 tmp = neon_load_reg(rd, pass);
4223 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4224 dead_tmp(tmp);
4225 break;
4226 case 6: /* VBIT */
4227 tmp = neon_load_reg(rd, pass);
4228 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4229 dead_tmp(tmp);
4230 break;
4231 case 7: /* VBIF */
4232 tmp = neon_load_reg(rd, pass);
4233 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4234 dead_tmp(tmp);
4235 break;
4237 break;
4238 case 4: /* VHSUB */
4239 GEN_NEON_INTEGER_OP(hsub);
4240 break;
4241 case 5: /* VQSUB */
4242 GEN_NEON_INTEGER_OP_ENV(qsub);
4243 break;
4244 case 6: /* VCGT */
4245 GEN_NEON_INTEGER_OP(cgt);
4246 break;
4247 case 7: /* VCGE */
4248 GEN_NEON_INTEGER_OP(cge);
4249 break;
4250 case 8: /* VSHL */
4251 GEN_NEON_INTEGER_OP(shl);
4252 break;
4253 case 9: /* VQSHL */
4254 GEN_NEON_INTEGER_OP_ENV(qshl);
4255 break;
4256 case 10: /* VRSHL */
4257 GEN_NEON_INTEGER_OP(rshl);
4258 break;
4259 case 11: /* VQRSHL */
4260 GEN_NEON_INTEGER_OP_ENV(qrshl);
4261 break;
4262 case 12: /* VMAX */
4263 GEN_NEON_INTEGER_OP(max);
4264 break;
4265 case 13: /* VMIN */
4266 GEN_NEON_INTEGER_OP(min);
4267 break;
4268 case 14: /* VABD */
4269 GEN_NEON_INTEGER_OP(abd);
4270 break;
4271 case 15: /* VABA */
4272 GEN_NEON_INTEGER_OP(abd);
4273 NEON_GET_REG(T1, rd, pass);
4274 gen_neon_add(size);
4275 break;
4276 case 16:
4277 if (!u) { /* VADD */
4278 if (gen_neon_add(size))
4279 return 1;
4280 } else { /* VSUB */
4281 switch (size) {
4282 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4283 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4284 case 2: gen_op_subl_T0_T1(); break;
4285 default: return 1;
4288 break;
4289 case 17:
4290 if (!u) { /* VTST */
4291 switch (size) {
4292 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4293 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4294 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4295 default: return 1;
4297 } else { /* VCEQ */
4298 switch (size) {
4299 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4300 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4301 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4302 default: return 1;
4305 break;
4306 case 18: /* Multiply. */
4307 switch (size) {
4308 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4309 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4310 case 2: gen_op_mul_T0_T1(); break;
4311 default: return 1;
4313 NEON_GET_REG(T1, rd, pass);
4314 if (u) { /* VMLS */
4315 gen_neon_rsb(size);
4316 } else { /* VMLA */
4317 gen_neon_add(size);
4319 break;
4320 case 19: /* VMUL */
4321 if (u) { /* polynomial */
4322 gen_helper_neon_mul_p8(CPU_T001);
4323 } else { /* Integer */
4324 switch (size) {
4325 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4326 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4327 case 2: gen_op_mul_T0_T1(); break;
4328 default: return 1;
4331 break;
4332 case 20: /* VPMAX */
4333 GEN_NEON_INTEGER_OP(pmax);
4334 break;
4335 case 21: /* VPMIN */
4336 GEN_NEON_INTEGER_OP(pmin);
4337 break;
4338 case 22: /* Multiply high. */
4339 if (!u) { /* VQDMULH */
4340 switch (size) {
4341 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4342 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4343 default: return 1;
4345 } else { /* VQRDMULH */
4346 switch (size) {
4347 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4348 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4349 default: return 1;
4352 break;
4353 case 23: /* VPADD */
4354 if (u)
4355 return 1;
4356 switch (size) {
4357 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4358 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4359 case 2: gen_op_addl_T0_T1(); break;
4360 default: return 1;
4362 break;
4363 case 26: /* Floating point arithmetic. */
4364 switch ((u << 2) | size) {
4365 case 0: /* VADD */
4366 gen_helper_neon_add_f32(CPU_T001);
4367 break;
4368 case 2: /* VSUB */
4369 gen_helper_neon_sub_f32(CPU_T001);
4370 break;
4371 case 4: /* VPADD */
4372 gen_helper_neon_add_f32(CPU_T001);
4373 break;
4374 case 6: /* VABD */
4375 gen_helper_neon_abd_f32(CPU_T001);
4376 break;
4377 default:
4378 return 1;
4380 break;
4381 case 27: /* Float multiply. */
4382 gen_helper_neon_mul_f32(CPU_T001);
4383 if (!u) {
4384 NEON_GET_REG(T1, rd, pass);
4385 if (size == 0) {
4386 gen_helper_neon_add_f32(CPU_T001);
4387 } else {
4388 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4391 break;
4392 case 28: /* Float compare. */
4393 if (!u) {
4394 gen_helper_neon_ceq_f32(CPU_T001);
4395 } else {
4396 if (size == 0)
4397 gen_helper_neon_cge_f32(CPU_T001);
4398 else
4399 gen_helper_neon_cgt_f32(CPU_T001);
4401 break;
4402 case 29: /* Float compare absolute. */
4403 if (!u)
4404 return 1;
4405 if (size == 0)
4406 gen_helper_neon_acge_f32(CPU_T001);
4407 else
4408 gen_helper_neon_acgt_f32(CPU_T001);
4409 break;
4410 case 30: /* Float min/max. */
4411 if (size == 0)
4412 gen_helper_neon_max_f32(CPU_T001);
4413 else
4414 gen_helper_neon_min_f32(CPU_T001);
4415 break;
4416 case 31:
4417 if (size == 0)
4418 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4419 else
4420 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4421 break;
4422 default:
4423 abort();
4425 /* Save the result. For elementwise operations we can put it
4426 straight into the destination register. For pairwise operations
4427 we have to be careful to avoid clobbering the source operands. */
4428 if (pairwise && rd == rm) {
4429 gen_neon_movl_scratch_T0(pass);
4430 } else {
4431 NEON_SET_REG(T0, rd, pass);
4434 } /* for pass */
4435 if (pairwise && rd == rm) {
4436 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4437 gen_neon_movl_T0_scratch(pass);
4438 NEON_SET_REG(T0, rd, pass);
4441 /* End of 3 register same size operations. */
4442 } else if (insn & (1 << 4)) {
4443 if ((insn & 0x00380080) != 0) {
4444 /* Two registers and shift. */
4445 op = (insn >> 8) & 0xf;
4446 if (insn & (1 << 7)) {
4447 /* 64-bit shift. */
4448 size = 3;
4449 } else {
4450 size = 2;
4451 while ((insn & (1 << (size + 19))) == 0)
4452 size--;
4454 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4455 /* To avoid excessive duplication of ops we implement shift
4456 by immediate using the variable shift operations. */
4457 if (op < 8) {
4458 /* Shift by immediate:
4459 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4460 /* Right shifts are encoded as N - shift, where N is the
4461 element size in bits. */
4462 if (op <= 4)
4463 shift = shift - (1 << (size + 3));
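/* Illustrative example (added): for a byte-size VSHR #3 the encoded
   field is 8 - 3 = 5, so shift becomes 5 - 8 = -3; the variable shift
   helpers interpret a negative count as a right shift. */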
4464 if (size == 3) {
4465 count = q + 1;
4466 } else {
4467 count = q ? 4: 2;
4469 switch (size) {
4470 case 0:
4471 imm = (uint8_t) shift;
4472 imm |= imm << 8;
4473 imm |= imm << 16;
4474 break;
4475 case 1:
4476 imm = (uint16_t) shift;
4477 imm |= imm << 16;
4478 break;
4479 case 2:
4480 case 3:
4481 imm = shift;
4482 break;
4483 default:
4484 abort();
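/* Illustrative example (added): continuing VSHR.8 #3 from above,
   (uint8_t)-3 = 0xfd, so imm becomes 0xfdfdfdfd and every byte lane
   sees the same (negative) shift count. */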
4487 for (pass = 0; pass < count; pass++) {
4488 if (size == 3) {
4489 neon_load_reg64(cpu_V0, rm + pass);
4490 tcg_gen_movi_i64(cpu_V1, imm);
4491 switch (op) {
4492 case 0: /* VSHR */
4493 case 1: /* VSRA */
4494 if (u)
4495 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4496 else
4497 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4498 break;
4499 case 2: /* VRSHR */
4500 case 3: /* VRSRA */
4501 if (u)
4502 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4503 else
4504 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4505 break;
4506 case 4: /* VSRI */
4507 if (!u)
4508 return 1;
4509 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4510 break;
4511 case 5: /* VSHL, VSLI */
4512 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4513 break;
4514 case 6: /* VQSHL */
4515 if (u)
4516 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4517 else
4518 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4519 break;
4520 case 7: /* VQSHLU */
4521 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4522 break;
4524 if (op == 1 || op == 3) {
4525 /* Accumulate. */
4526 neon_load_reg64(cpu_V1, rd + pass);
4527 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4528 } else if (op == 4 || (op == 5 && u)) {
4529 /* Insert */
4530 cpu_abort(env, "VS[LR]I.64 not implemented");
4532 neon_store_reg64(cpu_V0, rd + pass);
4533 } else { /* size < 3 */
4534 /* Operands in T0 and T1. */
4535 gen_op_movl_T1_im(imm);
4536 NEON_GET_REG(T0, rm, pass);
4537 switch (op) {
4538 case 0: /* VSHR */
4539 case 1: /* VSRA */
4540 GEN_NEON_INTEGER_OP(shl);
4541 break;
4542 case 2: /* VRSHR */
4543 case 3: /* VRSRA */
4544 GEN_NEON_INTEGER_OP(rshl);
4545 break;
4546 case 4: /* VSRI */
4547 if (!u)
4548 return 1;
4549 GEN_NEON_INTEGER_OP(shl);
4550 break;
4551 case 5: /* VSHL, VSLI */
4552 switch (size) {
4553 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4554 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4555 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4556 default: return 1;
4558 break;
4559 case 6: /* VQSHL */
4560 GEN_NEON_INTEGER_OP_ENV(qshl);
4561 break;
4562 case 7: /* VQSHLU */
4563 switch (size) {
4564 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4565 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4566 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4567 default: return 1;
4569 break;
4572 if (op == 1 || op == 3) {
4573 /* Accumulate. */
4574 NEON_GET_REG(T1, rd, pass);
4575 gen_neon_add(size);
4576 } else if (op == 4 || (op == 5 && u)) {
4577 /* Insert */
4578 switch (size) {
4579 case 0:
4580 if (op == 4)
4581 imm = 0xff >> -shift;
4582 else
4583 imm = (uint8_t)(0xff << shift);
4584 imm |= imm << 8;
4585 imm |= imm << 16;
4586 break;
4587 case 1:
4588 if (op == 4)
4589 imm = 0xffff >> -shift;
4590 else
4591 imm = (uint16_t)(0xffff << shift);
4592 imm |= imm << 16;
4593 break;
4594 case 2:
4595 if (op == 4)
4596 imm = 0xffffffffu >> -shift;
4597 else
4598 imm = 0xffffffffu << shift;
4599 break;
4600 default:
4601 abort();
4603 tmp = neon_load_reg(rd, pass);
4604 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4605 tcg_gen_andi_i32(tmp, tmp, ~imm);
4606 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
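/* Illustrative example (added): for VSRI.8 #3, imm = 0xff >> 3 = 0x1f
   in each byte, so T0 keeps the five bits produced by the right shift
   and the destination keeps its top three bits. */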
4608 NEON_SET_REG(T0, rd, pass);
4610 } /* for pass */
4611 } else if (op < 10) {
4612 /* Shift by immediate and narrow:
4613 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4614 shift = shift - (1 << (size + 3));
4615 size++;
4616 switch (size) {
4617 case 1:
4618 imm = (uint16_t)shift;
4619 imm |= imm << 16;
4620 tmp2 = tcg_const_i32(imm);
4621 TCGV_UNUSED_I64(tmp64);
4622 break;
4623 case 2:
4624 imm = (uint32_t)shift;
4625 tmp2 = tcg_const_i32(imm);
4626 TCGV_UNUSED_I64(tmp64);
4627 break;
4628 case 3:
4629 tmp64 = tcg_const_i64(shift);
4630 TCGV_UNUSED(tmp2);
4631 break;
4632 default:
4633 abort();
4636 for (pass = 0; pass < 2; pass++) {
4637 if (size == 3) {
4638 neon_load_reg64(cpu_V0, rm + pass);
4639 if (q) {
4640 if (u)
4641 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4642 else
4643 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4644 } else {
4645 if (u)
4646 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4647 else
4648 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4650 } else {
4651 tmp = neon_load_reg(rm + pass, 0);
4652 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4653 tmp3 = neon_load_reg(rm + pass, 1);
4654 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4655 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4656 dead_tmp(tmp);
4657 dead_tmp(tmp3);
4659 tmp = new_tmp();
4660 if (op == 8 && !u) {
4661 gen_neon_narrow(size - 1, tmp, cpu_V0);
4662 } else {
4663 if (op == 8)
4664 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4665 else
4666 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4668 if (pass == 0) {
4669 tmp2 = tmp;
4670 } else {
4671 neon_store_reg(rd, 0, tmp2);
4672 neon_store_reg(rd, 1, tmp);
4674 } /* for pass */
4675 } else if (op == 10) {
4676 /* VSHLL */
4677 if (q || size == 3)
4678 return 1;
4679 tmp = neon_load_reg(rm, 0);
4680 tmp2 = neon_load_reg(rm, 1);
4681 for (pass = 0; pass < 2; pass++) {
4682 if (pass == 1)
4683 tmp = tmp2;
4685 gen_neon_widen(cpu_V0, tmp, size, u);
4687 if (shift != 0) {
4688 /* The shift is less than the width of the source
4689 type, so we can just shift the whole register. */
4690 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4691 if (size < 2 || !u) {
4692 uint64_t imm64;
4693 if (size == 0) {
4694 imm = (0xffu >> (8 - shift));
4695 imm |= imm << 16;
4696 } else {
4697 imm = 0xffff >> (16 - shift);
4699 imm64 = imm | (((uint64_t)imm) << 32);
4700 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
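/* Note (added): after a signed widen, shifting the whole 64-bit value
   left lets each lane's low bits pick up sign bits from the lane below;
   the mask clears those low `shift' bits (e.g. size 0, shift 4 clears
   0x000f in every 16-bit lane). */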
4703 neon_store_reg64(cpu_V0, rd + pass);
4705 } else if (op == 15 || op == 16) {
4706 /* VCVT fixed-point. */
4707 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4708 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4709 if (op & 1) {
4710 if (u)
4711 gen_vfp_ulto(0, shift);
4712 else
4713 gen_vfp_slto(0, shift);
4714 } else {
4715 if (u)
4716 gen_vfp_toul(0, shift);
4717 else
4718 gen_vfp_tosl(0, shift);
4720 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4722 } else {
4723 return 1;
4725 } else { /* (insn & 0x00380080) == 0 */
4726 int invert;
4728 op = (insn >> 8) & 0xf;
4729 /* One register and immediate. */
4730 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4731 invert = (insn & (1 << 5)) != 0;
4732 switch (op) {
4733 case 0: case 1:
4734 /* no-op */
4735 break;
4736 case 2: case 3:
4737 imm <<= 8;
4738 break;
4739 case 4: case 5:
4740 imm <<= 16;
4741 break;
4742 case 6: case 7:
4743 imm <<= 24;
4744 break;
4745 case 8: case 9:
4746 imm |= imm << 16;
4747 break;
4748 case 10: case 11:
4749 imm = (imm << 8) | (imm << 24);
4750 break;
4751 case 12:
4752 imm = (imm << 8) | 0xff;
4753 break;
4754 case 13:
4755 imm = (imm << 16) | 0xffff;
4756 break;
4757 case 14:
4758 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4759 if (invert)
4760 imm = ~imm;
4761 break;
4762 case 15:
4763 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4764 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4765 break;
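/* Worked example (added): op 15 expands an 8-bit pattern into a float
   immediate; imm = 0x70 yields (0x30 << 19) | (0x1f << 25) = 0x3f800000,
   i.e. 1.0f. */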
4767 if (invert)
4768 imm = ~imm;
4770 if (op != 14 || !invert)
4771 gen_op_movl_T1_im(imm);
4773 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4774 if (op & 1 && op < 12) {
4775 tmp = neon_load_reg(rd, pass);
4776 if (invert) {
4777 /* The immediate value has already been inverted, so
4778 BIC becomes AND. */
4779 tcg_gen_andi_i32(tmp, tmp, imm);
4780 } else {
4781 tcg_gen_ori_i32(tmp, tmp, imm);
4783 } else {
4784 /* VMOV, VMVN. */
4785 tmp = new_tmp();
4786 if (op == 14 && invert) {
4787 uint32_t val;
4788 val = 0;
4789 for (n = 0; n < 4; n++) {
4790 if (imm & (1 << (n + (pass & 1) * 4)))
4791 val |= 0xff << (n * 8);
4793 tcg_gen_movi_i32(tmp, val);
4794 } else {
4795 tcg_gen_movi_i32(tmp, imm);
4798 neon_store_reg(rd, pass, tmp);
4801 } else { /* (insn & 0x00800010 == 0x00800000) */
4802 if (size != 3) {
4803 op = (insn >> 8) & 0xf;
4804 if ((insn & (1 << 6)) == 0) {
4805 /* Three registers of different lengths. */
4806 int src1_wide;
4807 int src2_wide;
4808 int prewiden;
4809 /* prewiden, src1_wide, src2_wide */
4810 static const int neon_3reg_wide[16][3] = {
4811 {1, 0, 0}, /* VADDL */
4812 {1, 1, 0}, /* VADDW */
4813 {1, 0, 0}, /* VSUBL */
4814 {1, 1, 0}, /* VSUBW */
4815 {0, 1, 1}, /* VADDHN */
4816 {0, 0, 0}, /* VABAL */
4817 {0, 1, 1}, /* VSUBHN */
4818 {0, 0, 0}, /* VABDL */
4819 {0, 0, 0}, /* VMLAL */
4820 {0, 0, 0}, /* VQDMLAL */
4821 {0, 0, 0}, /* VMLSL */
4822 {0, 0, 0}, /* VQDMLSL */
4823 {0, 0, 0}, /* Integer VMULL */
4824 {0, 0, 0}, /* VQDMULL */
4825 {0, 0, 0} /* Polynomial VMULL */
4828 prewiden = neon_3reg_wide[op][0];
4829 src1_wide = neon_3reg_wide[op][1];
4830 src2_wide = neon_3reg_wide[op][2];
4832 if (size == 0 && (op == 9 || op == 11 || op == 13))
4833 return 1;
4835 /* Avoid overlapping operands. Wide source operands are
4836 always aligned so will never overlap with wide
4837 destinations in problematic ways. */
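/* Illustrative note (added): e.g. a VADDL with rd == rm writes a 64-bit
   result over rm in pass 0, so rm's second element is parked in scratch
   space first. */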
4838 if (rd == rm && !src2_wide) {
4839 NEON_GET_REG(T0, rm, 1);
4840 gen_neon_movl_scratch_T0(2);
4841 } else if (rd == rn && !src1_wide) {
4842 NEON_GET_REG(T0, rn, 1);
4843 gen_neon_movl_scratch_T0(2);
4845 TCGV_UNUSED(tmp3);
4846 for (pass = 0; pass < 2; pass++) {
4847 if (src1_wide) {
4848 neon_load_reg64(cpu_V0, rn + pass);
4849 TCGV_UNUSED(tmp);
4850 } else {
4851 if (pass == 1 && rd == rn) {
4852 gen_neon_movl_T0_scratch(2);
4853 tmp = new_tmp();
4854 tcg_gen_mov_i32(tmp, cpu_T[0]);
4855 } else {
4856 tmp = neon_load_reg(rn, pass);
4858 if (prewiden) {
4859 gen_neon_widen(cpu_V0, tmp, size, u);
4862 if (src2_wide) {
4863 neon_load_reg64(cpu_V1, rm + pass);
4864 TCGV_UNUSED(tmp2);
4865 } else {
4866 if (pass == 1 && rd == rm) {
4867 gen_neon_movl_T0_scratch(2);
4868 tmp2 = new_tmp();
4869 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4870 } else {
4871 tmp2 = neon_load_reg(rm, pass);
4873 if (prewiden) {
4874 gen_neon_widen(cpu_V1, tmp2, size, u);
4877 switch (op) {
4878 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4879 gen_neon_addl(size);
4880 break;
4881 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4882 gen_neon_subl(size);
4883 break;
4884 case 5: case 7: /* VABAL, VABDL */
4885 switch ((size << 1) | u) {
4886 case 0:
4887 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4888 break;
4889 case 1:
4890 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4891 break;
4892 case 2:
4893 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4894 break;
4895 case 3:
4896 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4897 break;
4898 case 4:
4899 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4900 break;
4901 case 5:
4902 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4903 break;
4904 default: abort();
4906 dead_tmp(tmp2);
4907 dead_tmp(tmp);
4908 break;
4909 case 8: case 9: case 10: case 11: case 12: case 13:
4910 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4911 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4912 break;
4913 case 14: /* Polynomial VMULL */
4914 cpu_abort(env, "Polynomial VMULL not implemented");
4916 default: /* 15 is RESERVED. */
4917 return 1;
4919 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4920 /* Accumulate. */
4921 if (op == 10 || op == 11) {
4922 gen_neon_negl(cpu_V0, size);
4925 if (op != 13) {
4926 neon_load_reg64(cpu_V1, rd + pass);
4929 switch (op) {
4930 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4931 gen_neon_addl(size);
4932 break;
4933 case 9: case 11: /* VQDMLAL, VQDMLSL */
4934 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4935 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4936 break;
4938 case 13: /* VQDMULL */
4939 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4940 break;
4941 default:
4942 abort();
4944 neon_store_reg64(cpu_V0, rd + pass);
4945 } else if (op == 4 || op == 6) {
4946 /* Narrowing operation. */
4947 tmp = new_tmp();
4948 if (u) {
4949 switch (size) {
4950 case 0:
4951 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4952 break;
4953 case 1:
4954 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4955 break;
4956 case 2:
4957 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4958 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4959 break;
4960 default: abort();
4962 } else {
4963 switch (size) {
4964 case 0:
4965 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4966 break;
4967 case 1:
4968 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4969 break;
4970 case 2:
4971 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4972 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4973 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4974 break;
4975 default: abort();
4978 if (pass == 0) {
4979 tmp3 = tmp;
4980 } else {
4981 neon_store_reg(rd, 0, tmp3);
4982 neon_store_reg(rd, 1, tmp);
4984 } else {
4985 /* Write back the result. */
4986 neon_store_reg64(cpu_V0, rd + pass);
4989 } else {
4990 /* Two registers and a scalar. */
4991 switch (op) {
4992 case 0: /* Integer VMLA scalar */
4993 case 1: /* Float VMLA scalar */
4994 case 4: /* Integer VMLS scalar */
4995 case 5: /* Floating point VMLS scalar */
4996 case 8: /* Integer VMUL scalar */
4997 case 9: /* Floating point VMUL scalar */
4998 case 12: /* VQDMULH scalar */
4999 case 13: /* VQRDMULH scalar */
5000 gen_neon_get_scalar(size, rm);
5001 gen_neon_movl_scratch_T0(0);
5002 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5003 if (pass != 0)
5004 gen_neon_movl_T0_scratch(0);
5005 NEON_GET_REG(T1, rn, pass);
5006 if (op == 12) {
5007 if (size == 1) {
5008 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5009 } else {
5010 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5012 } else if (op == 13) {
5013 if (size == 1) {
5014 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5015 } else {
5016 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5018 } else if (op & 1) {
5019 gen_helper_neon_mul_f32(CPU_T001);
5020 } else {
5021 switch (size) {
5022 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5023 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5024 case 2: gen_op_mul_T0_T1(); break;
5025 default: return 1;
5028 if (op < 8) {
5029 /* Accumulate. */
5030 NEON_GET_REG(T1, rd, pass);
5031 switch (op) {
5032 case 0:
5033 gen_neon_add(size);
5034 break;
5035 case 1:
5036 gen_helper_neon_add_f32(CPU_T001);
5037 break;
5038 case 4:
5039 gen_neon_rsb(size);
5040 break;
5041 case 5:
5042 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5043 break;
5044 default:
5045 abort();
5048 NEON_SET_REG(T0, rd, pass);
5050 break;
5051 case 2: /* VMLAL scalar */
5052 case 3: /* VQDMLAL scalar */
5053 case 6: /* VMLSL scalar */
5054 case 7: /* VQDMLSL scalar */
5055 case 10: /* VMULL scalar */
5056 case 11: /* VQDMULL scalar */
5057 if (size == 0 && (op == 3 || op == 7 || op == 11))
5058 return 1;
5060 gen_neon_get_scalar(size, rm);
5061 NEON_GET_REG(T1, rn, 1);
5063 for (pass = 0; pass < 2; pass++) {
5064 if (pass == 0) {
5065 tmp = neon_load_reg(rn, 0);
5066 } else {
5067 tmp = new_tmp();
5068 tcg_gen_mov_i32(tmp, cpu_T[1]);
5070 tmp2 = new_tmp();
5071 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5072 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5073 if (op == 6 || op == 7) {
5074 gen_neon_negl(cpu_V0, size);
5076 if (op != 11) {
5077 neon_load_reg64(cpu_V1, rd + pass);
5079 switch (op) {
5080 case 2: case 6:
5081 gen_neon_addl(size);
5082 break;
5083 case 3: case 7:
5084 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5085 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5086 break;
5087 case 10:
5088 /* no-op */
5089 break;
5090 case 11:
5091 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5092 break;
5093 default:
5094 abort();
5096 neon_store_reg64(cpu_V0, rd + pass);
5098 break;
5099 default: /* 14 and 15 are RESERVED */
5100 return 1;
5103 } else { /* size == 3 */
5104 if (!u) {
5105 /* Extract. */
5106 imm = (insn >> 8) & 0xf;
5107 count = q + 1;
5109 if (imm > 7 && !q)
5110 return 1;
5112 if (imm == 0) {
5113 neon_load_reg64(cpu_V0, rn);
5114 if (q) {
5115 neon_load_reg64(cpu_V1, rn + 1);
5117 } else if (imm == 8) {
5118 neon_load_reg64(cpu_V0, rn + 1);
5119 if (q) {
5120 neon_load_reg64(cpu_V1, rm);
5122 } else if (q) {
5123 tmp64 = tcg_temp_new_i64();
5124 if (imm < 8) {
5125 neon_load_reg64(cpu_V0, rn);
5126 neon_load_reg64(tmp64, rn + 1);
5127 } else {
5128 neon_load_reg64(cpu_V0, rn + 1);
5129 neon_load_reg64(tmp64, rm);
5131 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5132 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5133 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5134 if (imm < 8) {
5135 neon_load_reg64(cpu_V1, rm);
5136 } else {
5137 neon_load_reg64(cpu_V1, rm + 1);
5138 imm -= 8;
5140 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5141 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5142 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5143 } else {
5144 /* BUGFIX */
5145 neon_load_reg64(cpu_V0, rn);
5146 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5147 neon_load_reg64(cpu_V1, rm);
5148 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5149 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
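/* Worked example (added): a doubleword VEXT #3 computes
   (Vn >> 24) | (Vm << 40), i.e. bytes 3..7 of Vn followed by
   bytes 0..2 of Vm. */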
5151 neon_store_reg64(cpu_V0, rd);
5152 if (q) {
5153 neon_store_reg64(cpu_V1, rd + 1);
5155 } else if ((insn & (1 << 11)) == 0) {
5156 /* Two register misc. */
5157 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5158 size = (insn >> 18) & 3;
5159 switch (op) {
5160 case 0: /* VREV64 */
5161 if (size == 3)
5162 return 1;
5163 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5164 NEON_GET_REG(T0, rm, pass * 2);
5165 NEON_GET_REG(T1, rm, pass * 2 + 1);
5166 switch (size) {
5167 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5168 case 1: gen_swap_half(cpu_T[0]); break;
5169 case 2: /* no-op */ break;
5170 default: abort();
5172 NEON_SET_REG(T0, rd, pass * 2 + 1);
5173 if (size == 2) {
5174 NEON_SET_REG(T1, rd, pass * 2);
5175 } else {
5176 gen_op_movl_T0_T1();
5177 switch (size) {
5178 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5179 case 1: gen_swap_half(cpu_T[0]); break;
5180 default: abort();
5182 NEON_SET_REG(T0, rd, pass * 2);
5185 break;
5186 case 4: case 5: /* VPADDL */
5187 case 12: case 13: /* VPADAL */
5188 if (size == 3)
5189 return 1;
5190 for (pass = 0; pass < q + 1; pass++) {
5191 tmp = neon_load_reg(rm, pass * 2);
5192 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5193 tmp = neon_load_reg(rm, pass * 2 + 1);
5194 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5195 switch (size) {
5196 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5197 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5198 case 2: tcg_gen_add_i64(CPU_V001); break;
5199 default: abort();
5201 if (op >= 12) {
5202 /* Accumulate. */
5203 neon_load_reg64(cpu_V1, rd + pass);
5204 gen_neon_addl(size);
5206 neon_store_reg64(cpu_V0, rd + pass);
5208 break;
5209 case 33: /* VTRN */
5210 if (size == 2) {
5211 for (n = 0; n < (q ? 4 : 2); n += 2) {
5212 NEON_GET_REG(T0, rm, n);
5213 NEON_GET_REG(T1, rd, n + 1);
5214 NEON_SET_REG(T1, rm, n);
5215 NEON_SET_REG(T0, rd, n + 1);
5217 } else {
5218 goto elementwise;
5220 break;
5221 case 34: /* VUZP */
5222 /* Reg   Before        After
5223    Rd   A3 A2 A1 A0   B2 B0 A2 A0
5224    Rm   B3 B2 B1 B0   B3 B1 A3 A1 */
5226 if (size == 3)
5227 return 1;
5228 gen_neon_unzip(rd, q, 0, size);
5229 gen_neon_unzip(rm, q, 4, size);
5230 if (q) {
5231 static int unzip_order_q[8] =
5232 {0, 2, 4, 6, 1, 3, 5, 7};
5233 for (n = 0; n < 8; n++) {
5234 int reg = (n < 4) ? rd : rm;
5235 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5236 NEON_SET_REG(T0, reg, n % 4);
5238 } else {
5239 static int unzip_order[4] =
5240 {0, 4, 1, 5};
5241 for (n = 0; n < 4; n++) {
5242 int reg = (n < 2) ? rd : rm;
5243 gen_neon_movl_T0_scratch(unzip_order[n]);
5244 NEON_SET_REG(T0, reg, n % 2);
5247 break;
5248 case 35: /* VZIP */
5249 /* Reg   Before        After
5250    Rd   A3 A2 A1 A0   B1 A1 B0 A0
5251    Rm   B3 B2 B1 B0   B3 A3 B2 A2 */
5253 if (size == 3)
5254 return 1;
5255 count = (q ? 4 : 2);
5256 for (n = 0; n < count; n++) {
5257 NEON_GET_REG(T0, rd, n);
5258 NEON_GET_REG(T1, rm, n);
5259 switch (size) {
5260 case 0: gen_helper_neon_zip_u8(); break;
5261 case 1: gen_helper_neon_zip_u16(); break;
5262 case 2: /* no-op */; break;
5263 default: abort();
5265 gen_neon_movl_scratch_T0(n * 2);
5266 gen_neon_movl_scratch_T1(n * 2 + 1);
5268 for (n = 0; n < count * 2; n++) {
5269 int reg = (n < count) ? rd : rm;
5270 gen_neon_movl_T0_scratch(n);
5271 NEON_SET_REG(T0, reg, n % count);
5273 break;
5274 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5275 if (size == 3)
5276 return 1;
5277 TCGV_UNUSED(tmp2);
5278 for (pass = 0; pass < 2; pass++) {
5279 neon_load_reg64(cpu_V0, rm + pass);
5280 tmp = new_tmp();
5281 if (op == 36 && q == 0) {
5282 gen_neon_narrow(size, tmp, cpu_V0);
5283 } else if (q) {
5284 gen_neon_narrow_satu(size, tmp, cpu_V0);
5285 } else {
5286 gen_neon_narrow_sats(size, tmp, cpu_V0);
5288 if (pass == 0) {
5289 tmp2 = tmp;
5290 } else {
5291 neon_store_reg(rd, 0, tmp2);
5292 neon_store_reg(rd, 1, tmp);
5295 break;
5296 case 38: /* VSHLL */
5297 if (q || size == 3)
5298 return 1;
5299 tmp = neon_load_reg(rm, 0);
5300 tmp2 = neon_load_reg(rm, 1);
5301 for (pass = 0; pass < 2; pass++) {
5302 if (pass == 1)
5303 tmp = tmp2;
5304 gen_neon_widen(cpu_V0, tmp, size, 1);
5305 neon_store_reg64(cpu_V0, rd + pass);
5307 break;
5308 default:
5309 elementwise:
5310 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5311 if (op == 30 || op == 31 || op >= 58) {
5312 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5313 neon_reg_offset(rm, pass));
5314 } else {
5315 NEON_GET_REG(T0, rm, pass);
5317 switch (op) {
5318 case 1: /* VREV32 */
5319 switch (size) {
5320 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5321 case 1: gen_swap_half(cpu_T[0]); break;
5322 default: return 1;
5324 break;
5325 case 2: /* VREV16 */
5326 if (size != 0)
5327 return 1;
5328 gen_rev16(cpu_T[0]);
5329 break;
5330 case 8: /* CLS */
5331 switch (size) {
5332 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5333 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5334 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5335 default: return 1;
5337 break;
5338 case 9: /* CLZ */
5339 switch (size) {
5340 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5341 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5342 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5343 default: return 1;
5345 break;
5346 case 10: /* CNT */
5347 if (size != 0)
5348 return 1;
5349 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5350 break;
5351 case 11: /* VNOT */
5352 if (size != 0)
5353 return 1;
5354 gen_op_notl_T0();
5355 break;
5356 case 14: /* VQABS */
5357 switch (size) {
5358 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5359 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5360 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5361 default: return 1;
5363 break;
5364 case 15: /* VQNEG */
5365 switch (size) {
5366 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5367 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5368 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5369 default: return 1;
5371 break;
5372 case 16: case 19: /* VCGT #0, VCLE #0 */
5373 gen_op_movl_T1_im(0);
5374 switch(size) {
5375 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5376 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5377 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5378 default: return 1;
5380 if (op == 19)
5381 gen_op_notl_T0();
5382 break;
5383 case 17: case 20: /* VCGE #0, VCLT #0 */
5384 gen_op_movl_T1_im(0);
5385 switch(size) {
5386 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5387 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5388 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5389 default: return 1;
5391 if (op == 20)
5392 gen_op_notl_T0();
5393 break;
5394 case 18: /* VCEQ #0 */
5395 gen_op_movl_T1_im(0);
5396 switch(size) {
5397 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5398 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5399 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5400 default: return 1;
5402 break;
5403 case 22: /* VABS */
5404 switch(size) {
5405 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5406 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5407 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5408 default: return 1;
5410 break;
5411 case 23: /* VNEG */
5412 gen_op_movl_T1_im(0);
5413 if (size == 3)
5414 return 1;
5415 gen_neon_rsb(size);
5416 break;
5417 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5418 gen_op_movl_T1_im(0);
5419 gen_helper_neon_cgt_f32(CPU_T001);
5420 if (op == 27)
5421 gen_op_notl_T0();
5422 break;
5423 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5424 gen_op_movl_T1_im(0);
5425 gen_helper_neon_cge_f32(CPU_T001);
5426 if (op == 28)
5427 gen_op_notl_T0();
5428 break;
5429 case 26: /* Float VCEQ #0 */
5430 gen_op_movl_T1_im(0);
5431 gen_helper_neon_ceq_f32(CPU_T001);
5432 break;
5433 case 30: /* Float VABS */
5434 gen_vfp_abs(0);
5435 break;
5436 case 31: /* Float VNEG */
5437 gen_vfp_neg(0);
5438 break;
5439 case 32: /* VSWP */
5440 NEON_GET_REG(T1, rd, pass);
5441 NEON_SET_REG(T1, rm, pass);
5442 break;
5443 case 33: /* VTRN */
5444 NEON_GET_REG(T1, rd, pass);
5445 switch (size) {
5446 case 0: gen_helper_neon_trn_u8(); break;
5447 case 1: gen_helper_neon_trn_u16(); break;
5448 case 2: abort();
5449 default: return 1;
5451 NEON_SET_REG(T1, rm, pass);
5452 break;
5453 case 56: /* Integer VRECPE */
5454 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5455 break;
5456 case 57: /* Integer VRSQRTE */
5457 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5458 break;
5459 case 58: /* Float VRECPE */
5460 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5461 break;
5462 case 59: /* Float VRSQRTE */
5463 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5464 break;
5465 case 60: /* VCVT.F32.S32 */
5466 gen_vfp_tosiz(0);
5467 break;
5468 case 61: /* VCVT.F32.U32 */
5469 gen_vfp_touiz(0);
5470 break;
5471 case 62: /* VCVT.S32.F32 */
5472 gen_vfp_sito(0);
5473 break;
5474 case 63: /* VCVT.U32.F32 */
5475 gen_vfp_uito(0);
5476 break;
5477 default:
5478 /* Reserved: 21, 29, 39-56 */
5479 return 1;
5481 if (op == 30 || op == 31 || op >= 58) {
5482 tcg_gen_st_f32(cpu_F0s, cpu_env,
5483 neon_reg_offset(rd, pass));
5484 } else {
5485 NEON_SET_REG(T0, rd, pass);
5488 break;
5490 } else if ((insn & (1 << 10)) == 0) {
5491 /* VTBL, VTBX. */
5492 n = ((insn >> 5) & 0x18) + 8;
5493 if (insn & (1 << 6)) {
5494 tmp = neon_load_reg(rd, 0);
5495 } else {
5496 tmp = new_tmp();
5497 tcg_gen_movi_i32(tmp, 0);
5499 tmp2 = neon_load_reg(rm, 0);
5500 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5501 tcg_const_i32(n));
5502 dead_tmp(tmp);
5503 if (insn & (1 << 6)) {
5504 tmp = neon_load_reg(rd, 1);
5505 } else {
5506 tmp = new_tmp();
5507 tcg_gen_movi_i32(tmp, 0);
5509 tmp3 = neon_load_reg(rm, 1);
5510 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5511 tcg_const_i32(n));
5512 neon_store_reg(rd, 0, tmp2);
5513 neon_store_reg(rd, 1, tmp3);
5514 dead_tmp(tmp);
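/* Note (added): n is the table length in bytes (8, 16, 24 or 32). For
   VTBX (bit 6 set) the destination is preloaded above so out-of-range
   indexes leave it unchanged; for VTBL the zero temporary makes them
   produce zero. */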
5515 } else if ((insn & 0x380) == 0) {
5516 /* VDUP */
5517 if (insn & (1 << 19)) {
5518 NEON_GET_REG(T0, rm, 1);
5519 } else {
5520 NEON_GET_REG(T0, rm, 0);
5522 if (insn & (1 << 16)) {
5523 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5524 } else if (insn & (1 << 17)) {
5525 if ((insn >> 18) & 1)
5526 gen_neon_dup_high16(cpu_T[0]);
5527 else
5528 gen_neon_dup_low16(cpu_T[0]);
5530 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5531 NEON_SET_REG(T0, rd, pass);
5533 } else {
5534 return 1;
5538 return 0;
5541 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5543 int crn = (insn >> 16) & 0xf;
5544 int crm = insn & 0xf;
5545 int op1 = (insn >> 21) & 7;
5546 int op2 = (insn >> 5) & 7;
5547 int rt = (insn >> 12) & 0xf;
5548 TCGv tmp;
5550 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5551 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5552 /* TEECR */
5553 if (IS_USER(s))
5554 return 1;
5555 tmp = load_cpu_field(teecr);
5556 store_reg(s, rt, tmp);
5557 return 0;
5559 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5560 /* TEEHBR */
5561 if (IS_USER(s) && (env->teecr & 1))
5562 return 1;
5563 tmp = load_cpu_field(teehbr);
5564 store_reg(s, rt, tmp);
5565 return 0;
5568 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5569 op1, crn, crm, op2);
5570 return 1;
5573 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5575 int crn = (insn >> 16) & 0xf;
5576 int crm = insn & 0xf;
5577 int op1 = (insn >> 21) & 7;
5578 int op2 = (insn >> 5) & 7;
5579 int rt = (insn >> 12) & 0xf;
5580 TCGv tmp;
5582 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5583 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5584 /* TEECR */
5585 if (IS_USER(s))
5586 return 1;
5587 tmp = load_reg(s, rt);
5588 gen_helper_set_teecr(cpu_env, tmp);
5589 dead_tmp(tmp);
5590 return 0;
5592 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5593 /* TEEHBR */
5594 if (IS_USER(s) && (env->teecr & 1))
5595 return 1;
5596 tmp = load_reg(s, rt);
5597 store_cpu_field(tmp, teehbr);
5598 return 0;
5601 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5602 op1, crn, crm, op2);
5603 return 1;
5606 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5608 int cpnum;
5610 cpnum = (insn >> 8) & 0xf;
5611 if (arm_feature(env, ARM_FEATURE_XSCALE)
5612 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5613 return 1;
5615 switch (cpnum) {
5616 case 0:
5617 case 1:
5618 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5619 return disas_iwmmxt_insn(env, s, insn);
5620 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5621 return disas_dsp_insn(env, s, insn);
5623 return 1;
5624 case 10:
5625 case 11:
5626 return disas_vfp_insn (env, s, insn);
5627 case 14:
5628 /* Coprocessors 7-15 are architecturally reserved by ARM.
5629 Unfortunately Intel decided to ignore this. */
5630 if (arm_feature(env, ARM_FEATURE_XSCALE))
5631 goto board;
5632 if (insn & (1 << 20))
5633 return disas_cp14_read(env, s, insn);
5634 else
5635 return disas_cp14_write(env, s, insn);
5636 case 15:
5637 return disas_cp15_insn (env, s, insn);
5638 default:
5639 board:
5640 /* Unknown coprocessor. See if the board has hooked it. */
5641 return disas_cp_insn (env, s, insn);
5646 /* Store a 64-bit value to a register pair. Clobbers val. */
5647 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5649 TCGv tmp;
5650 tmp = new_tmp();
5651 tcg_gen_trunc_i64_i32(tmp, val);
5652 store_reg(s, rlow, tmp);
5653 tmp = new_tmp();
5654 tcg_gen_shri_i64(val, val, 32);
5655 tcg_gen_trunc_i64_i32(tmp, val);
5656 store_reg(s, rhigh, tmp);
5659 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5660 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5662 TCGv_i64 tmp;
5663 TCGv tmp2;
5665 /* Load value and extend to 64 bits. */
5666 tmp = tcg_temp_new_i64();
5667 tmp2 = load_reg(s, rlow);
5668 tcg_gen_extu_i32_i64(tmp, tmp2);
5669 dead_tmp(tmp2);
5670 tcg_gen_add_i64(val, val, tmp);
5673 /* load and add a 64-bit value from a register pair. */
5674 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5676 TCGv_i64 tmp;
5677 TCGv tmpl;
5678 TCGv tmph;
5680 /* Load 64-bit value rd:rn. */
5681 tmpl = load_reg(s, rlow);
5682 tmph = load_reg(s, rhigh);
5683 tmp = tcg_temp_new_i64();
5684 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5685 dead_tmp(tmpl);
5686 dead_tmp(tmph);
5687 tcg_gen_add_i64(val, val, tmp);
5690 /* Set N and Z flags from a 64-bit value. */
5691 static void gen_logicq_cc(TCGv_i64 val)
5693 TCGv tmp = new_tmp();
5694 gen_helper_logicq_cc(tmp, val);
5695 gen_logic_CC(tmp);
5696 dead_tmp(tmp);
5699 static void disas_arm_insn(CPUState * env, DisasContext *s)
5701 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5702 TCGv tmp;
5703 TCGv tmp2;
5704 TCGv tmp3;
5705 TCGv addr;
5706 TCGv_i64 tmp64;
5708 insn = ldl_code(s->pc);
5709 s->pc += 4;
5711 /* M variants do not implement ARM mode. */
5712 if (IS_M(env))
5713 goto illegal_op;
5714 cond = insn >> 28;
5715 if (cond == 0xf){
5716 /* Unconditional instructions. */
5717 if (((insn >> 25) & 7) == 1) {
5718 /* NEON Data processing. */
5719 if (!arm_feature(env, ARM_FEATURE_NEON))
5720 goto illegal_op;
5722 if (disas_neon_data_insn(env, s, insn))
5723 goto illegal_op;
5724 return;
5726 if ((insn & 0x0f100000) == 0x04000000) {
5727 /* NEON load/store. */
5728 if (!arm_feature(env, ARM_FEATURE_NEON))
5729 goto illegal_op;
5731 if (disas_neon_ls_insn(env, s, insn))
5732 goto illegal_op;
5733 return;
5735 if ((insn & 0x0d70f000) == 0x0550f000)
5736 return; /* PLD */
5737 else if ((insn & 0x0ffffdff) == 0x01010000) {
5738 ARCH(6);
5739 /* setend */
5740 if (insn & (1 << 9)) {
5741 /* BE8 mode not implemented. */
5742 goto illegal_op;
5744 return;
5745 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5746 switch ((insn >> 4) & 0xf) {
5747 case 1: /* clrex */
5748 ARCH(6K);
5749 gen_helper_clrex(cpu_env);
5750 return;
5751 case 4: /* dsb */
5752 case 5: /* dmb */
5753 case 6: /* isb */
5754 ARCH(7);
5755 /* We don't emulate caches so these are a no-op. */
5756 return;
5757 default:
5758 goto illegal_op;
5760 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5761 /* srs */
5762 uint32_t offset;
5763 if (IS_USER(s))
5764 goto illegal_op;
5765 ARCH(6);
5766 op1 = (insn & 0x1f);
5767 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5768 addr = load_reg(s, 13);
5769 } else {
5770 addr = new_tmp();
5771 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5773 i = (insn >> 23) & 3;
5774 switch (i) {
5775 case 0: offset = -4; break; /* DA */
5776 case 1: offset = -8; break; /* DB */
5777 case 2: offset = 0; break; /* IA */
5778 case 3: offset = 4; break; /* IB */
5779 default: abort();
5781 if (offset)
5782 tcg_gen_addi_i32(addr, addr, offset);
5783 tmp = load_reg(s, 14);
5784 gen_st32(tmp, addr, 0);
5785 tmp = new_tmp();
5786 gen_helper_cpsr_read(tmp);
5787 tcg_gen_addi_i32(addr, addr, 4);
5788 gen_st32(tmp, addr, 0);
5789 if (insn & (1 << 21)) {
5790 /* Base writeback. */
5791 switch (i) {
5792 case 0: offset = -8; break;
5793 case 1: offset = -4; break;
5794 case 2: offset = 4; break;
5795 case 3: offset = 0; break;
5796 default: abort();
5798 if (offset)
5799 tcg_gen_addi_i32(addr, addr, offset);
5800 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5801 store_reg(s, 13, addr);
5802 } else {
5803 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5805 } else {
5806 dead_tmp(addr);
5808 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5809 /* rfe */
5810 uint32_t offset;
5811 if (IS_USER(s))
5812 goto illegal_op;
5813 ARCH(6);
5814 rn = (insn >> 16) & 0xf;
5815 addr = load_reg(s, rn);
5816 i = (insn >> 23) & 3;
5817 switch (i) {
5818 case 0: offset = -4; break; /* DA */
5819 case 1: offset = -8; break; /* DB */
5820 case 2: offset = 0; break; /* IA */
5821 case 3: offset = 4; break; /* IB */
5822 default: abort();
5824 if (offset)
5825 tcg_gen_addi_i32(addr, addr, offset);
5826 /* Load PC into tmp and CPSR into tmp2. */
5827 tmp = gen_ld32(addr, 0);
5828 tcg_gen_addi_i32(addr, addr, 4);
5829 tmp2 = gen_ld32(addr, 0);
5830 if (insn & (1 << 21)) {
5831 /* Base writeback. */
5832 switch (i) {
5833 case 0: offset = -8; break;
5834 case 1: offset = -4; break;
5835 case 2: offset = 4; break;
5836 case 3: offset = 0; break;
5837 default: abort();
5839 if (offset)
5840 tcg_gen_addi_i32(addr, addr, offset);
5841 store_reg(s, rn, addr);
5842 } else {
5843 dead_tmp(addr);
5845 gen_rfe(s, tmp, tmp2);
5846 } else if ((insn & 0x0e000000) == 0x0a000000) {
5847 /* branch link and change to thumb (blx <offset>) */
5848 int32_t offset;
5850 val = (uint32_t)s->pc;
5851 tmp = new_tmp();
5852 tcg_gen_movi_i32(tmp, val);
5853 store_reg(s, 14, tmp);
5854 /* Sign-extend the 24-bit offset */
5855 offset = (((int32_t)insn) << 8) >> 8;
5856 /* offset * 4 + bit24 * 2 + (thumb bit) */
5857 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5858 /* pipeline offset */
5859 val += 4;
5860 gen_bx_im(s, val);
5861 return;
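/* Note (added): the target works out to the address of this insn + 8
   + (imm24 << 2), with bit 24 supplying the halfword offset and the
   low bit set so gen_bx_im switches to Thumb state. */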
5862 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5863 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5864 /* iWMMXt register transfer. */
5865 if (env->cp15.c15_cpar & (1 << 1))
5866 if (!disas_iwmmxt_insn(env, s, insn))
5867 return;
5869 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5870 /* Coprocessor double register transfer. */
5871 } else if ((insn & 0x0f000010) == 0x0e000010) {
5872 /* Additional coprocessor register transfer. */
5873 } else if ((insn & 0x0ff10020) == 0x01000000) {
5874 uint32_t mask;
5875 uint32_t val;
5876 /* cps (privileged) */
5877 if (IS_USER(s))
5878 return;
5879 mask = val = 0;
5880 if (insn & (1 << 19)) {
5881 if (insn & (1 << 8))
5882 mask |= CPSR_A;
5883 if (insn & (1 << 7))
5884 mask |= CPSR_I;
5885 if (insn & (1 << 6))
5886 mask |= CPSR_F;
5887 if (insn & (1 << 18))
5888 val |= mask;
5890 if (insn & (1 << 17)) {
5891 mask |= CPSR_M;
5892 val |= (insn & 0x1f);
5894 if (mask) {
5895 gen_op_movl_T0_im(val);
5896 gen_set_psr_T0(s, mask, 0);
5898 return;
5900 goto illegal_op;
5902 if (cond != 0xe) {
5903 /* if not always execute, we generate a conditional jump to
5904 next instruction */
5905 s->condlabel = gen_new_label();
5906 gen_test_cc(cond ^ 1, s->condlabel);
5907 s->condjmp = 1;
5909 if ((insn & 0x0f900000) == 0x03000000) {
5910 if ((insn & (1 << 21)) == 0) {
5911 ARCH(6T2);
5912 rd = (insn >> 12) & 0xf;
5913 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5914 if ((insn & (1 << 22)) == 0) {
5915 /* MOVW */
5916 tmp = new_tmp();
5917 tcg_gen_movi_i32(tmp, val);
5918 } else {
5919 /* MOVT */
5920 tmp = load_reg(s, rd);
5921 tcg_gen_ext16u_i32(tmp, tmp);
5922 tcg_gen_ori_i32(tmp, tmp, val << 16);
5924 store_reg(s, rd, tmp);
5925 } else {
5926 if (((insn >> 12) & 0xf) != 0xf)
5927 goto illegal_op;
5928 if (((insn >> 16) & 0xf) == 0) {
5929 gen_nop_hint(s, insn & 0xff);
5930 } else {
5931 /* CPSR = immediate */
5932 val = insn & 0xff;
5933 shift = ((insn >> 8) & 0xf) * 2;
5934 if (shift)
5935 val = (val >> shift) | (val << (32 - shift));
5936 gen_op_movl_T0_im(val);
5937 i = ((insn & (1 << 22)) != 0);
5938 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5939 goto illegal_op;
5942 } else if ((insn & 0x0f900000) == 0x01000000
5943 && (insn & 0x00000090) != 0x00000090) {
5944 /* miscellaneous instructions */
5945 op1 = (insn >> 21) & 3;
5946 sh = (insn >> 4) & 0xf;
5947 rm = insn & 0xf;
5948 switch (sh) {
5949 case 0x0: /* move program status register */
5950 if (op1 & 1) {
5951 /* PSR = reg */
5952 gen_movl_T0_reg(s, rm);
5953 i = ((op1 & 2) != 0);
5954 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5955 goto illegal_op;
5956 } else {
5957 /* reg = PSR */
5958 rd = (insn >> 12) & 0xf;
5959 if (op1 & 2) {
5960 if (IS_USER(s))
5961 goto illegal_op;
5962 tmp = load_cpu_field(spsr);
5963 } else {
5964 tmp = new_tmp();
5965 gen_helper_cpsr_read(tmp);
5967 store_reg(s, rd, tmp);
5969 break;
5970 case 0x1:
5971 if (op1 == 1) {
5972 /* branch/exchange thumb (bx). */
5973 tmp = load_reg(s, rm);
5974 gen_bx(s, tmp);
5975 } else if (op1 == 3) {
5976 /* clz */
5977 rd = (insn >> 12) & 0xf;
5978 tmp = load_reg(s, rm);
5979 gen_helper_clz(tmp, tmp);
5980 store_reg(s, rd, tmp);
5981 } else {
5982 goto illegal_op;
5984 break;
5985 case 0x2:
5986 if (op1 == 1) {
5987 ARCH(5J); /* bxj */
5988 /* Trivial implementation equivalent to bx. */
5989 tmp = load_reg(s, rm);
5990 gen_bx(s, tmp);
5991 } else {
5992 goto illegal_op;
5994 break;
5995 case 0x3:
5996 if (op1 != 1)
5997 goto illegal_op;
5999 /* branch link/exchange thumb (blx) */
6000 tmp = load_reg(s, rm);
6001 tmp2 = new_tmp();
6002 tcg_gen_movi_i32(tmp2, s->pc);
6003 store_reg(s, 14, tmp2);
6004 gen_bx(s, tmp);
6005 break;
6006 case 0x5: /* saturating add/subtract */
6007 rd = (insn >> 12) & 0xf;
6008 rn = (insn >> 16) & 0xf;
6009 tmp = load_reg(s, rm);
6010 tmp2 = load_reg(s, rn);
6011 if (op1 & 2)
6012 gen_helper_double_saturate(tmp2, tmp2);
6013 if (op1 & 1)
6014 gen_helper_sub_saturate(tmp, tmp, tmp2);
6015 else
6016 gen_helper_add_saturate(tmp, tmp, tmp2);
6017 dead_tmp(tmp2);
6018 store_reg(s, rd, tmp);
6019 break;
6020 case 7: /* bkpt */
6021 gen_set_condexec(s);
6022 gen_set_pc_im(s->pc - 4);
6023 gen_exception(EXCP_BKPT);
6024 s->is_jmp = DISAS_JUMP;
6025 break;
6026 case 0x8: /* signed multiply */
6027 case 0xa:
6028 case 0xc:
6029 case 0xe:
6030 rs = (insn >> 8) & 0xf;
6031 rn = (insn >> 12) & 0xf;
6032 rd = (insn >> 16) & 0xf;
6033 if (op1 == 1) {
6034 /* (32 * 16) >> 16 */
6035 tmp = load_reg(s, rm);
6036 tmp2 = load_reg(s, rs);
6037 if (sh & 4)
6038 tcg_gen_sari_i32(tmp2, tmp2, 16);
6039 else
6040 gen_sxth(tmp2);
6041 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6042 tcg_gen_shri_i64(tmp64, tmp64, 16);
6043 tmp = new_tmp();
6044 tcg_gen_trunc_i64_i32(tmp, tmp64);
6045 if ((sh & 2) == 0) {
6046 tmp2 = load_reg(s, rn);
6047 gen_helper_add_setq(tmp, tmp, tmp2);
6048 dead_tmp(tmp2);
6050 store_reg(s, rd, tmp);
6051 } else {
6052 /* 16 * 16 */
6053 tmp = load_reg(s, rm);
6054 tmp2 = load_reg(s, rs);
6055 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6056 dead_tmp(tmp2);
6057 if (op1 == 2) {
6058 tmp64 = tcg_temp_new_i64();
6059 tcg_gen_ext_i32_i64(tmp64, tmp);
6060 dead_tmp(tmp);
6061 gen_addq(s, tmp64, rn, rd);
6062 gen_storeq_reg(s, rn, rd, tmp64);
6063 } else {
6064 if (op1 == 0) {
6065 tmp2 = load_reg(s, rn);
6066 gen_helper_add_setq(tmp, tmp, tmp2);
6067 dead_tmp(tmp2);
6069 store_reg(s, rd, tmp);
6072 break;
6073 default:
6074 goto illegal_op;
6076 } else if (((insn & 0x0e000000) == 0 &&
6077 (insn & 0x00000090) != 0x90) ||
6078 ((insn & 0x0e000000) == (1 << 25))) {
6079 int set_cc, logic_cc, shiftop;
6081 op1 = (insn >> 21) & 0xf;
6082 set_cc = (insn >> 20) & 1;
6083 logic_cc = table_logic_cc[op1] & set_cc;
6085 /* data processing instruction */
6086 if (insn & (1 << 25)) {
6087 /* immediate operand */
6088 val = insn & 0xff;
6089 shift = ((insn >> 8) & 0xf) * 2;
6090 if (shift)
6091 val = (val >> shift) | (val << (32 - shift));
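/* Worked example (added): immediate field 0x4ff encodes val = 0xff with
   rotate field 4, i.e. shift = 8, giving ror(0xff, 8) = 0xff000000. */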
6092 gen_op_movl_T1_im(val);
6093 if (logic_cc && shift)
6094 gen_set_CF_bit31(cpu_T[1]);
6095 } else {
6096 /* register */
6097 rm = (insn) & 0xf;
6098 gen_movl_T1_reg(s, rm);
6099 shiftop = (insn >> 5) & 3;
6100 if (!(insn & (1 << 4))) {
6101 shift = (insn >> 7) & 0x1f;
6102 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6103 } else {
6104 rs = (insn >> 8) & 0xf;
6105 tmp = load_reg(s, rs);
6106 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
6109 if (op1 != 0x0f && op1 != 0x0d) {
6110 rn = (insn >> 16) & 0xf;
6111 gen_movl_T0_reg(s, rn);
6113 rd = (insn >> 12) & 0xf;
6114 switch(op1) {
6115 case 0x00:
6116 gen_op_andl_T0_T1();
6117 gen_movl_reg_T0(s, rd);
6118 if (logic_cc)
6119 gen_op_logic_T0_cc();
6120 break;
6121 case 0x01:
6122 gen_op_xorl_T0_T1();
6123 gen_movl_reg_T0(s, rd);
6124 if (logic_cc)
6125 gen_op_logic_T0_cc();
6126 break;
6127 case 0x02:
6128 if (set_cc && rd == 15) {
6129 /* SUBS r15, ... is used for exception return. */
6130 if (IS_USER(s))
6131 goto illegal_op;
6132 gen_op_subl_T0_T1_cc();
6133 gen_exception_return(s);
6134 } else {
6135 if (set_cc)
6136 gen_op_subl_T0_T1_cc();
6137 else
6138 gen_op_subl_T0_T1();
6139 gen_movl_reg_T0(s, rd);
6141 break;
6142 case 0x03:
6143 if (set_cc)
6144 gen_op_rsbl_T0_T1_cc();
6145 else
6146 gen_op_rsbl_T0_T1();
6147 gen_movl_reg_T0(s, rd);
6148 break;
6149 case 0x04:
6150 if (set_cc)
6151 gen_op_addl_T0_T1_cc();
6152 else
6153 gen_op_addl_T0_T1();
6154 gen_movl_reg_T0(s, rd);
6155 break;
6156 case 0x05:
6157 if (set_cc)
6158 gen_op_adcl_T0_T1_cc();
6159 else
6160 gen_adc_T0_T1();
6161 gen_movl_reg_T0(s, rd);
6162 break;
6163 case 0x06:
6164 if (set_cc)
6165 gen_op_sbcl_T0_T1_cc();
6166 else
6167 gen_sbc_T0_T1();
6168 gen_movl_reg_T0(s, rd);
6169 break;
6170 case 0x07:
6171 if (set_cc)
6172 gen_op_rscl_T0_T1_cc();
6173 else
6174 gen_rsc_T0_T1();
6175 gen_movl_reg_T0(s, rd);
6176 break;
6177 case 0x08:
6178 if (set_cc) {
6179 gen_op_andl_T0_T1();
6180 gen_op_logic_T0_cc();
6182 break;
6183 case 0x09:
6184 if (set_cc) {
6185 gen_op_xorl_T0_T1();
6186 gen_op_logic_T0_cc();
6188 break;
6189 case 0x0a:
6190 if (set_cc) {
6191 gen_op_subl_T0_T1_cc();
6193 break;
6194 case 0x0b:
6195 if (set_cc) {
6196 gen_op_addl_T0_T1_cc();
6198 break;
6199 case 0x0c:
6200 gen_op_orl_T0_T1();
6201 gen_movl_reg_T0(s, rd);
6202 if (logic_cc)
6203 gen_op_logic_T0_cc();
6204 break;
6205 case 0x0d:
6206 if (logic_cc && rd == 15) {
6207 /* MOVS r15, ... is used for exception return. */
6208 if (IS_USER(s))
6209 goto illegal_op;
6210 gen_op_movl_T0_T1();
6211 gen_exception_return(s);
6212 } else {
6213 gen_movl_reg_T1(s, rd);
6214 if (logic_cc)
6215 gen_op_logic_T1_cc();
6217 break;
6218 case 0x0e:
6219 gen_op_bicl_T0_T1();
6220 gen_movl_reg_T0(s, rd);
6221 if (logic_cc)
6222 gen_op_logic_T0_cc();
6223 break;
6224 default:
6225 case 0x0f:
6226 gen_op_notl_T1();
6227 gen_movl_reg_T1(s, rd);
6228 if (logic_cc)
6229 gen_op_logic_T1_cc();
6230 break;
6232 } else {
6233 /* other instructions */
6234 op1 = (insn >> 24) & 0xf;
6235 switch(op1) {
6236 case 0x0:
6237 case 0x1:
6238 /* multiplies, extra load/stores */
6239 sh = (insn >> 5) & 3;
6240 if (sh == 0) {
6241 if (op1 == 0x0) {
6242 rd = (insn >> 16) & 0xf;
6243 rn = (insn >> 12) & 0xf;
6244 rs = (insn >> 8) & 0xf;
6245 rm = (insn) & 0xf;
6246 op1 = (insn >> 20) & 0xf;
6247 switch (op1) {
6248 case 0: case 1: case 2: case 3: case 6:
6249 /* 32 bit mul */
6250 tmp = load_reg(s, rs);
6251 tmp2 = load_reg(s, rm);
6252 tcg_gen_mul_i32(tmp, tmp, tmp2);
6253 dead_tmp(tmp2);
6254 if (insn & (1 << 22)) {
6255 /* Subtract (mls) */
6256 ARCH(6T2);
6257 tmp2 = load_reg(s, rn);
6258 tcg_gen_sub_i32(tmp, tmp2, tmp);
6259 dead_tmp(tmp2);
6260 } else if (insn & (1 << 21)) {
6261 /* Add */
6262 tmp2 = load_reg(s, rn);
6263 tcg_gen_add_i32(tmp, tmp, tmp2);
6264 dead_tmp(tmp2);
6266 if (insn & (1 << 20))
6267 gen_logic_CC(tmp);
6268 store_reg(s, rd, tmp);
6269 break;
6270 default:
6271 /* 64 bit mul */
6272 tmp = load_reg(s, rs);
6273 tmp2 = load_reg(s, rm);
6274 if (insn & (1 << 22))
6275 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6276 else
6277 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6278 if (insn & (1 << 21)) /* mult accumulate */
6279 gen_addq(s, tmp64, rn, rd);
6280 if (!(insn & (1 << 23))) { /* double accumulate */
6281 ARCH(6);
6282 gen_addq_lo(s, tmp64, rn);
6283 gen_addq_lo(s, tmp64, rd);
6285 if (insn & (1 << 20))
6286 gen_logicq_cc(tmp64);
6287 gen_storeq_reg(s, rn, rd, tmp64);
6288 break;
6290 } else {
6291 rn = (insn >> 16) & 0xf;
6292 rd = (insn >> 12) & 0xf;
6293 if (insn & (1 << 23)) {
6294 /* load/store exclusive */
6295 op1 = (insn >> 21) & 0x3;
6296 if (op1)
6297 ARCH(6K);
6298 else
6299 ARCH(6);
6300 gen_movl_T1_reg(s, rn);
6301 addr = cpu_T[1];
6302 if (insn & (1 << 20)) {
6303 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6304 switch (op1) {
6305 case 0: /* ldrex */
6306 tmp = gen_ld32(addr, IS_USER(s));
6307 break;
6308 case 1: /* ldrexd */
6309 tmp = gen_ld32(addr, IS_USER(s));
6310 store_reg(s, rd, tmp);
6311 tcg_gen_addi_i32(addr, addr, 4);
6312 tmp = gen_ld32(addr, IS_USER(s));
6313 rd++;
6314 break;
6315 case 2: /* ldrexb */
6316 tmp = gen_ld8u(addr, IS_USER(s));
6317 break;
6318 case 3: /* ldrexh */
6319 tmp = gen_ld16u(addr, IS_USER(s));
6320 break;
6321 default:
6322 abort();
6324 store_reg(s, rd, tmp);
6325 } else {
6326 int label = gen_new_label();
6327 rm = insn & 0xf;
6328 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6329 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6330 0, label);
6331 tmp = load_reg(s,rm);
6332 switch (op1) {
6333 case 0: /* strex */
6334 gen_st32(tmp, addr, IS_USER(s));
6335 break;
6336 case 1: /* strexd */
6337 gen_st32(tmp, addr, IS_USER(s));
6338 tcg_gen_addi_i32(addr, addr, 4);
6339 tmp = load_reg(s, rm + 1);
6340 gen_st32(tmp, addr, IS_USER(s));
6341 break;
6342 case 2: /* strexb */
6343 gen_st8(tmp, addr, IS_USER(s));
6344 break;
6345 case 3: /* strexh */
6346 gen_st16(tmp, addr, IS_USER(s));
6347 break;
6348 default:
6349 abort();
6351 gen_set_label(label);
6352 gen_movl_reg_T0(s, rd);
6354 } else {
6355 /* SWP instruction */
6356 rm = (insn) & 0xf;
6358 /* ??? This is not really atomic. However we know
6359 we never have multiple CPUs running in parallel,
6360 so it is good enough. */
6361 addr = load_reg(s, rn);
6362 tmp = load_reg(s, rm);
6363 if (insn & (1 << 22)) {
6364 tmp2 = gen_ld8u(addr, IS_USER(s));
6365 gen_st8(tmp, addr, IS_USER(s));
6366 } else {
6367 tmp2 = gen_ld32(addr, IS_USER(s));
6368 gen_st32(tmp, addr, IS_USER(s));
6370 dead_tmp(addr);
6371 store_reg(s, rd, tmp2);
6374 } else {
6375 int address_offset;
6376 int load;
6377 /* Misc load/store */
6378 rn = (insn >> 16) & 0xf;
6379 rd = (insn >> 12) & 0xf;
6380 addr = load_reg(s, rn);
6381 if (insn & (1 << 24))
6382 gen_add_datah_offset(s, insn, 0, addr);
6383 address_offset = 0;
6384 if (insn & (1 << 20)) {
6385 /* load */
6386 switch(sh) {
6387 case 1:
6388 tmp = gen_ld16u(addr, IS_USER(s));
6389 break;
6390 case 2:
6391 tmp = gen_ld8s(addr, IS_USER(s));
6392 break;
6393 default:
6394 case 3:
6395 tmp = gen_ld16s(addr, IS_USER(s));
6396 break;
6398 load = 1;
6399 } else if (sh & 2) {
6400 /* doubleword */
6401 if (sh & 1) {
6402 /* store */
6403 tmp = load_reg(s, rd);
6404 gen_st32(tmp, addr, IS_USER(s));
6405 tcg_gen_addi_i32(addr, addr, 4);
6406 tmp = load_reg(s, rd + 1);
6407 gen_st32(tmp, addr, IS_USER(s));
6408 load = 0;
6409 } else {
6410 /* load */
6411 tmp = gen_ld32(addr, IS_USER(s));
6412 store_reg(s, rd, tmp);
6413 tcg_gen_addi_i32(addr, addr, 4);
6414 tmp = gen_ld32(addr, IS_USER(s));
6415 rd++;
6416 load = 1;
6418 address_offset = -4;
6419 } else {
6420 /* store */
6421 tmp = load_reg(s, rd);
6422 gen_st16(tmp, addr, IS_USER(s));
6423 load = 0;
6425 /* Perform base writeback before the loaded value to
6426 ensure correct behavior with overlapping index registers.
6427 ldrd with base writeback is undefined if the
6428 destination and index registers overlap. */
6429 if (!(insn & (1 << 24))) {
6430 gen_add_datah_offset(s, insn, address_offset, addr);
6431 store_reg(s, rn, addr);
6432 } else if (insn & (1 << 21)) {
6433 if (address_offset)
6434 tcg_gen_addi_i32(addr, addr, address_offset);
6435 store_reg(s, rn, addr);
6436 } else {
6437 dead_tmp(addr);
6439 if (load) {
6440 /* Complete the load. */
6441 store_reg(s, rd, tmp);
6444 break;
6445 case 0x4:
6446 case 0x5:
6447 goto do_ldst;
6448 case 0x6:
6449 case 0x7:
6450 if (insn & (1 << 4)) {
6451 ARCH(6);
6452 /* Armv6 Media instructions. */
6453 rm = insn & 0xf;
6454 rn = (insn >> 16) & 0xf;
6455 rd = (insn >> 12) & 0xf;
6456 rs = (insn >> 8) & 0xf;
6457 switch ((insn >> 23) & 3) {
6458 case 0: /* Parallel add/subtract. */
6459 op1 = (insn >> 20) & 7;
6460 tmp = load_reg(s, rn);
6461 tmp2 = load_reg(s, rm);
6462 sh = (insn >> 5) & 7;
6463 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6464 goto illegal_op;
6465 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6466 dead_tmp(tmp2);
6467 store_reg(s, rd, tmp);
6468 break;
6469 case 1:
6470 if ((insn & 0x00700020) == 0) {
6471 /* Halfword pack. */
6472 tmp = load_reg(s, rn);
6473 tmp2 = load_reg(s, rm);
6474 shift = (insn >> 7) & 0x1f;
6475 if (insn & (1 << 6)) {
6476 /* pkhtb */
6477 if (shift == 0)
6478 shift = 31;
6479 tcg_gen_sari_i32(tmp2, tmp2, shift);
6480 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6481 tcg_gen_ext16u_i32(tmp2, tmp2);
6482 } else {
6483 /* pkhbt */
6484 if (shift)
6485 tcg_gen_shli_i32(tmp2, tmp2, shift);
6486 tcg_gen_ext16u_i32(tmp, tmp);
6487 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6489 tcg_gen_or_i32(tmp, tmp, tmp2);
6490 dead_tmp(tmp2);
6491 store_reg(s, rd, tmp);
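/* Note (added): the two branches above compute
   PKHTB: (Rn & 0xffff0000) | ((Rm asr shift) & 0xffff) and
   PKHBT: (Rn & 0xffff) | ((Rm << shift) & 0xffff0000). */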
6492 } else if ((insn & 0x00200020) == 0x00200000) {
6493 /* [us]sat */
6494 tmp = load_reg(s, rm);
6495 shift = (insn >> 7) & 0x1f;
6496 if (insn & (1 << 6)) {
6497 if (shift == 0)
6498 shift = 31;
6499 tcg_gen_sari_i32(tmp, tmp, shift);
6500 } else {
6501 tcg_gen_shli_i32(tmp, tmp, shift);
6503 sh = (insn >> 16) & 0x1f;
6504 if (sh != 0) {
6505 if (insn & (1 << 22))
6506 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6507 else
6508 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6510 store_reg(s, rd, tmp);
6511 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6512 /* [us]sat16 */
6513 tmp = load_reg(s, rm);
6514 sh = (insn >> 16) & 0x1f;
6515 if (sh != 0) {
6516 if (insn & (1 << 22))
6517 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6518 else
6519 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6521 store_reg(s, rd, tmp);
6522 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6523 /* Select bytes. */
6524 tmp = load_reg(s, rn);
6525 tmp2 = load_reg(s, rm);
6526 tmp3 = new_tmp();
6527 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6528 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6529 dead_tmp(tmp3);
6530 dead_tmp(tmp2);
6531 store_reg(s, rd, tmp);
6532 } else if ((insn & 0x000003e0) == 0x00000060) {
6533 tmp = load_reg(s, rm);
6534 shift = (insn >> 10) & 3;
6535 /* ??? In many cases it's not necessary to do a
6536 rotate, a shift is sufficient. */
6537 if (shift != 0)
6538 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6539 op1 = (insn >> 20) & 7;
6540 switch (op1) {
6541 case 0: gen_sxtb16(tmp); break;
6542 case 2: gen_sxtb(tmp); break;
6543 case 3: gen_sxth(tmp); break;
6544 case 4: gen_uxtb16(tmp); break;
6545 case 6: gen_uxtb(tmp); break;
6546 case 7: gen_uxth(tmp); break;
6547 default: goto illegal_op;
6549 if (rn != 15) {
6550 tmp2 = load_reg(s, rn);
6551 if ((op1 & 3) == 0) {
6552 gen_add16(tmp, tmp2);
6553 } else {
6554 tcg_gen_add_i32(tmp, tmp, tmp2);
6555 dead_tmp(tmp2);
6558 store_reg(s, rd, tmp);
6559 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6560 /* rev */
6561 tmp = load_reg(s, rm);
6562 if (insn & (1 << 22)) {
6563 if (insn & (1 << 7)) {
6564 gen_revsh(tmp);
6565 } else {
6566 ARCH(6T2);
6567 gen_helper_rbit(tmp, tmp);
6569 } else {
6570 if (insn & (1 << 7))
6571 gen_rev16(tmp);
6572 else
6573 tcg_gen_bswap32_i32(tmp, tmp);
6575 store_reg(s, rd, tmp);
6576 } else {
6577 goto illegal_op;
6579 break;
6580 case 2: /* Multiplies (Type 3). */
6581 tmp = load_reg(s, rm);
6582 tmp2 = load_reg(s, rs);
6583 if (insn & (1 << 20)) {
6584 /* Signed multiply most significant [accumulate]. */
6585 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6586 if (insn & (1 << 5))
6587 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
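/* Note (added): adding 0x80000000 before the low word is discarded
   rounds the 64-bit product to nearest at bit 32, i.e. the rounding
   variants (SMMULR/SMMLAR) selected by bit 5. */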
6588 tcg_gen_shri_i64(tmp64, tmp64, 32);
6589 tmp = new_tmp();
6590 tcg_gen_trunc_i64_i32(tmp, tmp64);
6591 if (rd != 15) {
6592 tmp2 = load_reg(s, rd);
6593 if (insn & (1 << 6)) {
6594 tcg_gen_sub_i32(tmp, tmp, tmp2);
6595 } else {
6596 tcg_gen_add_i32(tmp, tmp, tmp2);
6598 dead_tmp(tmp2);
6600 store_reg(s, rn, tmp);
6601 } else {
6602 if (insn & (1 << 5))
6603 gen_swap_half(tmp2);
6604 gen_smul_dual(tmp, tmp2);
6605 /* This addition cannot overflow. */
6606 if (insn & (1 << 6)) {
6607 tcg_gen_sub_i32(tmp, tmp, tmp2);
6608 } else {
6609 tcg_gen_add_i32(tmp, tmp, tmp2);
6611 dead_tmp(tmp2);
6612 if (insn & (1 << 22)) {
6613 /* smlald, smlsld */
6614 tmp64 = tcg_temp_new_i64();
6615 tcg_gen_ext_i32_i64(tmp64, tmp);
6616 dead_tmp(tmp);
6617 gen_addq(s, tmp64, rd, rn);
6618 gen_storeq_reg(s, rd, rn, tmp64);
6619 } else {
6620 /* smuad, smusd, smlad, smlsd */
6621 if (rd != 15)
6623 tmp2 = load_reg(s, rd);
6624 gen_helper_add_setq(tmp, tmp, tmp2);
6625 dead_tmp(tmp2);
6627 store_reg(s, rn, tmp);
6630 break;
6631 case 3:
6632 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6633 switch (op1) {
6634 case 0: /* Unsigned sum of absolute differences. */
6635 ARCH(6);
6636 tmp = load_reg(s, rm);
6637 tmp2 = load_reg(s, rs);
6638 gen_helper_usad8(tmp, tmp, tmp2);
6639 dead_tmp(tmp2);
6640 if (rd != 15) {
6641 tmp2 = load_reg(s, rd);
6642 tcg_gen_add_i32(tmp, tmp, tmp2);
6643 dead_tmp(tmp2);
6645 store_reg(s, rn, tmp);
6646 break;
6647 case 0x20: case 0x24: case 0x28: case 0x2c:
6648 /* Bitfield insert/clear. */
6649 ARCH(6T2);
6650 shift = (insn >> 7) & 0x1f;
6651 i = (insn >> 16) & 0x1f;
6652 i = i + 1 - shift;
6653 if (rm == 15) {
6654 tmp = new_tmp();
6655 tcg_gen_movi_i32(tmp, 0);
6656 } else {
6657 tmp = load_reg(s, rm);
6659 if (i != 32) {
6660 tmp2 = load_reg(s, rd);
6661 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6662 dead_tmp(tmp2);
6664 store_reg(s, rd, tmp);
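/* Note (added, assuming gen_bfi(dest, base, val, shift, mask) computes
   (base & ~(mask << shift)) | ((val & mask) << shift)): a width-i field
   from rm replaces bits [shift, shift+i-1] of rd; i == 32 is a plain
   move, so the load/gen_bfi step is skipped. */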
6665 break;
6666 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6667 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6668 ARCH(6T2);
6669 tmp = load_reg(s, rm);
6670 shift = (insn >> 7) & 0x1f;
6671 i = ((insn >> 16) & 0x1f) + 1;
6672 if (shift + i > 32)
6673 goto illegal_op;
6674 if (i < 32) {
6675 if (op1 & 0x20) {
6676 gen_ubfx(tmp, shift, (1u << i) - 1);
6677 } else {
6678 gen_sbfx(tmp, shift, i);
6681 store_reg(s, rd, tmp);
6682 break;
6683 default:
6684 goto illegal_op;
6686 break;
6688 break;
6690 do_ldst:
6691 /* Check for undefined extension instructions
6692              * per the ARM Bible, i.e.:
6693              * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6694              */
6695 sh = (0xf << 20) | (0xf << 4);
6696             if (op1 == 0x7 && ((insn & sh) == sh))
6697             {
6698                 goto illegal_op;
6699             }
6700 /* load/store byte/word */
6701 rn = (insn >> 16) & 0xf;
6702 rd = (insn >> 12) & 0xf;
6703 tmp2 = load_reg(s, rn);
6704 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
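        /* Select the MMU access mode: the unprivileged ldrt/strt forms
           (P clear, W set) always use the user-mode mapping. */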
6705 if (insn & (1 << 24))
6706 gen_add_data_offset(s, insn, tmp2);
6707 if (insn & (1 << 20)) {
6708 /* load */
6709 if (insn & (1 << 22)) {
6710 tmp = gen_ld8u(tmp2, i);
6711 } else {
6712 tmp = gen_ld32(tmp2, i);
6714 } else {
6715 /* store */
6716 tmp = load_reg(s, rd);
6717 if (insn & (1 << 22))
6718 gen_st8(tmp, tmp2, i);
6719 else
6720 gen_st32(tmp, tmp2, i);
6722 if (!(insn & (1 << 24))) {
6723 gen_add_data_offset(s, insn, tmp2);
6724 store_reg(s, rn, tmp2);
6725 } else if (insn & (1 << 21)) {
6726 store_reg(s, rn, tmp2);
6727 } else {
6728 dead_tmp(tmp2);
6730 if (insn & (1 << 20)) {
6731 /* Complete the load. */
6732 if (rd == 15)
6733 gen_bx(s, tmp);
6734 else
6735 store_reg(s, rd, tmp);
6737 break;
6738 case 0x08:
6739 case 0x09:
6741 int j, n, user, loaded_base;
6742 TCGv loaded_var;
6743 /* load/store multiple words */
6744 /* XXX: store correct base if write back */
6745 user = 0;
6746 if (insn & (1 << 22)) {
6747 if (IS_USER(s))
6748 goto illegal_op; /* only usable in supervisor mode */
6750 if ((insn & (1 << 15)) == 0)
6751 user = 1;
6753 rn = (insn >> 16) & 0xf;
6754 addr = load_reg(s, rn);
6756 /* compute total size */
6757 loaded_base = 0;
6758 TCGV_UNUSED(loaded_var);
6759 n = 0;
6760 for(i=0;i<16;i++) {
6761 if (insn & (1 << i))
6762 n++;
6764 /* XXX: test invalid n == 0 case ? */
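        /* Normalize the four addressing modes (bit 23 = increment/
           decrement, bit 24 = pre/post) to a single ascending walk:
           the start address is adjusted here, and writeback fixes the
           base register up again after the transfer loop. */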
6765 if (insn & (1 << 23)) {
6766 if (insn & (1 << 24)) {
6767 /* pre increment */
6768 tcg_gen_addi_i32(addr, addr, 4);
6769 } else {
6770                     /* post increment */
6771                 }
6772             } else {
6773 if (insn & (1 << 24)) {
6774 /* pre decrement */
6775 tcg_gen_addi_i32(addr, addr, -(n * 4));
6776 } else {
6777 /* post decrement */
6778 if (n != 1)
6779                         tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6780                 }
6781             }
6782             j = 0;
6783 for(i=0;i<16;i++) {
6784 if (insn & (1 << i)) {
6785 if (insn & (1 << 20)) {
6786 /* load */
6787 tmp = gen_ld32(addr, IS_USER(s));
6788 if (i == 15) {
6789 gen_bx(s, tmp);
6790 } else if (user) {
6791 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6792 dead_tmp(tmp);
6793 } else if (i == rn) {
6794 loaded_var = tmp;
6795 loaded_base = 1;
6796 } else {
6797 store_reg(s, i, tmp);
6799 } else {
6800 /* store */
6801 if (i == 15) {
6802 /* special case: r15 = PC + 8 */
6803 val = (long)s->pc + 4;
6804 tmp = new_tmp();
6805 tcg_gen_movi_i32(tmp, val);
6806 } else if (user) {
6807 tmp = new_tmp();
6808 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6809 } else {
6810 tmp = load_reg(s, i);
6812 gen_st32(tmp, addr, IS_USER(s));
6814 j++;
6815 /* no need to add after the last transfer */
6816 if (j != n)
6817 tcg_gen_addi_i32(addr, addr, 4);
6820 if (insn & (1 << 21)) {
6821 /* write back */
6822 if (insn & (1 << 23)) {
6823 if (insn & (1 << 24)) {
6824 /* pre increment */
6825 } else {
6826 /* post increment */
6827 tcg_gen_addi_i32(addr, addr, 4);
6829 } else {
6830 if (insn & (1 << 24)) {
6831 /* pre decrement */
6832 if (n != 1)
6833 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6834 } else {
6835 /* post decrement */
6836 tcg_gen_addi_i32(addr, addr, -(n * 4));
6839 store_reg(s, rn, addr);
6840 } else {
6841 dead_tmp(addr);
6843 if (loaded_base) {
6844 store_reg(s, rn, loaded_var);
6846 if ((insn & (1 << 22)) && !user) {
6847 /* Restore CPSR from SPSR. */
6848 tmp = load_cpu_field(spsr);
6849 gen_set_cpsr(tmp, 0xffffffff);
6850 dead_tmp(tmp);
6851 s->is_jmp = DISAS_UPDATE;
6854 break;
6855 case 0xa:
6856 case 0xb:
6858 int32_t offset;
6860 /* branch (and link) */
6861 val = (int32_t)s->pc;
6862 if (insn & (1 << 24)) {
6863 tmp = new_tmp();
6864 tcg_gen_movi_i32(tmp, val);
6865                 store_reg(s, 14, tmp);
6866             }
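            /* The 24-bit offset is sign-extended (shift up 8, arithmetic
               shift down 8) and scaled by 4.  The target is relative to
               PC+8; s->pc already points 4 past the instruction, hence
               the extra +4. */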
6867 offset = (((int32_t)insn << 8) >> 8);
6868 val += (offset << 2) + 4;
6869 gen_jmp(s, val);
6871 break;
6872 case 0xc:
6873 case 0xd:
6874 case 0xe:
6875 /* Coprocessor. */
6876 if (disas_coproc_insn(env, s, insn))
6877 goto illegal_op;
6878 break;
6879 case 0xf:
6880 /* swi */
6881 gen_set_pc_im(s->pc);
6882 s->is_jmp = DISAS_SWI;
6883 break;
6884 default:
6885 illegal_op:
6886 gen_set_condexec(s);
6887 gen_set_pc_im(s->pc - 4);
6888 gen_exception(EXCP_UDEF);
6889 s->is_jmp = DISAS_JUMP;
6890 break;
6895 /* Return true if this is a Thumb-2 logical op. */
6896 static int
6897 thumb2_logic_op(int op)
6898 {
6899     return (op < 8);
6900 }
6902 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6903 then set condition code flags based on the result of the operation.
6904 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6905 to the high bit of T1.
6906 Returns zero if the opcode is valid. */
6908 static int
6909 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6910 {
6911     int logic_cc;
6913 logic_cc = 0;
6914 switch (op) {
6915 case 0: /* and */
6916 gen_op_andl_T0_T1();
6917 logic_cc = conds;
6918 break;
6919 case 1: /* bic */
6920 gen_op_bicl_T0_T1();
6921 logic_cc = conds;
6922 break;
6923 case 2: /* orr */
6924 gen_op_orl_T0_T1();
6925 logic_cc = conds;
6926 break;
6927 case 3: /* orn */
6928 gen_op_notl_T1();
6929 gen_op_orl_T0_T1();
6930 logic_cc = conds;
6931 break;
6932 case 4: /* eor */
6933 gen_op_xorl_T0_T1();
6934 logic_cc = conds;
6935 break;
6936 case 8: /* add */
6937 if (conds)
6938 gen_op_addl_T0_T1_cc();
6939 else
6940 gen_op_addl_T0_T1();
6941 break;
6942 case 10: /* adc */
6943 if (conds)
6944 gen_op_adcl_T0_T1_cc();
6945 else
6946 gen_adc_T0_T1();
6947 break;
6948 case 11: /* sbc */
6949 if (conds)
6950 gen_op_sbcl_T0_T1_cc();
6951 else
6952 gen_sbc_T0_T1();
6953 break;
6954 case 13: /* sub */
6955 if (conds)
6956 gen_op_subl_T0_T1_cc();
6957 else
6958 gen_op_subl_T0_T1();
6959 break;
6960 case 14: /* rsb */
6961 if (conds)
6962 gen_op_rsbl_T0_T1_cc();
6963 else
6964 gen_op_rsbl_T0_T1();
6965 break;
6966 default: /* 5, 6, 7, 9, 12, 15. */
6967 return 1;
6969 if (logic_cc) {
6970 gen_op_logic_T0_cc();
6971 if (shifter_out)
6972 gen_set_CF_bit31(cpu_T[1]);
6974     return 0;
6975 }
6977 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6978 is not legal. */
6979 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6980 {
6981     uint32_t insn, imm, shift, offset;
6982 uint32_t rd, rn, rm, rs;
6983 TCGv tmp;
6984 TCGv tmp2;
6985 TCGv tmp3;
6986 TCGv addr;
6987 TCGv_i64 tmp64;
6988 int op;
6989 int shiftop;
6990 int conds;
6991 int logic_cc;
6993 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6994 || arm_feature (env, ARM_FEATURE_M))) {
6995 /* Thumb-1 cores may need to treat bl and blx as a pair of
6996 16-bit instructions to get correct prefetch abort behavior. */
6997 insn = insn_hw1;
6998 if ((insn & (1 << 12)) == 0) {
6999 /* Second half of blx. */
7000 offset = ((insn & 0x7ff) << 1);
7001 tmp = load_reg(s, 14);
7002 tcg_gen_addi_i32(tmp, tmp, offset);
7003 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7005 tmp2 = new_tmp();
7006 tcg_gen_movi_i32(tmp2, s->pc | 1);
7007 store_reg(s, 14, tmp2);
7008 gen_bx(s, tmp);
7009 return 0;
7011 if (insn & (1 << 11)) {
7012 /* Second half of bl. */
7013 offset = ((insn & 0x7ff) << 1) | 1;
7014 tmp = load_reg(s, 14);
7015 tcg_gen_addi_i32(tmp, tmp, offset);
7017 tmp2 = new_tmp();
7018 tcg_gen_movi_i32(tmp2, s->pc | 1);
7019 store_reg(s, 14, tmp2);
7020 gen_bx(s, tmp);
7021 return 0;
7023 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7024 /* Instruction spans a page boundary. Implement it as two
7025                16-bit instructions in case the second half causes a
7026                prefetch abort. */
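            /* First half of bl at a page boundary: sign-extend hw1[10:0]
               and shift left 12 to form the upper part of the offset;
               lr then holds this partial target until the second
               halfword completes the branch. */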
7027 offset = ((int32_t)insn << 21) >> 9;
7028 gen_op_movl_T0_im(s->pc + 2 + offset);
7029 gen_movl_reg_T0(s, 14);
7030 return 0;
7032 /* Fall through to 32-bit decode. */
7035 insn = lduw_code(s->pc);
7036 s->pc += 2;
7037 insn |= (uint32_t)insn_hw1 << 16;
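    /* A 32-bit Thumb-2 instruction has the first halfword in bits
       [31:16] and the second (just fetched) in bits [15:0]. */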
7039 if ((insn & 0xf800e800) != 0xf000e800) {
7040 ARCH(6T2);
7043 rn = (insn >> 16) & 0xf;
7044 rs = (insn >> 12) & 0xf;
7045 rd = (insn >> 8) & 0xf;
7046 rm = insn & 0xf;
7047 switch ((insn >> 25) & 0xf) {
7048 case 0: case 1: case 2: case 3:
7049 /* 16-bit instructions. Should never happen. */
7050 abort();
7051 case 4:
7052 if (insn & (1 << 22)) {
7053 /* Other load/store, table branch. */
7054 if (insn & 0x01200000) {
7055 /* Load/store doubleword. */
7056 if (rn == 15) {
7057 addr = new_tmp();
7058 tcg_gen_movi_i32(addr, s->pc & ~3);
7059 } else {
7060 addr = load_reg(s, rn);
7062 offset = (insn & 0xff) * 4;
7063 if ((insn & (1 << 23)) == 0)
7064 offset = -offset;
7065 if (insn & (1 << 24)) {
7066 tcg_gen_addi_i32(addr, addr, offset);
7067 offset = 0;
7069 if (insn & (1 << 20)) {
7070 /* ldrd */
7071 tmp = gen_ld32(addr, IS_USER(s));
7072 store_reg(s, rs, tmp);
7073 tcg_gen_addi_i32(addr, addr, 4);
7074 tmp = gen_ld32(addr, IS_USER(s));
7075 store_reg(s, rd, tmp);
7076 } else {
7077 /* strd */
7078 tmp = load_reg(s, rs);
7079 gen_st32(tmp, addr, IS_USER(s));
7080 tcg_gen_addi_i32(addr, addr, 4);
7081 tmp = load_reg(s, rd);
7082 gen_st32(tmp, addr, IS_USER(s));
7084 if (insn & (1 << 21)) {
7085 /* Base writeback. */
7086 if (rn == 15)
7087 goto illegal_op;
7088 tcg_gen_addi_i32(addr, addr, offset - 4);
7089 store_reg(s, rn, addr);
7090 } else {
7091 dead_tmp(addr);
7093 } else if ((insn & (1 << 23)) == 0) {
7094 /* Load/store exclusive word. */
7095 gen_movl_T1_reg(s, rn);
7096 addr = cpu_T[1];
7097 if (insn & (1 << 20)) {
7098 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7099 tmp = gen_ld32(addr, IS_USER(s));
7100 store_reg(s, rd, tmp);
7101 } else {
7102 int label = gen_new_label();
7103 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7104 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7105 0, label);
7106 tmp = load_reg(s, rs);
7107 gen_st32(tmp, cpu_T[1], IS_USER(s));
7108 gen_set_label(label);
7109 gen_movl_reg_T0(s, rd);
7111 } else if ((insn & (1 << 6)) == 0) {
7112 /* Table Branch. */
7113 if (rn == 15) {
7114 addr = new_tmp();
7115 tcg_gen_movi_i32(addr, s->pc);
7116 } else {
7117 addr = load_reg(s, rn);
7119 tmp = load_reg(s, rm);
7120 tcg_gen_add_i32(addr, addr, tmp);
7121 if (insn & (1 << 4)) {
7122 /* tbh */
7123 tcg_gen_add_i32(addr, addr, tmp);
7124 dead_tmp(tmp);
7125 tmp = gen_ld16u(addr, IS_USER(s));
7126 } else { /* tbb */
7127 dead_tmp(tmp);
7128 tmp = gen_ld8u(addr, IS_USER(s));
7130 dead_tmp(addr);
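                /* The loaded table entry is a halfword count: scale it
                   by 2 and add the PC (which reads as the address of the
                   tbb/tbh plus 4) to form the branch target. */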
7131 tcg_gen_shli_i32(tmp, tmp, 1);
7132 tcg_gen_addi_i32(tmp, tmp, s->pc);
7133 store_reg(s, 15, tmp);
7134 } else {
7135 /* Load/store exclusive byte/halfword/doubleword. */
7136             /* ??? These are not really atomic. However, we know
7137 we never have multiple CPUs running in parallel,
7138 so it is good enough. */
7139 op = (insn >> 4) & 0x3;
7140 /* Must use a global reg for the address because we have
7141 a conditional branch in the store instruction. */
7142 gen_movl_T1_reg(s, rn);
7143 addr = cpu_T[1];
7144 if (insn & (1 << 20)) {
7145 gen_helper_mark_exclusive(cpu_env, addr);
7146 switch (op) {
7147 case 0:
7148 tmp = gen_ld8u(addr, IS_USER(s));
7149 break;
7150 case 1:
7151 tmp = gen_ld16u(addr, IS_USER(s));
7152 break;
7153 case 3:
7154 tmp = gen_ld32(addr, IS_USER(s));
7155 tcg_gen_addi_i32(addr, addr, 4);
7156 tmp2 = gen_ld32(addr, IS_USER(s));
7157 store_reg(s, rd, tmp2);
7158 break;
7159 default:
7160 goto illegal_op;
7162 store_reg(s, rs, tmp);
7163 } else {
7164 int label = gen_new_label();
7165 /* Must use a global that is not killed by the branch. */
7166 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7167 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7168 tmp = load_reg(s, rs);
7169 switch (op) {
7170 case 0:
7171 gen_st8(tmp, addr, IS_USER(s));
7172 break;
7173 case 1:
7174 gen_st16(tmp, addr, IS_USER(s));
7175 break;
7176 case 3:
7177 gen_st32(tmp, addr, IS_USER(s));
7178 tcg_gen_addi_i32(addr, addr, 4);
7179 tmp = load_reg(s, rd);
7180 gen_st32(tmp, addr, IS_USER(s));
7181 break;
7182 default:
7183 goto illegal_op;
7185 gen_set_label(label);
7186 gen_movl_reg_T0(s, rm);
7189 } else {
7190 /* Load/store multiple, RFE, SRS. */
7191 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7192 /* Not available in user mode. */
7193 if (IS_USER(s))
7194 goto illegal_op;
7195 if (insn & (1 << 20)) {
7196 /* rfe */
7197 addr = load_reg(s, rn);
7198 if ((insn & (1 << 24)) == 0)
7199 tcg_gen_addi_i32(addr, addr, -8);
7200 /* Load PC into tmp and CPSR into tmp2. */
7201 tmp = gen_ld32(addr, 0);
7202 tcg_gen_addi_i32(addr, addr, 4);
7203 tmp2 = gen_ld32(addr, 0);
7204 if (insn & (1 << 21)) {
7205 /* Base writeback. */
7206 if (insn & (1 << 24)) {
7207 tcg_gen_addi_i32(addr, addr, 4);
7208 } else {
7209 tcg_gen_addi_i32(addr, addr, -4);
7211 store_reg(s, rn, addr);
7212 } else {
7213 dead_tmp(addr);
7215 gen_rfe(s, tmp, tmp2);
7216 } else {
7217 /* srs */
7218 op = (insn & 0x1f);
7219 if (op == (env->uncached_cpsr & CPSR_M)) {
7220 addr = load_reg(s, 13);
7221 } else {
7222 addr = new_tmp();
7223 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7225 if ((insn & (1 << 24)) == 0) {
7226 tcg_gen_addi_i32(addr, addr, -8);
7228 tmp = load_reg(s, 14);
7229 gen_st32(tmp, addr, 0);
7230 tcg_gen_addi_i32(addr, addr, 4);
7231 tmp = new_tmp();
7232 gen_helper_cpsr_read(tmp);
7233 gen_st32(tmp, addr, 0);
7234 if (insn & (1 << 21)) {
7235 if ((insn & (1 << 24)) == 0) {
7236 tcg_gen_addi_i32(addr, addr, -4);
7237 } else {
7238 tcg_gen_addi_i32(addr, addr, 4);
7240 if (op == (env->uncached_cpsr & CPSR_M)) {
7241 store_reg(s, 13, addr);
7242 } else {
7243 gen_helper_set_r13_banked(cpu_env,
7244 tcg_const_i32(op), addr);
7246 } else {
7247 dead_tmp(addr);
7250 } else {
7251 int i;
7252 /* Load/store multiple. */
7253 addr = load_reg(s, rn);
7254 offset = 0;
7255 for (i = 0; i < 16; i++) {
7256 if (insn & (1 << i))
7257 offset += 4;
7259 if (insn & (1 << 24)) {
7260 tcg_gen_addi_i32(addr, addr, -offset);
7263 for (i = 0; i < 16; i++) {
7264 if ((insn & (1 << i)) == 0)
7265 continue;
7266 if (insn & (1 << 20)) {
7267 /* Load. */
7268 tmp = gen_ld32(addr, IS_USER(s));
7269 if (i == 15) {
7270 gen_bx(s, tmp);
7271 } else {
7272 store_reg(s, i, tmp);
7274 } else {
7275 /* Store. */
7276 tmp = load_reg(s, i);
7277 gen_st32(tmp, addr, IS_USER(s));
7279 tcg_gen_addi_i32(addr, addr, 4);
7281 if (insn & (1 << 21)) {
7282 /* Base register writeback. */
7283 if (insn & (1 << 24)) {
7284 tcg_gen_addi_i32(addr, addr, -offset);
7286 /* Fault if writeback register is in register list. */
7287 if (insn & (1 << rn))
7288 goto illegal_op;
7289 store_reg(s, rn, addr);
7290 } else {
7291 dead_tmp(addr);
7295 break;
7296 case 5: /* Data processing register constant shift. */
7297 if (rn == 15)
7298 gen_op_movl_T0_im(0);
7299 else
7300 gen_movl_T0_reg(s, rn);
7301 gen_movl_T1_reg(s, rm);
7302 op = (insn >> 21) & 0xf;
7303 shiftop = (insn >> 4) & 3;
7304 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
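        /* The 5-bit shift immediate is split across the encoding:
           imm2 in insn[7:6], imm3 in insn[14:12]. */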
7305 conds = (insn & (1 << 20)) != 0;
7306 logic_cc = (conds && thumb2_logic_op(op));
7307 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7308 if (gen_thumb2_data_op(s, op, conds, 0))
7309 goto illegal_op;
7310 if (rd != 15)
7311 gen_movl_reg_T0(s, rd);
7312 break;
7313 case 13: /* Misc data processing. */
7314 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7315 if (op < 4 && (insn & 0xf000) != 0xf000)
7316 goto illegal_op;
7317 switch (op) {
7318 case 0: /* Register controlled shift. */
7319 tmp = load_reg(s, rn);
7320 tmp2 = load_reg(s, rm);
7321 if ((insn & 0x70) != 0)
7322 goto illegal_op;
7323 op = (insn >> 21) & 3;
7324 logic_cc = (insn & (1 << 20)) != 0;
7325 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7326 if (logic_cc)
7327 gen_logic_CC(tmp);
7328 store_reg(s, rd, tmp);
7329 break;
7330 case 1: /* Sign/zero extend. */
7331 tmp = load_reg(s, rm);
7332 shift = (insn >> 4) & 3;
7333             /* ??? In many cases it's not necessary to do a
7334 rotate, a shift is sufficient. */
7335 if (shift != 0)
7336 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7337 op = (insn >> 20) & 7;
7338 switch (op) {
7339 case 0: gen_sxth(tmp); break;
7340 case 1: gen_uxth(tmp); break;
7341 case 2: gen_sxtb16(tmp); break;
7342 case 3: gen_uxtb16(tmp); break;
7343 case 4: gen_sxtb(tmp); break;
7344 case 5: gen_uxtb(tmp); break;
7345 default: goto illegal_op;
7347 if (rn != 15) {
7348 tmp2 = load_reg(s, rn);
7349 if ((op >> 1) == 1) {
7350 gen_add16(tmp, tmp2);
7351 } else {
7352 tcg_gen_add_i32(tmp, tmp, tmp2);
7353 dead_tmp(tmp2);
7356 store_reg(s, rd, tmp);
7357 break;
7358 case 2: /* SIMD add/subtract. */
7359 op = (insn >> 20) & 7;
7360 shift = (insn >> 4) & 7;
7361 if ((op & 3) == 3 || (shift & 3) == 3)
7362 goto illegal_op;
7363 tmp = load_reg(s, rn);
7364 tmp2 = load_reg(s, rm);
7365 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7366 dead_tmp(tmp2);
7367 store_reg(s, rd, tmp);
7368 break;
7369 case 3: /* Other data processing. */
7370 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7371 if (op < 4) {
7372 /* Saturating add/subtract. */
7373 tmp = load_reg(s, rn);
7374 tmp2 = load_reg(s, rm);
7375 if (op & 2)
7376 gen_helper_double_saturate(tmp, tmp);
7377 if (op & 1)
7378 gen_helper_sub_saturate(tmp, tmp2, tmp);
7379 else
7380 gen_helper_add_saturate(tmp, tmp, tmp2);
7381 dead_tmp(tmp2);
7382 } else {
7383 tmp = load_reg(s, rn);
7384 switch (op) {
7385 case 0x0a: /* rbit */
7386 gen_helper_rbit(tmp, tmp);
7387 break;
7388 case 0x08: /* rev */
7389 tcg_gen_bswap32_i32(tmp, tmp);
7390 break;
7391 case 0x09: /* rev16 */
7392 gen_rev16(tmp);
7393 break;
7394 case 0x0b: /* revsh */
7395 gen_revsh(tmp);
7396 break;
7397 case 0x10: /* sel */
7398 tmp2 = load_reg(s, rm);
7399 tmp3 = new_tmp();
7400 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7401 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7402 dead_tmp(tmp3);
7403 dead_tmp(tmp2);
7404 break;
7405 case 0x18: /* clz */
7406 gen_helper_clz(tmp, tmp);
7407 break;
7408 default:
7409 goto illegal_op;
7412 store_reg(s, rd, tmp);
7413 break;
7414 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7415 op = (insn >> 4) & 0xf;
7416 tmp = load_reg(s, rn);
7417 tmp2 = load_reg(s, rm);
7418 switch ((insn >> 20) & 7) {
7419 case 0: /* 32 x 32 -> 32 */
7420 tcg_gen_mul_i32(tmp, tmp, tmp2);
7421 dead_tmp(tmp2);
7422 if (rs != 15) {
7423 tmp2 = load_reg(s, rs);
7424 if (op)
7425 tcg_gen_sub_i32(tmp, tmp2, tmp);
7426 else
7427 tcg_gen_add_i32(tmp, tmp, tmp2);
7428 dead_tmp(tmp2);
7430 break;
7431 case 1: /* 16 x 16 -> 32 */
7432 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7433 dead_tmp(tmp2);
7434 if (rs != 15) {
7435 tmp2 = load_reg(s, rs);
7436 gen_helper_add_setq(tmp, tmp, tmp2);
7437 dead_tmp(tmp2);
7439 break;
7440 case 2: /* Dual multiply add. */
7441 case 4: /* Dual multiply subtract. */
7442 if (op)
7443 gen_swap_half(tmp2);
7444 gen_smul_dual(tmp, tmp2);
7445 /* This addition cannot overflow. */
7446 if (insn & (1 << 22)) {
7447 tcg_gen_sub_i32(tmp, tmp, tmp2);
7448 } else {
7449 tcg_gen_add_i32(tmp, tmp, tmp2);
7451 dead_tmp(tmp2);
7452                 if (rs != 15)
7453                   {
7454                     tmp2 = load_reg(s, rs);
7455                     gen_helper_add_setq(tmp, tmp, tmp2);
7456                     dead_tmp(tmp2);
7457                   }
7458 break;
7459 case 3: /* 32 * 16 -> 32msb */
7460 if (op)
7461 tcg_gen_sari_i32(tmp2, tmp2, 16);
7462 else
7463 gen_sxth(tmp2);
7464 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7465 tcg_gen_shri_i64(tmp64, tmp64, 16);
7466 tmp = new_tmp();
7467 tcg_gen_trunc_i64_i32(tmp, tmp64);
7468                 if (rs != 15)
7469                   {
7470                     tmp2 = load_reg(s, rs);
7471                     gen_helper_add_setq(tmp, tmp, tmp2);
7472                     dead_tmp(tmp2);
7473                   }
7474 break;
7475 case 5: case 6: /* 32 * 32 -> 32msb */
7476 gen_imull(tmp, tmp2);
7477 if (insn & (1 << 5)) {
7478 gen_roundqd(tmp, tmp2);
7479 dead_tmp(tmp2);
7480 } else {
7481 dead_tmp(tmp);
7482 tmp = tmp2;
7484 if (rs != 15) {
7485 tmp2 = load_reg(s, rs);
7486 if (insn & (1 << 21)) {
7487 tcg_gen_add_i32(tmp, tmp, tmp2);
7488 } else {
7489 tcg_gen_sub_i32(tmp, tmp2, tmp);
7491 dead_tmp(tmp2);
7493 break;
7494 case 7: /* Unsigned sum of absolute differences. */
7495 gen_helper_usad8(tmp, tmp, tmp2);
7496 dead_tmp(tmp2);
7497 if (rs != 15) {
7498 tmp2 = load_reg(s, rs);
7499 tcg_gen_add_i32(tmp, tmp, tmp2);
7500 dead_tmp(tmp2);
7502 break;
7504 store_reg(s, rd, tmp);
7505 break;
7506 case 6: case 7: /* 64-bit multiply, Divide. */
7507 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7508 tmp = load_reg(s, rn);
7509 tmp2 = load_reg(s, rm);
7510 if ((op & 0x50) == 0x10) {
7511 /* sdiv, udiv */
7512 if (!arm_feature(env, ARM_FEATURE_DIV))
7513 goto illegal_op;
7514 if (op & 0x20)
7515 gen_helper_udiv(tmp, tmp, tmp2);
7516 else
7517 gen_helper_sdiv(tmp, tmp, tmp2);
7518 dead_tmp(tmp2);
7519 store_reg(s, rd, tmp);
7520 } else if ((op & 0xe) == 0xc) {
7521 /* Dual multiply accumulate long. */
7522 if (op & 1)
7523 gen_swap_half(tmp2);
7524 gen_smul_dual(tmp, tmp2);
7525 if (op & 0x10) {
7526 tcg_gen_sub_i32(tmp, tmp, tmp2);
7527 } else {
7528 tcg_gen_add_i32(tmp, tmp, tmp2);
7530 dead_tmp(tmp2);
7531 /* BUGFIX */
7532 tmp64 = tcg_temp_new_i64();
7533 tcg_gen_ext_i32_i64(tmp64, tmp);
7534 dead_tmp(tmp);
7535 gen_addq(s, tmp64, rs, rd);
7536 gen_storeq_reg(s, rs, rd, tmp64);
7537 } else {
7538 if (op & 0x20) {
7539 /* Unsigned 64-bit multiply */
7540 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7541 } else {
7542 if (op & 8) {
7543 /* smlalxy */
7544 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7545 dead_tmp(tmp2);
7546 tmp64 = tcg_temp_new_i64();
7547 tcg_gen_ext_i32_i64(tmp64, tmp);
7548 dead_tmp(tmp);
7549 } else {
7550 /* Signed 64-bit multiply */
7551 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7554 if (op & 4) {
7555 /* umaal */
7556 gen_addq_lo(s, tmp64, rs);
7557 gen_addq_lo(s, tmp64, rd);
7558 } else if (op & 0x40) {
7559 /* 64-bit accumulate. */
7560 gen_addq(s, tmp64, rs, rd);
7562 gen_storeq_reg(s, rs, rd, tmp64);
7564 break;
7566 break;
7567 case 6: case 7: case 14: case 15:
7568 /* Coprocessor. */
7569 if (((insn >> 24) & 3) == 3) {
7570 /* Translate into the equivalent ARM encoding. */
7571 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7572 if (disas_neon_data_insn(env, s, insn))
7573 goto illegal_op;
7574 } else {
7575 if (insn & (1 << 28))
7576 goto illegal_op;
7577 if (disas_coproc_insn (env, s, insn))
7578 goto illegal_op;
7580 break;
7581 case 8: case 9: case 10: case 11:
7582 if (insn & (1 << 15)) {
7583 /* Branches, misc control. */
7584 if (insn & 0x5000) {
7585 /* Unconditional branch. */
7586                 /* signextend(hw1[10:0]) -> offset[31:12].  */
7587 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7588 /* hw1[10:0] -> offset[11:1]. */
7589 offset |= (insn & 0x7ff) << 1;
7590 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7591 offset[24:22] already have the same value because of the
7592 sign extension above. */
7593 offset ^= ((~insn) & (1 << 13)) << 10;
7594 offset ^= ((~insn) & (1 << 11)) << 11;
7596 if (insn & (1 << 14)) {
7597 /* Branch and link. */
7598 gen_op_movl_T1_im(s->pc | 1);
7599 gen_movl_reg_T1(s, 14);
7602 offset += s->pc;
7603 if (insn & (1 << 12)) {
7604 /* b/bl */
7605 gen_jmp(s, offset);
7606 } else {
7607 /* blx */
7608 offset &= ~(uint32_t)2;
7609 gen_bx_im(s, offset);
7611 } else if (((insn >> 23) & 7) == 7) {
7612 /* Misc control */
7613 if (insn & (1 << 13))
7614 goto illegal_op;
7616 if (insn & (1 << 26)) {
7617 /* Secure monitor call (v6Z) */
7618 goto illegal_op; /* not implemented. */
7619 } else {
7620 op = (insn >> 20) & 7;
7621 switch (op) {
7622 case 0: /* msr cpsr. */
7623 if (IS_M(env)) {
7624 tmp = load_reg(s, rn);
7625 addr = tcg_const_i32(insn & 0xff);
7626 gen_helper_v7m_msr(cpu_env, addr, tmp);
7627 gen_lookup_tb(s);
7628 break;
7630 /* fall through */
7631 case 1: /* msr spsr. */
7632 if (IS_M(env))
7633 goto illegal_op;
7634 gen_movl_T0_reg(s, rn);
7635 if (gen_set_psr_T0(s,
7636 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7637 op == 1))
7638 goto illegal_op;
7639 break;
7640 case 2: /* cps, nop-hint. */
7641 if (((insn >> 8) & 7) == 0) {
7642 gen_nop_hint(s, insn & 0xff);
7644 /* Implemented as NOP in user mode. */
7645 if (IS_USER(s))
7646 break;
7647 offset = 0;
7648 imm = 0;
7649 if (insn & (1 << 10)) {
7650 if (insn & (1 << 7))
7651 offset |= CPSR_A;
7652 if (insn & (1 << 6))
7653 offset |= CPSR_I;
7654 if (insn & (1 << 5))
7655 offset |= CPSR_F;
7656 if (insn & (1 << 9))
7657 imm = CPSR_A | CPSR_I | CPSR_F;
7659 if (insn & (1 << 8)) {
7660 offset |= 0x1f;
7661 imm |= (insn & 0x1f);
7663 if (offset) {
7664 gen_op_movl_T0_im(imm);
7665 gen_set_psr_T0(s, offset, 0);
7667 break;
7668 case 3: /* Special control operations. */
7669 op = (insn >> 4) & 0xf;
7670 switch (op) {
7671 case 2: /* clrex */
7672 gen_helper_clrex(cpu_env);
7673 break;
7674 case 4: /* dsb */
7675 case 5: /* dmb */
7676 case 6: /* isb */
7677 /* These execute as NOPs. */
7678 ARCH(7);
7679 break;
7680 default:
7681 goto illegal_op;
7683 break;
7684 case 4: /* bxj */
7685 /* Trivial implementation equivalent to bx. */
7686 tmp = load_reg(s, rn);
7687 gen_bx(s, tmp);
7688 break;
7689 case 5: /* Exception return. */
7690 /* Unpredictable in user mode. */
7691 goto illegal_op;
7692 case 6: /* mrs cpsr. */
7693 tmp = new_tmp();
7694 if (IS_M(env)) {
7695 addr = tcg_const_i32(insn & 0xff);
7696 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7697 } else {
7698 gen_helper_cpsr_read(tmp);
7700 store_reg(s, rd, tmp);
7701 break;
7702 case 7: /* mrs spsr. */
7703 /* Not accessible in user mode. */
7704 if (IS_USER(s) || IS_M(env))
7705 goto illegal_op;
7706 tmp = load_cpu_field(spsr);
7707 store_reg(s, rd, tmp);
7708 break;
7711 } else {
7712 /* Conditional branch. */
7713 op = (insn >> 22) & 0xf;
7714 /* Generate a conditional jump to next instruction. */
7715 s->condlabel = gen_new_label();
7716 gen_test_cc(op ^ 1, s->condlabel);
7717 s->condjmp = 1;
7719 /* offset[11:1] = insn[10:0] */
7720 offset = (insn & 0x7ff) << 1;
7721 /* offset[17:12] = insn[21:16]. */
7722 offset |= (insn & 0x003f0000) >> 4;
7723 /* offset[31:20] = insn[26]. */
7724 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7725 /* offset[18] = insn[13]. */
7726 offset |= (insn & (1 << 13)) << 5;
7727 /* offset[19] = insn[11]. */
7728 offset |= (insn & (1 << 11)) << 8;
7730 /* jump to the offset */
7731 gen_jmp(s, s->pc + offset);
7733 } else {
7734 /* Data processing immediate. */
7735 if (insn & (1 << 25)) {
7736 if (insn & (1 << 24)) {
7737 if (insn & (1 << 20))
7738 goto illegal_op;
7739 /* Bitfield/Saturate. */
7740 op = (insn >> 21) & 7;
7741 imm = insn & 0x1f;
7742 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7743 if (rn == 15) {
7744 tmp = new_tmp();
7745 tcg_gen_movi_i32(tmp, 0);
7746 } else {
7747 tmp = load_reg(s, rn);
7749 switch (op) {
7750 case 2: /* Signed bitfield extract. */
7751 imm++;
7752 if (shift + imm > 32)
7753 goto illegal_op;
7754 if (imm < 32)
7755 gen_sbfx(tmp, shift, imm);
7756 break;
7757 case 6: /* Unsigned bitfield extract. */
7758 imm++;
7759 if (shift + imm > 32)
7760 goto illegal_op;
7761 if (imm < 32)
7762 gen_ubfx(tmp, shift, (1u << imm) - 1);
7763 break;
7764 case 3: /* Bitfield insert/clear. */
7765 if (imm < shift)
7766 goto illegal_op;
7767 imm = imm + 1 - shift;
7768 if (imm != 32) {
7769 tmp2 = load_reg(s, rd);
7770 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7771 dead_tmp(tmp2);
7773 break;
7774 case 7:
7775 goto illegal_op;
7776 default: /* Saturate. */
7777 if (shift) {
7778 if (op & 1)
7779 tcg_gen_sari_i32(tmp, tmp, shift);
7780 else
7781 tcg_gen_shli_i32(tmp, tmp, shift);
7783 tmp2 = tcg_const_i32(imm);
7784 if (op & 4) {
7785 /* Unsigned. */
7786 if ((op & 1) && shift == 0)
7787 gen_helper_usat16(tmp, tmp, tmp2);
7788 else
7789 gen_helper_usat(tmp, tmp, tmp2);
7790 } else {
7791 /* Signed. */
7792 if ((op & 1) && shift == 0)
7793 gen_helper_ssat16(tmp, tmp, tmp2);
7794 else
7795 gen_helper_ssat(tmp, tmp, tmp2);
7797 break;
7799 store_reg(s, rd, tmp);
7800 } else {
7801 imm = ((insn & 0x04000000) >> 15)
7802 | ((insn & 0x7000) >> 4) | (insn & 0xff);
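            /* Gather the scattered immediate: i (insn[26]) -> imm[11],
               imm3 (insn[14:12]) -> imm[10:8], imm8 -> imm[7:0];
               movw/movt additionally take imm4 (insn[19:16]) as
               imm[15:12]. */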
7803 if (insn & (1 << 22)) {
7804 /* 16-bit immediate. */
7805 imm |= (insn >> 4) & 0xf000;
7806 if (insn & (1 << 23)) {
7807 /* movt */
7808 tmp = load_reg(s, rd);
7809 tcg_gen_ext16u_i32(tmp, tmp);
7810 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7811 } else {
7812 /* movw */
7813 tmp = new_tmp();
7814 tcg_gen_movi_i32(tmp, imm);
7816 } else {
7817 /* Add/sub 12-bit immediate. */
7818 if (rn == 15) {
7819 offset = s->pc & ~(uint32_t)3;
7820 if (insn & (1 << 23))
7821 offset -= imm;
7822 else
7823 offset += imm;
7824 tmp = new_tmp();
7825 tcg_gen_movi_i32(tmp, offset);
7826 } else {
7827 tmp = load_reg(s, rn);
7828 if (insn & (1 << 23))
7829 tcg_gen_subi_i32(tmp, tmp, imm);
7830 else
7831 tcg_gen_addi_i32(tmp, tmp, imm);
7834 store_reg(s, rd, tmp);
7836 } else {
7837 int shifter_out = 0;
7838 /* modified 12-bit immediate. */
7839 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7840 imm = (insn & 0xff);
7841 switch (shift) {
7842 case 0: /* XY */
7843 /* Nothing to do. */
7844 break;
7845 case 1: /* 00XY00XY */
7846 imm |= imm << 16;
7847 break;
7848 case 2: /* XY00XY00 */
7849 imm |= imm << 16;
7850 imm <<= 8;
7851 break;
7852 case 3: /* XYXYXYXY */
7853 imm |= imm << 16;
7854 imm |= imm << 8;
7855 break;
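            /* The rotation amount is the 5-bit value i:imm3:imm8[7];
               bit 7 of imm8 is then forced to 1 and the byte rotated
               right, done here as a left shift by 32-shift (shift is
               always >= 8 in this path, so the rotate never wraps). */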
7856 default: /* Rotated constant. */
7857 shift = (shift << 1) | (imm >> 7);
7858 imm |= 0x80;
7859 imm = imm << (32 - shift);
7860 shifter_out = 1;
7861 break;
7863 gen_op_movl_T1_im(imm);
7864 rn = (insn >> 16) & 0xf;
7865 if (rn == 15)
7866 gen_op_movl_T0_im(0);
7867 else
7868 gen_movl_T0_reg(s, rn);
7869 op = (insn >> 21) & 0xf;
7870 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7871 shifter_out))
7872 goto illegal_op;
7873 rd = (insn >> 8) & 0xf;
7874 if (rd != 15) {
7875 gen_movl_reg_T0(s, rd);
7879 break;
7880 case 12: /* Load/store single data item. */
7882 int postinc = 0;
7883 int writeback = 0;
7884 int user;
7885 if ((insn & 0x01100000) == 0x01000000) {
7886 if (disas_neon_ls_insn(env, s, insn))
7887 goto illegal_op;
7888 break;
7890 user = IS_USER(s);
7891 if (rn == 15) {
7892 addr = new_tmp();
7893 /* PC relative. */
7894 /* s->pc has already been incremented by 4. */
7895 imm = s->pc & 0xfffffffc;
7896 if (insn & (1 << 23))
7897 imm += insn & 0xfff;
7898 else
7899 imm -= insn & 0xfff;
7900 tcg_gen_movi_i32(addr, imm);
7901 } else {
7902 addr = load_reg(s, rn);
7903 if (insn & (1 << 23)) {
7904 /* Positive offset. */
7905 imm = insn & 0xfff;
7906 tcg_gen_addi_i32(addr, addr, imm);
7907 } else {
7908 op = (insn >> 8) & 7;
7909 imm = insn & 0xff;
7910 switch (op) {
7911 case 0: case 8: /* Shifted Register. */
7912 shift = (insn >> 4) & 0xf;
7913 if (shift > 3)
7914 goto illegal_op;
7915 tmp = load_reg(s, rm);
7916 if (shift)
7917 tcg_gen_shli_i32(tmp, tmp, shift);
7918 tcg_gen_add_i32(addr, addr, tmp);
7919 dead_tmp(tmp);
7920 break;
7921 case 4: /* Negative offset. */
7922 tcg_gen_addi_i32(addr, addr, -imm);
7923 break;
7924 case 6: /* User privilege. */
7925 tcg_gen_addi_i32(addr, addr, imm);
7926 user = 1;
7927 break;
7928 case 1: /* Post-decrement. */
7929 imm = -imm;
7930 /* Fall through. */
7931 case 3: /* Post-increment. */
7932 postinc = 1;
7933 writeback = 1;
7934 break;
7935 case 5: /* Pre-decrement. */
7936 imm = -imm;
7937 /* Fall through. */
7938 case 7: /* Pre-increment. */
7939 tcg_gen_addi_i32(addr, addr, imm);
7940 writeback = 1;
7941 break;
7942 default:
7943 goto illegal_op;
7947 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
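        /* op: bits [1:0] = size from insn[22:21] (0 byte, 1 halfword,
           2 word), bit 2 = signed from insn[24]. */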
7948 if (insn & (1 << 20)) {
7949 /* Load. */
7950 if (rs == 15 && op != 2) {
7951 if (op & 2)
7952 goto illegal_op;
7953 /* Memory hint. Implemented as NOP. */
7954 } else {
7955 switch (op) {
7956 case 0: tmp = gen_ld8u(addr, user); break;
7957 case 4: tmp = gen_ld8s(addr, user); break;
7958 case 1: tmp = gen_ld16u(addr, user); break;
7959 case 5: tmp = gen_ld16s(addr, user); break;
7960 case 2: tmp = gen_ld32(addr, user); break;
7961 default: goto illegal_op;
7963 if (rs == 15) {
7964 gen_bx(s, tmp);
7965 } else {
7966 store_reg(s, rs, tmp);
7969 } else {
7970 /* Store. */
7971 if (rs == 15)
7972 goto illegal_op;
7973 tmp = load_reg(s, rs);
7974 switch (op) {
7975 case 0: gen_st8(tmp, addr, user); break;
7976 case 1: gen_st16(tmp, addr, user); break;
7977 case 2: gen_st32(tmp, addr, user); break;
7978 default: goto illegal_op;
7981 if (postinc)
7982 tcg_gen_addi_i32(addr, addr, imm);
7983 if (writeback) {
7984 store_reg(s, rn, addr);
7985 } else {
7986 dead_tmp(addr);
7989 break;
7990 default:
7991 goto illegal_op;
7993 return 0;
7994 illegal_op:
7995     return 1;
7996 }
7998 static void disas_thumb_insn(CPUState *env, DisasContext *s)
7999 {
8000     uint32_t val, insn, op, rm, rn, rd, shift, cond;
8001 int32_t offset;
8002 int i;
8003 TCGv tmp;
8004 TCGv tmp2;
8005 TCGv addr;
8007 if (s->condexec_mask) {
8008 cond = s->condexec_cond;
8009 s->condlabel = gen_new_label();
8010 gen_test_cc(cond ^ 1, s->condlabel);
8011 s->condjmp = 1;
8014 insn = lduw_code(s->pc);
8015 s->pc += 2;
8017 switch (insn >> 12) {
8018 case 0: case 1:
8019 rd = insn & 7;
8020 op = (insn >> 11) & 3;
8021 if (op == 3) {
8022 /* add/subtract */
8023 rn = (insn >> 3) & 7;
8024 gen_movl_T0_reg(s, rn);
8025 if (insn & (1 << 10)) {
8026 /* immediate */
8027 gen_op_movl_T1_im((insn >> 6) & 7);
8028 } else {
8029 /* reg */
8030 rm = (insn >> 6) & 7;
8031 gen_movl_T1_reg(s, rm);
8033 if (insn & (1 << 9)) {
8034 if (s->condexec_mask)
8035 gen_op_subl_T0_T1();
8036 else
8037 gen_op_subl_T0_T1_cc();
8038 } else {
8039 if (s->condexec_mask)
8040 gen_op_addl_T0_T1();
8041 else
8042 gen_op_addl_T0_T1_cc();
8044 gen_movl_reg_T0(s, rd);
8045 } else {
8046 /* shift immediate */
8047 rm = (insn >> 3) & 7;
8048 shift = (insn >> 6) & 0x1f;
8049 tmp = load_reg(s, rm);
8050 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8051 if (!s->condexec_mask)
8052 gen_logic_CC(tmp);
8053 store_reg(s, rd, tmp);
8055 break;
8056 case 2: case 3:
8057 /* arithmetic large immediate */
8058 op = (insn >> 11) & 3;
8059 rd = (insn >> 8) & 0x7;
8060 if (op == 0) {
8061 gen_op_movl_T0_im(insn & 0xff);
8062 } else {
8063 gen_movl_T0_reg(s, rd);
8064 gen_op_movl_T1_im(insn & 0xff);
8066 switch (op) {
8067 case 0: /* mov */
8068 if (!s->condexec_mask)
8069 gen_op_logic_T0_cc();
8070 break;
8071 case 1: /* cmp */
8072 gen_op_subl_T0_T1_cc();
8073 break;
8074 case 2: /* add */
8075 if (s->condexec_mask)
8076 gen_op_addl_T0_T1();
8077 else
8078 gen_op_addl_T0_T1_cc();
8079 break;
8080 case 3: /* sub */
8081 if (s->condexec_mask)
8082 gen_op_subl_T0_T1();
8083 else
8084 gen_op_subl_T0_T1_cc();
8085 break;
8087 if (op != 1)
8088 gen_movl_reg_T0(s, rd);
8089 break;
8090 case 4:
8091 if (insn & (1 << 11)) {
8092 rd = (insn >> 8) & 7;
8093 /* load pc-relative. Bit 1 of PC is ignored. */
8094 val = s->pc + 2 + ((insn & 0xff) * 4);
8095 val &= ~(uint32_t)2;
8096 addr = new_tmp();
8097 tcg_gen_movi_i32(addr, val);
8098 tmp = gen_ld32(addr, IS_USER(s));
8099 dead_tmp(addr);
8100 store_reg(s, rd, tmp);
8101 break;
8103 if (insn & (1 << 10)) {
8104 /* data processing extended or blx */
8105 rd = (insn & 7) | ((insn >> 4) & 8);
8106 rm = (insn >> 3) & 0xf;
8107 op = (insn >> 8) & 3;
8108 switch (op) {
8109 case 0: /* add */
8110 gen_movl_T0_reg(s, rd);
8111 gen_movl_T1_reg(s, rm);
8112 gen_op_addl_T0_T1();
8113 gen_movl_reg_T0(s, rd);
8114 break;
8115 case 1: /* cmp */
8116 gen_movl_T0_reg(s, rd);
8117 gen_movl_T1_reg(s, rm);
8118 gen_op_subl_T0_T1_cc();
8119 break;
8120 case 2: /* mov/cpy */
8121 gen_movl_T0_reg(s, rm);
8122 gen_movl_reg_T0(s, rd);
8123 break;
8124 case 3:/* branch [and link] exchange thumb register */
8125 tmp = load_reg(s, rm);
8126 if (insn & (1 << 7)) {
8127 val = (uint32_t)s->pc | 1;
8128 tmp2 = new_tmp();
8129 tcg_gen_movi_i32(tmp2, val);
8130 store_reg(s, 14, tmp2);
8132 gen_bx(s, tmp);
8133 break;
8135 break;
8138 /* data processing register */
8139 rd = insn & 7;
8140 rm = (insn >> 3) & 7;
8141 op = (insn >> 6) & 0xf;
8142 if (op == 2 || op == 3 || op == 4 || op == 7) {
8143 /* the shift/rotate ops want the operands backwards */
8144 val = rm;
8145 rm = rd;
8146 rd = val;
8147 val = 1;
8148 } else {
8149 val = 0;
8152 if (op == 9) /* neg */
8153 gen_op_movl_T0_im(0);
8154 else if (op != 0xf) /* mvn doesn't read its first operand */
8155 gen_movl_T0_reg(s, rd);
8157 gen_movl_T1_reg(s, rm);
8158 switch (op) {
8159 case 0x0: /* and */
8160 gen_op_andl_T0_T1();
8161 if (!s->condexec_mask)
8162 gen_op_logic_T0_cc();
8163 break;
8164 case 0x1: /* eor */
8165 gen_op_xorl_T0_T1();
8166 if (!s->condexec_mask)
8167 gen_op_logic_T0_cc();
8168 break;
8169 case 0x2: /* lsl */
8170 if (s->condexec_mask) {
8171 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8172 } else {
8173 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8174 gen_op_logic_T1_cc();
8176 break;
8177 case 0x3: /* lsr */
8178 if (s->condexec_mask) {
8179 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8180 } else {
8181 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8182 gen_op_logic_T1_cc();
8184 break;
8185 case 0x4: /* asr */
8186 if (s->condexec_mask) {
8187 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8188 } else {
8189 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8190 gen_op_logic_T1_cc();
8192 break;
8193 case 0x5: /* adc */
8194 if (s->condexec_mask)
8195 gen_adc_T0_T1();
8196 else
8197 gen_op_adcl_T0_T1_cc();
8198 break;
8199 case 0x6: /* sbc */
8200 if (s->condexec_mask)
8201 gen_sbc_T0_T1();
8202 else
8203 gen_op_sbcl_T0_T1_cc();
8204 break;
8205 case 0x7: /* ror */
8206 if (s->condexec_mask) {
8207 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8208 } else {
8209 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8210 gen_op_logic_T1_cc();
8212 break;
8213 case 0x8: /* tst */
8214 gen_op_andl_T0_T1();
8215 gen_op_logic_T0_cc();
8216 rd = 16;
8217 break;
8218 case 0x9: /* neg */
8219 if (s->condexec_mask)
8220 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8221 else
8222 gen_op_subl_T0_T1_cc();
8223 break;
8224 case 0xa: /* cmp */
8225 gen_op_subl_T0_T1_cc();
8226 rd = 16;
8227 break;
8228 case 0xb: /* cmn */
8229 gen_op_addl_T0_T1_cc();
8230 rd = 16;
8231 break;
8232 case 0xc: /* orr */
8233 gen_op_orl_T0_T1();
8234 if (!s->condexec_mask)
8235 gen_op_logic_T0_cc();
8236 break;
8237 case 0xd: /* mul */
8238 gen_op_mull_T0_T1();
8239 if (!s->condexec_mask)
8240 gen_op_logic_T0_cc();
8241 break;
8242 case 0xe: /* bic */
8243 gen_op_bicl_T0_T1();
8244 if (!s->condexec_mask)
8245 gen_op_logic_T0_cc();
8246 break;
8247 case 0xf: /* mvn */
8248 gen_op_notl_T1();
8249 if (!s->condexec_mask)
8250 gen_op_logic_T1_cc();
8251 val = 1;
8252 rm = rd;
8253 break;
8255 if (rd != 16) {
8256 if (val)
8257 gen_movl_reg_T1(s, rm);
8258 else
8259 gen_movl_reg_T0(s, rd);
8261 break;
8263 case 5:
8264 /* load/store register offset. */
8265 rd = insn & 7;
8266 rn = (insn >> 3) & 7;
8267 rm = (insn >> 6) & 7;
8268 op = (insn >> 9) & 7;
8269 addr = load_reg(s, rn);
8270 tmp = load_reg(s, rm);
8271 tcg_gen_add_i32(addr, addr, tmp);
8272 dead_tmp(tmp);
8274 if (op < 3) /* store */
8275 tmp = load_reg(s, rd);
8277 switch (op) {
8278 case 0: /* str */
8279 gen_st32(tmp, addr, IS_USER(s));
8280 break;
8281 case 1: /* strh */
8282 gen_st16(tmp, addr, IS_USER(s));
8283 break;
8284 case 2: /* strb */
8285 gen_st8(tmp, addr, IS_USER(s));
8286 break;
8287 case 3: /* ldrsb */
8288 tmp = gen_ld8s(addr, IS_USER(s));
8289 break;
8290 case 4: /* ldr */
8291 tmp = gen_ld32(addr, IS_USER(s));
8292 break;
8293 case 5: /* ldrh */
8294 tmp = gen_ld16u(addr, IS_USER(s));
8295 break;
8296 case 6: /* ldrb */
8297 tmp = gen_ld8u(addr, IS_USER(s));
8298 break;
8299 case 7: /* ldrsh */
8300 tmp = gen_ld16s(addr, IS_USER(s));
8301 break;
8303 if (op >= 3) /* load */
8304 store_reg(s, rd, tmp);
8305 dead_tmp(addr);
8306 break;
8308 case 6:
8309 /* load/store word immediate offset */
8310 rd = insn & 7;
8311 rn = (insn >> 3) & 7;
8312 addr = load_reg(s, rn);
8313 val = (insn >> 4) & 0x7c;
8314 tcg_gen_addi_i32(addr, addr, val);
8316 if (insn & (1 << 11)) {
8317 /* load */
8318 tmp = gen_ld32(addr, IS_USER(s));
8319 store_reg(s, rd, tmp);
8320 } else {
8321 /* store */
8322 tmp = load_reg(s, rd);
8323 gen_st32(tmp, addr, IS_USER(s));
8325 dead_tmp(addr);
8326 break;
8328 case 7:
8329 /* load/store byte immediate offset */
8330 rd = insn & 7;
8331 rn = (insn >> 3) & 7;
8332 addr = load_reg(s, rn);
8333 val = (insn >> 6) & 0x1f;
8334 tcg_gen_addi_i32(addr, addr, val);
8336 if (insn & (1 << 11)) {
8337 /* load */
8338 tmp = gen_ld8u(addr, IS_USER(s));
8339 store_reg(s, rd, tmp);
8340 } else {
8341 /* store */
8342 tmp = load_reg(s, rd);
8343 gen_st8(tmp, addr, IS_USER(s));
8345 dead_tmp(addr);
8346 break;
8348 case 8:
8349 /* load/store halfword immediate offset */
8350 rd = insn & 7;
8351 rn = (insn >> 3) & 7;
8352 addr = load_reg(s, rn);
8353 val = (insn >> 5) & 0x3e;
8354 tcg_gen_addi_i32(addr, addr, val);
8356 if (insn & (1 << 11)) {
8357 /* load */
8358 tmp = gen_ld16u(addr, IS_USER(s));
8359 store_reg(s, rd, tmp);
8360 } else {
8361 /* store */
8362 tmp = load_reg(s, rd);
8363 gen_st16(tmp, addr, IS_USER(s));
8365 dead_tmp(addr);
8366 break;
8368 case 9:
8369 /* load/store from stack */
8370 rd = (insn >> 8) & 7;
8371 addr = load_reg(s, 13);
8372 val = (insn & 0xff) * 4;
8373 tcg_gen_addi_i32(addr, addr, val);
8375 if (insn & (1 << 11)) {
8376 /* load */
8377 tmp = gen_ld32(addr, IS_USER(s));
8378 store_reg(s, rd, tmp);
8379 } else {
8380 /* store */
8381 tmp = load_reg(s, rd);
8382 gen_st32(tmp, addr, IS_USER(s));
8384 dead_tmp(addr);
8385 break;
8387 case 10:
8388 /* add to high reg */
8389 rd = (insn >> 8) & 7;
8390 if (insn & (1 << 11)) {
8391 /* SP */
8392 tmp = load_reg(s, 13);
8393 } else {
8394 /* PC. bit 1 is ignored. */
8395 tmp = new_tmp();
8396 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8398 val = (insn & 0xff) * 4;
8399 tcg_gen_addi_i32(tmp, tmp, val);
8400 store_reg(s, rd, tmp);
8401 break;
8403 case 11:
8404 /* misc */
8405 op = (insn >> 8) & 0xf;
8406 switch (op) {
8407 case 0:
8408 /* adjust stack pointer */
8409 tmp = load_reg(s, 13);
8410 val = (insn & 0x7f) * 4;
8411 if (insn & (1 << 7))
8412 val = -(int32_t)val;
8413 tcg_gen_addi_i32(tmp, tmp, val);
8414 store_reg(s, 13, tmp);
8415 break;
8417 case 2: /* sign/zero extend. */
8418 ARCH(6);
8419 rd = insn & 7;
8420 rm = (insn >> 3) & 7;
8421 tmp = load_reg(s, rm);
8422 switch ((insn >> 6) & 3) {
8423 case 0: gen_sxth(tmp); break;
8424 case 1: gen_sxtb(tmp); break;
8425 case 2: gen_uxth(tmp); break;
8426 case 3: gen_uxtb(tmp); break;
8428 store_reg(s, rd, tmp);
8429 break;
8430 case 4: case 5: case 0xc: case 0xd:
8431 /* push/pop */
8432 addr = load_reg(s, 13);
8433 if (insn & (1 << 8))
8434 offset = 4;
8435 else
8436 offset = 0;
8437 for (i = 0; i < 8; i++) {
8438 if (insn & (1 << i))
8439 offset += 4;
8441 if ((insn & (1 << 11)) == 0) {
8442 tcg_gen_addi_i32(addr, addr, -offset);
8444 for (i = 0; i < 8; i++) {
8445 if (insn & (1 << i)) {
8446 if (insn & (1 << 11)) {
8447 /* pop */
8448 tmp = gen_ld32(addr, IS_USER(s));
8449 store_reg(s, i, tmp);
8450 } else {
8451 /* push */
8452 tmp = load_reg(s, i);
8453 gen_st32(tmp, addr, IS_USER(s));
8455 /* advance to the next address. */
8456 tcg_gen_addi_i32(addr, addr, 4);
8459 TCGV_UNUSED(tmp);
8460 if (insn & (1 << 8)) {
8461 if (insn & (1 << 11)) {
8462 /* pop pc */
8463 tmp = gen_ld32(addr, IS_USER(s));
8464 /* don't set the pc until the rest of the instruction
8465 has completed */
8466 } else {
8467 /* push lr */
8468 tmp = load_reg(s, 14);
8469 gen_st32(tmp, addr, IS_USER(s));
8471 tcg_gen_addi_i32(addr, addr, 4);
8473 if ((insn & (1 << 11)) == 0) {
8474 tcg_gen_addi_i32(addr, addr, -offset);
8476 /* write back the new stack pointer */
8477 store_reg(s, 13, addr);
8478 /* set the new PC value */
8479 if ((insn & 0x0900) == 0x0900)
8480 gen_bx(s, tmp);
8481 break;
8483         case 1: case 3: case 9: case 11: /* cbz/cbnz */
8484 rm = insn & 7;
8485 tmp = load_reg(s, rm);
8486 s->condlabel = gen_new_label();
8487 s->condjmp = 1;
8488 if (insn & (1 << 11))
8489 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8490 else
8491 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8492 dead_tmp(tmp);
8493 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
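            /* cbz/cbnz offset is i:imm5:'0' (insn[9], insn[7:3]): a
               forward range of 0..126 bytes from the PC. */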
8494 val = (uint32_t)s->pc + 2;
8495 val += offset;
8496 gen_jmp(s, val);
8497 break;
8499 case 15: /* IT, nop-hint. */
8500 if ((insn & 0xf) == 0) {
8501 gen_nop_hint(s, (insn >> 4) & 0xf);
8502 break;
8504 /* If Then. */
8505 s->condexec_cond = (insn >> 4) & 0xe;
8506 s->condexec_mask = insn & 0x1f;
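            /* condexec_cond holds firstcond[3:1]; the low condition bit
               for each instruction in the block comes from the mask,
               which the translation loop shifts left once per
               instruction until it is empty. */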
8507             /* No actual code generated for this insn, just set up state. */
8508 break;
8510 case 0xe: /* bkpt */
8511 gen_set_condexec(s);
8512 gen_set_pc_im(s->pc - 2);
8513 gen_exception(EXCP_BKPT);
8514 s->is_jmp = DISAS_JUMP;
8515 break;
8517 case 0xa: /* rev */
8518 ARCH(6);
8519 rn = (insn >> 3) & 0x7;
8520 rd = insn & 0x7;
8521 tmp = load_reg(s, rn);
8522 switch ((insn >> 6) & 3) {
8523 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8524 case 1: gen_rev16(tmp); break;
8525 case 3: gen_revsh(tmp); break;
8526 default: goto illegal_op;
8528 store_reg(s, rd, tmp);
8529 break;
8531 case 6: /* cps */
8532 ARCH(6);
8533 if (IS_USER(s))
8534 break;
8535 if (IS_M(env)) {
8536 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8537 /* PRIMASK */
8538 if (insn & 1) {
8539 addr = tcg_const_i32(16);
8540 gen_helper_v7m_msr(cpu_env, addr, tmp);
8542 /* FAULTMASK */
8543 if (insn & 2) {
8544 addr = tcg_const_i32(17);
8545 gen_helper_v7m_msr(cpu_env, addr, tmp);
8547 gen_lookup_tb(s);
8548 } else {
8549 if (insn & (1 << 4))
8550 shift = CPSR_A | CPSR_I | CPSR_F;
8551 else
8552 shift = 0;
8554 val = ((insn & 7) << 6) & shift;
8555 gen_op_movl_T0_im(val);
8556 gen_set_psr_T0(s, shift, 0);
8558 break;
8560 default:
8561 goto undef;
8563 break;
8565 case 12:
8566 /* load/store multiple */
8567 rn = (insn >> 8) & 0x7;
8568 addr = load_reg(s, rn);
8569 for (i = 0; i < 8; i++) {
8570 if (insn & (1 << i)) {
8571 if (insn & (1 << 11)) {
8572 /* load */
8573 tmp = gen_ld32(addr, IS_USER(s));
8574 store_reg(s, i, tmp);
8575 } else {
8576 /* store */
8577 tmp = load_reg(s, i);
8578 gen_st32(tmp, addr, IS_USER(s));
8580 /* advance to the next address */
8581 tcg_gen_addi_i32(addr, addr, 4);
8584 /* Base register writeback. */
8585 if ((insn & (1 << rn)) == 0) {
8586 store_reg(s, rn, addr);
8587 } else {
8588 dead_tmp(addr);
8590 break;
8592 case 13:
8593 /* conditional branch or swi */
8594 cond = (insn >> 8) & 0xf;
8595 if (cond == 0xe)
8596 goto undef;
8598 if (cond == 0xf) {
8599 /* swi */
8600 gen_set_condexec(s);
8601 gen_set_pc_im(s->pc);
8602 s->is_jmp = DISAS_SWI;
8603 break;
8605 /* generate a conditional jump to next instruction */
8606 s->condlabel = gen_new_label();
8607 gen_test_cc(cond ^ 1, s->condlabel);
8608 s->condjmp = 1;
8609 gen_movl_T1_reg(s, 15);
8611 /* jump to the offset */
8612 val = (uint32_t)s->pc + 2;
8613 offset = ((int32_t)insn << 24) >> 24;
8614 val += offset << 1;
8615 gen_jmp(s, val);
8616 break;
8618 case 14:
8619 if (insn & (1 << 11)) {
8620 if (disas_thumb2_insn(env, s, insn))
8621 goto undef32;
8622 break;
8624 /* unconditional branch */
8625 val = (uint32_t)s->pc;
8626 offset = ((int32_t)insn << 21) >> 21;
8627 val += (offset << 1) + 2;
8628 gen_jmp(s, val);
8629 break;
8631 case 15:
8632 if (disas_thumb2_insn(env, s, insn))
8633 goto undef32;
8634 break;
8636 return;
8637 undef32:
8638 gen_set_condexec(s);
8639 gen_set_pc_im(s->pc - 4);
8640 gen_exception(EXCP_UDEF);
8641 s->is_jmp = DISAS_JUMP;
8642 return;
8643 illegal_op:
8644 undef:
8645 gen_set_condexec(s);
8646 gen_set_pc_im(s->pc - 2);
8647 gen_exception(EXCP_UDEF);
8648     s->is_jmp = DISAS_JUMP;
8649 }
8651 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8652 basic block 'tb'. If search_pc is TRUE, also generate PC
8653 information for each intermediate instruction. */
8654 static inline void gen_intermediate_code_internal(CPUState *env,
8655 TranslationBlock *tb,
8656 int search_pc)
8657 {
8658     DisasContext dc1, *dc = &dc1;
8659 CPUBreakpoint *bp;
8660 uint16_t *gen_opc_end;
8661 int j, lj;
8662 target_ulong pc_start;
8663 uint32_t next_page_start;
8664 int num_insns;
8665 int max_insns;
8667 /* generate intermediate code */
8668 num_temps = 0;
8669 memset(temps, 0, sizeof(temps));
8671 pc_start = tb->pc;
8673 dc->tb = tb;
8675 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8677 dc->is_jmp = DISAS_NEXT;
8678 dc->pc = pc_start;
8679 dc->singlestep_enabled = env->singlestep_enabled;
8680 dc->condjmp = 0;
8681 dc->thumb = env->thumb;
8682 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8683 dc->condexec_cond = env->condexec_bits >> 4;
8684 #if !defined(CONFIG_USER_ONLY)
8685 if (IS_M(env)) {
8686 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8687 } else {
8688 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8690 #endif
8691 cpu_F0s = tcg_temp_new_i32();
8692 cpu_F1s = tcg_temp_new_i32();
8693 cpu_F0d = tcg_temp_new_i64();
8694 cpu_F1d = tcg_temp_new_i64();
8695 cpu_V0 = cpu_F0d;
8696 cpu_V1 = cpu_F1d;
8697 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8698 cpu_M0 = tcg_temp_new_i64();
8699 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8700 lj = -1;
8701 num_insns = 0;
8702 max_insns = tb->cflags & CF_COUNT_MASK;
8703 if (max_insns == 0)
8704 max_insns = CF_COUNT_MASK;
8706 gen_icount_start();
8707 /* Reset the conditional execution bits immediately. This avoids
8708 complications trying to do it at the end of the block. */
8709 if (env->condexec_bits)
8711 TCGv tmp = new_tmp();
8712 tcg_gen_movi_i32(tmp, 0);
8713 store_cpu_field(tmp, condexec_bits);
8715 do {
8716 #ifdef CONFIG_USER_ONLY
8717 /* Intercept jump to the magic kernel page. */
8718 if (dc->pc >= 0xffff0000) {
8719             /* We always get here via a jump, so we know we are not in a
8720 conditional execution block. */
8721 gen_exception(EXCP_KERNEL_TRAP);
8722 dc->is_jmp = DISAS_UPDATE;
8723 break;
8725 #else
8726 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8727             /* We always get here via a jump, so we know we are not in a
8728 conditional execution block. */
8729 gen_exception(EXCP_EXCEPTION_EXIT);
8730 dc->is_jmp = DISAS_UPDATE;
8731 break;
8733 #endif
8735 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8736 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
8737 if (bp->pc == dc->pc) {
8738 gen_set_condexec(dc);
8739 gen_set_pc_im(dc->pc);
8740 gen_exception(EXCP_DEBUG);
8741 dc->is_jmp = DISAS_JUMP;
8742 /* Advance PC so that clearing the breakpoint will
8743 invalidate this TB. */
8744 dc->pc += 2;
8745 goto done_generating;
8746 break;
8750 if (search_pc) {
8751 j = gen_opc_ptr - gen_opc_buf;
8752 if (lj < j) {
8753 lj++;
8754 while (lj < j)
8755 gen_opc_instr_start[lj++] = 0;
8757 gen_opc_pc[lj] = dc->pc;
8758 gen_opc_instr_start[lj] = 1;
8759 gen_opc_icount[lj] = num_insns;
8762 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8763 gen_io_start();
8765 if (env->thumb) {
8766 disas_thumb_insn(env, dc);
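            /* Advance the IT-block state: shift the next condition bit
               out of the mask into condexec_cond; an empty mask ends
               the block. */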
8767 if (dc->condexec_mask) {
8768 dc->condexec_cond = (dc->condexec_cond & 0xe)
8769 | ((dc->condexec_mask >> 4) & 1);
8770 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8771 if (dc->condexec_mask == 0) {
8772 dc->condexec_cond = 0;
8775 } else {
8776 disas_arm_insn(env, dc);
8778 if (num_temps) {
8779 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8780 num_temps = 0;
8783 if (dc->condjmp && !dc->is_jmp) {
8784 gen_set_label(dc->condlabel);
8785 dc->condjmp = 0;
8787 /* Translation stops when a conditional branch is encountered.
8788 * Otherwise the subsequent code could get translated several times.
8789 * Also stop translation when a page boundary is reached. This
8790 * ensures prefetch aborts occur at the right place. */
8791 num_insns ++;
8792 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8793 !env->singlestep_enabled &&
8794 dc->pc < next_page_start &&
8795 num_insns < max_insns);
8797 if (tb->cflags & CF_LAST_IO) {
8798 if (dc->condjmp) {
8799 /* FIXME: This can theoretically happen with self-modifying
8800 code. */
8801 cpu_abort(env, "IO on conditional branch instruction");
8803 gen_io_end();
8806 /* At this stage dc->condjmp will only be set when the skipped
8807 instruction was a conditional branch or trap, and the PC has
8808 already been written. */
8809 if (unlikely(env->singlestep_enabled)) {
8810 /* Make sure the pc is updated, and raise a debug exception. */
8811 if (dc->condjmp) {
8812 gen_set_condexec(dc);
8813 if (dc->is_jmp == DISAS_SWI) {
8814 gen_exception(EXCP_SWI);
8815 } else {
8816 gen_exception(EXCP_DEBUG);
8818 gen_set_label(dc->condlabel);
8820 if (dc->condjmp || !dc->is_jmp) {
8821 gen_set_pc_im(dc->pc);
8822 dc->condjmp = 0;
8824 gen_set_condexec(dc);
8825 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8826 gen_exception(EXCP_SWI);
8827 } else {
8828 /* FIXME: Single stepping a WFI insn will not halt
8829 the CPU. */
8830 gen_exception(EXCP_DEBUG);
8832 } else {
8833 /* While branches must always occur at the end of an IT block,
8834 there are a few other things that can cause us to terminate
8835            the TB in the middle of an IT block:
8836 - Exception generating instructions (bkpt, swi, undefined).
8837 - Page boundaries.
8838 - Hardware watchpoints.
8839            Hardware breakpoints have already been handled and skip this code.
8840          */
8841 gen_set_condexec(dc);
8842 switch(dc->is_jmp) {
8843 case DISAS_NEXT:
8844 gen_goto_tb(dc, 1, dc->pc);
8845 break;
8846 default:
8847 case DISAS_JUMP:
8848 case DISAS_UPDATE:
8849 /* indicate that the hash table must be used to find the next TB */
8850 tcg_gen_exit_tb(0);
8851 break;
8852 case DISAS_TB_JUMP:
8853 /* nothing more to generate */
8854 break;
8855 case DISAS_WFI:
8856 gen_helper_wfi();
8857 break;
8858 case DISAS_SWI:
8859 gen_exception(EXCP_SWI);
8860 break;
8862 if (dc->condjmp) {
8863 gen_set_label(dc->condlabel);
8864 gen_set_condexec(dc);
8865 gen_goto_tb(dc, 1, dc->pc);
8866 dc->condjmp = 0;
8870 done_generating:
8871 gen_icount_end(tb, num_insns);
8872 *gen_opc_ptr = INDEX_op_end;
8874 #ifdef DEBUG_DISAS
8875 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8876 qemu_log("----------------\n");
8877 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8878 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8879 qemu_log("\n");
8881 #endif
8882 if (search_pc) {
8883 j = gen_opc_ptr - gen_opc_buf;
8884 lj++;
8885 while (lj <= j)
8886 gen_opc_instr_start[lj++] = 0;
8887 } else {
8888 tb->size = dc->pc - pc_start;
8889         tb->icount = num_insns;
8890     }
8891 }
8893 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8894 {
8895     gen_intermediate_code_internal(env, tb, 0);
8896 }
8898 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8899 {
8900     gen_intermediate_code_internal(env, tb, 1);
8901 }
8903 static const char *cpu_mode_names[16] = {
8904 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8905 "???", "???", "???", "und", "???", "???", "???", "sys"
8908 void cpu_dump_state(CPUState *env, FILE *f,
8909 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8910 int flags)
8911 {
8912     int i;
8913 #if 0
8914 union {
8915 uint32_t i;
8916 float s;
8917 } s0, s1;
8918 CPU_DoubleU d;
8919 /* ??? This assumes float64 and double have the same layout.
8920 Oh well, it's only debug dumps. */
8921 union {
8922 float64 f64;
8923 double d;
8924 } d0;
8925 #endif
8926 uint32_t psr;
8928 for(i=0;i<16;i++) {
8929 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8930 if ((i % 4) == 3)
8931 cpu_fprintf(f, "\n");
8932 else
8933 cpu_fprintf(f, " ");
8935 psr = cpsr_read(env);
8936 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8937 psr,
8938 psr & (1 << 31) ? 'N' : '-',
8939 psr & (1 << 30) ? 'Z' : '-',
8940 psr & (1 << 29) ? 'C' : '-',
8941 psr & (1 << 28) ? 'V' : '-',
8942 psr & CPSR_T ? 'T' : 'A',
8943 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8945 #if 0
8946 for (i = 0; i < 16; i++) {
8947 d.d = env->vfp.regs[i];
8948 s0.i = d.l.lower;
8949 s1.i = d.l.upper;
8950 d0.f64 = d.d;
8951 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8952 i * 2, (int)s0.i, s0.s,
8953 i * 2 + 1, (int)s1.i, s1.s,
8954 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8955 d0.d);
8957 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8958 #endif
8959 }
8961 void gen_pc_load(CPUState *env, TranslationBlock *tb,
8962 unsigned long searched_pc, int pc_pos, void *puc)
8963 {
8964     env->regs[15] = gen_opc_pc[pc_pos];
8965 }