/* qemu-kvm/fedora.git: target-arm/translate.c */
/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;

/* FIXME:  These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");

#define GEN_HELPER 2
#include "helpers.h"
}
/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function.  */
#define MAX_TEMPS 8
static int num_temps;
static TCGv temps[MAX_TEMPS];

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}
/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
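/* A note on the temp cache (explanatory, not from the original source):
   temps[0..num_temps-1] are the live temporaries, in allocation order,
   and temps[num_temps..] hold previously created TCG temps available for
   reuse.  dead_tmp() releases an arbitrary live temp by rotating it into
   the slot just past the live region: with temps = [a, b, c] and
   num_temps = 3, dead_tmp(a) shifts b and c down and parks a at index 2,
   so the next new_tmp() hands a back without creating a new TCG temp.  */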
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
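/* The xor/sub pair above is the standard branch-free sign extension:
   for a w-bit value x with sign bit s = 1 << (w - 1), (x ^ s) - s
   equals x when the sign bit is clear, and the negative interpretation
   x - 2*s when it is set.  A host-side sketch of the same trick
   (illustrative only, assumes 0 < width < 32; not part of the
   translator):  */
#if 0
static int32_t sign_extend(uint32_t x, int width)
{
    uint32_t signbit = 1u << (width - 1);
    x &= (1u << width) - 1;            /* keep the low 'width' bits */
    return (int32_t)((x ^ signbit) - signbit);
}
/* sign_extend(0xff, 8) == -1, sign_extend(0x7f, 8) == 127 */
#endif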
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
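/* Why the masking works (explanatory note): clearing bit 15 of both
   operands before the 32-bit add guarantees that no carry propagates
   from the low halfword into the high halfword, so the two 16-bit
   lanes add independently.  The saved xor then patches bit 15 of the
   low lane back to its true value, since a + b and a ^ b agree in any
   bit position that receives no carry-in.  */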
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}

static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}
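/* Decode note: in the ARM shifter-operand encoding a zero immediate is
   overloaded, so the cases above do not all mean "shift by 0".  LSR #0
   encodes LSR #32 (result 0, C = bit 31); ASR #0 encodes ASR #32
   (result and C both copies of the sign bit, hence the clamp to 31,
   the largest shift the i32 op accepts); and ROR #0 encodes RRX, a
   one-bit rotate right through the carry flag, implemented above by
   shifting the old CF in at bit 31.  */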
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
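/* Flag representation used by the branches above (explanatory note):
   the flags are stored lazily rather than as single bits.  CF holds 0
   or 1; ZF holds a 32-bit value that is zero exactly when the Z flag
   is set (hence "eq" branches on ZF == 0); NF and VF hold values whose
   bit 31 is the N and V flag respectively (hence the signed LT/GE
   compares against 0).  The ge/lt cases exploit the fact that N ^ V
   only needs to be examined at bit 31.  */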
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
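/* Explanatory note: the table is indexed by the 4-bit data-processing
   opcode (AND, EOR, SUB, RSB, ADD, ADC, SBC, RSC, TST, TEQ, CMP, CMN,
   ORR, MOV, BIC, MVN).  A 1 marks the logical operations, whose S-bit
   forms set N and Z from the result and C from the shifter carry-out;
   the arithmetic operations (0) get all four flags from the add/sub
   helpers instead.  */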
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}
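/* Interworking note: ARM/Thumb branch-exchange encodes the target
   instruction set in bit 0 of the destination address, which is why
   both helpers split the value into a one-bit "thumb" state and an
   address with bit 0 cleared; a branch to 0x8001, for example,
   continues at 0x8000 in Thumb state.  */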
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    store_cpu_field(tmp, regs[15]);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
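/* Decode note for the bit tests above: in the ARM load/store word and
   byte encoding, bit 25 (the I bit) selects an immediate versus a
   shifted-register offset, and bit 23 (the U bit) selects whether the
   offset is added to or subtracted from the base.  For example,
   "ldr r0, [r1, #-4]" has bits 25 and 23 clear and an immediate field
   of 4, so the code above subtracts 4 from the base register value.  */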
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}

VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
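/* Layout note: the VFP register file is stored as an array of 64-bit
   doubles, and each single-precision register aliases half of one,
   exactly as on hardware where s0/s1 overlay d0, s2/s3 overlay d1, and
   so on.  An even-numbered single lives in the low word of
   vfp.regs[reg / 2] and an odd-numbered one in the high word, which is
   what the reg & 1 test above selects.  */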
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
/* FIXME: Remove these.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}

static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
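/* Bookkeeping note (inferred from the code above): the wCon control
   register tracks which parts of the iwMMXt state an instruction has
   touched.  As used here, bit 1 (set_mup) records an update of the
   main wR register file and bit 0 (set_cup) an update of the control
   registers, so the decoder calls one or both after each operation it
   emits.  */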
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(-offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
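/* Addressing note: the coprocessor load/store form decoded above uses
   the standard ARM P/U/W bits.  Bit 24 (P) selects pre-indexing, where
   T1 becomes base +/- offset before the access; bit 21 (W) requests
   writeback of the updated address (and with P clear selects
   post-indexing); bit 23 (U) gives the offset's sign.  P = 0, W = 0,
   U = 0 is not a valid combination, hence the final return 1.  */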
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    } else
        gen_iwmmxt_movl_T0_T1_wRn(rd);

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {         /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else {                            /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {          /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else {                    /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else {                    /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {          /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {     /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else {                    /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {     /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else {                    /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;
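    /* Decode note: the switch key below packs instruction bits [23:20]
       into the top hex digit and bits [11:4] into the low two, giving
       each iwMMXt data-processing form a compact opcode such as 0x000
       (WOR) or 0x111 (TMRC).  */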
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_reg_T0(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
            }
            break;
        case 2:
            gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_set_nzcv(cpu_T[1]);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:     /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:     /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:     /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:     /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:     /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:     /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:     /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:     /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
2180 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2181 case 0x314: case 0x714: case 0xb14: case 0xf14:
2182 wrd = (insn >> 12) & 0xf;
2183 rd0 = (insn >> 16) & 0xf;
2184 gen_op_iwmmxt_movq_M0_wRn(rd0);
2185 switch ((insn >> 22) & 3) {
2186 case 0:
2187 return 1;
2188 case 1:
2189 if (gen_iwmmxt_shift(insn, 0xf))
2190 return 1;
2191 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2192 break;
2193 case 2:
2194 if (gen_iwmmxt_shift(insn, 0x1f))
2195 return 1;
2196 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2197 break;
2198 case 3:
2199 if (gen_iwmmxt_shift(insn, 0x3f))
2200 return 1;
2201 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2202 break;
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 gen_op_iwmmxt_set_cup();
2207 break;
2208 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2209 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2210 wrd = (insn >> 12) & 0xf;
2211 rd0 = (insn >> 16) & 0xf;
2212 rd1 = (insn >> 0) & 0xf;
2213 gen_op_iwmmxt_movq_M0_wRn(rd0);
2214 switch ((insn >> 22) & 3) {
2215 case 0:
2216 if (insn & (1 << 21))
2217 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2218 else
2219 gen_op_iwmmxt_minub_M0_wRn(rd1);
2220 break;
2221 case 1:
2222 if (insn & (1 << 21))
2223 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2224 else
2225 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2226 break;
2227 case 2:
2228 if (insn & (1 << 21))
2229 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2230 else
2231 gen_op_iwmmxt_minul_M0_wRn(rd1);
2232 break;
2233 case 3:
2234 return 1;
2236 gen_op_iwmmxt_movq_wRn_M0(wrd);
2237 gen_op_iwmmxt_set_mup();
2238 break;
2239 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2240 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 rd1 = (insn >> 0) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 switch ((insn >> 22) & 3) {
2246 case 0:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2251 break;
2252 case 1:
2253 if (insn & (1 << 21))
2254 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2255 else
2256 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2257 break;
2258 case 2:
2259 if (insn & (1 << 21))
2260 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2261 else
2262 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2263 break;
2264 case 3:
2265 return 1;
2267 gen_op_iwmmxt_movq_wRn_M0(wrd);
2268 gen_op_iwmmxt_set_mup();
2269 break;
2270 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2271 case 0x402: case 0x502: case 0x602: case 0x702:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 gen_op_movl_T0_im((insn >> 20) & 3);
2277 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2278 gen_op_iwmmxt_movq_wRn_M0(wrd);
2279 gen_op_iwmmxt_set_mup();
2280 break;
2281 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2282 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2283 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2284 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2285 wrd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
2287 rd1 = (insn >> 0) & 0xf;
2288 gen_op_iwmmxt_movq_M0_wRn(rd0);
2289 switch ((insn >> 20) & 0xf) {
2290 case 0x0:
2291 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2292 break;
2293 case 0x1:
2294 gen_op_iwmmxt_subub_M0_wRn(rd1);
2295 break;
2296 case 0x3:
2297 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2298 break;
2299 case 0x4:
2300 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2301 break;
2302 case 0x5:
2303 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2304 break;
2305 case 0x7:
2306 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2307 break;
2308 case 0x8:
2309 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2310 break;
2311 case 0x9:
2312 gen_op_iwmmxt_subul_M0_wRn(rd1);
2313 break;
2314 case 0xb:
2315 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2316 break;
2317 default:
2318 return 1;
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2325 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2326 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2327 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 gen_op_iwmmxt_movq_M0_wRn(rd0);
2331 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2332 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
2337 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2338 case 0x418: case 0x518: case 0x618: case 0x718:
2339 case 0x818: case 0x918: case 0xa18: case 0xb18:
2340 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2341 wrd = (insn >> 12) & 0xf;
2342 rd0 = (insn >> 16) & 0xf;
2343 rd1 = (insn >> 0) & 0xf;
2344 gen_op_iwmmxt_movq_M0_wRn(rd0);
2345 switch ((insn >> 20) & 0xf) {
2346 case 0x0:
2347 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2348 break;
2349 case 0x1:
2350 gen_op_iwmmxt_addub_M0_wRn(rd1);
2351 break;
2352 case 0x3:
2353 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2354 break;
2355 case 0x4:
2356 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2357 break;
2358 case 0x5:
2359 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2360 break;
2361 case 0x7:
2362 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2363 break;
2364 case 0x8:
2365 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2366 break;
2367 case 0x9:
2368 gen_op_iwmmxt_addul_M0_wRn(rd1);
2369 break;
2370 case 0xb:
2371 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2372 break;
2373 default:
2374 return 1;
2376 gen_op_iwmmxt_movq_wRn_M0(wrd);
2377 gen_op_iwmmxt_set_mup();
2378 gen_op_iwmmxt_set_cup();
2379 break;
2380 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2381 case 0x408: case 0x508: case 0x608: case 0x708:
2382 case 0x808: case 0x908: case 0xa08: case 0xb08:
2383 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2384 wrd = (insn >> 12) & 0xf;
2385 rd0 = (insn >> 16) & 0xf;
2386 rd1 = (insn >> 0) & 0xf;
2387 gen_op_iwmmxt_movq_M0_wRn(rd0);
2388 if (!(insn & (1 << 20)))
2389 return 1;
2390 switch ((insn >> 22) & 3) {
2391 case 0:
2392 return 1;
2393 case 1:
2394 if (insn & (1 << 21))
2395 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2396 else
2397 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2398 break;
2399 case 2:
2400 if (insn & (1 << 21))
2401 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2402 else
2403 gen_op_iwmmxt_packul_M0_wRn(rd1);
2404 break;
2405 case 3:
2406 if (insn & (1 << 21))
2407 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2408 else
2409 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2410 break;
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 gen_op_iwmmxt_set_cup();
2415 break;
2416 case 0x201: case 0x203: case 0x205: case 0x207:
2417 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2418 case 0x211: case 0x213: case 0x215: case 0x217:
2419 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2420 wrd = (insn >> 5) & 0xf;
2421 rd0 = (insn >> 12) & 0xf;
2422 rd1 = (insn >> 0) & 0xf;
2423 if (rd0 == 0xf || rd1 == 0xf)
2424 return 1;
2425 gen_op_iwmmxt_movq_M0_wRn(wrd);
2426 switch ((insn >> 16) & 0xf) {
2427 case 0x0: /* TMIA */
2428 gen_movl_T0_reg(s, rd0);
2429 gen_movl_T1_reg(s, rd1);
2430 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2431 break;
2432 case 0x8: /* TMIAPH */
2433 gen_movl_T0_reg(s, rd0);
2434 gen_movl_T1_reg(s, rd1);
2435 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2436 break;
2437 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2438 gen_movl_T1_reg(s, rd0);
2439 if (insn & (1 << 16))
2440 gen_op_shrl_T1_im(16);
2441 gen_op_movl_T0_T1();
2442 gen_movl_T1_reg(s, rd1);
2443 if (insn & (1 << 17))
2444 gen_op_shrl_T1_im(16);
2445 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2446 break;
2447 default:
2448 return 1;
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 break;
2453 default:
2454 return 1;
2457 return 0;
2460 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2461    (i.e. an undefined instruction).  */
2462 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2464 int acc, rd0, rd1, rdhi, rdlo;
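/* Two XScale formats are handled here.  MIA/MIAPH/MIAxy accumulate
   multiplies into the 40-bit accumulator acc0: MIA adds the full 32x32
   product, MIAPH adds both 16x16 halfword products, and MIAxy adds a
   single 16x16 product with the x/y bits selecting the top or bottom
   halfword of each operand.  MAR/MRA move acc0 to and from a pair of
   core registers, the top eight bits travelling in RdHi[7:0].  */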
2466 if ((insn & 0x0ff00f10) == 0x0e200010) {
2467 /* Multiply with Internal Accumulate Format */
2468 rd0 = (insn >> 12) & 0xf;
2469 rd1 = insn & 0xf;
2470 acc = (insn >> 5) & 7;
2472 if (acc != 0)
2473 return 1;
2475 switch ((insn >> 16) & 0xf) {
2476 case 0x0: /* MIA */
2477 gen_movl_T0_reg(s, rd0);
2478 gen_movl_T1_reg(s, rd1);
2479 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2480 break;
2481 case 0x8: /* MIAPH */
2482 gen_movl_T0_reg(s, rd0);
2483 gen_movl_T1_reg(s, rd1);
2484 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2485 break;
2486 case 0xc: /* MIABB */
2487 case 0xd: /* MIABT */
2488 case 0xe: /* MIATB */
2489 case 0xf: /* MIATT */
2490 gen_movl_T1_reg(s, rd0);
2491 if (insn & (1 << 16))
2492 gen_op_shrl_T1_im(16);
2493 gen_op_movl_T0_T1();
2494 gen_movl_T1_reg(s, rd1);
2495 if (insn & (1 << 17))
2496 gen_op_shrl_T1_im(16);
2497 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2498 break;
2499 default:
2500 return 1;
2503 gen_op_iwmmxt_movq_wRn_M0(acc);
2504 return 0;
2507 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2508 /* Internal Accumulator Access Format */
2509 rdhi = (insn >> 16) & 0xf;
2510 rdlo = (insn >> 12) & 0xf;
2511 acc = insn & 7;
2513 if (acc != 0)
2514 return 1;
2516 if (insn & ARM_CP_RW_BIT) { /* MRA */
2517 gen_iwmmxt_movl_T0_T1_wRn(acc);
2518 gen_movl_reg_T0(s, rdlo);
2519 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2520 gen_op_andl_T0_T1();
2521 gen_movl_reg_T0(s, rdhi);
2522 } else { /* MAR */
2523 gen_movl_T0_reg(s, rdlo);
2524 gen_movl_T1_reg(s, rdhi);
2525 gen_iwmmxt_movl_wRn_T0_T1(acc);
2527 return 0;
2530 return 1;
2533 /* Disassemble a system coprocessor instruction.  Return nonzero if the
2534    instruction is not defined.  */
2535 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2537 TCGv tmp;
2538 uint32_t rd = (insn >> 12) & 0xf;
2539 uint32_t cp = (insn >> 8) & 0xf;
2540 if (IS_USER(s)) {
2541 return 1;
2544 if (insn & ARM_CP_RW_BIT) {
2545 if (!env->cp[cp].cp_read)
2546 return 1;
2547 gen_set_pc_im(s->pc);
2548 tmp = new_tmp();
2549 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2550 store_reg(s, rd, tmp);
2551 } else {
2552 if (!env->cp[cp].cp_write)
2553 return 1;
2554 gen_set_pc_im(s->pc);
2555 tmp = load_reg(s, rd);
2556 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2557 dead_tmp(tmp);
2559 return 0;
2562 static int cp15_user_ok(uint32_t insn)
2564 int cpn = (insn >> 16) & 0xf;
2565 int cpm = insn & 0xf;
2566 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
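/* op packs opc2 (insn[7:5]) into its low three bits and opc1
   (insn[23:21]) above them.  E.g. "mrc p15, 0, rd, c13, c0, 3" (the
   read-only user TLS register) gives cpn == 13, cpm == 0, op == 3,
   which the test below permits only for reads.  */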
2568 if (cpn == 13 && cpm == 0) {
2569 /* TLS register. */
2570 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2571 return 1;
2573 if (cpn == 7) {
2574 /* ISB, DSB, DMB. */
2575 if ((cpm == 5 && op == 4)
2576 || (cpm == 10 && (op == 4 || op == 5)))
2577 return 1;
2579 return 0;
2582 /* Disassemble a system coprocessor (cp15) instruction.  Return nonzero if
2583    the instruction is not defined.  */
2584 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2586 uint32_t rd;
2587 TCGv tmp;
2589 /* M profile cores use memory mapped registers instead of cp15. */
2590 if (arm_feature(env, ARM_FEATURE_M))
2591 return 1;
2593 if ((insn & (1 << 25)) == 0) {
2594 if (insn & (1 << 20)) {
2595 /* mrrc */
2596 return 1;
2598 /* mcrr. Used for block cache operations, so implement as no-op. */
2599 return 0;
2601 if ((insn & (1 << 4)) == 0) {
2602 /* cdp */
2603 return 1;
2605 if (IS_USER(s) && !cp15_user_ok(insn)) {
2606 return 1;
2608 if ((insn & 0x0fff0fff) == 0x0e070f90
2609 || (insn & 0x0fff0fff) == 0x0e070f58) {
2610 /* Wait for interrupt. */
2611 gen_set_pc_im(s->pc);
2612 s->is_jmp = DISAS_WFI;
2613 return 0;
2615 rd = (insn >> 12) & 0xf;
2616 if (insn & ARM_CP_RW_BIT) {
2617 tmp = new_tmp();
2618 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2619 /* If the destination register is r15 then the result sets the condition codes.  */
2620 if (rd != 15)
2621 store_reg(s, rd, tmp);
2622 else
2623 dead_tmp(tmp);
2624 } else {
2625 tmp = load_reg(s, rd);
2626 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2627 dead_tmp(tmp);
2628 /* Normally we would always end the TB here, but Linux
2629 * arch/arm/mach-pxa/sleep.S expects two instructions following
2630 * an MMU enable to execute from cache. Imitate this behaviour. */
2631 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2632 (insn & 0x0fff0fff) != 0x0e010f10)
2633 gen_lookup_tb(s);
2635 return 0;
2638 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2639 #define VFP_SREG(insn, bigbit, smallbit) \
2640 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2641 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2642 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2643 reg = (((insn) >> (bigbit)) & 0x0f) \
2644 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2645 } else { \
2646 if (insn & (1 << (smallbit))) \
2647 return 1; \
2648 reg = ((insn) >> (bigbit)) & 0x0f; \
2649 }} while (0)
2651 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2652 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2653 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2654 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2655 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2656 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
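/* Worked example of the register-number decode: a single-precision
   register number is five bits spread across the word, e.g.
   Sd = (insn[15:12] << 1) | insn[22].  For doubles the "small" bit must
   be zero on pre-VFP3 cores (D0-D15 only), while VFP3 treats it as
   bit 4 of the register number, giving D0-D31.  */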
2658 /* Move between integer and VFP cores. */
2659 static TCGv gen_vfp_mrs(void)
2661 TCGv tmp = new_tmp();
2662 tcg_gen_mov_i32(tmp, cpu_F0s);
2663 return tmp;
2666 static void gen_vfp_msr(TCGv tmp)
2668 tcg_gen_mov_i32(cpu_F0s, tmp);
2669 dead_tmp(tmp);
2672 static inline int
2673 vfp_enabled(CPUState *env)
2675 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
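/* FPEXC bit 30 is the EN bit: while it is clear, every VFP/Neon
   instruction is treated as undefined except for accesses to a few
   control registers, which disas_vfp_insn special-cases below.  */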
2678 static void gen_neon_dup_u8(TCGv var, int shift)
2680 TCGv tmp = new_tmp();
2681 if (shift)
2682 tcg_gen_shri_i32(var, var, shift);
2683 tcg_gen_ext8u_i32(var, var);
2684 tcg_gen_shli_i32(tmp, var, 8);
2685 tcg_gen_or_i32(var, var, tmp);
2686 tcg_gen_shli_i32(tmp, var, 16);
2687 tcg_gen_or_i32(var, var, tmp);
2688 dead_tmp(tmp);
2691 static void gen_neon_dup_low16(TCGv var)
2693 TCGv tmp = new_tmp();
2694 tcg_gen_ext16u_i32(var, var);
2695 tcg_gen_shli_i32(tmp, var, 16);
2696 tcg_gen_or_i32(var, var, tmp);
2697 dead_tmp(tmp);
2700 static void gen_neon_dup_high16(TCGv var)
2702 TCGv tmp = new_tmp();
2703 tcg_gen_andi_i32(var, var, 0xffff0000);
2704 tcg_gen_shri_i32(tmp, var, 16);
2705 tcg_gen_or_i32(var, var, tmp);
2706 dead_tmp(tmp);
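/* The three helpers above broadcast one element across a 32-bit word,
   e.g. gen_neon_dup_u8 turns 0x000000ab into 0xabababab and
   gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd;
   gen_neon_dup_high16 copies the top halfword downwards instead.  */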
2709 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2710    (i.e. an undefined instruction).  */
2711 static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2713 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2714 int dp, veclen;
2715 TCGv tmp;
2716 TCGv tmp2;
2718 if (!arm_feature(env, ARM_FEATURE_VFP))
2719 return 1;
2721 if (!vfp_enabled(env)) {
2722 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2723 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2724 return 1;
2725 rn = (insn >> 16) & 0xf;
2726 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2727 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2728 return 1;
2730 dp = ((insn & 0xf00) == 0xb00);
2731 switch ((insn >> 24) & 0xf) {
2732 case 0xe:
2733 if (insn & (1 << 4)) {
2734 /* single register transfer */
2735 rd = (insn >> 12) & 0xf;
2736 if (dp) {
2737 int size;
2738 int pass;
2740 VFP_DREG_N(rn, insn);
2741 if (insn & 0xf)
2742 return 1;
2743 if (insn & 0x00c00060
2744 && !arm_feature(env, ARM_FEATURE_NEON))
2745 return 1;
2747 pass = (insn >> 21) & 1;
2748 if (insn & (1 << 22)) {
2749 size = 0;
2750 offset = ((insn >> 5) & 3) * 8;
2751 } else if (insn & (1 << 5)) {
2752 size = 1;
2753 offset = (insn & (1 << 6)) ? 16 : 0;
2754 } else {
2755 size = 2;
2756 offset = 0;
2758 if (insn & ARM_CP_RW_BIT) {
2759 /* vfp->arm */
2760 tmp = neon_load_reg(rn, pass);
2761 switch (size) {
2762 case 0:
2763 if (offset)
2764 tcg_gen_shri_i32(tmp, tmp, offset);
2765 if (insn & (1 << 23))
2766 gen_uxtb(tmp);
2767 else
2768 gen_sxtb(tmp);
2769 break;
2770 case 1:
2771 if (insn & (1 << 23)) {
2772 if (offset) {
2773 tcg_gen_shri_i32(tmp, tmp, 16);
2774 } else {
2775 gen_uxth(tmp);
2777 } else {
2778 if (offset) {
2779 tcg_gen_sari_i32(tmp, tmp, 16);
2780 } else {
2781 gen_sxth(tmp);
2784 break;
2785 case 2:
2786 break;
2788 store_reg(s, rd, tmp);
2789 } else {
2790 /* arm->vfp */
2791 tmp = load_reg(s, rd);
2792 if (insn & (1 << 23)) {
2793 /* VDUP */
2794 if (size == 0) {
2795 gen_neon_dup_u8(tmp, 0);
2796 } else if (size == 1) {
2797 gen_neon_dup_low16(tmp);
2799 for (n = 0; n <= pass * 2; n++) {
2800 tmp2 = new_tmp();
2801 tcg_gen_mov_i32(tmp2, tmp);
2802 neon_store_reg(rn, n, tmp2);
2804 neon_store_reg(rn, n, tmp);
2805 } else {
2806 /* VMOV */
2807 switch (size) {
2808 case 0:
2809 tmp2 = neon_load_reg(rn, pass);
2810 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2811 dead_tmp(tmp2);
2812 break;
2813 case 1:
2814 tmp2 = neon_load_reg(rn, pass);
2815 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2816 dead_tmp(tmp2);
2817 break;
2818 case 2:
2819 break;
2821 neon_store_reg(rn, pass, tmp);
2824 } else { /* !dp */
2825 if ((insn & 0x6f) != 0x00)
2826 return 1;
2827 rn = VFP_SREG_N(insn);
2828 if (insn & ARM_CP_RW_BIT) {
2829 /* vfp->arm */
2830 if (insn & (1 << 21)) {
2831 /* system register */
2832 rn >>= 1;
2834 switch (rn) {
2835 case ARM_VFP_FPSID:
2836 /* VFP2 allows access to FPSID from userspace.
2837    VFP3 restricts all id registers to privileged
2838    accesses.  */
2839 if (IS_USER(s)
2840 && arm_feature(env, ARM_FEATURE_VFP3))
2841 return 1;
2842 tmp = load_cpu_field(vfp.xregs[rn]);
2843 break;
2844 case ARM_VFP_FPEXC:
2845 if (IS_USER(s))
2846 return 1;
2847 tmp = load_cpu_field(vfp.xregs[rn]);
2848 break;
2849 case ARM_VFP_FPINST:
2850 case ARM_VFP_FPINST2:
2851 /* Not present in VFP3. */
2852 if (IS_USER(s)
2853 || arm_feature(env, ARM_FEATURE_VFP3))
2854 return 1;
2855 tmp = load_cpu_field(vfp.xregs[rn]);
2856 break;
2857 case ARM_VFP_FPSCR:
2858 if (rd == 15) {
2859 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2860 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2861 } else {
2862 tmp = new_tmp();
2863 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2865 break;
2866 case ARM_VFP_MVFR0:
2867 case ARM_VFP_MVFR1:
2868 if (IS_USER(s)
2869 || !arm_feature(env, ARM_FEATURE_VFP3))
2870 return 1;
2871 tmp = load_cpu_field(vfp.xregs[rn]);
2872 break;
2873 default:
2874 return 1;
2876 } else {
2877 gen_mov_F0_vreg(0, rn);
2878 tmp = gen_vfp_mrs();
2880 if (rd == 15) {
2881 /* Set the 4 flag bits in the CPSR. */
2882 gen_set_nzcv(tmp);
2883 dead_tmp(tmp);
2884 } else {
2885 store_reg(s, rd, tmp);
2887 } else {
2888 /* arm->vfp */
2889 tmp = load_reg(s, rd);
2890 if (insn & (1 << 21)) {
2891 rn >>= 1;
2892 /* system register */
2893 switch (rn) {
2894 case ARM_VFP_FPSID:
2895 case ARM_VFP_MVFR0:
2896 case ARM_VFP_MVFR1:
2897 /* Writes are ignored. */
2898 break;
2899 case ARM_VFP_FPSCR:
2900 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2901 dead_tmp(tmp);
2902 gen_lookup_tb(s);
2903 break;
2904 case ARM_VFP_FPEXC:
2905 if (IS_USER(s))
2906 return 1;
2907 store_cpu_field(tmp, vfp.xregs[rn]);
2908 gen_lookup_tb(s);
2909 break;
2910 case ARM_VFP_FPINST:
2911 case ARM_VFP_FPINST2:
2912 store_cpu_field(tmp, vfp.xregs[rn]);
2913 break;
2914 default:
2915 return 1;
2917 } else {
2918 gen_vfp_msr(tmp);
2919 gen_mov_vreg_F0(0, rn);
2923 } else {
2924 /* data processing */
2925 /* The opcode is in bits 23, 21, 20 and 6. */
2926 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2927 if (dp) {
2928 if (op == 15) {
2929 /* rn is opcode */
2930 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2931 } else {
2932 /* rn is register number */
2933 VFP_DREG_N(rn, insn);
2936 if (op == 15 && (rn == 15 || rn > 17)) {
2937 /* Integer or single precision destination. */
2938 rd = VFP_SREG_D(insn);
2939 } else {
2940 VFP_DREG_D(rd, insn);
2943 if (op == 15 && (rn == 16 || rn == 17)) {
2944 /* Integer source. */
2945 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2946 } else {
2947 VFP_DREG_M(rm, insn);
2949 } else {
2950 rn = VFP_SREG_N(insn);
2951 if (op == 15 && rn == 15) {
2952 /* Double precision destination. */
2953 VFP_DREG_D(rd, insn);
2954 } else {
2955 rd = VFP_SREG_D(insn);
2957 rm = VFP_SREG_M(insn);
2960 veclen = env->vfp.vec_len;
2961 if (op == 15 && rn > 3)
2962 veclen = 0;
2964 /* Shut up compiler warnings. */
2965 delta_m = 0;
2966 delta_d = 0;
2967 bank_mask = 0;
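/* VFP short vectors: when FPSCR.LEN is nonzero the operation repeats,
   the register numbers advancing by delta_d/delta_m each time and
   wrapping within a bank (eight singles or four doubles).  E.g. with a
   vector length of four and a stride of one, "fadds s8, s16, s24" also
   computes s9 = s17 + s25, s10 = s18 + s26 and s11 = s19 + s27.  A
   destination in bank 0 means scalar; rm in bank 0 with a vector
   destination gives the mixed scalar/vector form (delta_m == 0).  */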
2969 if (veclen > 0) {
2970 if (dp)
2971 bank_mask = 0xc;
2972 else
2973 bank_mask = 0x18;
2975 /* Figure out what type of vector operation this is. */
2976 if ((rd & bank_mask) == 0) {
2977 /* scalar */
2978 veclen = 0;
2979 } else {
2980 if (dp)
2981 delta_d = (env->vfp.vec_stride >> 1) + 1;
2982 else
2983 delta_d = env->vfp.vec_stride + 1;
2985 if ((rm & bank_mask) == 0) {
2986 /* mixed scalar/vector */
2987 delta_m = 0;
2988 } else {
2989 /* vector */
2990 delta_m = delta_d;
2995 /* Load the initial operands. */
2996 if (op == 15) {
2997 switch (rn) {
2998 case 16:
2999 case 17:
3000 /* Integer source */
3001 gen_mov_F0_vreg(0, rm);
3002 break;
3003 case 8:
3004 case 9:
3005 /* Compare */
3006 gen_mov_F0_vreg(dp, rd);
3007 gen_mov_F1_vreg(dp, rm);
3008 break;
3009 case 10:
3010 case 11:
3011 /* Compare with zero */
3012 gen_mov_F0_vreg(dp, rd);
3013 gen_vfp_F1_ld0(dp);
3014 break;
3015 case 20:
3016 case 21:
3017 case 22:
3018 case 23:
3019 case 28:
3020 case 29:
3021 case 30:
3022 case 31:
3023 /* Source and destination the same. */
3024 gen_mov_F0_vreg(dp, rd);
3025 break;
3026 default:
3027 /* One source operand. */
3028 gen_mov_F0_vreg(dp, rm);
3029 break;
3031 } else {
3032 /* Two source operands. */
3033 gen_mov_F0_vreg(dp, rn);
3034 gen_mov_F1_vreg(dp, rm);
3037 for (;;) {
3038 /* Perform the calculation. */
3039 switch (op) {
3040 case 0: /* mac: fd + (fn * fm) */
3041 gen_vfp_mul(dp);
3042 gen_mov_F1_vreg(dp, rd);
3043 gen_vfp_add(dp);
3044 break;
3045 case 1: /* nmac: fd - (fn * fm) */
3046 gen_vfp_mul(dp);
3047 gen_vfp_neg(dp);
3048 gen_mov_F1_vreg(dp, rd);
3049 gen_vfp_add(dp);
3050 break;
3051 case 2: /* msc: -fd + (fn * fm) */
3052 gen_vfp_mul(dp);
3053 gen_mov_F1_vreg(dp, rd);
3054 gen_vfp_sub(dp);
3055 break;
3056 case 3: /* nmsc: -fd - (fn * fm) */
3057 gen_vfp_mul(dp);
3058 gen_vfp_neg(dp);
3059 gen_mov_F1_vreg(dp, rd);
3060 gen_vfp_sub(dp);
3061 break;
3062 case 4: /* mul: fn * fm */
3063 gen_vfp_mul(dp);
3064 break;
3065 case 5: /* nmul: -(fn * fm) */
3066 gen_vfp_mul(dp);
3067 gen_vfp_neg(dp);
3068 break;
3069 case 6: /* add: fn + fm */
3070 gen_vfp_add(dp);
3071 break;
3072 case 7: /* sub: fn - fm */
3073 gen_vfp_sub(dp);
3074 break;
3075 case 8: /* div: fn / fm */
3076 gen_vfp_div(dp);
3077 break;
3078 case 14: /* fconst */
3079 if (!arm_feature(env, ARM_FEATURE_VFP3))
3080 return 1;
3082 n = (insn << 12) & 0x80000000;
3083 i = ((insn >> 12) & 0x70) | (insn & 0xf);
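/* VFP3 fconst: the immediate abcdefgh (insn[19:16]:insn[3:0]) expands
   to sign a, an exponent built from NOT(b), replicated copies of b and
   cd, and fraction efgh, i.e. values of the form +/- (16..31)/16 * 2^n
   with -3 <= n <= 4.  E.g. imm8 0x00 encodes 2.0 and imm8 0x70
   encodes 1.0.  */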
3084 if (dp) {
3085 if (i & 0x40)
3086 i |= 0x3f80;
3087 else
3088 i |= 0x4000;
3089 n |= i << 16;
3090 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3091 } else {
3092 if (i & 0x40)
3093 i |= 0x780;
3094 else
3095 i |= 0x800;
3096 n |= i << 19;
3097 tcg_gen_movi_i32(cpu_F0s, n);
3099 break;
3100 case 15: /* extension space */
3101 switch (rn) {
3102 case 0: /* cpy */
3103 /* no-op */
3104 break;
3105 case 1: /* abs */
3106 gen_vfp_abs(dp);
3107 break;
3108 case 2: /* neg */
3109 gen_vfp_neg(dp);
3110 break;
3111 case 3: /* sqrt */
3112 gen_vfp_sqrt(dp);
3113 break;
3114 case 8: /* cmp */
3115 gen_vfp_cmp(dp);
3116 break;
3117 case 9: /* cmpe */
3118 gen_vfp_cmpe(dp);
3119 break;
3120 case 10: /* cmpz */
3121 gen_vfp_cmp(dp);
3122 break;
3123 case 11: /* cmpez */
3124 gen_vfp_F1_ld0(dp);
3125 gen_vfp_cmpe(dp);
3126 break;
3127 case 15: /* single<->double conversion */
3128 if (dp)
3129 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3130 else
3131 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3132 break;
3133 case 16: /* fuito */
3134 gen_vfp_uito(dp);
3135 break;
3136 case 17: /* fsito */
3137 gen_vfp_sito(dp);
3138 break;
3139 case 20: /* fshto */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3141 return 1;
3142 gen_vfp_shto(dp, 16 - rm);
3143 break;
3144 case 21: /* fslto */
3145 if (!arm_feature(env, ARM_FEATURE_VFP3))
3146 return 1;
3147 gen_vfp_slto(dp, 32 - rm);
3148 break;
3149 case 22: /* fuhto */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
3152 gen_vfp_uhto(dp, 16 - rm);
3153 break;
3154 case 23: /* fulto */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_ulto(dp, 32 - rm);
3158 break;
3159 case 24: /* ftoui */
3160 gen_vfp_toui(dp);
3161 break;
3162 case 25: /* ftouiz */
3163 gen_vfp_touiz(dp);
3164 break;
3165 case 26: /* ftosi */
3166 gen_vfp_tosi(dp);
3167 break;
3168 case 27: /* ftosiz */
3169 gen_vfp_tosiz(dp);
3170 break;
3171 case 28: /* ftosh */
3172 if (!arm_feature(env, ARM_FEATURE_VFP3))
3173 return 1;
3174 gen_vfp_tosh(dp, 16 - rm);
3175 break;
3176 case 29: /* ftosl */
3177 if (!arm_feature(env, ARM_FEATURE_VFP3))
3178 return 1;
3179 gen_vfp_tosl(dp, 32 - rm);
3180 break;
3181 case 30: /* ftouh */
3182 if (!arm_feature(env, ARM_FEATURE_VFP3))
3183 return 1;
3184 gen_vfp_touh(dp, 16 - rm);
3185 break;
3186 case 31: /* ftoul */
3187 if (!arm_feature(env, ARM_FEATURE_VFP3))
3188 return 1;
3189 gen_vfp_toul(dp, 32 - rm);
3190 break;
3191 default: /* undefined */
3192 printf ("rn:%d\n", rn);
3193 return 1;
3195 break;
3196 default: /* undefined */
3197 printf ("op:%d\n", op);
3198 return 1;
3201 /* Write back the result. */
3202 if (op == 15 && (rn >= 8 && rn <= 11))
3203 ; /* Comparison, do nothing. */
3204 else if (op == 15 && rn > 17)
3205 /* Integer result. */
3206 gen_mov_vreg_F0(0, rd);
3207 else if (op == 15 && rn == 15)
3208 /* conversion */
3209 gen_mov_vreg_F0(!dp, rd);
3210 else
3211 gen_mov_vreg_F0(dp, rd);
3213 /* break out of the loop if we have finished */
3214 if (veclen == 0)
3215 break;
3217 if (op == 15 && delta_m == 0) {
3218 /* single source, one-to-many */
3219 while (veclen--) {
3220 rd = ((rd + delta_d) & (bank_mask - 1))
3221 | (rd & bank_mask);
3222 gen_mov_vreg_F0(dp, rd);
3224 break;
3226 /* Set up the next operands. */
3227 veclen--;
3228 rd = ((rd + delta_d) & (bank_mask - 1))
3229 | (rd & bank_mask);
3231 if (op == 15) {
3232 /* One source operand. */
3233 rm = ((rm + delta_m) & (bank_mask - 1))
3234 | (rm & bank_mask);
3235 gen_mov_F0_vreg(dp, rm);
3236 } else {
3237 /* Two source operands. */
3238 rn = ((rn + delta_d) & (bank_mask - 1))
3239 | (rn & bank_mask);
3240 gen_mov_F0_vreg(dp, rn);
3241 if (delta_m) {
3242 rm = ((rm + delta_m) & (bank_mask - 1))
3243 | (rm & bank_mask);
3244 gen_mov_F1_vreg(dp, rm);
3249 break;
3250 case 0xc:
3251 case 0xd:
3252 if (dp && (insn & 0x03e00000) == 0x00400000) {
3253 /* two-register transfer */
3254 rn = (insn >> 16) & 0xf;
3255 rd = (insn >> 12) & 0xf;
3256 if (dp) {
3257 VFP_DREG_M(rm, insn);
3258 } else {
3259 rm = VFP_SREG_M(insn);
3262 if (insn & ARM_CP_RW_BIT) {
3263 /* vfp->arm */
3264 if (dp) {
3265 gen_mov_F0_vreg(0, rm * 2);
3266 tmp = gen_vfp_mrs();
3267 store_reg(s, rd, tmp);
3268 gen_mov_F0_vreg(0, rm * 2 + 1);
3269 tmp = gen_vfp_mrs();
3270 store_reg(s, rn, tmp);
3271 } else {
3272 gen_mov_F0_vreg(0, rm);
3273 tmp = gen_vfp_mrs();
3274 store_reg(s, rn, tmp);
3275 gen_mov_F0_vreg(0, rm + 1);
3276 tmp = gen_vfp_mrs();
3277 store_reg(s, rd, tmp);
3279 } else {
3280 /* arm->vfp */
3281 if (dp) {
3282 tmp = load_reg(s, rd);
3283 gen_vfp_msr(tmp);
3284 gen_mov_vreg_F0(0, rm * 2);
3285 tmp = load_reg(s, rn);
3286 gen_vfp_msr(tmp);
3287 gen_mov_vreg_F0(0, rm * 2 + 1);
3288 } else {
3289 tmp = load_reg(s, rn);
3290 gen_vfp_msr(tmp);
3291 gen_mov_vreg_F0(0, rm);
3292 tmp = load_reg(s, rd);
3293 gen_vfp_msr(tmp);
3294 gen_mov_vreg_F0(0, rm + 1);
3297 } else {
3298 /* Load/store */
3299 rn = (insn >> 16) & 0xf;
3300 if (dp)
3301 VFP_DREG_D(rd, insn);
3302 else
3303 rd = VFP_SREG_D(insn);
3304 if (s->thumb && rn == 15) {
3305 gen_op_movl_T1_im(s->pc & ~2);
3306 } else {
3307 gen_movl_T1_reg(s, rn);
3309 if ((insn & 0x01200000) == 0x01000000) {
3310 /* Single load/store */
3311 offset = (insn & 0xff) << 2;
3312 if ((insn & (1 << 23)) == 0)
3313 offset = -offset;
3314 gen_op_addl_T1_im(offset);
3315 if (insn & (1 << 20)) {
3316 gen_vfp_ld(s, dp);
3317 gen_mov_vreg_F0(dp, rd);
3318 } else {
3319 gen_mov_F0_vreg(dp, rd);
3320 gen_vfp_st(s, dp);
3322 } else {
3323 /* load/store multiple */
3324 if (dp)
3325 n = (insn >> 1) & 0x7f;
3326 else
3327 n = insn & 0xff;
3329 if (insn & (1 << 24)) /* pre-decrement */
3330 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3332 if (dp)
3333 offset = 8;
3334 else
3335 offset = 4;
3336 for (i = 0; i < n; i++) {
3337 if (insn & ARM_CP_RW_BIT) {
3338 /* load */
3339 gen_vfp_ld(s, dp);
3340 gen_mov_vreg_F0(dp, rd + i);
3341 } else {
3342 /* store */
3343 gen_mov_F0_vreg(dp, rd + i);
3344 gen_vfp_st(s, dp);
3346 gen_op_addl_T1_im(offset);
3348 if (insn & (1 << 21)) {
3349 /* writeback */
3350 if (insn & (1 << 24))
3351 offset = -offset * n;
3352 else if (dp && (insn & 1))
3353 offset = 4;
3354 else
3355 offset = 0;
3357 if (offset != 0)
3358 gen_op_addl_T1_im(offset);
3359 gen_movl_reg_T1(s, rn);
3363 break;
3364 default:
3365 /* Should never happen. */
3366 return 1;
3368 return 0;
3371 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3373 TranslationBlock *tb;
3375 tb = s->tb;
3376 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3377 tcg_gen_goto_tb(n);
3378 gen_set_pc_im(dest);
3379 tcg_gen_exit_tb((long)tb + n);
3380 } else {
3381 gen_set_pc_im(dest);
3382 tcg_gen_exit_tb(0);
3386 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3388 if (unlikely(s->singlestep_enabled)) {
3389 /* An indirect jump so that we still trigger the debug exception. */
3390 if (s->thumb)
3391 dest |= 1;
3392 gen_bx_im(s, dest);
3393 } else {
3394 gen_goto_tb(s, 0, dest);
3395 s->is_jmp = DISAS_TB_JUMP;
3399 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3401 if (x)
3402 tcg_gen_sari_i32(t0, t0, 16);
3403 else
3404 gen_sxth(t0);
3405 if (y)
3406 tcg_gen_sari_i32(t1, t1, 16);
3407 else
3408 gen_sxth(t1);
3409 tcg_gen_mul_i32(t0, t0, t1);
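/* Used by the SMULxy/SMLAxy family: x and y pick the top halfword
   (arithmetic shift right by 16) or the sign-extended bottom halfword
   of each operand before the 32-bit multiply.  */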
3412 /* Return the mask of PSR bits set by a MSR instruction. */
3413 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3414 uint32_t mask;
3416 mask = 0;
3417 if (flags & (1 << 0))
3418 mask |= 0xff;
3419 if (flags & (1 << 1))
3420 mask |= 0xff00;
3421 if (flags & (1 << 2))
3422 mask |= 0xff0000;
3423 if (flags & (1 << 3))
3424 mask |= 0xff000000;
3426 /* Mask out undefined bits. */
3427 mask &= ~CPSR_RESERVED;
3428 if (!arm_feature(env, ARM_FEATURE_V6))
3429 mask &= ~(CPSR_E | CPSR_GE);
3430 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3431 mask &= ~CPSR_IT;
3432 /* Mask out execution state bits. */
3433 if (!spsr)
3434 mask &= ~CPSR_EXEC;
3435 /* Mask out privileged bits. */
3436 if (IS_USER(s))
3437 mask &= CPSR_USER;
3438 return mask;
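/* The flags argument mirrors the MSR field mask: bit 0 = c (PSR[7:0]),
   bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]).
   E.g. "msr cpsr_fs, rm" arrives with flags 0xc, i.e. a raw mask of
   0xffff0000 before the feature and privilege masking above.  */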
3441 /* Returns nonzero if access to the PSR is not permitted. */
3442 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3444 TCGv tmp;
3445 if (spsr) {
3446 /* ??? This is also undefined in system mode. */
3447 if (IS_USER(s))
3448 return 1;
3450 tmp = load_cpu_field(spsr);
3451 tcg_gen_andi_i32(tmp, tmp, ~mask);
3452 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3453 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3454 store_cpu_field(tmp, spsr);
3455 } else {
3456 gen_set_cpsr(cpu_T[0], mask);
3458 gen_lookup_tb(s);
3459 return 0;
3462 /* Generate an old-style exception return. Marks pc as dead. */
3463 static void gen_exception_return(DisasContext *s, TCGv pc)
3465 TCGv tmp;
3466 store_reg(s, 15, pc);
3467 tmp = load_cpu_field(spsr);
3468 gen_set_cpsr(tmp, 0xffffffff);
3469 dead_tmp(tmp);
3470 s->is_jmp = DISAS_UPDATE;
3473 /* Generate a v6 exception return. Marks both values as dead. */
3474 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3476 gen_set_cpsr(cpsr, 0xffffffff);
3477 dead_tmp(cpsr);
3478 store_reg(s, 15, pc);
3479 s->is_jmp = DISAS_UPDATE;
3482 static inline void
3483 gen_set_condexec (DisasContext *s)
3485 if (s->condexec_mask) {
3486 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3487 TCGv tmp = new_tmp();
3488 tcg_gen_movi_i32(tmp, val);
3489 store_cpu_field(tmp, condexec_bits);
3493 static void gen_nop_hint(DisasContext *s, int val)
3495 switch (val) {
3496 case 3: /* wfi */
3497 gen_set_pc_im(s->pc);
3498 s->is_jmp = DISAS_WFI;
3499 break;
3500 case 2: /* wfe */
3501 case 4: /* sev */
3502 /* TODO: Implement SEV and WFE. May help SMP performance. */
3503 default: /* nop */
3504 break;
3508 /* These macros help make the code more readable when migrating from the
3509 old dyngen helpers. They should probably be removed when
3510 T0/T1 are removed. */
3511 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3512 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3514 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3516 static inline int gen_neon_add(int size)
3518 switch (size) {
3519 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3520 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3521 case 2: gen_op_addl_T0_T1(); break;
3522 default: return 1;
3524 return 0;
3527 static inline void gen_neon_rsb(int size)
3529 switch (size) {
3530 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3531 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3532 case 2: gen_op_rsbl_T0_T1(); break;
3533 default: return;
3537 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3538 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3539 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3540 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3541 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3543 /* FIXME: This is wrong. They set the wrong overflow bit. */
3544 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3545 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3546 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3547 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3549 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3550 switch ((size << 1) | u) { \
3551 case 0: \
3552 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3553 break; \
3554 case 1: \
3555 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3556 break; \
3557 case 2: \
3558 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3559 break; \
3560 case 3: \
3561 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3562 break; \
3563 case 4: \
3564 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3565 break; \
3566 case 5: \
3567 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3568 break; \
3569 default: return 1; \
3570 }} while (0)
3572 #define GEN_NEON_INTEGER_OP(name) do { \
3573 switch ((size << 1) | u) { \
3574 case 0: \
3575 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3576 break; \
3577 case 1: \
3578 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3579 break; \
3580 case 2: \
3581 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3582 break; \
3583 case 3: \
3584 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3585 break; \
3586 case 4: \
3587 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3588 break; \
3589 case 5: \
3590 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3591 break; \
3592 default: return 1; \
3593 }} while (0)
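/* Expansion example: with size == 1 and u == 0,
   GEN_NEON_INTEGER_OP(hadd) becomes
   gen_helper_neon_hadd_s16(cpu_T[0], cpu_T[0], cpu_T[1]);
   the _ENV variant additionally threads cpu_env through for helpers
   that need to update saturation state.  */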
3595 static inline void
3596 gen_neon_movl_scratch_T0(int scratch)
3598 uint32_t offset;
3600 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3601 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3604 static inline void
3605 gen_neon_movl_scratch_T1(int scratch)
3607 uint32_t offset;
3609 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3610 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3613 static inline void
3614 gen_neon_movl_T0_scratch(int scratch)
3616 uint32_t offset;
3618 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3619 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3622 static inline void
3623 gen_neon_movl_T1_scratch(int scratch)
3625 uint32_t offset;
3627 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3628 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3631 static inline void gen_neon_get_scalar(int size, int reg)
3633 if (size == 1) {
3634 NEON_GET_REG(T0, reg >> 1, reg & 1);
3635 } else {
3636 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3637 if (reg & 1)
3638 gen_neon_dup_low16(cpu_T[0]);
3639 else
3640 gen_neon_dup_high16(cpu_T[0]);
3644 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3646 int n;
3648 for (n = 0; n < q + 1; n += 2) {
3649 NEON_GET_REG(T0, reg, n);
3650 NEON_GET_REG(T1, reg, n + 1); /* second source element belongs in T1 */
3651 switch (size) {
3652 case 0: gen_helper_neon_unzip_u8(); break;
3653 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3654 case 2: /* no-op */; break;
3655 default: abort();
3657 gen_neon_movl_scratch_T0(tmp + n);
3658 gen_neon_movl_scratch_T1(tmp + n + 1);
3662 static struct {
3663 int nregs;
3664 int interleave;
3665 int spacing;
3666 } neon_ls_element_type[11] = {
3667 {4, 4, 1},
3668 {4, 4, 2},
3669 {4, 1, 1},
3670 {4, 2, 1},
3671 {3, 3, 1},
3672 {3, 3, 2},
3673 {3, 1, 1},
3674 {1, 1, 1},
3675 {2, 2, 1},
3676 {2, 2, 2},
3677 {2, 1, 1}
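/* Indexed by the op field of a "load/store all elements" instruction:
   nregs is the number of D registers transferred, interleave the factor
   applied to the element size when stepping the address between the
   registers of one structure, and spacing the rd increment between
   registers.  */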
3680 /* Translate a NEON load/store element instruction. Return nonzero if the
3681 instruction is invalid. */
3682 static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
3684 int rd, rn, rm;
3685 int op;
3686 int nregs;
3687 int interleave;
3688 int stride;
3689 int size;
3690 int reg;
3691 int pass;
3692 int load;
3693 int shift;
3694 int n;
3695 TCGv tmp;
3696 TCGv tmp2;
3698 if (!vfp_enabled(env))
3699 return 1;
3700 VFP_DREG_D(rd, insn);
3701 rn = (insn >> 16) & 0xf;
3702 rm = insn & 0xf;
3703 load = (insn & (1 << 21)) != 0;
3704 if ((insn & (1 << 23)) == 0) {
3705 /* Load store all elements. */
3706 op = (insn >> 8) & 0xf;
3707 size = (insn >> 6) & 3;
3708 if (op > 10 || size == 3)
3709 return 1;
3710 nregs = neon_ls_element_type[op].nregs;
3711 interleave = neon_ls_element_type[op].interleave;
3712 gen_movl_T1_reg(s, rn);
3713 stride = (1 << size) * interleave;
3714 for (reg = 0; reg < nregs; reg++) {
3715 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3716 gen_movl_T1_reg(s, rn);
3717 gen_op_addl_T1_im((1 << size) * reg);
3718 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3719 gen_movl_T1_reg(s, rn);
3720 gen_op_addl_T1_im(1 << size);
3722 for (pass = 0; pass < 2; pass++) {
3723 if (size == 2) {
3724 if (load) {
3725 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3726 neon_store_reg(rd, pass, tmp);
3727 } else {
3728 tmp = neon_load_reg(rd, pass);
3729 gen_st32(tmp, cpu_T[1], IS_USER(s));
3731 gen_op_addl_T1_im(stride);
3732 } else if (size == 1) {
3733 if (load) {
3734 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3735 gen_op_addl_T1_im(stride);
3736 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3737 gen_op_addl_T1_im(stride);
3738 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3739 dead_tmp(tmp2);
3740 neon_store_reg(rd, pass, tmp);
3741 } else {
3742 tmp = neon_load_reg(rd, pass);
3743 tmp2 = new_tmp();
3744 tcg_gen_shri_i32(tmp2, tmp, 16);
3745 gen_st16(tmp, cpu_T[1], IS_USER(s));
3746 gen_op_addl_T1_im(stride);
3747 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3748 gen_op_addl_T1_im(stride);
3750 } else /* size == 0 */ {
3751 if (load) {
3752 TCGV_UNUSED(tmp2);
3753 for (n = 0; n < 4; n++) {
3754 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3755 gen_op_addl_T1_im(stride);
3756 if (n == 0) {
3757 tmp2 = tmp;
3758 } else {
3759 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3760 dead_tmp(tmp);
3763 neon_store_reg(rd, pass, tmp2);
3764 } else {
3765 tmp2 = neon_load_reg(rd, pass);
3766 for (n = 0; n < 4; n++) {
3767 tmp = new_tmp();
3768 if (n == 0) {
3769 tcg_gen_mov_i32(tmp, tmp2);
3770 } else {
3771 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3773 gen_st8(tmp, cpu_T[1], IS_USER(s));
3774 gen_op_addl_T1_im(stride);
3776 dead_tmp(tmp2);
3780 rd += neon_ls_element_type[op].spacing;
3782 stride = nregs * 8;
3783 } else {
3784 size = (insn >> 10) & 3;
3785 if (size == 3) {
3786 /* Load single element to all lanes. */
3787 if (!load)
3788 return 1;
3789 size = (insn >> 6) & 3;
3790 nregs = ((insn >> 8) & 3) + 1;
3791 stride = (insn & (1 << 5)) ? 2 : 1;
3792 gen_movl_T1_reg(s, rn);
3793 for (reg = 0; reg < nregs; reg++) {
3794 switch (size) {
3795 case 0:
3796 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3797 gen_neon_dup_u8(tmp, 0);
3798 break;
3799 case 1:
3800 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3801 gen_neon_dup_low16(tmp);
3802 break;
3803 case 2:
3804 tmp = gen_ld32(cpu_T[1], IS_USER(s)); /* address lives in T1, as in the other cases */
3805 break;
3806 case 3:
3807 return 1;
3808 default: /* Avoid compiler warnings. */
3809 abort();
3811 gen_op_addl_T1_im(1 << size);
3812 tmp2 = new_tmp();
3813 tcg_gen_mov_i32(tmp2, tmp);
3814 neon_store_reg(rd, 0, tmp2);
3815 neon_store_reg(rd, 1, tmp);
3816 rd += stride;
3818 stride = (1 << size) * nregs;
3819 } else {
3820 /* Single element. */
3821 pass = (insn >> 7) & 1;
3822 switch (size) {
3823 case 0:
3824 shift = ((insn >> 5) & 3) * 8;
3825 stride = 1;
3826 break;
3827 case 1:
3828 shift = ((insn >> 6) & 1) * 16;
3829 stride = (insn & (1 << 5)) ? 2 : 1;
3830 break;
3831 case 2:
3832 shift = 0;
3833 stride = (insn & (1 << 6)) ? 2 : 1;
3834 break;
3835 default:
3836 abort();
3838 nregs = ((insn >> 8) & 3) + 1;
3839 gen_movl_T1_reg(s, rn);
3840 for (reg = 0; reg < nregs; reg++) {
3841 if (load) {
3842 switch (size) {
3843 case 0:
3844 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3845 break;
3846 case 1:
3847 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3848 break;
3849 case 2:
3850 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3851 break;
3852 default: /* Avoid compiler warnings. */
3853 abort();
3855 if (size != 2) {
3856 tmp2 = neon_load_reg(rd, pass);
3857 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3858 dead_tmp(tmp2);
3860 neon_store_reg(rd, pass, tmp);
3861 } else { /* Store */
3862 tmp = neon_load_reg(rd, pass);
3863 if (shift)
3864 tcg_gen_shri_i32(tmp, tmp, shift);
3865 switch (size) {
3866 case 0:
3867 gen_st8(tmp, cpu_T[1], IS_USER(s));
3868 break;
3869 case 1:
3870 gen_st16(tmp, cpu_T[1], IS_USER(s));
3871 break;
3872 case 2:
3873 gen_st32(tmp, cpu_T[1], IS_USER(s));
3874 break;
3877 rd += stride;
3878 gen_op_addl_T1_im(1 << size);
3880 stride = nregs * (1 << size);
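/* Standard Neon post-indexing: rm == 15 means no writeback, rm == 13
   steps the base register by the transfer size computed above, and any
   other rm adds that register to the base.  */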
3883 if (rm != 15) {
3884 TCGv base;
3886 base = load_reg(s, rn);
3887 if (rm == 13) {
3888 tcg_gen_addi_i32(base, base, stride);
3889 } else {
3890 TCGv index;
3891 index = load_reg(s, rm);
3892 tcg_gen_add_i32(base, base, index);
3893 dead_tmp(index);
3895 store_reg(s, rn, base);
3897 return 0;
3900 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3901 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3903 tcg_gen_and_i32(t, t, c);
3904 tcg_gen_bic_i32(f, f, c);
3905 tcg_gen_or_i32(dest, t, f);
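/* i.e. dest = (t & c) | (f & ~c): each result bit comes from t where
   the mask bit is set and from f where it is clear.  VBSL, VBIT and
   VBIF below all reuse this, differing only in which operand supplies
   the mask.  */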
3908 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
3910 switch (size) {
3911 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3912 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3913 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3914 default: abort();
3918 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
3920 switch (size) {
3921 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3922 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3923 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3924 default: abort();
3928 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
3930 switch (size) {
3931 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3932 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3933 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3934 default: abort();
3938 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3939 int q, int u)
3941 if (q) {
3942 if (u) {
3943 switch (size) {
3944 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3945 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3946 default: abort();
3948 } else {
3949 switch (size) {
3950 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3951 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3952 default: abort();
3955 } else {
3956 if (u) {
3957 switch (size) {
3958 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3959 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3960 default: abort();
3962 } else {
3963 switch (size) {
3964 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3965 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3966 default: abort();
3972 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
3974 if (u) {
3975 switch (size) {
3976 case 0: gen_helper_neon_widen_u8(dest, src); break;
3977 case 1: gen_helper_neon_widen_u16(dest, src); break;
3978 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3979 default: abort();
3981 } else {
3982 switch (size) {
3983 case 0: gen_helper_neon_widen_s8(dest, src); break;
3984 case 1: gen_helper_neon_widen_s16(dest, src); break;
3985 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3986 default: abort();
3989 dead_tmp(src);
3992 static inline void gen_neon_addl(int size)
3994 switch (size) {
3995 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3996 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3997 case 2: tcg_gen_add_i64(CPU_V001); break;
3998 default: abort();
4002 static inline void gen_neon_subl(int size)
4004 switch (size) {
4005 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4006 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4007 case 2: tcg_gen_sub_i64(CPU_V001); break;
4008 default: abort();
4012 static inline void gen_neon_negl(TCGv_i64 var, int size)
4014 switch (size) {
4015 case 0: gen_helper_neon_negl_u16(var, var); break;
4016 case 1: gen_helper_neon_negl_u32(var, var); break;
4017 case 2: gen_helper_neon_negl_u64(var, var); break;
4018 default: abort();
4022 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4024 switch (size) {
4025 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4026 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4027 default: abort();
4031 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4033 TCGv_i64 tmp;
4035 switch ((size << 1) | u) {
4036 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4037 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4038 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4039 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4040 case 4:
4041 tmp = gen_muls_i64_i32(a, b);
4042 tcg_gen_mov_i64(dest, tmp);
4043 break;
4044 case 5:
4045 tmp = gen_mulu_i64_i32(a, b);
4046 tcg_gen_mov_i64(dest, tmp);
4047 break;
4048 default: abort();
4050 if (size < 2) {
4051 dead_tmp(b);
4052 dead_tmp(a);
4056 /* Translate a NEON data processing instruction. Return nonzero if the
4057 instruction is invalid.
4058 We process data in a mixture of 32-bit and 64-bit chunks.
4059 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
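/* Decode sketch for the three-registers-same-length group handled
   first: bit 23 clear selects it, op is insn[11:8]:insn[4],
   u = insn[24], size = insn[21:20] and q = insn[6] (quadword).
   size == 3 selects the 64-bit element forms at the top of the
   function.  */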
4061 static int disas_neon_data_insn(CPUState *env, DisasContext *s, uint32_t insn)
4063 int op;
4064 int q;
4065 int rd, rn, rm;
4066 int size;
4067 int shift;
4068 int pass;
4069 int count;
4070 int pairwise;
4071 int u;
4072 int n;
4073 uint32_t imm;
4074 TCGv tmp;
4075 TCGv tmp2;
4076 TCGv tmp3;
4077 TCGv_i64 tmp64;
4079 if (!vfp_enabled(env))
4080 return 1;
4081 q = (insn & (1 << 6)) != 0;
4082 u = (insn >> 24) & 1;
4083 VFP_DREG_D(rd, insn);
4084 VFP_DREG_N(rn, insn);
4085 VFP_DREG_M(rm, insn);
4086 size = (insn >> 20) & 3;
4087 if ((insn & (1 << 23)) == 0) {
4088 /* Three register same length. */
4089 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4090 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4091 || op == 10 || op == 11 || op == 16)) {
4092 /* 64-bit element instructions. */
4093 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4094 neon_load_reg64(cpu_V0, rn + pass);
4095 neon_load_reg64(cpu_V1, rm + pass);
4096 switch (op) {
4097 case 1: /* VQADD */
4098 if (u) {
4099 gen_helper_neon_add_saturate_u64(CPU_V001);
4100 } else {
4101 gen_helper_neon_add_saturate_s64(CPU_V001);
4103 break;
4104 case 5: /* VQSUB */
4105 if (u) {
4106 gen_helper_neon_sub_saturate_u64(CPU_V001);
4107 } else {
4108 gen_helper_neon_sub_saturate_s64(CPU_V001);
4110 break;
4111 case 8: /* VSHL */
4112 if (u) {
4113 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4114 } else {
4115 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4117 break;
4118 case 9: /* VQSHL */
4119 if (u) {
4120 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4121 cpu_V1, cpu_V0);
4122 } else {
4123 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4124 cpu_V1, cpu_V0);
4126 break;
4127 case 10: /* VRSHL */
4128 if (u) {
4129 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4130 } else {
4131 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4133 break;
4134 case 11: /* VQRSHL */
4135 if (u) {
4136 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4137 cpu_V1, cpu_V0);
4138 } else {
4139 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4140 cpu_V1, cpu_V0);
4142 break;
4143 case 16:
4144 if (u) {
4145 tcg_gen_sub_i64(CPU_V001);
4146 } else {
4147 tcg_gen_add_i64(CPU_V001);
4149 break;
4150 default:
4151 abort();
4153 neon_store_reg64(cpu_V0, rd + pass);
4155 return 0;
4157 switch (op) {
4158 case 8: /* VSHL */
4159 case 9: /* VQSHL */
4160 case 10: /* VRSHL */
4161 case 11: /* VQRSHL */
4163 int rtmp;
4164 /* Shift instruction operands are reversed. */
4165 rtmp = rn;
4166 rn = rm;
4167 rm = rtmp;
4168 pairwise = 0;
4170 break;
4171 case 20: /* VPMAX */
4172 case 21: /* VPMIN */
4173 case 23: /* VPADD */
4174 pairwise = 1;
4175 break;
4176 case 26: /* VPADD (float) */
4177 pairwise = (u && size < 2);
4178 break;
4179 case 30: /* VPMIN/VPMAX (float) */
4180 pairwise = u;
4181 break;
4182 default:
4183 pairwise = 0;
4184 break;
4186 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4188 if (pairwise) {
4189 /* Pairwise. */
4190 if (q)
4191 n = (pass & 1) * 2;
4192 else
4193 n = 0;
4194 if (pass < q + 1) {
4195 NEON_GET_REG(T0, rn, n);
4196 NEON_GET_REG(T1, rn, n + 1);
4197 } else {
4198 NEON_GET_REG(T0, rm, n);
4199 NEON_GET_REG(T1, rm, n + 1);
4201 } else {
4202 /* Elementwise. */
4203 NEON_GET_REG(T0, rn, pass);
4204 NEON_GET_REG(T1, rm, pass);
4206 switch (op) {
4207 case 0: /* VHADD */
4208 GEN_NEON_INTEGER_OP(hadd);
4209 break;
4210 case 1: /* VQADD */
4211 GEN_NEON_INTEGER_OP_ENV(qadd);
4212 break;
4213 case 2: /* VRHADD */
4214 GEN_NEON_INTEGER_OP(rhadd);
4215 break;
4216 case 3: /* Logic ops. */
4217 switch ((u << 2) | size) {
4218 case 0: /* VAND */
4219 gen_op_andl_T0_T1();
4220 break;
4221 case 1: /* BIC */
4222 gen_op_bicl_T0_T1();
4223 break;
4224 case 2: /* VORR */
4225 gen_op_orl_T0_T1();
4226 break;
4227 case 3: /* VORN */
4228 gen_op_notl_T1();
4229 gen_op_orl_T0_T1();
4230 break;
4231 case 4: /* VEOR */
4232 gen_op_xorl_T0_T1();
4233 break;
4234 case 5: /* VBSL */
4235 tmp = neon_load_reg(rd, pass);
4236 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4237 dead_tmp(tmp);
4238 break;
4239 case 6: /* VBIT */
4240 tmp = neon_load_reg(rd, pass);
4241 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4242 dead_tmp(tmp);
4243 break;
4244 case 7: /* VBIF */
4245 tmp = neon_load_reg(rd, pass);
4246 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4247 dead_tmp(tmp);
4248 break;
4250 break;
4251 case 4: /* VHSUB */
4252 GEN_NEON_INTEGER_OP(hsub);
4253 break;
4254 case 5: /* VQSUB */
4255 GEN_NEON_INTEGER_OP_ENV(qsub);
4256 break;
4257 case 6: /* VCGT */
4258 GEN_NEON_INTEGER_OP(cgt);
4259 break;
4260 case 7: /* VCGE */
4261 GEN_NEON_INTEGER_OP(cge);
4262 break;
4263 case 8: /* VSHL */
4264 GEN_NEON_INTEGER_OP(shl);
4265 break;
4266 case 9: /* VQSHL */
4267 GEN_NEON_INTEGER_OP_ENV(qshl);
4268 break;
4269 case 10: /* VRSHL */
4270 GEN_NEON_INTEGER_OP(rshl);
4271 break;
4272 case 11: /* VQRSHL */
4273 GEN_NEON_INTEGER_OP_ENV(qrshl);
4274 break;
4275 case 12: /* VMAX */
4276 GEN_NEON_INTEGER_OP(max);
4277 break;
4278 case 13: /* VMIN */
4279 GEN_NEON_INTEGER_OP(min);
4280 break;
4281 case 14: /* VABD */
4282 GEN_NEON_INTEGER_OP(abd);
4283 break;
4284 case 15: /* VABA */
4285 GEN_NEON_INTEGER_OP(abd);
4286 NEON_GET_REG(T1, rd, pass);
4287 gen_neon_add(size);
4288 break;
4289 case 16:
4290 if (!u) { /* VADD */
4291 if (gen_neon_add(size))
4292 return 1;
4293 } else { /* VSUB */
4294 switch (size) {
4295 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4296 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4297 case 2: gen_op_subl_T0_T1(); break;
4298 default: return 1;
4301 break;
4302 case 17:
4303 if (!u) { /* VTST */
4304 switch (size) {
4305 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4306 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4307 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4308 default: return 1;
4310 } else { /* VCEQ */
4311 switch (size) {
4312 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4313 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4314 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4315 default: return 1;
4318 break;
4319 case 18: /* Multiply. */
4320 switch (size) {
4321 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4322 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4323 case 2: gen_op_mul_T0_T1(); break;
4324 default: return 1;
4326 NEON_GET_REG(T1, rd, pass);
4327 if (u) { /* VMLS */
4328 gen_neon_rsb(size);
4329 } else { /* VMLA */
4330 gen_neon_add(size);
4332 break;
4333 case 19: /* VMUL */
4334 if (u) { /* polynomial */
4335 gen_helper_neon_mul_p8(CPU_T001);
4336 } else { /* Integer */
4337 switch (size) {
4338 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4339 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4340 case 2: gen_op_mul_T0_T1(); break;
4341 default: return 1;
4344 break;
4345 case 20: /* VPMAX */
4346 GEN_NEON_INTEGER_OP(pmax);
4347 break;
4348 case 21: /* VPMIN */
4349 GEN_NEON_INTEGER_OP(pmin);
4350 break;
4351 case 22: /* Multiply high. */
4352 if (!u) { /* VQDMULH */
4353 switch (size) {
4354 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4355 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4356 default: return 1;
4358 } else { /* VQRDMULH */
4359 switch (size) {
4360 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4361 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4362 default: return 1;
4365 break;
4366 case 23: /* VPADD */
4367 if (u)
4368 return 1;
4369 switch (size) {
4370 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4371 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4372 case 2: gen_op_addl_T0_T1(); break;
4373 default: return 1;
4375 break;
4376 case 26: /* Floating point arithmetic. */
4377 switch ((u << 2) | size) {
4378 case 0: /* VADD */
4379 gen_helper_neon_add_f32(CPU_T001);
4380 break;
4381 case 2: /* VSUB */
4382 gen_helper_neon_sub_f32(CPU_T001);
4383 break;
4384 case 4: /* VPADD */
4385 gen_helper_neon_add_f32(CPU_T001);
4386 break;
4387 case 6: /* VABD */
4388 gen_helper_neon_abd_f32(CPU_T001);
4389 break;
4390 default:
4391 return 1;
4393 break;
4394 case 27: /* Float multiply. */
4395 gen_helper_neon_mul_f32(CPU_T001);
4396 if (!u) {
4397 NEON_GET_REG(T1, rd, pass);
4398 if (size == 0) {
4399 gen_helper_neon_add_f32(CPU_T001);
4400 } else {
4401 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4404 break;
4405 case 28: /* Float compare. */
4406 if (!u) {
4407 gen_helper_neon_ceq_f32(CPU_T001);
4408 } else {
4409 if (size == 0)
4410 gen_helper_neon_cge_f32(CPU_T001);
4411 else
4412 gen_helper_neon_cgt_f32(CPU_T001);
4414 break;
4415 case 29: /* Float compare absolute. */
4416 if (!u)
4417 return 1;
4418 if (size == 0)
4419 gen_helper_neon_acge_f32(CPU_T001);
4420 else
4421 gen_helper_neon_acgt_f32(CPU_T001);
4422 break;
4423 case 30: /* Float min/max. */
4424 if (size == 0)
4425 gen_helper_neon_max_f32(CPU_T001);
4426 else
4427 gen_helper_neon_min_f32(CPU_T001);
4428 break;
4429 case 31:
4430 if (size == 0)
4431 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4432 else
4433 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4434 break;
4435 default:
4436 abort();
4438 /* Save the result. For elementwise operations we can put it
4439 straight into the destination register. For pairwise operations
4440 we have to be careful to avoid clobbering the source operands. */
4441 if (pairwise && rd == rm) {
4442 gen_neon_movl_scratch_T0(pass);
4443 } else {
4444 NEON_SET_REG(T0, rd, pass);
4447 } /* for pass */
4448 if (pairwise && rd == rm) {
4449 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4450 gen_neon_movl_T0_scratch(pass);
4451 NEON_SET_REG(T0, rd, pass);
4454 /* End of 3 register same size operations. */
4455 } else if (insn & (1 << 4)) {
4456 if ((insn & 0x00380080) != 0) {
4457 /* Two registers and shift. */
4458 op = (insn >> 8) & 0xf;
4459 if (insn & (1 << 7)) {
4460 /* 64-bit shift. */
4461 size = 3;
4462 } else {
4463 size = 2;
4464 while ((insn & (1 << (size + 19))) == 0)
4465 size--;
4467 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4468 /* To avoid excessive duplication of ops we implement shift
4469 by immediate using the variable shift operations. */
4470 if (op < 8) {
4471 /* Shift by immediate:
4472 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4473 /* Right shifts are encoded as N - shift, where N is the
4474 element size in bits. */
4475 if (op <= 4)
4476 shift = shift - (1 << (size + 3));
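/* E.g. with size == 0 (8-bit elements) an encoded shift field of 7
   becomes 7 - 8 = -1 here; the variable shift helpers treat a
   negative count as a right shift, so this is VSHR #1. */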
4477 if (size == 3) {
4478 count = q + 1;
4479 } else {
4480 count = q ? 4 : 2;
4482 switch (size) {
4483 case 0:
4484 imm = (uint8_t) shift;
4485 imm |= imm << 8;
4486 imm |= imm << 16;
4487 break;
4488 case 1:
4489 imm = (uint16_t) shift;
4490 imm |= imm << 16;
4491 break;
4492 case 2:
4493 case 3:
4494 imm = shift;
4495 break;
4496 default:
4497 abort();
4500 for (pass = 0; pass < count; pass++) {
4501 if (size == 3) {
4502 neon_load_reg64(cpu_V0, rm + pass);
4503 tcg_gen_movi_i64(cpu_V1, imm);
4504 switch (op) {
4505 case 0: /* VSHR */
4506 case 1: /* VSRA */
4507 if (u)
4508 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4509 else
4510 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4511 break;
4512 case 2: /* VRSHR */
4513 case 3: /* VRSRA */
4514 if (u)
4515 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4516 else
4517 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4518 break;
4519 case 4: /* VSRI */
4520 if (!u)
4521 return 1;
4522 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4523 break;
4524 case 5: /* VSHL, VSLI */
4525 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4526 break;
4527 case 6: /* VQSHL */
4528 if (u)
4529 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4530 else
4531 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4532 break;
4533 case 7: /* VQSHLU */
4534 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4535 break;
4537 if (op == 1 || op == 3) {
4538 /* Accumulate. */
4539 neon_load_reg64(cpu_V1, rd + pass);
4540 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4541 } else if (op == 4 || (op == 5 && u)) {
4542 /* Insert */
4543 cpu_abort(env, "VS[LR]I.64 not implemented");
4545 neon_store_reg64(cpu_V0, rd + pass);
4546 } else { /* size < 3 */
4547 /* Operands in T0 and T1. */
4548 gen_op_movl_T1_im(imm);
4549 NEON_GET_REG(T0, rm, pass);
4550 switch (op) {
4551 case 0: /* VSHR */
4552 case 1: /* VSRA */
4553 GEN_NEON_INTEGER_OP(shl);
4554 break;
4555 case 2: /* VRSHR */
4556 case 3: /* VRSRA */
4557 GEN_NEON_INTEGER_OP(rshl);
4558 break;
4559 case 4: /* VSRI */
4560 if (!u)
4561 return 1;
4562 GEN_NEON_INTEGER_OP(shl);
4563 break;
4564 case 5: /* VSHL, VSLI */
4565 switch (size) {
4566 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4567 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4568 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4569 default: return 1;
4571 break;
4572 case 6: /* VQSHL */
4573 GEN_NEON_INTEGER_OP_ENV(qshl);
4574 break;
4575 case 7: /* VQSHLU */
4576 switch (size) {
4577 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4578 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4579 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4580 default: return 1;
4582 break;
4585 if (op == 1 || op == 3) {
4586 /* Accumulate. */
4587 NEON_GET_REG(T1, rd, pass);
4588 gen_neon_add(size);
4589 } else if (op == 4 || (op == 5 && u)) {
4590 /* Insert */
4591 switch (size) {
4592 case 0:
4593 if (op == 4)
4594 imm = 0xff >> -shift;
4595 else
4596 imm = (uint8_t)(0xff << shift);
4597 imm |= imm << 8;
4598 imm |= imm << 16;
4599 break;
4600 case 1:
4601 if (op == 4)
4602 imm = 0xffff >> -shift;
4603 else
4604 imm = (uint16_t)(0xffff << shift);
4605 imm |= imm << 16;
4606 break;
4607 case 2:
4608 if (op == 4)
4609 imm = 0xffffffffu >> -shift;
4610 else
4611 imm = 0xffffffffu << shift;
4612 break;
4613 default:
4614 abort();
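/* imm marks the bits taken from the shifted value. E.g. for
   VSRI.8 #3, shift is -3 and imm becomes 0x1f1f1f1f: each result
   byte keeps the top 3 bits of the destination and takes the low
   5 bits from the right-shifted source. */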
4616 tmp = neon_load_reg(rd, pass);
4617 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4618 tcg_gen_andi_i32(tmp, tmp, ~imm);
4619 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4621 NEON_SET_REG(T0, rd, pass);
4623 } /* for pass */
4624 } else if (op < 10) {
4625 /* Shift by immediate and narrow:
4626 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4627 shift = shift - (1 << (size + 3));
4628 size++;
4629 switch (size) {
4630 case 1:
4631 imm = (uint16_t)shift;
4632 imm |= imm << 16;
4633 tmp2 = tcg_const_i32(imm);
4634 TCGV_UNUSED_I64(tmp64);
4635 break;
4636 case 2:
4637 imm = (uint32_t)shift;
4638 tmp2 = tcg_const_i32(imm);
4639 TCGV_UNUSED_I64(tmp64);
4640 break;
4641 case 3:
4642 tmp64 = tcg_const_i64(shift);
4643 TCGV_UNUSED(tmp2);
4644 break;
4645 default:
4646 abort();
4649 for (pass = 0; pass < 2; pass++) {
4650 if (size == 3) {
4651 neon_load_reg64(cpu_V0, rm + pass);
4652 if (q) {
4653 if (u)
4654 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4655 else
4656 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4657 } else {
4658 if (u)
4659 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4660 else
4661 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4663 } else {
4664 tmp = neon_load_reg(rm + pass, 0);
4665 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4666 tmp3 = neon_load_reg(rm + pass, 1);
4667 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4668 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4669 dead_tmp(tmp);
4670 dead_tmp(tmp3);
4672 tmp = new_tmp();
4673 if (op == 8 && !u) {
4674 gen_neon_narrow(size - 1, tmp, cpu_V0);
4675 } else {
4676 if (op == 8)
4677 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4678 else
4679 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4681 if (pass == 0) {
4682 tmp2 = tmp;
4683 } else {
4684 neon_store_reg(rd, 0, tmp2);
4685 neon_store_reg(rd, 1, tmp);
4687 } /* for pass */
4688 } else if (op == 10) {
4689 /* VSHLL */
4690 if (q || size == 3)
4691 return 1;
4692 tmp = neon_load_reg(rm, 0);
4693 tmp2 = neon_load_reg(rm, 1);
4694 for (pass = 0; pass < 2; pass++) {
4695 if (pass == 1)
4696 tmp = tmp2;
4698 gen_neon_widen(cpu_V0, tmp, size, u);
4700 if (shift != 0) {
4701 /* The shift is less than the width of the source
4702 type, so we can just shift the whole register. */
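/* For signed input the widened lanes are sign-extended, so the
   whole-register shift drags sign bits into the bottom of the
   next lane; the mask below clears those low "shift" bits of
   each lane (harmlessly including lane 0). */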
4703 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4704 if (size < 2 || !u) {
4705 uint64_t imm64;
4706 if (size == 0) {
4707 imm = (0xffu >> (8 - shift));
4708 imm |= imm << 16;
4709 } else {
4710 imm = 0xffff >> (16 - shift);
4712 imm64 = imm | (((uint64_t)imm) << 32);
4713 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4716 neon_store_reg64(cpu_V0, rd + pass);
4718 } else if (op == 15 || op == 16) {
4719 /* VCVT fixed-point. */
4720 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4721 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4722 if (op & 1) {
4723 if (u)
4724 gen_vfp_ulto(0, shift);
4725 else
4726 gen_vfp_slto(0, shift);
4727 } else {
4728 if (u)
4729 gen_vfp_toul(0, shift);
4730 else
4731 gen_vfp_tosl(0, shift);
4733 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4735 } else {
4736 return 1;
4738 } else { /* (insn & 0x00380080) == 0 */
4739 int invert;
4741 op = (insn >> 8) & 0xf;
4742 /* One register and immediate. */
4743 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
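/* Assemble the 8-bit "abcdefgh" modified-immediate field:
   a = insn[24] (u here), bcd = insn[18:16], efgh = insn[3:0]. */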
4744 invert = (insn & (1 << 5)) != 0;
4745 switch (op) {
4746 case 0: case 1:
4747 /* no-op */
4748 break;
4749 case 2: case 3:
4750 imm <<= 8;
4751 break;
4752 case 4: case 5:
4753 imm <<= 16;
4754 break;
4755 case 6: case 7:
4756 imm <<= 24;
4757 break;
4758 case 8: case 9:
4759 imm |= imm << 16;
4760 break;
4761 case 10: case 11:
4762 imm = (imm << 8) | (imm << 24);
4763 break;
4764 case 12:
4765 imm = (imm << 8) | 0xff;
4766 break;
4767 case 13:
4768 imm = (imm << 16) | 0xffff;
4769 break;
4770 case 14:
4771 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4772 if (invert)
4773 imm = ~imm;
4774 break;
4775 case 15:
4776 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4777 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
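/* Expands the 8-bit field to a 32-bit float immediate, e.g.
   imm8 = 0x70 gives 0x3f800000 == 1.0f. */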
4778 break;
4780 if (invert)
4781 imm = ~imm;
4783 if (op != 14 || !invert)
4784 gen_op_movl_T1_im(imm);
4786 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4787 if (op & 1 && op < 12) {
4788 tmp = neon_load_reg(rd, pass);
4789 if (invert) {
4790 /* The immediate value has already been inverted, so
4791 BIC becomes AND. */
4792 tcg_gen_andi_i32(tmp, tmp, imm);
4793 } else {
4794 tcg_gen_ori_i32(tmp, tmp, imm);
4796 } else {
4797 /* VMOV, VMVN. */
4798 tmp = new_tmp();
4799 if (op == 14 && invert) {
4800 uint32_t val;
4801 val = 0;
4802 for (n = 0; n < 4; n++) {
4803 if (imm & (1 << (n + (pass & 1) * 4)))
4804 val |= 0xff << (n * 8);
4806 tcg_gen_movi_i32(tmp, val);
4807 } else {
4808 tcg_gen_movi_i32(tmp, imm);
4811 neon_store_reg(rd, pass, tmp);
4814 } else { /* (insn & 0x00800010) == 0x00800000 */
4815 if (size != 3) {
4816 op = (insn >> 8) & 0xf;
4817 if ((insn & (1 << 6)) == 0) {
4818 /* Three registers of different lengths. */
4819 int src1_wide;
4820 int src2_wide;
4821 int prewiden;
4822 /* prewiden, src1_wide, src2_wide */
4823 static const int neon_3reg_wide[16][3] = {
4824 {1, 0, 0}, /* VADDL */
4825 {1, 1, 0}, /* VADDW */
4826 {1, 0, 0}, /* VSUBL */
4827 {1, 1, 0}, /* VSUBW */
4828 {0, 1, 1}, /* VADDHN */
4829 {0, 0, 0}, /* VABAL */
4830 {0, 1, 1}, /* VSUBHN */
4831 {0, 0, 0}, /* VABDL */
4832 {0, 0, 0}, /* VMLAL */
4833 {0, 0, 0}, /* VQDMLAL */
4834 {0, 0, 0}, /* VMLSL */
4835 {0, 0, 0}, /* VQDMLSL */
4836 {0, 0, 0}, /* Integer VMULL */
4837 {0, 0, 0}, /* VQDMULL */
4838 {0, 0, 0} /* Polynomial VMULL */
4841 prewiden = neon_3reg_wide[op][0];
4842 src1_wide = neon_3reg_wide[op][1];
4843 src2_wide = neon_3reg_wide[op][2];
4845 if (size == 0 && (op == 9 || op == 11 || op == 13))
4846 return 1;
4848 /* Avoid overlapping operands. Wide source operands are
4849 always aligned so will never overlap with wide
4850 destinations in problematic ways. */
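/* E.g. a widening op with Dd == Dm: pass 0 writes all 64 bits of
   Dd, which is Dm itself, so Dm's high half is stashed in a
   scratch first and pass 1 reads it from there. */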
4851 if (rd == rm && !src2_wide) {
4852 NEON_GET_REG(T0, rm, 1);
4853 gen_neon_movl_scratch_T0(2);
4854 } else if (rd == rn && !src1_wide) {
4855 NEON_GET_REG(T0, rn, 1);
4856 gen_neon_movl_scratch_T0(2);
4858 TCGV_UNUSED(tmp3);
4859 for (pass = 0; pass < 2; pass++) {
4860 if (src1_wide) {
4861 neon_load_reg64(cpu_V0, rn + pass);
4862 TCGV_UNUSED(tmp);
4863 } else {
4864 if (pass == 1 && rd == rn) {
4865 gen_neon_movl_T0_scratch(2);
4866 tmp = new_tmp();
4867 tcg_gen_mov_i32(tmp, cpu_T[0]);
4868 } else {
4869 tmp = neon_load_reg(rn, pass);
4871 if (prewiden) {
4872 gen_neon_widen(cpu_V0, tmp, size, u);
4875 if (src2_wide) {
4876 neon_load_reg64(cpu_V1, rm + pass);
4877 TCGV_UNUSED(tmp2);
4878 } else {
4879 if (pass == 1 && rd == rm) {
4880 gen_neon_movl_T0_scratch(2);
4881 tmp2 = new_tmp();
4882 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4883 } else {
4884 tmp2 = neon_load_reg(rm, pass);
4886 if (prewiden) {
4887 gen_neon_widen(cpu_V1, tmp2, size, u);
4890 switch (op) {
4891 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4892 gen_neon_addl(size);
4893 break;
4894 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4895 gen_neon_subl(size);
4896 break;
4897 case 5: case 7: /* VABAL, VABDL */
4898 switch ((size << 1) | u) {
4899 case 0:
4900 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4901 break;
4902 case 1:
4903 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4904 break;
4905 case 2:
4906 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4907 break;
4908 case 3:
4909 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4910 break;
4911 case 4:
4912 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4913 break;
4914 case 5:
4915 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4916 break;
4917 default: abort();
4919 dead_tmp(tmp2);
4920 dead_tmp(tmp);
4921 break;
4922 case 8: case 9: case 10: case 11: case 12: case 13:
4923 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4924 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4925 break;
4926 case 14: /* Polynomial VMULL */
4927 cpu_abort(env, "Polynomial VMULL not implemented");
4929 default: /* 15 is RESERVED. */
4930 return 1;
4932 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4933 /* Accumulate. */
4934 if (op == 10 || op == 11) {
4935 gen_neon_negl(cpu_V0, size);
4938 if (op != 13) {
4939 neon_load_reg64(cpu_V1, rd + pass);
4942 switch (op) {
4943 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4944 gen_neon_addl(size);
4945 break;
4946 case 9: case 11: /* VQDMLAL, VQDMLSL */
4947 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4948 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4949 break;
4951 case 13: /* VQDMULL */
4952 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4953 break;
4954 default:
4955 abort();
4957 neon_store_reg64(cpu_V0, rd + pass);
4958 } else if (op == 4 || op == 6) {
4959 /* Narrowing operation. */
4960 tmp = new_tmp();
4961 if (u) {
4962 switch (size) {
4963 case 0:
4964 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4965 break;
4966 case 1:
4967 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4968 break;
4969 case 2:
4970 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4971 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4972 break;
4973 default: abort();
4975 } else {
4976 switch (size) {
4977 case 0:
4978 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4979 break;
4980 case 1:
4981 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4982 break;
4983 case 2:
4984 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4985 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4986 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4987 break;
4988 default: abort();
4991 if (pass == 0) {
4992 tmp3 = tmp;
4993 } else {
4994 neon_store_reg(rd, 0, tmp3);
4995 neon_store_reg(rd, 1, tmp);
4997 } else {
4998 /* Write back the result. */
4999 neon_store_reg64(cpu_V0, rd + pass);
5002 } else {
5003 /* Two registers and a scalar. */
5004 switch (op) {
5005 case 0: /* Integer VMLA scalar */
5006 case 1: /* Float VMLA scalar */
5007 case 4: /* Integer VMLS scalar */
5008 case 5: /* Floating point VMLS scalar */
5009 case 8: /* Integer VMUL scalar */
5010 case 9: /* Floating point VMUL scalar */
5011 case 12: /* VQDMULH scalar */
5012 case 13: /* VQRDMULH scalar */
5013 gen_neon_get_scalar(size, rm);
5014 gen_neon_movl_scratch_T0(0);
5015 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5016 if (pass != 0)
5017 gen_neon_movl_T0_scratch(0);
5018 NEON_GET_REG(T1, rn, pass);
5019 if (op == 12) {
5020 if (size == 1) {
5021 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5022 } else {
5023 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5025 } else if (op == 13) {
5026 if (size == 1) {
5027 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5028 } else {
5029 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5031 } else if (op & 1) {
5032 gen_helper_neon_mul_f32(CPU_T001);
5033 } else {
5034 switch (size) {
5035 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5036 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5037 case 2: gen_op_mul_T0_T1(); break;
5038 default: return 1;
5041 if (op < 8) {
5042 /* Accumulate. */
5043 NEON_GET_REG(T1, rd, pass);
5044 switch (op) {
5045 case 0:
5046 gen_neon_add(size);
5047 break;
5048 case 1:
5049 gen_helper_neon_add_f32(CPU_T001);
5050 break;
5051 case 4:
5052 gen_neon_rsb(size);
5053 break;
5054 case 5:
5055 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5056 break;
5057 default:
5058 abort();
5061 NEON_SET_REG(T0, rd, pass);
5063 break;
5064 case 2: /* VMLAL scalar */
5065 case 3: /* VQDMLAL scalar */
5066 case 6: /* VMLSL scalar */
5067 case 7: /* VQDMLSL scalar */
5068 case 10: /* VMULL scalar */
5069 case 11: /* VQDMULL scalar */
5070 if (size == 0 && (op == 3 || op == 7 || op == 11))
5071 return 1;
5073 gen_neon_get_scalar(size, rm);
5074 NEON_GET_REG(T1, rn, 1);
5076 for (pass = 0; pass < 2; pass++) {
5077 if (pass == 0) {
5078 tmp = neon_load_reg(rn, 0);
5079 } else {
5080 tmp = new_tmp();
5081 tcg_gen_mov_i32(tmp, cpu_T[1]);
5083 tmp2 = new_tmp();
5084 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5085 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5086 if (op == 6 || op == 7) {
5087 gen_neon_negl(cpu_V0, size);
5089 if (op != 11) {
5090 neon_load_reg64(cpu_V1, rd + pass);
5092 switch (op) {
5093 case 2: case 6:
5094 gen_neon_addl(size);
5095 break;
5096 case 3: case 7:
5097 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5098 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5099 break;
5100 case 10:
5101 /* no-op */
5102 break;
5103 case 11:
5104 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5105 break;
5106 default:
5107 abort();
5109 neon_store_reg64(cpu_V0, rd + pass);
5111 break;
5112 default: /* 14 and 15 are RESERVED */
5113 return 1;
5116 } else { /* size == 3 */
5117 if (!u) {
5118 /* Extract. */
5119 imm = (insn >> 8) & 0xf;
5120 count = q + 1;
5122 if (imm > 7 && !q)
5123 return 1;
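/* VEXT concatenates Vm:Vn (Vn at the bottom) and extracts a
   register-sized result starting at byte index imm, e.g.
   VEXT.8 Dd, Dn, Dm, #3 gives bytes 3..7 of Dn followed by
   bytes 0..2 of Dm. */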
5125 if (imm == 0) {
5126 neon_load_reg64(cpu_V0, rn);
5127 if (q) {
5128 neon_load_reg64(cpu_V1, rn + 1);
5130 } else if (imm == 8) {
5131 neon_load_reg64(cpu_V0, rn + 1);
5132 if (q) {
5133 neon_load_reg64(cpu_V1, rm);
5135 } else if (q) {
5136 tmp64 = tcg_temp_new_i64();
5137 if (imm < 8) {
5138 neon_load_reg64(cpu_V0, rn);
5139 neon_load_reg64(tmp64, rn + 1);
5140 } else {
5141 neon_load_reg64(cpu_V0, rn + 1);
5142 neon_load_reg64(tmp64, rm);
5144 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5145 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5146 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5147 if (imm < 8) {
5148 neon_load_reg64(cpu_V1, rm);
5149 } else {
5150 neon_load_reg64(cpu_V1, rm + 1);
5151 imm -= 8;
5153 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5154 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5155 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5156 } else {
5157 /* BUGFIX */
5158 neon_load_reg64(cpu_V0, rn);
5159 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5160 neon_load_reg64(cpu_V1, rm);
5161 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5162 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5164 neon_store_reg64(cpu_V0, rd);
5165 if (q) {
5166 neon_store_reg64(cpu_V1, rd + 1);
5168 } else if ((insn & (1 << 11)) == 0) {
5169 /* Two register misc. */
5170 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5171 size = (insn >> 18) & 3;
5172 switch (op) {
5173 case 0: /* VREV64 */
5174 if (size == 3)
5175 return 1;
5176 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5177 NEON_GET_REG(T0, rm, pass * 2);
5178 NEON_GET_REG(T1, rm, pass * 2 + 1);
5179 switch (size) {
5180 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5181 case 1: gen_swap_half(cpu_T[0]); break;
5182 case 2: /* no-op */ break;
5183 default: abort();
5185 NEON_SET_REG(T0, rd, pass * 2 + 1);
5186 if (size == 2) {
5187 NEON_SET_REG(T1, rd, pass * 2);
5188 } else {
5189 gen_op_movl_T0_T1();
5190 switch (size) {
5191 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5192 case 1: gen_swap_half(cpu_T[0]); break;
5193 default: abort();
5195 NEON_SET_REG(T0, rd, pass * 2);
5198 break;
5199 case 4: case 5: /* VPADDL */
5200 case 12: case 13: /* VPADAL */
5201 if (size == 3)
5202 return 1;
5203 for (pass = 0; pass < q + 1; pass++) {
5204 tmp = neon_load_reg(rm, pass * 2);
5205 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5206 tmp = neon_load_reg(rm, pass * 2 + 1);
5207 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5208 switch (size) {
5209 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5210 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5211 case 2: tcg_gen_add_i64(CPU_V001); break;
5212 default: abort();
5214 if (op >= 12) {
5215 /* Accumulate. */
5216 neon_load_reg64(cpu_V1, rd + pass);
5217 gen_neon_addl(size);
5219 neon_store_reg64(cpu_V0, rd + pass);
5221 break;
5222 case 33: /* VTRN */
5223 if (size == 2) {
5224 for (n = 0; n < (q ? 4 : 2); n += 2) {
5225 NEON_GET_REG(T0, rm, n);
5226 NEON_GET_REG(T1, rd, n + 1);
5227 NEON_SET_REG(T1, rm, n);
5228 NEON_SET_REG(T0, rd, n + 1);
5230 } else {
5231 goto elementwise;
5233 break;
5234 case 34: /* VUZP */
5235 /* Reg Before After
5236 Rd A3 A2 A1 A0 B2 B0 A2 A0
5237 Rm B3 B2 B1 B0 B3 B1 A3 A1
5239 if (size == 3)
5240 return 1;
5241 gen_neon_unzip(rd, q, 0, size);
5242 gen_neon_unzip(rm, q, 4, size);
5243 if (q) {
5244 static int unzip_order_q[8] =
5245 {0, 2, 4, 6, 1, 3, 5, 7};
5246 for (n = 0; n < 8; n++) {
5247 int reg = (n < 4) ? rd : rm;
5248 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5249 NEON_SET_REG(T0, reg, n % 4);
5251 } else {
5252 static int unzip_order[4] =
5253 {0, 4, 1, 5};
5254 for (n = 0; n < 4; n++) {
5255 int reg = (n < 2) ? rd : rm;
5256 gen_neon_movl_T0_scratch(unzip_order[n]);
5257 NEON_SET_REG(T0, reg, n % 2);
5260 break;
5261 case 35: /* VZIP */
5262 /* Reg Before After
5263 Rd A3 A2 A1 A0 B1 A1 B0 A0
5264 Rm B3 B2 B1 B0 B3 A3 B2 A2
5266 if (size == 3)
5267 return 1;
5268 count = (q ? 4 : 2);
5269 for (n = 0; n < count; n++) {
5270 NEON_GET_REG(T0, rd, n);
5271 NEON_GET_REG(T1, rm, n);
5272 switch (size) {
5273 case 0: gen_helper_neon_zip_u8(); break;
5274 case 1: gen_helper_neon_zip_u16(); break;
5275 case 2: /* no-op */; break;
5276 default: abort();
5278 gen_neon_movl_scratch_T0(n * 2);
5279 gen_neon_movl_scratch_T1(n * 2 + 1);
5281 for (n = 0; n < count * 2; n++) {
5282 int reg = (n < count) ? rd : rm;
5283 gen_neon_movl_T0_scratch(n);
5284 NEON_SET_REG(T0, reg, n % count);
5286 break;
5287 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5288 if (size == 3)
5289 return 1;
5290 TCGV_UNUSED(tmp2);
5291 for (pass = 0; pass < 2; pass++) {
5292 neon_load_reg64(cpu_V0, rm + pass);
5293 tmp = new_tmp();
5294 if (op == 36 && q == 0) {
5295 gen_neon_narrow(size, tmp, cpu_V0);
5296 } else if (q) {
5297 gen_neon_narrow_satu(size, tmp, cpu_V0);
5298 } else {
5299 gen_neon_narrow_sats(size, tmp, cpu_V0);
5301 if (pass == 0) {
5302 tmp2 = tmp;
5303 } else {
5304 neon_store_reg(rd, 0, tmp2);
5305 neon_store_reg(rd, 1, tmp);
5308 break;
5309 case 38: /* VSHLL */
5310 if (q || size == 3)
5311 return 1;
5312 tmp = neon_load_reg(rm, 0);
5313 tmp2 = neon_load_reg(rm, 1);
5314 for (pass = 0; pass < 2; pass++) {
5315 if (pass == 1)
5316 tmp = tmp2;
5317 gen_neon_widen(cpu_V0, tmp, size, 1);
5318 neon_store_reg64(cpu_V0, rd + pass);
5320 break;
5321 default:
5322 elementwise:
5323 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5324 if (op == 30 || op == 31 || op >= 58) {
5325 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5326 neon_reg_offset(rm, pass));
5327 } else {
5328 NEON_GET_REG(T0, rm, pass);
5330 switch (op) {
5331 case 1: /* VREV32 */
5332 switch (size) {
5333 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5334 case 1: gen_swap_half(cpu_T[0]); break;
5335 default: return 1;
5337 break;
5338 case 2: /* VREV16 */
5339 if (size != 0)
5340 return 1;
5341 gen_rev16(cpu_T[0]);
5342 break;
5343 case 8: /* CLS */
5344 switch (size) {
5345 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5346 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5347 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5348 default: return 1;
5350 break;
5351 case 9: /* CLZ */
5352 switch (size) {
5353 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5354 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5355 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5356 default: return 1;
5358 break;
5359 case 10: /* CNT */
5360 if (size != 0)
5361 return 1;
5362 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5363 break;
5364 case 11: /* VNOT */
5365 if (size != 0)
5366 return 1;
5367 gen_op_notl_T0();
5368 break;
5369 case 14: /* VQABS */
5370 switch (size) {
5371 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5372 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5373 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5374 default: return 1;
5376 break;
5377 case 15: /* VQNEG */
5378 switch (size) {
5379 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5380 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5381 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5382 default: return 1;
5384 break;
5385 case 16: case 19: /* VCGT #0, VCLE #0 */
5386 gen_op_movl_T1_im(0);
5387 switch(size) {
5388 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5389 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5390 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5391 default: return 1;
5393 if (op == 19)
5394 gen_op_notl_T0();
5395 break;
5396 case 17: case 20: /* VCGE #0, VCLT #0 */
5397 gen_op_movl_T1_im(0);
5398 switch(size) {
5399 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5400 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5401 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5402 default: return 1;
5404 if (op == 20)
5405 gen_op_notl_T0();
5406 break;
5407 case 18: /* VCEQ #0 */
5408 gen_op_movl_T1_im(0);
5409 switch(size) {
5410 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5411 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5412 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5413 default: return 1;
5415 break;
5416 case 22: /* VABS */
5417 switch(size) {
5418 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5419 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5420 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5421 default: return 1;
5423 break;
5424 case 23: /* VNEG */
5425 gen_op_movl_T1_im(0);
5426 if (size == 3)
5427 return 1;
5428 gen_neon_rsb(size);
5429 break;
5430 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5431 gen_op_movl_T1_im(0);
5432 gen_helper_neon_cgt_f32(CPU_T001);
5433 if (op == 27)
5434 gen_op_notl_T0();
5435 break;
5436 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5437 gen_op_movl_T1_im(0);
5438 gen_helper_neon_cge_f32(CPU_T001);
5439 if (op == 28)
5440 gen_op_notl_T0();
5441 break;
5442 case 26: /* Float VCEQ #0 */
5443 gen_op_movl_T1_im(0);
5444 gen_helper_neon_ceq_f32(CPU_T001);
5445 break;
5446 case 30: /* Float VABS */
5447 gen_vfp_abs(0);
5448 break;
5449 case 31: /* Float VNEG */
5450 gen_vfp_neg(0);
5451 break;
5452 case 32: /* VSWP */
5453 NEON_GET_REG(T1, rd, pass);
5454 NEON_SET_REG(T1, rm, pass);
5455 break;
5456 case 33: /* VTRN */
5457 NEON_GET_REG(T1, rd, pass);
5458 switch (size) {
5459 case 0: gen_helper_neon_trn_u8(); break;
5460 case 1: gen_helper_neon_trn_u16(); break;
5461 case 2: abort();
5462 default: return 1;
5464 NEON_SET_REG(T1, rm, pass);
5465 break;
5466 case 56: /* Integer VRECPE */
5467 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5468 break;
5469 case 57: /* Integer VRSQRTE */
5470 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5471 break;
5472 case 58: /* Float VRECPE */
5473 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5474 break;
5475 case 59: /* Float VRSQRTE */
5476 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5477 break;
5478 case 60: /* VCVT.F32.S32 */
5479 gen_vfp_tosiz(0);
5480 break;
5481 case 61: /* VCVT.F32.U32 */
5482 gen_vfp_touiz(0);
5483 break;
5484 case 62: /* VCVT.S32.F32 */
5485 gen_vfp_sito(0);
5486 break;
5487 case 63: /* VCVT.U32.F32 */
5488 gen_vfp_uito(0);
5489 break;
5490 default:
5491 /* Reserved: 21, 29, 39-56 */
5492 return 1;
5494 if (op == 30 || op == 31 || op >= 58) {
5495 tcg_gen_st_f32(cpu_F0s, cpu_env,
5496 neon_reg_offset(rd, pass));
5497 } else {
5498 NEON_SET_REG(T0, rd, pass);
5501 break;
5503 } else if ((insn & (1 << 10)) == 0) {
5504 /* VTBL, VTBX. */
5505 n = ((insn >> 5) & 0x18) + 8;
5506 if (insn & (1 << 6)) {
5507 tmp = neon_load_reg(rd, 0);
5508 } else {
5509 tmp = new_tmp();
5510 tcg_gen_movi_i32(tmp, 0);
5512 tmp2 = neon_load_reg(rm, 0);
5513 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5514 tcg_const_i32(n));
5515 dead_tmp(tmp);
5516 if (insn & (1 << 6)) {
5517 tmp = neon_load_reg(rd, 1);
5518 } else {
5519 tmp = new_tmp();
5520 tcg_gen_movi_i32(tmp, 0);
5522 tmp3 = neon_load_reg(rm, 1);
5523 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5524 tcg_const_i32(n));
5525 neon_store_reg(rd, 0, tmp2);
5526 neon_store_reg(rd, 1, tmp3);
5527 dead_tmp(tmp);
5528 } else if ((insn & 0x380) == 0) {
5529 /* VDUP */
5530 if (insn & (1 << 19)) {
5531 NEON_GET_REG(T0, rm, 1);
5532 } else {
5533 NEON_GET_REG(T0, rm, 0);
5535 if (insn & (1 << 16)) {
5536 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5537 } else if (insn & (1 << 17)) {
5538 if ((insn >> 18) & 1)
5539 gen_neon_dup_high16(cpu_T[0]);
5540 else
5541 gen_neon_dup_low16(cpu_T[0]);
5543 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5544 NEON_SET_REG(T0, rd, pass);
5546 } else {
5547 return 1;
5551 return 0;
5554 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5556 int crn = (insn >> 16) & 0xf;
5557 int crm = insn & 0xf;
5558 int op1 = (insn >> 21) & 7;
5559 int op2 = (insn >> 5) & 7;
5560 int rt = (insn >> 12) & 0xf;
5561 TCGv tmp;
5563 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5564 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5565 /* TEECR */
5566 if (IS_USER(s))
5567 return 1;
5568 tmp = load_cpu_field(teecr);
5569 store_reg(s, rt, tmp);
5570 return 0;
5572 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5573 /* TEEHBR */
5574 if (IS_USER(s) && (env->teecr & 1))
5575 return 1;
5576 tmp = load_cpu_field(teehbr);
5577 store_reg(s, rt, tmp);
5578 return 0;
5581 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5582 op1, crn, crm, op2);
5583 return 1;
5586 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5588 int crn = (insn >> 16) & 0xf;
5589 int crm = insn & 0xf;
5590 int op1 = (insn >> 21) & 7;
5591 int op2 = (insn >> 5) & 7;
5592 int rt = (insn >> 12) & 0xf;
5593 TCGv tmp;
5595 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5596 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5597 /* TEECR */
5598 if (IS_USER(s))
5599 return 1;
5600 tmp = load_reg(s, rt);
5601 gen_helper_set_teecr(cpu_env, tmp);
5602 dead_tmp(tmp);
5603 return 0;
5605 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5606 /* TEEHBR */
5607 if (IS_USER(s) && (env->teecr & 1))
5608 return 1;
5609 tmp = load_reg(s, rt);
5610 store_cpu_field(tmp, teehbr);
5611 return 0;
5614 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5615 op1, crn, crm, op2);
5616 return 1;
5619 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5621 int cpnum;
5623 cpnum = (insn >> 8) & 0xf;
5624 if (arm_feature(env, ARM_FEATURE_XSCALE)
5625 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5626 return 1;
5628 switch (cpnum) {
5629 case 0:
5630 case 1:
5631 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5632 return disas_iwmmxt_insn(env, s, insn);
5633 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5634 return disas_dsp_insn(env, s, insn);
5636 return 1;
5637 case 10:
5638 case 11:
5639 return disas_vfp_insn (env, s, insn);
5640 case 14:
5641 /* Coprocessors 7-15 are architecturally reserved by ARM.
5642 Unfortunately Intel decided to ignore this. */
5643 if (arm_feature(env, ARM_FEATURE_XSCALE))
5644 goto board;
5645 if (insn & (1 << 20))
5646 return disas_cp14_read(env, s, insn);
5647 else
5648 return disas_cp14_write(env, s, insn);
5649 case 15:
5650 return disas_cp15_insn (env, s, insn);
5651 default:
5652 board:
5653 /* Unknown coprocessor. See if the board has hooked it. */
5654 return disas_cp_insn (env, s, insn);
5659 /* Store a 64-bit value to a register pair. Clobbers val. */
5660 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5662 TCGv tmp;
5663 tmp = new_tmp();
5664 tcg_gen_trunc_i64_i32(tmp, val);
5665 store_reg(s, rlow, tmp);
5666 tmp = new_tmp();
5667 tcg_gen_shri_i64(val, val, 32);
5668 tcg_gen_trunc_i64_i32(tmp, val);
5669 store_reg(s, rhigh, tmp);
5672 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5673 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5675 TCGv_i64 tmp;
5676 TCGv tmp2;
5678 /* Load value and extend to 64 bits. */
5679 tmp = tcg_temp_new_i64();
5680 tmp2 = load_reg(s, rlow);
5681 tcg_gen_extu_i32_i64(tmp, tmp2);
5682 dead_tmp(tmp2);
5683 tcg_gen_add_i64(val, val, tmp);
5686 /* load and add a 64-bit value from a register pair. */
5687 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5689 TCGv_i64 tmp;
5690 TCGv tmpl;
5691 TCGv tmph;
5693 /* Load 64-bit value rd:rn. */
5694 tmpl = load_reg(s, rlow);
5695 tmph = load_reg(s, rhigh);
5696 tmp = tcg_temp_new_i64();
5697 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5698 dead_tmp(tmpl);
5699 dead_tmp(tmph);
5700 tcg_gen_add_i64(val, val, tmp);
5703 /* Set N and Z flags from a 64-bit value. */
5704 static void gen_logicq_cc(TCGv_i64 val)
5706 TCGv tmp = new_tmp();
5707 gen_helper_logicq_cc(tmp, val);
5708 gen_logic_CC(tmp);
5709 dead_tmp(tmp);
5712 static void disas_arm_insn(CPUState * env, DisasContext *s)
5714 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5715 TCGv tmp;
5716 TCGv tmp2;
5717 TCGv tmp3;
5718 TCGv addr;
5719 TCGv_i64 tmp64;
5721 insn = ldl_code(s->pc);
5722 s->pc += 4;
5724 /* M variants do not implement ARM mode. */
5725 if (IS_M(env))
5726 goto illegal_op;
5727 cond = insn >> 28;
5728 if (cond == 0xf){
5729 /* Unconditional instructions. */
5730 if (((insn >> 25) & 7) == 1) {
5731 /* NEON Data processing. */
5732 if (!arm_feature(env, ARM_FEATURE_NEON))
5733 goto illegal_op;
5735 if (disas_neon_data_insn(env, s, insn))
5736 goto illegal_op;
5737 return;
5739 if ((insn & 0x0f100000) == 0x04000000) {
5740 /* NEON load/store. */
5741 if (!arm_feature(env, ARM_FEATURE_NEON))
5742 goto illegal_op;
5744 if (disas_neon_ls_insn(env, s, insn))
5745 goto illegal_op;
5746 return;
5748 if ((insn & 0x0d70f000) == 0x0550f000)
5749 return; /* PLD */
5750 else if ((insn & 0x0ffffdff) == 0x01010000) {
5751 ARCH(6);
5752 /* setend */
5753 if (insn & (1 << 9)) {
5754 /* BE8 mode not implemented. */
5755 goto illegal_op;
5757 return;
5758 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5759 switch ((insn >> 4) & 0xf) {
5760 case 1: /* clrex */
5761 ARCH(6K);
5762 gen_helper_clrex(cpu_env);
5763 return;
5764 case 4: /* dsb */
5765 case 5: /* dmb */
5766 case 6: /* isb */
5767 ARCH(7);
5768 /* We don't emulate caches so these are a no-op. */
5769 return;
5770 default:
5771 goto illegal_op;
5773 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5774 /* srs */
5775 uint32_t offset;
5776 if (IS_USER(s))
5777 goto illegal_op;
5778 ARCH(6);
5779 op1 = (insn & 0x1f);
5780 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5781 addr = load_reg(s, 13);
5782 } else {
5783 addr = new_tmp();
5784 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5786 i = (insn >> 23) & 3;
5787 switch (i) {
5788 case 0: offset = -4; break; /* DA */
5789 case 1: offset = -8; break; /* DB */
5790 case 2: offset = 0; break; /* IA */
5791 case 3: offset = 4; break; /* IB */
5792 default: abort();
5794 if (offset)
5795 tcg_gen_addi_i32(addr, addr, offset);
5796 tmp = load_reg(s, 14);
5797 gen_st32(tmp, addr, 0);
5798 tmp = new_tmp();
5799 gen_helper_cpsr_read(tmp);
5800 tcg_gen_addi_i32(addr, addr, 4);
5801 gen_st32(tmp, addr, 0);
5802 if (insn & (1 << 21)) {
5803 /* Base writeback. */
5804 switch (i) {
5805 case 0: offset = -8; break;
5806 case 1: offset = -4; break;
5807 case 2: offset = 4; break;
5808 case 3: offset = 0; break;
5809 default: abort();
5811 if (offset)
5812 tcg_gen_addi_i32(addr, addr, offset);
5813 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5814 store_reg(s, 13, addr);
5815 } else {
5816 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5818 } else {
5819 dead_tmp(addr);
5821 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5822 /* rfe */
5823 uint32_t offset;
5824 if (IS_USER(s))
5825 goto illegal_op;
5826 ARCH(6);
5827 rn = (insn >> 16) & 0xf;
5828 addr = load_reg(s, rn);
5829 i = (insn >> 23) & 3;
5830 switch (i) {
5831 case 0: offset = -4; break; /* DA */
5832 case 1: offset = -8; break; /* DB */
5833 case 2: offset = 0; break; /* IA */
5834 case 3: offset = 4; break; /* IB */
5835 default: abort();
5837 if (offset)
5838 tcg_gen_addi_i32(addr, addr, offset);
5839 /* Load PC into tmp and CPSR into tmp2. */
5840 tmp = gen_ld32(addr, 0);
5841 tcg_gen_addi_i32(addr, addr, 4);
5842 tmp2 = gen_ld32(addr, 0);
5843 if (insn & (1 << 21)) {
5844 /* Base writeback. */
5845 switch (i) {
5846 case 0: offset = -8; break;
5847 case 1: offset = -4; break;
5848 case 2: offset = 4; break;
5849 case 3: offset = 0; break;
5850 default: abort();
5852 if (offset)
5853 tcg_gen_addi_i32(addr, addr, offset);
5854 store_reg(s, rn, addr);
5855 } else {
5856 dead_tmp(addr);
5858 gen_rfe(s, tmp, tmp2);
5859 } else if ((insn & 0x0e000000) == 0x0a000000) {
5860 /* branch link and change to thumb (blx <offset>) */
5861 int32_t offset;
5863 val = (uint32_t)s->pc;
5864 tmp = new_tmp();
5865 tcg_gen_movi_i32(tmp, val);
5866 store_reg(s, 14, tmp);
5867 /* Sign-extend the 24-bit offset */
5868 offset = (((int32_t)insn) << 8) >> 8;
5869 /* offset * 4 + bit24 * 2 + (thumb bit) */
5870 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5871 /* pipeline offset */
5872 val += 4;
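/* E.g. a BLX at address A with a zero offset field and H == 0
   branches to A + 8 (the two pipeline words) in Thumb state,
   with the return address A + 4 already in r14. */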
5873 gen_bx_im(s, val);
5874 return;
5875 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5876 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5877 /* iWMMXt register transfer. */
5878 if (env->cp15.c15_cpar & (1 << 1))
5879 if (!disas_iwmmxt_insn(env, s, insn))
5880 return;
5882 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5883 /* Coprocessor double register transfer. */
5884 } else if ((insn & 0x0f000010) == 0x0e000010) {
5885 /* Additional coprocessor register transfer. */
5886 } else if ((insn & 0x0ff10020) == 0x01000000) {
5887 uint32_t mask;
5888 uint32_t val;
5889 /* cps (privileged) */
5890 if (IS_USER(s))
5891 return;
5892 mask = val = 0;
5893 if (insn & (1 << 19)) {
5894 if (insn & (1 << 8))
5895 mask |= CPSR_A;
5896 if (insn & (1 << 7))
5897 mask |= CPSR_I;
5898 if (insn & (1 << 6))
5899 mask |= CPSR_F;
5900 if (insn & (1 << 18))
5901 val |= mask;
5903 if (insn & (1 << 17)) {
5904 mask |= CPSR_M;
5905 val |= (insn & 0x1f);
5907 if (mask) {
5908 gen_op_movl_T0_im(val);
5909 gen_set_psr_T0(s, mask, 0);
5911 return;
5913 goto illegal_op;
5915 if (cond != 0xe) {
5916 /* if not always execute, we generate a conditional jump to
5917 next instruction */
5918 s->condlabel = gen_new_label();
5919 gen_test_cc(cond ^ 1, s->condlabel);
5920 s->condjmp = 1;
5922 if ((insn & 0x0f900000) == 0x03000000) {
5923 if ((insn & (1 << 21)) == 0) {
5924 ARCH(6T2);
5925 rd = (insn >> 12) & 0xf;
5926 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5927 if ((insn & (1 << 22)) == 0) {
5928 /* MOVW */
5929 tmp = new_tmp();
5930 tcg_gen_movi_i32(tmp, val);
5931 } else {
5932 /* MOVT */
5933 tmp = load_reg(s, rd);
5934 tcg_gen_ext16u_i32(tmp, tmp);
5935 tcg_gen_ori_i32(tmp, tmp, val << 16);
5937 store_reg(s, rd, tmp);
5938 } else {
5939 if (((insn >> 12) & 0xf) != 0xf)
5940 goto illegal_op;
5941 if (((insn >> 16) & 0xf) == 0) {
5942 gen_nop_hint(s, insn & 0xff);
5943 } else {
5944 /* CPSR = immediate */
5945 val = insn & 0xff;
5946 shift = ((insn >> 8) & 0xf) * 2;
5947 if (shift)
5948 val = (val >> shift) | (val << (32 - shift));
5949 gen_op_movl_T0_im(val);
5950 i = ((insn & (1 << 22)) != 0);
5951 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5952 goto illegal_op;
5955 } else if ((insn & 0x0f900000) == 0x01000000
5956 && (insn & 0x00000090) != 0x00000090) {
5957 /* miscellaneous instructions */
5958 op1 = (insn >> 21) & 3;
5959 sh = (insn >> 4) & 0xf;
5960 rm = insn & 0xf;
5961 switch (sh) {
5962 case 0x0: /* move program status register */
5963 if (op1 & 1) {
5964 /* PSR = reg */
5965 gen_movl_T0_reg(s, rm);
5966 i = ((op1 & 2) != 0);
5967 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5968 goto illegal_op;
5969 } else {
5970 /* reg = PSR */
5971 rd = (insn >> 12) & 0xf;
5972 if (op1 & 2) {
5973 if (IS_USER(s))
5974 goto illegal_op;
5975 tmp = load_cpu_field(spsr);
5976 } else {
5977 tmp = new_tmp();
5978 gen_helper_cpsr_read(tmp);
5980 store_reg(s, rd, tmp);
5982 break;
5983 case 0x1:
5984 if (op1 == 1) {
5985 /* branch/exchange thumb (bx). */
5986 tmp = load_reg(s, rm);
5987 gen_bx(s, tmp);
5988 } else if (op1 == 3) {
5989 /* clz */
5990 rd = (insn >> 12) & 0xf;
5991 tmp = load_reg(s, rm);
5992 gen_helper_clz(tmp, tmp);
5993 store_reg(s, rd, tmp);
5994 } else {
5995 goto illegal_op;
5997 break;
5998 case 0x2:
5999 if (op1 == 1) {
6000 ARCH(5J); /* bxj */
6001 /* Trivial implementation equivalent to bx. */
6002 tmp = load_reg(s, rm);
6003 gen_bx(s, tmp);
6004 } else {
6005 goto illegal_op;
6007 break;
6008 case 0x3:
6009 if (op1 != 1)
6010 goto illegal_op;
6012 /* branch link/exchange thumb (blx) */
6013 tmp = load_reg(s, rm);
6014 tmp2 = new_tmp();
6015 tcg_gen_movi_i32(tmp2, s->pc);
6016 store_reg(s, 14, tmp2);
6017 gen_bx(s, tmp);
6018 break;
6019 case 0x5: /* saturating add/subtract */
6020 rd = (insn >> 12) & 0xf;
6021 rn = (insn >> 16) & 0xf;
6022 tmp = load_reg(s, rm);
6023 tmp2 = load_reg(s, rn);
6024 if (op1 & 2)
6025 gen_helper_double_saturate(tmp2, tmp2);
6026 if (op1 & 1)
6027 gen_helper_sub_saturate(tmp, tmp, tmp2);
6028 else
6029 gen_helper_add_saturate(tmp, tmp, tmp2);
6030 dead_tmp(tmp2);
6031 store_reg(s, rd, tmp);
6032 break;
6033 case 7: /* bkpt */
6034 gen_set_condexec(s);
6035 gen_set_pc_im(s->pc - 4);
6036 gen_exception(EXCP_BKPT);
6037 s->is_jmp = DISAS_JUMP;
6038 break;
6039 case 0x8: /* signed multiply */
6040 case 0xa:
6041 case 0xc:
6042 case 0xe:
6043 rs = (insn >> 8) & 0xf;
6044 rn = (insn >> 12) & 0xf;
6045 rd = (insn >> 16) & 0xf;
6046 if (op1 == 1) {
6047 /* (32 * 16) >> 16 */
6048 tmp = load_reg(s, rm);
6049 tmp2 = load_reg(s, rs);
6050 if (sh & 4)
6051 tcg_gen_sari_i32(tmp2, tmp2, 16);
6052 else
6053 gen_sxth(tmp2);
6054 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6055 tcg_gen_shri_i64(tmp64, tmp64, 16);
6056 tmp = new_tmp();
6057 tcg_gen_trunc_i64_i32(tmp, tmp64);
6058 if ((sh & 2) == 0) {
6059 tmp2 = load_reg(s, rn);
6060 gen_helper_add_setq(tmp, tmp, tmp2);
6061 dead_tmp(tmp2);
6063 store_reg(s, rd, tmp);
6064 } else {
6065 /* 16 * 16 */
6066 tmp = load_reg(s, rm);
6067 tmp2 = load_reg(s, rs);
6068 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6069 dead_tmp(tmp2);
6070 if (op1 == 2) {
6071 tmp64 = tcg_temp_new_i64();
6072 tcg_gen_ext_i32_i64(tmp64, tmp);
6073 dead_tmp(tmp);
6074 gen_addq(s, tmp64, rn, rd);
6075 gen_storeq_reg(s, rn, rd, tmp64);
6076 } else {
6077 if (op1 == 0) {
6078 tmp2 = load_reg(s, rn);
6079 gen_helper_add_setq(tmp, tmp, tmp2);
6080 dead_tmp(tmp2);
6082 store_reg(s, rd, tmp);
6085 break;
6086 default:
6087 goto illegal_op;
6089 } else if (((insn & 0x0e000000) == 0 &&
6090 (insn & 0x00000090) != 0x90) ||
6091 ((insn & 0x0e000000) == (1 << 25))) {
6092 int set_cc, logic_cc, shiftop;
6094 op1 = (insn >> 21) & 0xf;
6095 set_cc = (insn >> 20) & 1;
6096 logic_cc = table_logic_cc[op1] & set_cc;
6098 /* data processing instruction */
6099 if (insn & (1 << 25)) {
6100 /* immediate operand */
6101 val = insn & 0xff;
6102 shift = ((insn >> 8) & 0xf) * 2;
6103 if (shift) {
6104 val = (val >> shift) | (val << (32 - shift));
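/* Standard ARM immediate: imm8 rotated right by twice the 4-bit
   rotate field, e.g. 0xff with rotate field 4 gives 0xff000000. */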
6106 tmp2 = new_tmp();
6107 tcg_gen_movi_i32(tmp2, val);
6108 if (logic_cc && shift) {
6109 gen_set_CF_bit31(tmp2);
6111 } else {
6112 /* register */
6113 rm = (insn) & 0xf;
6114 tmp2 = load_reg(s, rm);
6115 shiftop = (insn >> 5) & 3;
6116 if (!(insn & (1 << 4))) {
6117 shift = (insn >> 7) & 0x1f;
6118 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6119 } else {
6120 rs = (insn >> 8) & 0xf;
6121 tmp = load_reg(s, rs);
6122 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6125 if (op1 != 0x0f && op1 != 0x0d) {
6126 rn = (insn >> 16) & 0xf;
6127 tmp = load_reg(s, rn);
6128 } else {
6129 TCGV_UNUSED(tmp);
6131 rd = (insn >> 12) & 0xf;
6132 switch(op1) {
6133 case 0x00:
6134 tcg_gen_and_i32(tmp, tmp, tmp2);
6135 if (logic_cc) {
6136 gen_logic_CC(tmp);
6138 store_reg_bx(env, s, rd, tmp);
6139 break;
6140 case 0x01:
6141 tcg_gen_xor_i32(tmp, tmp, tmp2);
6142 if (logic_cc) {
6143 gen_logic_CC(tmp);
6145 store_reg_bx(env, s, rd, tmp);
6146 break;
6147 case 0x02:
6148 if (set_cc && rd == 15) {
6149 /* SUBS r15, ... is used for exception return. */
6150 if (IS_USER(s)) {
6151 goto illegal_op;
6153 gen_helper_sub_cc(tmp, tmp, tmp2);
6154 gen_exception_return(s, tmp);
6155 } else {
6156 if (set_cc) {
6157 gen_helper_sub_cc(tmp, tmp, tmp2);
6158 } else {
6159 tcg_gen_sub_i32(tmp, tmp, tmp2);
6161 store_reg_bx(env, s, rd, tmp);
6163 break;
6164 case 0x03:
6165 if (set_cc) {
6166 gen_helper_sub_cc(tmp, tmp2, tmp);
6167 } else {
6168 tcg_gen_sub_i32(tmp, tmp2, tmp);
6170 store_reg_bx(env, s, rd, tmp);
6171 break;
6172 case 0x04:
6173 if (set_cc) {
6174 gen_helper_add_cc(tmp, tmp, tmp2);
6175 } else {
6176 tcg_gen_add_i32(tmp, tmp, tmp2);
6178 store_reg_bx(env, s, rd, tmp);
6179 break;
6180 case 0x05:
6181 if (set_cc) {
6182 gen_helper_adc_cc(tmp, tmp, tmp2);
6183 } else {
6184 gen_add_carry(tmp, tmp, tmp2);
6186 store_reg_bx(env, s, rd, tmp);
6187 break;
6188 case 0x06:
6189 if (set_cc) {
6190 gen_helper_sbc_cc(tmp, tmp, tmp2);
6191 } else {
6192 gen_sub_carry(tmp, tmp, tmp2);
6194 store_reg_bx(env, s, rd, tmp);
6195 break;
6196 case 0x07:
6197 if (set_cc) {
6198 gen_helper_sbc_cc(tmp, tmp2, tmp);
6199 } else {
6200 gen_sub_carry(tmp, tmp2, tmp);
6202 store_reg_bx(env, s, rd, tmp);
6203 break;
6204 case 0x08:
6205 if (set_cc) {
6206 tcg_gen_and_i32(tmp, tmp, tmp2);
6207 gen_logic_CC(tmp);
6209 dead_tmp(tmp);
6210 break;
6211 case 0x09:
6212 if (set_cc) {
6213 tcg_gen_xor_i32(tmp, tmp, tmp2);
6214 gen_logic_CC(tmp);
6216 dead_tmp(tmp);
6217 break;
6218 case 0x0a:
6219 if (set_cc) {
6220 gen_helper_sub_cc(tmp, tmp, tmp2);
6222 dead_tmp(tmp);
6223 break;
6224 case 0x0b:
6225 if (set_cc) {
6226 gen_helper_add_cc(tmp, tmp, tmp2);
6228 dead_tmp(tmp);
6229 break;
6230 case 0x0c:
6231 tcg_gen_or_i32(tmp, tmp, tmp2);
6232 if (logic_cc) {
6233 gen_logic_CC(tmp);
6235 store_reg_bx(env, s, rd, tmp);
6236 break;
6237 case 0x0d:
6238 if (logic_cc && rd == 15) {
6239 /* MOVS r15, ... is used for exception return. */
6240 if (IS_USER(s)) {
6241 goto illegal_op;
6243 gen_exception_return(s, tmp2);
6244 } else {
6245 if (logic_cc) {
6246 gen_logic_CC(tmp2);
6248 store_reg_bx(env, s, rd, tmp2);
6250 break;
6251 case 0x0e:
6252 tcg_gen_bic_i32(tmp, tmp, tmp2);
6253 if (logic_cc) {
6254 gen_logic_CC(tmp);
6256 store_reg_bx(env, s, rd, tmp);
6257 break;
6258 default:
6259 case 0x0f:
6260 tcg_gen_not_i32(tmp2, tmp2);
6261 if (logic_cc) {
6262 gen_logic_CC(tmp2);
6264 store_reg_bx(env, s, rd, tmp2);
6265 break;
6267 if (op1 != 0x0f && op1 != 0x0d) {
6268 dead_tmp(tmp2);
6270 } else {
6271 /* other instructions */
6272 op1 = (insn >> 24) & 0xf;
6273 switch(op1) {
6274 case 0x0:
6275 case 0x1:
6276 /* multiplies, extra load/stores */
6277 sh = (insn >> 5) & 3;
6278 if (sh == 0) {
6279 if (op1 == 0x0) {
6280 rd = (insn >> 16) & 0xf;
6281 rn = (insn >> 12) & 0xf;
6282 rs = (insn >> 8) & 0xf;
6283 rm = (insn) & 0xf;
6284 op1 = (insn >> 20) & 0xf;
6285 switch (op1) {
6286 case 0: case 1: case 2: case 3: case 6:
6287 /* 32 bit mul */
6288 tmp = load_reg(s, rs);
6289 tmp2 = load_reg(s, rm);
6290 tcg_gen_mul_i32(tmp, tmp, tmp2);
6291 dead_tmp(tmp2);
6292 if (insn & (1 << 22)) {
6293 /* Subtract (mls) */
6294 ARCH(6T2);
6295 tmp2 = load_reg(s, rn);
6296 tcg_gen_sub_i32(tmp, tmp2, tmp);
6297 dead_tmp(tmp2);
6298 } else if (insn & (1 << 21)) {
6299 /* Add */
6300 tmp2 = load_reg(s, rn);
6301 tcg_gen_add_i32(tmp, tmp, tmp2);
6302 dead_tmp(tmp2);
6304 if (insn & (1 << 20))
6305 gen_logic_CC(tmp);
6306 store_reg(s, rd, tmp);
6307 break;
6308 default:
6309 /* 64 bit mul */
6310 tmp = load_reg(s, rs);
6311 tmp2 = load_reg(s, rm);
6312 if (insn & (1 << 22))
6313 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6314 else
6315 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6316 if (insn & (1 << 21)) /* mult accumulate */
6317 gen_addq(s, tmp64, rn, rd);
6318 if (!(insn & (1 << 23))) { /* double accumulate */
6319 ARCH(6);
6320 gen_addq_lo(s, tmp64, rn);
6321 gen_addq_lo(s, tmp64, rd);
6323 if (insn & (1 << 20))
6324 gen_logicq_cc(tmp64);
6325 gen_storeq_reg(s, rn, rd, tmp64);
6326 break;
6328 } else {
6329 rn = (insn >> 16) & 0xf;
6330 rd = (insn >> 12) & 0xf;
6331 if (insn & (1 << 23)) {
6332 /* load/store exclusive */
6333 op1 = (insn >> 21) & 0x3;
6334 if (op1)
6335 ARCH(6K);
6336 else
6337 ARCH(6);
6338 gen_movl_T1_reg(s, rn);
6339 addr = cpu_T[1];
6340 if (insn & (1 << 20)) {
6341 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6342 switch (op1) {
6343 case 0: /* ldrex */
6344 tmp = gen_ld32(addr, IS_USER(s));
6345 break;
6346 case 1: /* ldrexd */
6347 tmp = gen_ld32(addr, IS_USER(s));
6348 store_reg(s, rd, tmp);
6349 tcg_gen_addi_i32(addr, addr, 4);
6350 tmp = gen_ld32(addr, IS_USER(s));
6351 rd++;
6352 break;
6353 case 2: /* ldrexb */
6354 tmp = gen_ld8u(addr, IS_USER(s));
6355 break;
6356 case 3: /* ldrexh */
6357 tmp = gen_ld16u(addr, IS_USER(s));
6358 break;
6359 default:
6360 abort();
6362 store_reg(s, rd, tmp);
6363 } else {
6364 int label = gen_new_label();
6365 rm = insn & 0xf;
6366 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6367 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6368 0, label);
6369 tmp = load_reg(s,rm);
6370 switch (op1) {
6371 case 0: /* strex */
6372 gen_st32(tmp, addr, IS_USER(s));
6373 break;
6374 case 1: /* strexd */
6375 gen_st32(tmp, addr, IS_USER(s));
6376 tcg_gen_addi_i32(addr, addr, 4);
6377 tmp = load_reg(s, rm + 1);
6378 gen_st32(tmp, addr, IS_USER(s));
6379 break;
6380 case 2: /* strexb */
6381 gen_st8(tmp, addr, IS_USER(s));
6382 break;
6383 case 3: /* strexh */
6384 gen_st16(tmp, addr, IS_USER(s));
6385 break;
6386 default:
6387 abort();
6389 gen_set_label(label);
6390 gen_movl_reg_T0(s, rd);
6392 } else {
6393 /* SWP instruction */
6394 rm = (insn) & 0xf;
6396 /* ??? This is not really atomic. However we know
6397 we never have multiple CPUs running in parallel,
6398 so it is good enough. */
6399 addr = load_reg(s, rn);
6400 tmp = load_reg(s, rm);
6401 if (insn & (1 << 22)) {
6402 tmp2 = gen_ld8u(addr, IS_USER(s));
6403 gen_st8(tmp, addr, IS_USER(s));
6404 } else {
6405 tmp2 = gen_ld32(addr, IS_USER(s));
6406 gen_st32(tmp, addr, IS_USER(s));
6408 dead_tmp(addr);
6409 store_reg(s, rd, tmp2);
6412 } else {
6413 int address_offset;
6414 int load;
6415 /* Misc load/store */
6416 rn = (insn >> 16) & 0xf;
6417 rd = (insn >> 12) & 0xf;
6418 addr = load_reg(s, rn);
6419 if (insn & (1 << 24))
6420 gen_add_datah_offset(s, insn, 0, addr);
6421 address_offset = 0;
6422 if (insn & (1 << 20)) {
6423 /* load */
6424 switch(sh) {
6425 case 1:
6426 tmp = gen_ld16u(addr, IS_USER(s));
6427 break;
6428 case 2:
6429 tmp = gen_ld8s(addr, IS_USER(s));
6430 break;
6431 default:
6432 case 3:
6433 tmp = gen_ld16s(addr, IS_USER(s));
6434 break;
6436 load = 1;
6437 } else if (sh & 2) {
6438 /* doubleword */
6439 if (sh & 1) {
6440 /* store */
6441 tmp = load_reg(s, rd);
6442 gen_st32(tmp, addr, IS_USER(s));
6443 tcg_gen_addi_i32(addr, addr, 4);
6444 tmp = load_reg(s, rd + 1);
6445 gen_st32(tmp, addr, IS_USER(s));
6446 load = 0;
6447 } else {
6448 /* load */
6449 tmp = gen_ld32(addr, IS_USER(s));
6450 store_reg(s, rd, tmp);
6451 tcg_gen_addi_i32(addr, addr, 4);
6452 tmp = gen_ld32(addr, IS_USER(s));
6453 rd++;
6454 load = 1;
6456 address_offset = -4;
6457 } else {
6458 /* store */
6459 tmp = load_reg(s, rd);
6460 gen_st16(tmp, addr, IS_USER(s));
6461 load = 0;
6463 /* Perform base writeback before the loaded value to
6464 ensure correct behavior with overlapping index registers.
6465 ldrd with base writeback is undefined if the
6466 destination and index registers overlap. */
6467 if (!(insn & (1 << 24))) {
6468 gen_add_datah_offset(s, insn, address_offset, addr);
6469 store_reg(s, rn, addr);
6470 } else if (insn & (1 << 21)) {
6471 if (address_offset)
6472 tcg_gen_addi_i32(addr, addr, address_offset);
6473 store_reg(s, rn, addr);
6474 } else {
6475 dead_tmp(addr);
6477 if (load) {
6478 /* Complete the load. */
6479 store_reg(s, rd, tmp);
6482 break;
6483 case 0x4:
6484 case 0x5:
6485 goto do_ldst;
6486 case 0x6:
6487 case 0x7:
6488 if (insn & (1 << 4)) {
6489 ARCH(6);
6490 /* Armv6 Media instructions. */
6491 rm = insn & 0xf;
6492 rn = (insn >> 16) & 0xf;
6493 rd = (insn >> 12) & 0xf;
6494 rs = (insn >> 8) & 0xf;
6495 switch ((insn >> 23) & 3) {
6496 case 0: /* Parallel add/subtract. */
6497 op1 = (insn >> 20) & 7;
6498 tmp = load_reg(s, rn);
6499 tmp2 = load_reg(s, rm);
6500 sh = (insn >> 5) & 7;
6501 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6502 goto illegal_op;
6503 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6504 dead_tmp(tmp2);
6505 store_reg(s, rd, tmp);
6506 break;
6507 case 1:
6508 if ((insn & 0x00700020) == 0) {
6509 /* Halfword pack. */
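/* PKHBT: Rd = (Rm LSL shift)[31:16] : Rn[15:0];
   PKHTB: Rd = Rn[31:16] : (Rm ASR shift)[15:0]. */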
6510 tmp = load_reg(s, rn);
6511 tmp2 = load_reg(s, rm);
6512 shift = (insn >> 7) & 0x1f;
6513 if (insn & (1 << 6)) {
6514 /* pkhtb */
6515 if (shift == 0)
6516 shift = 31;
6517 tcg_gen_sari_i32(tmp2, tmp2, shift);
6518 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6519 tcg_gen_ext16u_i32(tmp2, tmp2);
6520 } else {
6521 /* pkhbt */
6522 if (shift)
6523 tcg_gen_shli_i32(tmp2, tmp2, shift);
6524 tcg_gen_ext16u_i32(tmp, tmp);
6525 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6527 tcg_gen_or_i32(tmp, tmp, tmp2);
6528 dead_tmp(tmp2);
6529 store_reg(s, rd, tmp);
6530 } else if ((insn & 0x00200020) == 0x00200000) {
6531 /* [us]sat */
6532 tmp = load_reg(s, rm);
6533 shift = (insn >> 7) & 0x1f;
6534 if (insn & (1 << 6)) {
6535 if (shift == 0)
6536 shift = 31;
6537 tcg_gen_sari_i32(tmp, tmp, shift);
6538 } else {
6539 tcg_gen_shli_i32(tmp, tmp, shift);
6541 sh = (insn >> 16) & 0x1f;
6542 if (sh != 0) {
6543 if (insn & (1 << 22))
6544 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6545 else
6546 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6548 store_reg(s, rd, tmp);
6549 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6550 /* [us]sat16 */
6551 tmp = load_reg(s, rm);
6552 sh = (insn >> 16) & 0x1f;
6553 if (sh != 0) {
6554 if (insn & (1 << 22))
6555 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6556 else
6557 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6559 store_reg(s, rd, tmp);
6560 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6561 /* Select bytes. */
6562 tmp = load_reg(s, rn);
6563 tmp2 = load_reg(s, rm);
6564 tmp3 = new_tmp();
6565 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6566 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6567 dead_tmp(tmp3);
6568 dead_tmp(tmp2);
6569 store_reg(s, rd, tmp);
6570 } else if ((insn & 0x000003e0) == 0x00000060) {
6571 tmp = load_reg(s, rm);
6572 shift = (insn >> 10) & 3;
6573 /* ??? In many cases it's not necessary to do a
6574 rotate; a shift is sufficient. */
6575 if (shift != 0)
6576 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6577 op1 = (insn >> 20) & 7;
6578 switch (op1) {
6579 case 0: gen_sxtb16(tmp); break;
6580 case 2: gen_sxtb(tmp); break;
6581 case 3: gen_sxth(tmp); break;
6582 case 4: gen_uxtb16(tmp); break;
6583 case 6: gen_uxtb(tmp); break;
6584 case 7: gen_uxth(tmp); break;
6585 default: goto illegal_op;
6587 if (rn != 15) {
6588 tmp2 = load_reg(s, rn);
6589 if ((op1 & 3) == 0) {
6590 gen_add16(tmp, tmp2);
6591 } else {
6592 tcg_gen_add_i32(tmp, tmp, tmp2);
6593 dead_tmp(tmp2);
6596 store_reg(s, rd, tmp);
6597 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6598 /* rev */
6599 tmp = load_reg(s, rm);
6600 if (insn & (1 << 22)) {
6601 if (insn & (1 << 7)) {
6602 gen_revsh(tmp);
6603 } else {
6604 ARCH(6T2);
6605 gen_helper_rbit(tmp, tmp);
6607 } else {
6608 if (insn & (1 << 7))
6609 gen_rev16(tmp);
6610 else
6611 tcg_gen_bswap32_i32(tmp, tmp);
6613 store_reg(s, rd, tmp);
6614 } else {
6615 goto illegal_op;
6617 break;
6618 case 2: /* Multiplies (Type 3). */
6619 tmp = load_reg(s, rm);
6620 tmp2 = load_reg(s, rs);
6621 if (insn & (1 << 20)) {
6622 /* Signed multiply most significant [accumulate]. */
6623 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6624 if (insn & (1 << 5))
6625 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6626 tcg_gen_shri_i64(tmp64, tmp64, 32);
6627 tmp = new_tmp();
6628 tcg_gen_trunc_i64_i32(tmp, tmp64);
6629 if (rd != 15) {
6630 tmp2 = load_reg(s, rd);
6631 if (insn & (1 << 6)) {
6632 tcg_gen_sub_i32(tmp, tmp, tmp2);
6633 } else {
6634 tcg_gen_add_i32(tmp, tmp, tmp2);
6636 dead_tmp(tmp2);
6638 store_reg(s, rn, tmp);
6639 } else {
6640 if (insn & (1 << 5))
6641 gen_swap_half(tmp2);
6642 gen_smul_dual(tmp, tmp2);
6643 /* This addition cannot overflow. */
6644 if (insn & (1 << 6)) {
6645 tcg_gen_sub_i32(tmp, tmp, tmp2);
6646 } else {
6647 tcg_gen_add_i32(tmp, tmp, tmp2);
6649 dead_tmp(tmp2);
6650 if (insn & (1 << 22)) {
6651 /* smlald, smlsld */
6652 tmp64 = tcg_temp_new_i64();
6653 tcg_gen_ext_i32_i64(tmp64, tmp);
6654 dead_tmp(tmp);
6655 gen_addq(s, tmp64, rd, rn);
6656 gen_storeq_reg(s, rd, rn, tmp64);
6657 } else {
6658 /* smuad, smusd, smlad, smlsd */
6659 if (rd != 15)
6661 tmp2 = load_reg(s, rd);
6662 gen_helper_add_setq(tmp, tmp, tmp2);
6663 dead_tmp(tmp2);
6665 store_reg(s, rn, tmp);
6668 break;
6669 case 3:
6670 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6671 switch (op1) {
6672 case 0: /* Unsigned sum of absolute differences. */
6673 ARCH(6);
6674 tmp = load_reg(s, rm);
6675 tmp2 = load_reg(s, rs);
6676 gen_helper_usad8(tmp, tmp, tmp2);
6677 dead_tmp(tmp2);
6678 if (rd != 15) {
6679 tmp2 = load_reg(s, rd);
6680 tcg_gen_add_i32(tmp, tmp, tmp2);
6681 dead_tmp(tmp2);
6683 store_reg(s, rn, tmp);
6684 break;
6685 case 0x20: case 0x24: case 0x28: case 0x2c:
6686 /* Bitfield insert/clear. */
6687 ARCH(6T2);
6688 shift = (insn >> 7) & 0x1f;
6689 i = (insn >> 16) & 0x1f;
6690 i = i + 1 - shift;
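/* insn[20:16] holds the field MSB and shift the LSB, so i is now
   the field width. rm == 15 encodes bfc, which inserts zeroes;
   a full 32-bit field is a plain copy and needs no masking. */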
6691 if (rm == 15) {
6692 tmp = new_tmp();
6693 tcg_gen_movi_i32(tmp, 0);
6694 } else {
6695 tmp = load_reg(s, rm);
6697 if (i != 32) {
6698 tmp2 = load_reg(s, rd);
6699 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6700 dead_tmp(tmp2);
6702 store_reg(s, rd, tmp);
6703 break;
6704 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6705 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6706 ARCH(6T2);
6707 tmp = load_reg(s, rm);
6708 shift = (insn >> 7) & 0x1f;
6709 i = ((insn >> 16) & 0x1f) + 1;
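/* For sbfx/ubfx, insn[20:16] encodes width - 1 and insn[11:7] the
   LSB; a field running past bit 31 is treated as illegal here. */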
6710 if (shift + i > 32)
6711 goto illegal_op;
6712 if (i < 32) {
6713 if (op1 & 0x20) {
6714 gen_ubfx(tmp, shift, (1u << i) - 1);
6715 } else {
6716 gen_sbfx(tmp, shift, i);
6719 store_reg(s, rd, tmp);
6720 break;
6721 default:
6722 goto illegal_op;
6724 break;
6726 break;
6728 do_ldst:
6729 /* Check for undefined extension instructions
6730 * per the ARM Bible, i.e.:
6731 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6732 */
6733 sh = (0xf << 20) | (0xf << 4);
6734 if (op1 == 0x7 && ((insn & sh) == sh))
6736 goto illegal_op;
6738 /* load/store byte/word */
6739 rn = (insn >> 16) & 0xf;
6740 rd = (insn >> 12) & 0xf;
6741 tmp2 = load_reg(s, rn);
6742 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
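/* Post-indexed with the W bit set selects the ldrt/strt variants,
   which are always treated as user-mode accesses. */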
6743 if (insn & (1 << 24))
6744 gen_add_data_offset(s, insn, tmp2);
6745 if (insn & (1 << 20)) {
6746 /* load */
6747 if (insn & (1 << 22)) {
6748 tmp = gen_ld8u(tmp2, i);
6749 } else {
6750 tmp = gen_ld32(tmp2, i);
6752 } else {
6753 /* store */
6754 tmp = load_reg(s, rd);
6755 if (insn & (1 << 22))
6756 gen_st8(tmp, tmp2, i);
6757 else
6758 gen_st32(tmp, tmp2, i);
6760 if (!(insn & (1 << 24))) {
6761 gen_add_data_offset(s, insn, tmp2);
6762 store_reg(s, rn, tmp2);
6763 } else if (insn & (1 << 21)) {
6764 store_reg(s, rn, tmp2);
6765 } else {
6766 dead_tmp(tmp2);
6768 if (insn & (1 << 20)) {
6769 /* Complete the load. */
6770 if (rd == 15)
6771 gen_bx(s, tmp);
6772 else
6773 store_reg(s, rd, tmp);
6775 break;
6776 case 0x08:
6777 case 0x09:
6779 int j, n, user, loaded_base;
6780 TCGv loaded_var;
6781 /* load/store multiple words */
6782 /* XXX: store correct base if write back */
6783 user = 0;
6784 if (insn & (1 << 22)) {
6785 if (IS_USER(s))
6786 goto illegal_op; /* only usable in supervisor mode */
6788 if ((insn & (1 << 15)) == 0)
6789 user = 1;
6791 rn = (insn >> 16) & 0xf;
6792 addr = load_reg(s, rn);
6794 /* compute total size */
6795 loaded_base = 0;
6796 TCGV_UNUSED(loaded_var);
6797 n = 0;
6798 for(i=0;i<16;i++) {
6799 if (insn & (1 << i))
6800 n++;
6802 /* XXX: test invalid n == 0 case ? */
6803 if (insn & (1 << 23)) {
6804 if (insn & (1 << 24)) {
6805 /* pre increment */
6806 tcg_gen_addi_i32(addr, addr, 4);
6807 } else {
6808 /* post increment */
6810 } else {
6811 if (insn & (1 << 24)) {
6812 /* pre decrement */
6813 tcg_gen_addi_i32(addr, addr, -(n * 4));
6814 } else {
6815 /* post decrement */
6816 if (n != 1)
6817 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
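/* After these adjustments addr points at the lowest word to be
   transferred, so the loop below always walks upwards in steps of
   four regardless of the IA/IB/DA/DB addressing mode. */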
6820 j = 0;
6821 for(i=0;i<16;i++) {
6822 if (insn & (1 << i)) {
6823 if (insn & (1 << 20)) {
6824 /* load */
6825 tmp = gen_ld32(addr, IS_USER(s));
6826 if (i == 15) {
6827 gen_bx(s, tmp);
6828 } else if (user) {
6829 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6830 dead_tmp(tmp);
6831 } else if (i == rn) {
6832 loaded_var = tmp;
6833 loaded_base = 1;
6834 } else {
6835 store_reg(s, i, tmp);
6837 } else {
6838 /* store */
6839 if (i == 15) {
6840 /* special case: r15 = PC + 8 */
6841 val = (long)s->pc + 4;
6842 tmp = new_tmp();
6843 tcg_gen_movi_i32(tmp, val);
6844 } else if (user) {
6845 tmp = new_tmp();
6846 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6847 } else {
6848 tmp = load_reg(s, i);
6850 gen_st32(tmp, addr, IS_USER(s));
6852 j++;
6853 /* no need to add after the last transfer */
6854 if (j != n)
6855 tcg_gen_addi_i32(addr, addr, 4);
6858 if (insn & (1 << 21)) {
6859 /* write back */
6860 if (insn & (1 << 23)) {
6861 if (insn & (1 << 24)) {
6862 /* pre increment */
6863 } else {
6864 /* post increment */
6865 tcg_gen_addi_i32(addr, addr, 4);
6867 } else {
6868 if (insn & (1 << 24)) {
6869 /* pre decrement */
6870 if (n != 1)
6871 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6872 } else {
6873 /* post decrement */
6874 tcg_gen_addi_i32(addr, addr, -(n * 4));
6877 store_reg(s, rn, addr);
6878 } else {
6879 dead_tmp(addr);
6881 if (loaded_base) {
6882 store_reg(s, rn, loaded_var);
6884 if ((insn & (1 << 22)) && !user) {
6885 /* Restore CPSR from SPSR. */
6886 tmp = load_cpu_field(spsr);
6887 gen_set_cpsr(tmp, 0xffffffff);
6888 dead_tmp(tmp);
6889 s->is_jmp = DISAS_UPDATE;
6892 break;
6893 case 0xa:
6894 case 0xb:
6896 int32_t offset;
6898 /* branch (and link) */
6899 val = (int32_t)s->pc;
6900 if (insn & (1 << 24)) {
6901 tmp = new_tmp();
6902 tcg_gen_movi_i32(tmp, val);
6903 store_reg(s, 14, tmp);
6905 offset = (((int32_t)insn << 8) >> 8);
6906 val += (offset << 2) + 4;
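/* The 24-bit immediate is sign extended and scaled by four; the
   extra 4 supplies the rest of the ARM "PC + 8" bias, as s->pc has
   already advanced past this instruction. */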
6907 gen_jmp(s, val);
6909 break;
6910 case 0xc:
6911 case 0xd:
6912 case 0xe:
6913 /* Coprocessor. */
6914 if (disas_coproc_insn(env, s, insn))
6915 goto illegal_op;
6916 break;
6917 case 0xf:
6918 /* swi */
6919 gen_set_pc_im(s->pc);
6920 s->is_jmp = DISAS_SWI;
6921 break;
6922 default:
6923 illegal_op:
6924 gen_set_condexec(s);
6925 gen_set_pc_im(s->pc - 4);
6926 gen_exception(EXCP_UDEF);
6927 s->is_jmp = DISAS_JUMP;
6928 break;
6933 /* Return true if this is a Thumb-2 logical op. */
6934 static int
6935 thumb2_logic_op(int op)
6936 {
6937 return (op < 8);
6938 }
6940 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6941 then set condition code flags based on the result of the operation.
6942 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6943 to the high bit of T1.
6944 Returns zero if the opcode is valid. */
6946 static int
6947 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6948 {
6949 int logic_cc;
6951 logic_cc = 0;
6952 switch (op) {
6953 case 0: /* and */
6954 gen_op_andl_T0_T1();
6955 logic_cc = conds;
6956 break;
6957 case 1: /* bic */
6958 gen_op_bicl_T0_T1();
6959 logic_cc = conds;
6960 break;
6961 case 2: /* orr */
6962 gen_op_orl_T0_T1();
6963 logic_cc = conds;
6964 break;
6965 case 3: /* orn */
6966 gen_op_notl_T1();
6967 gen_op_orl_T0_T1();
6968 logic_cc = conds;
6969 break;
6970 case 4: /* eor */
6971 gen_op_xorl_T0_T1();
6972 logic_cc = conds;
6973 break;
6974 case 8: /* add */
6975 if (conds)
6976 gen_op_addl_T0_T1_cc();
6977 else
6978 gen_op_addl_T0_T1();
6979 break;
6980 case 10: /* adc */
6981 if (conds)
6982 gen_op_adcl_T0_T1_cc();
6983 else
6984 gen_adc_T0_T1();
6985 break;
6986 case 11: /* sbc */
6987 if (conds)
6988 gen_op_sbcl_T0_T1_cc();
6989 else
6990 gen_sbc_T0_T1();
6991 break;
6992 case 13: /* sub */
6993 if (conds)
6994 gen_op_subl_T0_T1_cc();
6995 else
6996 gen_op_subl_T0_T1();
6997 break;
6998 case 14: /* rsb */
6999 if (conds)
7000 gen_op_rsbl_T0_T1_cc();
7001 else
7002 gen_op_rsbl_T0_T1();
7003 break;
7004 default: /* 5, 6, 7, 9, 12, 15. */
7005 return 1;
7007 if (logic_cc) {
7008 gen_op_logic_T0_cc();
7009 if (shifter_out)
7010 gen_set_CF_bit31(cpu_T[1]);
7012 return 0;
7013 }
7015 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7016 is not legal. */
7017 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7018 {
7019 uint32_t insn, imm, shift, offset;
7020 uint32_t rd, rn, rm, rs;
7021 TCGv tmp;
7022 TCGv tmp2;
7023 TCGv tmp3;
7024 TCGv addr;
7025 TCGv_i64 tmp64;
7026 int op;
7027 int shiftop;
7028 int conds;
7029 int logic_cc;
7031 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7032 || arm_feature (env, ARM_FEATURE_M))) {
7033 /* Thumb-1 cores may need to treat bl and blx as a pair of
7034 16-bit instructions to get correct prefetch abort behavior. */
7035 insn = insn_hw1;
7036 if ((insn & (1 << 12)) == 0) {
7037 /* Second half of blx. */
7038 offset = ((insn & 0x7ff) << 1);
7039 tmp = load_reg(s, 14);
7040 tcg_gen_addi_i32(tmp, tmp, offset);
7041 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
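/* blx enters ARM state, so force the branch target to be word
   aligned. */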
7043 tmp2 = new_tmp();
7044 tcg_gen_movi_i32(tmp2, s->pc | 1);
7045 store_reg(s, 14, tmp2);
7046 gen_bx(s, tmp);
7047 return 0;
7049 if (insn & (1 << 11)) {
7050 /* Second half of bl. */
7051 offset = ((insn & 0x7ff) << 1) | 1;
7052 tmp = load_reg(s, 14);
7053 tcg_gen_addi_i32(tmp, tmp, offset);
7055 tmp2 = new_tmp();
7056 tcg_gen_movi_i32(tmp2, s->pc | 1);
7057 store_reg(s, 14, tmp2);
7058 gen_bx(s, tmp);
7059 return 0;
7061 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7062 /* Instruction spans a page boundary. Implement it as two
7063 16-bit instructions in case the second half causes a
7064 prefetch abort. */
7065 offset = ((int32_t)insn << 21) >> 9;
7066 gen_op_movl_T0_im(s->pc + 2 + offset);
7067 gen_movl_reg_T0(s, 14);
7068 return 0;
7070 /* Fall through to 32-bit decode. */
7073 insn = lduw_code(s->pc);
7074 s->pc += 2;
7075 insn |= (uint32_t)insn_hw1 << 16;
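/* Combine the two halfwords; the halfword fetched first forms the
   most significant 16 bits of the 32-bit encoding. */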
7077 if ((insn & 0xf800e800) != 0xf000e800) {
7078 ARCH(6T2);
7081 rn = (insn >> 16) & 0xf;
7082 rs = (insn >> 12) & 0xf;
7083 rd = (insn >> 8) & 0xf;
7084 rm = insn & 0xf;
7085 switch ((insn >> 25) & 0xf) {
7086 case 0: case 1: case 2: case 3:
7087 /* 16-bit instructions. Should never happen. */
7088 abort();
7089 case 4:
7090 if (insn & (1 << 22)) {
7091 /* Other load/store, table branch. */
7092 if (insn & 0x01200000) {
7093 /* Load/store doubleword. */
7094 if (rn == 15) {
7095 addr = new_tmp();
7096 tcg_gen_movi_i32(addr, s->pc & ~3);
7097 } else {
7098 addr = load_reg(s, rn);
7100 offset = (insn & 0xff) * 4;
7101 if ((insn & (1 << 23)) == 0)
7102 offset = -offset;
7103 if (insn & (1 << 24)) {
7104 tcg_gen_addi_i32(addr, addr, offset);
7105 offset = 0;
7107 if (insn & (1 << 20)) {
7108 /* ldrd */
7109 tmp = gen_ld32(addr, IS_USER(s));
7110 store_reg(s, rs, tmp);
7111 tcg_gen_addi_i32(addr, addr, 4);
7112 tmp = gen_ld32(addr, IS_USER(s));
7113 store_reg(s, rd, tmp);
7114 } else {
7115 /* strd */
7116 tmp = load_reg(s, rs);
7117 gen_st32(tmp, addr, IS_USER(s));
7118 tcg_gen_addi_i32(addr, addr, 4);
7119 tmp = load_reg(s, rd);
7120 gen_st32(tmp, addr, IS_USER(s));
7122 if (insn & (1 << 21)) {
7123 /* Base writeback. */
7124 if (rn == 15)
7125 goto illegal_op;
7126 tcg_gen_addi_i32(addr, addr, offset - 4);
7127 store_reg(s, rn, addr);
7128 } else {
7129 dead_tmp(addr);
7131 } else if ((insn & (1 << 23)) == 0) {
7132 /* Load/store exclusive word. */
7133 gen_movl_T1_reg(s, rn);
7134 addr = cpu_T[1];
7135 if (insn & (1 << 20)) {
7136 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7137 tmp = gen_ld32(addr, IS_USER(s));
7138 store_reg(s, rd, tmp);
7139 } else {
7140 int label = gen_new_label();
7141 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7142 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7143 0, label);
7144 tmp = load_reg(s, rs);
7145 gen_st32(tmp, cpu_T[1], IS_USER(s));
7146 gen_set_label(label);
7147 gen_movl_reg_T0(s, rd);
7149 } else if ((insn & (1 << 6)) == 0) {
7150 /* Table Branch. */
7151 if (rn == 15) {
7152 addr = new_tmp();
7153 tcg_gen_movi_i32(addr, s->pc);
7154 } else {
7155 addr = load_reg(s, rn);
7157 tmp = load_reg(s, rm);
7158 tcg_gen_add_i32(addr, addr, tmp);
7159 if (insn & (1 << 4)) {
7160 /* tbh */
7161 tcg_gen_add_i32(addr, addr, tmp);
7162 dead_tmp(tmp);
7163 tmp = gen_ld16u(addr, IS_USER(s));
7164 } else { /* tbb */
7165 dead_tmp(tmp);
7166 tmp = gen_ld8u(addr, IS_USER(s));
7168 dead_tmp(addr);
7169 tcg_gen_shli_i32(tmp, tmp, 1);
7170 tcg_gen_addi_i32(tmp, tmp, s->pc);
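/* For tbh the index register was added twice, scaling it by two.
   The table entry is a halfword count, so it is doubled and added
   to the Thumb PC (this instruction's address + 4). */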
7171 store_reg(s, 15, tmp);
7172 } else {
7173 /* Load/store exclusive byte/halfword/doubleword. */
7174 /* ??? These are not really atomic. However we know
7175 we never have multiple CPUs running in parallel,
7176 so it is good enough. */
7177 op = (insn >> 4) & 0x3;
7178 /* Must use a global reg for the address because we have
7179 a conditional branch in the store instruction. */
7180 gen_movl_T1_reg(s, rn);
7181 addr = cpu_T[1];
7182 if (insn & (1 << 20)) {
7183 gen_helper_mark_exclusive(cpu_env, addr);
7184 switch (op) {
7185 case 0:
7186 tmp = gen_ld8u(addr, IS_USER(s));
7187 break;
7188 case 1:
7189 tmp = gen_ld16u(addr, IS_USER(s));
7190 break;
7191 case 3:
7192 tmp = gen_ld32(addr, IS_USER(s));
7193 tcg_gen_addi_i32(addr, addr, 4);
7194 tmp2 = gen_ld32(addr, IS_USER(s));
7195 store_reg(s, rd, tmp2);
7196 break;
7197 default:
7198 goto illegal_op;
7200 store_reg(s, rs, tmp);
7201 } else {
7202 int label = gen_new_label();
7203 /* Must use a global that is not killed by the branch. */
7204 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7205 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7206 tmp = load_reg(s, rs);
7207 switch (op) {
7208 case 0:
7209 gen_st8(tmp, addr, IS_USER(s));
7210 break;
7211 case 1:
7212 gen_st16(tmp, addr, IS_USER(s));
7213 break;
7214 case 3:
7215 gen_st32(tmp, addr, IS_USER(s));
7216 tcg_gen_addi_i32(addr, addr, 4);
7217 tmp = load_reg(s, rd);
7218 gen_st32(tmp, addr, IS_USER(s));
7219 break;
7220 default:
7221 goto illegal_op;
7223 gen_set_label(label);
7224 gen_movl_reg_T0(s, rm);
7227 } else {
7228 /* Load/store multiple, RFE, SRS. */
7229 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7230 /* Not available in user mode. */
7231 if (IS_USER(s))
7232 goto illegal_op;
7233 if (insn & (1 << 20)) {
7234 /* rfe */
7235 addr = load_reg(s, rn);
7236 if ((insn & (1 << 24)) == 0)
7237 tcg_gen_addi_i32(addr, addr, -8);
7238 /* Load PC into tmp and CPSR into tmp2. */
7239 tmp = gen_ld32(addr, 0);
7240 tcg_gen_addi_i32(addr, addr, 4);
7241 tmp2 = gen_ld32(addr, 0);
7242 if (insn & (1 << 21)) {
7243 /* Base writeback. */
7244 if (insn & (1 << 24)) {
7245 tcg_gen_addi_i32(addr, addr, 4);
7246 } else {
7247 tcg_gen_addi_i32(addr, addr, -4);
7249 store_reg(s, rn, addr);
7250 } else {
7251 dead_tmp(addr);
7253 gen_rfe(s, tmp, tmp2);
7254 } else {
7255 /* srs */
7256 op = (insn & 0x1f);
7257 if (op == (env->uncached_cpsr & CPSR_M)) {
7258 addr = load_reg(s, 13);
7259 } else {
7260 addr = new_tmp();
7261 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7263 if ((insn & (1 << 24)) == 0) {
7264 tcg_gen_addi_i32(addr, addr, -8);
7266 tmp = load_reg(s, 14);
7267 gen_st32(tmp, addr, 0);
7268 tcg_gen_addi_i32(addr, addr, 4);
7269 tmp = new_tmp();
7270 gen_helper_cpsr_read(tmp);
7271 gen_st32(tmp, addr, 0);
7272 if (insn & (1 << 21)) {
7273 if ((insn & (1 << 24)) == 0) {
7274 tcg_gen_addi_i32(addr, addr, -4);
7275 } else {
7276 tcg_gen_addi_i32(addr, addr, 4);
7278 if (op == (env->uncached_cpsr & CPSR_M)) {
7279 store_reg(s, 13, addr);
7280 } else {
7281 gen_helper_set_r13_banked(cpu_env,
7282 tcg_const_i32(op), addr);
7284 } else {
7285 dead_tmp(addr);
7288 } else {
7289 int i;
7290 /* Load/store multiple. */
7291 addr = load_reg(s, rn);
7292 offset = 0;
7293 for (i = 0; i < 16; i++) {
7294 if (insn & (1 << i))
7295 offset += 4;
7297 if (insn & (1 << 24)) {
7298 tcg_gen_addi_i32(addr, addr, -offset);
7301 for (i = 0; i < 16; i++) {
7302 if ((insn & (1 << i)) == 0)
7303 continue;
7304 if (insn & (1 << 20)) {
7305 /* Load. */
7306 tmp = gen_ld32(addr, IS_USER(s));
7307 if (i == 15) {
7308 gen_bx(s, tmp);
7309 } else {
7310 store_reg(s, i, tmp);
7312 } else {
7313 /* Store. */
7314 tmp = load_reg(s, i);
7315 gen_st32(tmp, addr, IS_USER(s));
7317 tcg_gen_addi_i32(addr, addr, 4);
7319 if (insn & (1 << 21)) {
7320 /* Base register writeback. */
7321 if (insn & (1 << 24)) {
7322 tcg_gen_addi_i32(addr, addr, -offset);
7324 /* Fault if writeback register is in register list. */
7325 if (insn & (1 << rn))
7326 goto illegal_op;
7327 store_reg(s, rn, addr);
7328 } else {
7329 dead_tmp(addr);
7333 break;
7334 case 5: /* Data processing register constant shift. */
7335 if (rn == 15)
7336 gen_op_movl_T0_im(0);
7337 else
7338 gen_movl_T0_reg(s, rn);
7339 gen_movl_T1_reg(s, rm);
7340 op = (insn >> 21) & 0xf;
7341 shiftop = (insn >> 4) & 3;
7342 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7343 conds = (insn & (1 << 20)) != 0;
7344 logic_cc = (conds && thumb2_logic_op(op));
7345 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7346 if (gen_thumb2_data_op(s, op, conds, 0))
7347 goto illegal_op;
7348 if (rd != 15)
7349 gen_movl_reg_T0(s, rd);
7350 break;
7351 case 13: /* Misc data processing. */
7352 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7353 if (op < 4 && (insn & 0xf000) != 0xf000)
7354 goto illegal_op;
7355 switch (op) {
7356 case 0: /* Register controlled shift. */
7357 tmp = load_reg(s, rn);
7358 tmp2 = load_reg(s, rm);
7359 if ((insn & 0x70) != 0)
7360 goto illegal_op;
7361 op = (insn >> 21) & 3;
7362 logic_cc = (insn & (1 << 20)) != 0;
7363 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7364 if (logic_cc)
7365 gen_logic_CC(tmp);
7366 store_reg_bx(env, s, rd, tmp);
7367 break;
7368 case 1: /* Sign/zero extend. */
7369 tmp = load_reg(s, rm);
7370 shift = (insn >> 4) & 3;
7371 /* ??? In many cases it's not necessary to do a
7372 rotate, a shift is sufficient. */
7373 if (shift != 0)
7374 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7375 op = (insn >> 20) & 7;
7376 switch (op) {
7377 case 0: gen_sxth(tmp); break;
7378 case 1: gen_uxth(tmp); break;
7379 case 2: gen_sxtb16(tmp); break;
7380 case 3: gen_uxtb16(tmp); break;
7381 case 4: gen_sxtb(tmp); break;
7382 case 5: gen_uxtb(tmp); break;
7383 default: goto illegal_op;
7385 if (rn != 15) {
7386 tmp2 = load_reg(s, rn);
7387 if ((op >> 1) == 1) {
7388 gen_add16(tmp, tmp2);
7389 } else {
7390 tcg_gen_add_i32(tmp, tmp, tmp2);
7391 dead_tmp(tmp2);
7394 store_reg(s, rd, tmp);
7395 break;
7396 case 2: /* SIMD add/subtract. */
7397 op = (insn >> 20) & 7;
7398 shift = (insn >> 4) & 7;
7399 if ((op & 3) == 3 || (shift & 3) == 3)
7400 goto illegal_op;
7401 tmp = load_reg(s, rn);
7402 tmp2 = load_reg(s, rm);
7403 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7404 dead_tmp(tmp2);
7405 store_reg(s, rd, tmp);
7406 break;
7407 case 3: /* Other data processing. */
7408 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7409 if (op < 4) {
7410 /* Saturating add/subtract. */
7411 tmp = load_reg(s, rn);
7412 tmp2 = load_reg(s, rm);
7413 if (op & 2)
7414 gen_helper_double_saturate(tmp, tmp);
7415 if (op & 1)
7416 gen_helper_sub_saturate(tmp, tmp2, tmp);
7417 else
7418 gen_helper_add_saturate(tmp, tmp, tmp2);
7419 dead_tmp(tmp2);
7420 } else {
7421 tmp = load_reg(s, rn);
7422 switch (op) {
7423 case 0x0a: /* rbit */
7424 gen_helper_rbit(tmp, tmp);
7425 break;
7426 case 0x08: /* rev */
7427 tcg_gen_bswap32_i32(tmp, tmp);
7428 break;
7429 case 0x09: /* rev16 */
7430 gen_rev16(tmp);
7431 break;
7432 case 0x0b: /* revsh */
7433 gen_revsh(tmp);
7434 break;
7435 case 0x10: /* sel */
7436 tmp2 = load_reg(s, rm);
7437 tmp3 = new_tmp();
7438 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7439 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7440 dead_tmp(tmp3);
7441 dead_tmp(tmp2);
7442 break;
7443 case 0x18: /* clz */
7444 gen_helper_clz(tmp, tmp);
7445 break;
7446 default:
7447 goto illegal_op;
7450 store_reg(s, rd, tmp);
7451 break;
7452 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7453 op = (insn >> 4) & 0xf;
7454 tmp = load_reg(s, rn);
7455 tmp2 = load_reg(s, rm);
7456 switch ((insn >> 20) & 7) {
7457 case 0: /* 32 x 32 -> 32 */
7458 tcg_gen_mul_i32(tmp, tmp, tmp2);
7459 dead_tmp(tmp2);
7460 if (rs != 15) {
7461 tmp2 = load_reg(s, rs);
7462 if (op)
7463 tcg_gen_sub_i32(tmp, tmp2, tmp);
7464 else
7465 tcg_gen_add_i32(tmp, tmp, tmp2);
7466 dead_tmp(tmp2);
7468 break;
7469 case 1: /* 16 x 16 -> 32 */
7470 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7471 dead_tmp(tmp2);
7472 if (rs != 15) {
7473 tmp2 = load_reg(s, rs);
7474 gen_helper_add_setq(tmp, tmp, tmp2);
7475 dead_tmp(tmp2);
7477 break;
7478 case 2: /* Dual multiply add. */
7479 case 4: /* Dual multiply subtract. */
7480 if (op)
7481 gen_swap_half(tmp2);
7482 gen_smul_dual(tmp, tmp2);
7483 /* This addition cannot overflow. */
7484 if (insn & (1 << 22)) {
7485 tcg_gen_sub_i32(tmp, tmp, tmp2);
7486 } else {
7487 tcg_gen_add_i32(tmp, tmp, tmp2);
7489 dead_tmp(tmp2);
7490 if (rs != 15)
7492 tmp2 = load_reg(s, rs);
7493 gen_helper_add_setq(tmp, tmp, tmp2);
7494 dead_tmp(tmp2);
7496 break;
7497 case 3: /* 32 * 16 -> 32msb */
7498 if (op)
7499 tcg_gen_sari_i32(tmp2, tmp2, 16);
7500 else
7501 gen_sxth(tmp2);
7502 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7503 tcg_gen_shri_i64(tmp64, tmp64, 16);
7504 tmp = new_tmp();
7505 tcg_gen_trunc_i64_i32(tmp, tmp64);
7506 if (rs != 15)
7508 tmp2 = load_reg(s, rs);
7509 gen_helper_add_setq(tmp, tmp, tmp2);
7510 dead_tmp(tmp2);
7512 break;
7513 case 5: case 6: /* 32 * 32 -> 32msb */
7514 gen_imull(tmp, tmp2);
7515 if (insn & (1 << 5)) {
7516 gen_roundqd(tmp, tmp2);
7517 dead_tmp(tmp2);
7518 } else {
7519 dead_tmp(tmp);
7520 tmp = tmp2;
7522 if (rs != 15) {
7523 tmp2 = load_reg(s, rs);
7524 if (insn & (1 << 21)) {
7525 tcg_gen_add_i32(tmp, tmp, tmp2);
7526 } else {
7527 tcg_gen_sub_i32(tmp, tmp2, tmp);
7529 dead_tmp(tmp2);
7531 break;
7532 case 7: /* Unsigned sum of absolute differences. */
7533 gen_helper_usad8(tmp, tmp, tmp2);
7534 dead_tmp(tmp2);
7535 if (rs != 15) {
7536 tmp2 = load_reg(s, rs);
7537 tcg_gen_add_i32(tmp, tmp, tmp2);
7538 dead_tmp(tmp2);
7540 break;
7542 store_reg(s, rd, tmp);
7543 break;
7544 case 6: case 7: /* 64-bit multiply, Divide. */
7545 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7546 tmp = load_reg(s, rn);
7547 tmp2 = load_reg(s, rm);
7548 if ((op & 0x50) == 0x10) {
7549 /* sdiv, udiv */
7550 if (!arm_feature(env, ARM_FEATURE_DIV))
7551 goto illegal_op;
7552 if (op & 0x20)
7553 gen_helper_udiv(tmp, tmp, tmp2);
7554 else
7555 gen_helper_sdiv(tmp, tmp, tmp2);
7556 dead_tmp(tmp2);
7557 store_reg(s, rd, tmp);
7558 } else if ((op & 0xe) == 0xc) {
7559 /* Dual multiply accumulate long. */
7560 if (op & 1)
7561 gen_swap_half(tmp2);
7562 gen_smul_dual(tmp, tmp2);
7563 if (op & 0x10) {
7564 tcg_gen_sub_i32(tmp, tmp, tmp2);
7565 } else {
7566 tcg_gen_add_i32(tmp, tmp, tmp2);
7568 dead_tmp(tmp2);
7569 /* BUGFIX */
7570 tmp64 = tcg_temp_new_i64();
7571 tcg_gen_ext_i32_i64(tmp64, tmp);
7572 dead_tmp(tmp);
7573 gen_addq(s, tmp64, rs, rd);
7574 gen_storeq_reg(s, rs, rd, tmp64);
7575 } else {
7576 if (op & 0x20) {
7577 /* Unsigned 64-bit multiply */
7578 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7579 } else {
7580 if (op & 8) {
7581 /* smlalxy */
7582 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7583 dead_tmp(tmp2);
7584 tmp64 = tcg_temp_new_i64();
7585 tcg_gen_ext_i32_i64(tmp64, tmp);
7586 dead_tmp(tmp);
7587 } else {
7588 /* Signed 64-bit multiply */
7589 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7592 if (op & 4) {
7593 /* umaal */
7594 gen_addq_lo(s, tmp64, rs);
7595 gen_addq_lo(s, tmp64, rd);
7596 } else if (op & 0x40) {
7597 /* 64-bit accumulate. */
7598 gen_addq(s, tmp64, rs, rd);
7600 gen_storeq_reg(s, rs, rd, tmp64);
7602 break;
7604 break;
7605 case 6: case 7: case 14: case 15:
7606 /* Coprocessor. */
7607 if (((insn >> 24) & 3) == 3) {
7608 /* Translate into the equivalent ARM encoding. */
7609 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
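/* The Thumb and ARM Neon data processing encodings differ only in
   the position of this one bit, so move it from bit 28 down to
   bit 24 and reuse the ARM decoder. */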
7610 if (disas_neon_data_insn(env, s, insn))
7611 goto illegal_op;
7612 } else {
7613 if (insn & (1 << 28))
7614 goto illegal_op;
7615 if (disas_coproc_insn (env, s, insn))
7616 goto illegal_op;
7618 break;
7619 case 8: case 9: case 10: case 11:
7620 if (insn & (1 << 15)) {
7621 /* Branches, misc control. */
7622 if (insn & 0x5000) {
7623 /* Unconditional branch. */
7624 /* signextend(hw1[10:0]) -> offset[:12]. */
7625 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7626 /* hw1[10:0] -> offset[11:1]. */
7627 offset |= (insn & 0x7ff) << 1;
7628 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7629 offset[24:22] already have the same value because of the
7630 sign extension above. */
7631 offset ^= ((~insn) & (1 << 13)) << 10;
7632 offset ^= ((~insn) & (1 << 11)) << 11;
7634 if (insn & (1 << 14)) {
7635 /* Branch and link. */
7636 gen_op_movl_T1_im(s->pc | 1);
7637 gen_movl_reg_T1(s, 14);
7640 offset += s->pc;
7641 if (insn & (1 << 12)) {
7642 /* b/bl */
7643 gen_jmp(s, offset);
7644 } else {
7645 /* blx */
7646 offset &= ~(uint32_t)2;
7647 gen_bx_im(s, offset);
7649 } else if (((insn >> 23) & 7) == 7) {
7650 /* Misc control */
7651 if (insn & (1 << 13))
7652 goto illegal_op;
7654 if (insn & (1 << 26)) {
7655 /* Secure monitor call (v6Z) */
7656 goto illegal_op; /* not implemented. */
7657 } else {
7658 op = (insn >> 20) & 7;
7659 switch (op) {
7660 case 0: /* msr cpsr. */
7661 if (IS_M(env)) {
7662 tmp = load_reg(s, rn);
7663 addr = tcg_const_i32(insn & 0xff);
7664 gen_helper_v7m_msr(cpu_env, addr, tmp);
7665 gen_lookup_tb(s);
7666 break;
7668 /* fall through */
7669 case 1: /* msr spsr. */
7670 if (IS_M(env))
7671 goto illegal_op;
7672 gen_movl_T0_reg(s, rn);
7673 if (gen_set_psr_T0(s,
7674 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7675 op == 1))
7676 goto illegal_op;
7677 break;
7678 case 2: /* cps, nop-hint. */
7679 if (((insn >> 8) & 7) == 0) {
7680 gen_nop_hint(s, insn & 0xff);
7682 /* Implemented as NOP in user mode. */
7683 if (IS_USER(s))
7684 break;
7685 offset = 0;
7686 imm = 0;
7687 if (insn & (1 << 10)) {
7688 if (insn & (1 << 7))
7689 offset |= CPSR_A;
7690 if (insn & (1 << 6))
7691 offset |= CPSR_I;
7692 if (insn & (1 << 5))
7693 offset |= CPSR_F;
7694 if (insn & (1 << 9))
7695 imm = CPSR_A | CPSR_I | CPSR_F;
7697 if (insn & (1 << 8)) {
7698 offset |= 0x1f;
7699 imm |= (insn & 0x1f);
7701 if (offset) {
7702 gen_op_movl_T0_im(imm);
7703 gen_set_psr_T0(s, offset, 0);
7705 break;
7706 case 3: /* Special control operations. */
7707 op = (insn >> 4) & 0xf;
7708 switch (op) {
7709 case 2: /* clrex */
7710 gen_helper_clrex(cpu_env);
7711 break;
7712 case 4: /* dsb */
7713 case 5: /* dmb */
7714 case 6: /* isb */
7715 /* These execute as NOPs. */
7716 ARCH(7);
7717 break;
7718 default:
7719 goto illegal_op;
7721 break;
7722 case 4: /* bxj */
7723 /* Trivial implementation equivalent to bx. */
7724 tmp = load_reg(s, rn);
7725 gen_bx(s, tmp);
7726 break;
7727 case 5: /* Exception return. */
7728 /* Unpredictable in user mode. */
7729 goto illegal_op;
7730 case 6: /* mrs cpsr. */
7731 tmp = new_tmp();
7732 if (IS_M(env)) {
7733 addr = tcg_const_i32(insn & 0xff);
7734 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7735 } else {
7736 gen_helper_cpsr_read(tmp);
7738 store_reg(s, rd, tmp);
7739 break;
7740 case 7: /* mrs spsr. */
7741 /* Not accessible in user mode. */
7742 if (IS_USER(s) || IS_M(env))
7743 goto illegal_op;
7744 tmp = load_cpu_field(spsr);
7745 store_reg(s, rd, tmp);
7746 break;
7749 } else {
7750 /* Conditional branch. */
7751 op = (insn >> 22) & 0xf;
7752 /* Generate a conditional jump to next instruction. */
7753 s->condlabel = gen_new_label();
7754 gen_test_cc(op ^ 1, s->condlabel);
7755 s->condjmp = 1;
7757 /* offset[11:1] = insn[10:0] */
7758 offset = (insn & 0x7ff) << 1;
7759 /* offset[17:12] = insn[21:16]. */
7760 offset |= (insn & 0x003f0000) >> 4;
7761 /* offset[31:20] = insn[26]. */
7762 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7763 /* offset[18] = insn[13]. */
7764 offset |= (insn & (1 << 13)) << 5;
7765 /* offset[19] = insn[11]. */
7766 offset |= (insn & (1 << 11)) << 8;
7768 /* jump to the offset */
7769 gen_jmp(s, s->pc + offset);
7771 } else {
7772 /* Data processing immediate. */
7773 if (insn & (1 << 25)) {
7774 if (insn & (1 << 24)) {
7775 if (insn & (1 << 20))
7776 goto illegal_op;
7777 /* Bitfield/Saturate. */
7778 op = (insn >> 21) & 7;
7779 imm = insn & 0x1f;
7780 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7781 if (rn == 15) {
7782 tmp = new_tmp();
7783 tcg_gen_movi_i32(tmp, 0);
7784 } else {
7785 tmp = load_reg(s, rn);
7787 switch (op) {
7788 case 2: /* Signed bitfield extract. */
7789 imm++;
7790 if (shift + imm > 32)
7791 goto illegal_op;
7792 if (imm < 32)
7793 gen_sbfx(tmp, shift, imm);
7794 break;
7795 case 6: /* Unsigned bitfield extract. */
7796 imm++;
7797 if (shift + imm > 32)
7798 goto illegal_op;
7799 if (imm < 32)
7800 gen_ubfx(tmp, shift, (1u << imm) - 1);
7801 break;
7802 case 3: /* Bitfield insert/clear. */
7803 if (imm < shift)
7804 goto illegal_op;
7805 imm = imm + 1 - shift;
7806 if (imm != 32) {
7807 tmp2 = load_reg(s, rd);
7808 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7809 dead_tmp(tmp2);
7811 break;
7812 case 7:
7813 goto illegal_op;
7814 default: /* Saturate. */
7815 if (shift) {
7816 if (op & 1)
7817 tcg_gen_sari_i32(tmp, tmp, shift);
7818 else
7819 tcg_gen_shli_i32(tmp, tmp, shift);
7821 tmp2 = tcg_const_i32(imm);
7822 if (op & 4) {
7823 /* Unsigned. */
7824 if ((op & 1) && shift == 0)
7825 gen_helper_usat16(tmp, tmp, tmp2);
7826 else
7827 gen_helper_usat(tmp, tmp, tmp2);
7828 } else {
7829 /* Signed. */
7830 if ((op & 1) && shift == 0)
7831 gen_helper_ssat16(tmp, tmp, tmp2);
7832 else
7833 gen_helper_ssat(tmp, tmp, tmp2);
7835 break;
7837 store_reg(s, rd, tmp);
7838 } else {
7839 imm = ((insn & 0x04000000) >> 15)
7840 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7841 if (insn & (1 << 22)) {
7842 /* 16-bit immediate. */
7843 imm |= (insn >> 4) & 0xf000;
7844 if (insn & (1 << 23)) {
7845 /* movt */
7846 tmp = load_reg(s, rd);
7847 tcg_gen_ext16u_i32(tmp, tmp);
7848 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7849 } else {
7850 /* movw */
7851 tmp = new_tmp();
7852 tcg_gen_movi_i32(tmp, imm);
7854 } else {
7855 /* Add/sub 12-bit immediate. */
7856 if (rn == 15) {
7857 offset = s->pc & ~(uint32_t)3;
7858 if (insn & (1 << 23))
7859 offset -= imm;
7860 else
7861 offset += imm;
7862 tmp = new_tmp();
7863 tcg_gen_movi_i32(tmp, offset);
7864 } else {
7865 tmp = load_reg(s, rn);
7866 if (insn & (1 << 23))
7867 tcg_gen_subi_i32(tmp, tmp, imm);
7868 else
7869 tcg_gen_addi_i32(tmp, tmp, imm);
7872 store_reg(s, rd, tmp);
7874 } else {
7875 int shifter_out = 0;
7876 /* modified 12-bit immediate. */
7877 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7878 imm = (insn & 0xff);
7879 switch (shift) {
7880 case 0: /* XY */
7881 /* Nothing to do. */
7882 break;
7883 case 1: /* 00XY00XY */
7884 imm |= imm << 16;
7885 break;
7886 case 2: /* XY00XY00 */
7887 imm |= imm << 16;
7888 imm <<= 8;
7889 break;
7890 case 3: /* XYXYXYXY */
7891 imm |= imm << 16;
7892 imm |= imm << 8;
7893 break;
7894 default: /* Rotated constant. */
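/* An 8-bit value with its top bit forced on, rotated right by a
   five-bit count in the range 8..31; since the count is at least
   eight, the rotate reduces to the single shift below. */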
7895 shift = (shift << 1) | (imm >> 7);
7896 imm |= 0x80;
7897 imm = imm << (32 - shift);
7898 shifter_out = 1;
7899 break;
7901 gen_op_movl_T1_im(imm);
7902 rn = (insn >> 16) & 0xf;
7903 if (rn == 15)
7904 gen_op_movl_T0_im(0);
7905 else
7906 gen_movl_T0_reg(s, rn);
7907 op = (insn >> 21) & 0xf;
7908 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7909 shifter_out))
7910 goto illegal_op;
7911 rd = (insn >> 8) & 0xf;
7912 if (rd != 15) {
7913 gen_movl_reg_T0(s, rd);
7917 break;
7918 case 12: /* Load/store single data item. */
7920 int postinc = 0;
7921 int writeback = 0;
7922 int user;
7923 if ((insn & 0x01100000) == 0x01000000) {
7924 if (disas_neon_ls_insn(env, s, insn))
7925 goto illegal_op;
7926 break;
7928 user = IS_USER(s);
7929 if (rn == 15) {
7930 addr = new_tmp();
7931 /* PC relative. */
7932 /* s->pc has already been incremented by 4. */
7933 imm = s->pc & 0xfffffffc;
7934 if (insn & (1 << 23))
7935 imm += insn & 0xfff;
7936 else
7937 imm -= insn & 0xfff;
7938 tcg_gen_movi_i32(addr, imm);
7939 } else {
7940 addr = load_reg(s, rn);
7941 if (insn & (1 << 23)) {
7942 /* Positive offset. */
7943 imm = insn & 0xfff;
7944 tcg_gen_addi_i32(addr, addr, imm);
7945 } else {
7946 op = (insn >> 8) & 7;
7947 imm = insn & 0xff;
7948 switch (op) {
7949 case 0: case 8: /* Shifted Register. */
7950 shift = (insn >> 4) & 0xf;
7951 if (shift > 3)
7952 goto illegal_op;
7953 tmp = load_reg(s, rm);
7954 if (shift)
7955 tcg_gen_shli_i32(tmp, tmp, shift);
7956 tcg_gen_add_i32(addr, addr, tmp);
7957 dead_tmp(tmp);
7958 break;
7959 case 4: /* Negative offset. */
7960 tcg_gen_addi_i32(addr, addr, -imm);
7961 break;
7962 case 6: /* User privilege. */
7963 tcg_gen_addi_i32(addr, addr, imm);
7964 user = 1;
7965 break;
7966 case 1: /* Post-decrement. */
7967 imm = -imm;
7968 /* Fall through. */
7969 case 3: /* Post-increment. */
7970 postinc = 1;
7971 writeback = 1;
7972 break;
7973 case 5: /* Pre-decrement. */
7974 imm = -imm;
7975 /* Fall through. */
7976 case 7: /* Pre-increment. */
7977 tcg_gen_addi_i32(addr, addr, imm);
7978 writeback = 1;
7979 break;
7980 default:
7981 goto illegal_op;
7985 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7986 if (insn & (1 << 20)) {
7987 /* Load. */
7988 if (rs == 15 && op != 2) {
7989 if (op & 2)
7990 goto illegal_op;
7991 /* Memory hint. Implemented as NOP. */
7992 } else {
7993 switch (op) {
7994 case 0: tmp = gen_ld8u(addr, user); break;
7995 case 4: tmp = gen_ld8s(addr, user); break;
7996 case 1: tmp = gen_ld16u(addr, user); break;
7997 case 5: tmp = gen_ld16s(addr, user); break;
7998 case 2: tmp = gen_ld32(addr, user); break;
7999 default: goto illegal_op;
8001 if (rs == 15) {
8002 gen_bx(s, tmp);
8003 } else {
8004 store_reg(s, rs, tmp);
8007 } else {
8008 /* Store. */
8009 if (rs == 15)
8010 goto illegal_op;
8011 tmp = load_reg(s, rs);
8012 switch (op) {
8013 case 0: gen_st8(tmp, addr, user); break;
8014 case 1: gen_st16(tmp, addr, user); break;
8015 case 2: gen_st32(tmp, addr, user); break;
8016 default: goto illegal_op;
8019 if (postinc)
8020 tcg_gen_addi_i32(addr, addr, imm);
8021 if (writeback) {
8022 store_reg(s, rn, addr);
8023 } else {
8024 dead_tmp(addr);
8027 break;
8028 default:
8029 goto illegal_op;
8031 return 0;
8032 illegal_op:
8033 return 1;
8034 }
8036 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8037 {
8038 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8039 int32_t offset;
8040 int i;
8041 TCGv tmp;
8042 TCGv tmp2;
8043 TCGv addr;
8045 if (s->condexec_mask) {
8046 cond = s->condexec_cond;
8047 s->condlabel = gen_new_label();
8048 gen_test_cc(cond ^ 1, s->condlabel);
8049 s->condjmp = 1;
8052 insn = lduw_code(s->pc);
8053 s->pc += 2;
8055 switch (insn >> 12) {
8056 case 0: case 1:
8057 rd = insn & 7;
8058 op = (insn >> 11) & 3;
8059 if (op == 3) {
8060 /* add/subtract */
8061 rn = (insn >> 3) & 7;
8062 gen_movl_T0_reg(s, rn);
8063 if (insn & (1 << 10)) {
8064 /* immediate */
8065 gen_op_movl_T1_im((insn >> 6) & 7);
8066 } else {
8067 /* reg */
8068 rm = (insn >> 6) & 7;
8069 gen_movl_T1_reg(s, rm);
8071 if (insn & (1 << 9)) {
8072 if (s->condexec_mask)
8073 gen_op_subl_T0_T1();
8074 else
8075 gen_op_subl_T0_T1_cc();
8076 } else {
8077 if (s->condexec_mask)
8078 gen_op_addl_T0_T1();
8079 else
8080 gen_op_addl_T0_T1_cc();
8082 gen_movl_reg_T0(s, rd);
8083 } else {
8084 /* shift immediate */
8085 rm = (insn >> 3) & 7;
8086 shift = (insn >> 6) & 0x1f;
8087 tmp = load_reg(s, rm);
8088 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8089 if (!s->condexec_mask)
8090 gen_logic_CC(tmp);
8091 store_reg(s, rd, tmp);
8093 break;
8094 case 2: case 3:
8095 /* arithmetic large immediate */
8096 op = (insn >> 11) & 3;
8097 rd = (insn >> 8) & 0x7;
8098 if (op == 0) {
8099 gen_op_movl_T0_im(insn & 0xff);
8100 } else {
8101 gen_movl_T0_reg(s, rd);
8102 gen_op_movl_T1_im(insn & 0xff);
8104 switch (op) {
8105 case 0: /* mov */
8106 if (!s->condexec_mask)
8107 gen_op_logic_T0_cc();
8108 break;
8109 case 1: /* cmp */
8110 gen_op_subl_T0_T1_cc();
8111 break;
8112 case 2: /* add */
8113 if (s->condexec_mask)
8114 gen_op_addl_T0_T1();
8115 else
8116 gen_op_addl_T0_T1_cc();
8117 break;
8118 case 3: /* sub */
8119 if (s->condexec_mask)
8120 gen_op_subl_T0_T1();
8121 else
8122 gen_op_subl_T0_T1_cc();
8123 break;
8125 if (op != 1)
8126 gen_movl_reg_T0(s, rd);
8127 break;
8128 case 4:
8129 if (insn & (1 << 11)) {
8130 rd = (insn >> 8) & 7;
8131 /* load pc-relative. Bit 1 of PC is ignored. */
8132 val = s->pc + 2 + ((insn & 0xff) * 4);
8133 val &= ~(uint32_t)2;
8134 addr = new_tmp();
8135 tcg_gen_movi_i32(addr, val);
8136 tmp = gen_ld32(addr, IS_USER(s));
8137 dead_tmp(addr);
8138 store_reg(s, rd, tmp);
8139 break;
8141 if (insn & (1 << 10)) {
8142 /* data processing extended or blx */
8143 rd = (insn & 7) | ((insn >> 4) & 8);
8144 rm = (insn >> 3) & 0xf;
8145 op = (insn >> 8) & 3;
8146 switch (op) {
8147 case 0: /* add */
8148 gen_movl_T0_reg(s, rd);
8149 gen_movl_T1_reg(s, rm);
8150 gen_op_addl_T0_T1();
8151 gen_movl_reg_T0(s, rd);
8152 break;
8153 case 1: /* cmp */
8154 gen_movl_T0_reg(s, rd);
8155 gen_movl_T1_reg(s, rm);
8156 gen_op_subl_T0_T1_cc();
8157 break;
8158 case 2: /* mov/cpy */
8159 gen_movl_T0_reg(s, rm);
8160 gen_movl_reg_T0(s, rd);
8161 break;
8162 case 3:/* branch [and link] exchange thumb register */
8163 tmp = load_reg(s, rm);
8164 if (insn & (1 << 7)) {
8165 val = (uint32_t)s->pc | 1;
8166 tmp2 = new_tmp();
8167 tcg_gen_movi_i32(tmp2, val);
8168 store_reg(s, 14, tmp2);
8170 gen_bx(s, tmp);
8171 break;
8173 break;
8176 /* data processing register */
8177 rd = insn & 7;
8178 rm = (insn >> 3) & 7;
8179 op = (insn >> 6) & 0xf;
8180 if (op == 2 || op == 3 || op == 4 || op == 7) {
8181 /* the shift/rotate ops want the operands backwards */
8182 val = rm;
8183 rm = rd;
8184 rd = val;
8185 val = 1;
8186 } else {
8187 val = 0;
8190 if (op == 9) /* neg */
8191 gen_op_movl_T0_im(0);
8192 else if (op != 0xf) /* mvn doesn't read its first operand */
8193 gen_movl_T0_reg(s, rd);
8195 gen_movl_T1_reg(s, rm);
8196 switch (op) {
8197 case 0x0: /* and */
8198 gen_op_andl_T0_T1();
8199 if (!s->condexec_mask)
8200 gen_op_logic_T0_cc();
8201 break;
8202 case 0x1: /* eor */
8203 gen_op_xorl_T0_T1();
8204 if (!s->condexec_mask)
8205 gen_op_logic_T0_cc();
8206 break;
8207 case 0x2: /* lsl */
8208 if (s->condexec_mask) {
8209 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8210 } else {
8211 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8212 gen_op_logic_T1_cc();
8214 break;
8215 case 0x3: /* lsr */
8216 if (s->condexec_mask) {
8217 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8218 } else {
8219 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8220 gen_op_logic_T1_cc();
8222 break;
8223 case 0x4: /* asr */
8224 if (s->condexec_mask) {
8225 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8226 } else {
8227 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8228 gen_op_logic_T1_cc();
8230 break;
8231 case 0x5: /* adc */
8232 if (s->condexec_mask)
8233 gen_adc_T0_T1();
8234 else
8235 gen_op_adcl_T0_T1_cc();
8236 break;
8237 case 0x6: /* sbc */
8238 if (s->condexec_mask)
8239 gen_sbc_T0_T1();
8240 else
8241 gen_op_sbcl_T0_T1_cc();
8242 break;
8243 case 0x7: /* ror */
8244 if (s->condexec_mask) {
8245 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8246 } else {
8247 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8248 gen_op_logic_T1_cc();
8250 break;
8251 case 0x8: /* tst */
8252 gen_op_andl_T0_T1();
8253 gen_op_logic_T0_cc();
8254 rd = 16;
8255 break;
8256 case 0x9: /* neg */
8257 if (s->condexec_mask)
8258 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8259 else
8260 gen_op_subl_T0_T1_cc();
8261 break;
8262 case 0xa: /* cmp */
8263 gen_op_subl_T0_T1_cc();
8264 rd = 16;
8265 break;
8266 case 0xb: /* cmn */
8267 gen_op_addl_T0_T1_cc();
8268 rd = 16;
8269 break;
8270 case 0xc: /* orr */
8271 gen_op_orl_T0_T1();
8272 if (!s->condexec_mask)
8273 gen_op_logic_T0_cc();
8274 break;
8275 case 0xd: /* mul */
8276 gen_op_mull_T0_T1();
8277 if (!s->condexec_mask)
8278 gen_op_logic_T0_cc();
8279 break;
8280 case 0xe: /* bic */
8281 gen_op_bicl_T0_T1();
8282 if (!s->condexec_mask)
8283 gen_op_logic_T0_cc();
8284 break;
8285 case 0xf: /* mvn */
8286 gen_op_notl_T1();
8287 if (!s->condexec_mask)
8288 gen_op_logic_T1_cc();
8289 val = 1;
8290 rm = rd;
8291 break;
8293 if (rd != 16) {
8294 if (val)
8295 gen_movl_reg_T1(s, rm);
8296 else
8297 gen_movl_reg_T0(s, rd);
8299 break;
8301 case 5:
8302 /* load/store register offset. */
8303 rd = insn & 7;
8304 rn = (insn >> 3) & 7;
8305 rm = (insn >> 6) & 7;
8306 op = (insn >> 9) & 7;
8307 addr = load_reg(s, rn);
8308 tmp = load_reg(s, rm);
8309 tcg_gen_add_i32(addr, addr, tmp);
8310 dead_tmp(tmp);
8312 if (op < 3) /* store */
8313 tmp = load_reg(s, rd);
8315 switch (op) {
8316 case 0: /* str */
8317 gen_st32(tmp, addr, IS_USER(s));
8318 break;
8319 case 1: /* strh */
8320 gen_st16(tmp, addr, IS_USER(s));
8321 break;
8322 case 2: /* strb */
8323 gen_st8(tmp, addr, IS_USER(s));
8324 break;
8325 case 3: /* ldrsb */
8326 tmp = gen_ld8s(addr, IS_USER(s));
8327 break;
8328 case 4: /* ldr */
8329 tmp = gen_ld32(addr, IS_USER(s));
8330 break;
8331 case 5: /* ldrh */
8332 tmp = gen_ld16u(addr, IS_USER(s));
8333 break;
8334 case 6: /* ldrb */
8335 tmp = gen_ld8u(addr, IS_USER(s));
8336 break;
8337 case 7: /* ldrsh */
8338 tmp = gen_ld16s(addr, IS_USER(s));
8339 break;
8341 if (op >= 3) /* load */
8342 store_reg(s, rd, tmp);
8343 dead_tmp(addr);
8344 break;
8346 case 6:
8347 /* load/store word immediate offset */
8348 rd = insn & 7;
8349 rn = (insn >> 3) & 7;
8350 addr = load_reg(s, rn);
8351 val = (insn >> 4) & 0x7c;
8352 tcg_gen_addi_i32(addr, addr, val);
8354 if (insn & (1 << 11)) {
8355 /* load */
8356 tmp = gen_ld32(addr, IS_USER(s));
8357 store_reg(s, rd, tmp);
8358 } else {
8359 /* store */
8360 tmp = load_reg(s, rd);
8361 gen_st32(tmp, addr, IS_USER(s));
8363 dead_tmp(addr);
8364 break;
8366 case 7:
8367 /* load/store byte immediate offset */
8368 rd = insn & 7;
8369 rn = (insn >> 3) & 7;
8370 addr = load_reg(s, rn);
8371 val = (insn >> 6) & 0x1f;
8372 tcg_gen_addi_i32(addr, addr, val);
8374 if (insn & (1 << 11)) {
8375 /* load */
8376 tmp = gen_ld8u(addr, IS_USER(s));
8377 store_reg(s, rd, tmp);
8378 } else {
8379 /* store */
8380 tmp = load_reg(s, rd);
8381 gen_st8(tmp, addr, IS_USER(s));
8383 dead_tmp(addr);
8384 break;
8386 case 8:
8387 /* load/store halfword immediate offset */
8388 rd = insn & 7;
8389 rn = (insn >> 3) & 7;
8390 addr = load_reg(s, rn);
8391 val = (insn >> 5) & 0x3e;
8392 tcg_gen_addi_i32(addr, addr, val);
8394 if (insn & (1 << 11)) {
8395 /* load */
8396 tmp = gen_ld16u(addr, IS_USER(s));
8397 store_reg(s, rd, tmp);
8398 } else {
8399 /* store */
8400 tmp = load_reg(s, rd);
8401 gen_st16(tmp, addr, IS_USER(s));
8403 dead_tmp(addr);
8404 break;
8406 case 9:
8407 /* load/store from stack */
8408 rd = (insn >> 8) & 7;
8409 addr = load_reg(s, 13);
8410 val = (insn & 0xff) * 4;
8411 tcg_gen_addi_i32(addr, addr, val);
8413 if (insn & (1 << 11)) {
8414 /* load */
8415 tmp = gen_ld32(addr, IS_USER(s));
8416 store_reg(s, rd, tmp);
8417 } else {
8418 /* store */
8419 tmp = load_reg(s, rd);
8420 gen_st32(tmp, addr, IS_USER(s));
8422 dead_tmp(addr);
8423 break;
8425 case 10:
8426 /* add to high reg */
8427 rd = (insn >> 8) & 7;
8428 if (insn & (1 << 11)) {
8429 /* SP */
8430 tmp = load_reg(s, 13);
8431 } else {
8432 /* PC. bit 1 is ignored. */
8433 tmp = new_tmp();
8434 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
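/* The Thumb PC reads as the instruction address + 4 with bit 1
   ignored; s->pc has already advanced by 2, hence the extra 2. */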
8436 val = (insn & 0xff) * 4;
8437 tcg_gen_addi_i32(tmp, tmp, val);
8438 store_reg(s, rd, tmp);
8439 break;
8441 case 11:
8442 /* misc */
8443 op = (insn >> 8) & 0xf;
8444 switch (op) {
8445 case 0:
8446 /* adjust stack pointer */
8447 tmp = load_reg(s, 13);
8448 val = (insn & 0x7f) * 4;
8449 if (insn & (1 << 7))
8450 val = -(int32_t)val;
8451 tcg_gen_addi_i32(tmp, tmp, val);
8452 store_reg(s, 13, tmp);
8453 break;
8455 case 2: /* sign/zero extend. */
8456 ARCH(6);
8457 rd = insn & 7;
8458 rm = (insn >> 3) & 7;
8459 tmp = load_reg(s, rm);
8460 switch ((insn >> 6) & 3) {
8461 case 0: gen_sxth(tmp); break;
8462 case 1: gen_sxtb(tmp); break;
8463 case 2: gen_uxth(tmp); break;
8464 case 3: gen_uxtb(tmp); break;
8466 store_reg(s, rd, tmp);
8467 break;
8468 case 4: case 5: case 0xc: case 0xd:
8469 /* push/pop */
8470 addr = load_reg(s, 13);
8471 if (insn & (1 << 8))
8472 offset = 4;
8473 else
8474 offset = 0;
8475 for (i = 0; i < 8; i++) {
8476 if (insn & (1 << i))
8477 offset += 4;
8479 if ((insn & (1 << 11)) == 0) {
8480 tcg_gen_addi_i32(addr, addr, -offset);
8482 for (i = 0; i < 8; i++) {
8483 if (insn & (1 << i)) {
8484 if (insn & (1 << 11)) {
8485 /* pop */
8486 tmp = gen_ld32(addr, IS_USER(s));
8487 store_reg(s, i, tmp);
8488 } else {
8489 /* push */
8490 tmp = load_reg(s, i);
8491 gen_st32(tmp, addr, IS_USER(s));
8493 /* advance to the next address. */
8494 tcg_gen_addi_i32(addr, addr, 4);
8497 TCGV_UNUSED(tmp);
8498 if (insn & (1 << 8)) {
8499 if (insn & (1 << 11)) {
8500 /* pop pc */
8501 tmp = gen_ld32(addr, IS_USER(s));
8502 /* don't set the pc until the rest of the instruction
8503 has completed */
8504 } else {
8505 /* push lr */
8506 tmp = load_reg(s, 14);
8507 gen_st32(tmp, addr, IS_USER(s));
8509 tcg_gen_addi_i32(addr, addr, 4);
8511 if ((insn & (1 << 11)) == 0) {
8512 tcg_gen_addi_i32(addr, addr, -offset);
8514 /* write back the new stack pointer */
8515 store_reg(s, 13, addr);
8516 /* set the new PC value */
8517 if ((insn & 0x0900) == 0x0900)
8518 gen_bx(s, tmp);
8519 break;
8521 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8522 rm = insn & 7;
8523 tmp = load_reg(s, rm);
8524 s->condlabel = gen_new_label();
8525 s->condjmp = 1;
8526 if (insn & (1 << 11))
8527 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8528 else
8529 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8530 dead_tmp(tmp);
8531 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
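/* offset = i:imm5:'0', i.e. an even forward offset of 0..126
   bytes. */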
8532 val = (uint32_t)s->pc + 2;
8533 val += offset;
8534 gen_jmp(s, val);
8535 break;
8537 case 15: /* IT, nop-hint. */
8538 if ((insn & 0xf) == 0) {
8539 gen_nop_hint(s, (insn >> 4) & 0xf);
8540 break;
8542 /* If Then. */
8543 s->condexec_cond = (insn >> 4) & 0xe;
8544 s->condexec_mask = insn & 0x1f;
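/* The base condition is stored with its low bit clear; each
   instruction in the block takes that bit from the top of the
   mask, which advances as the block is translated. */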
8545 /* No actual code generated for this insn, just setup state. */
8546 break;
8548 case 0xe: /* bkpt */
8549 gen_set_condexec(s);
8550 gen_set_pc_im(s->pc - 2);
8551 gen_exception(EXCP_BKPT);
8552 s->is_jmp = DISAS_JUMP;
8553 break;
8555 case 0xa: /* rev */
8556 ARCH(6);
8557 rn = (insn >> 3) & 0x7;
8558 rd = insn & 0x7;
8559 tmp = load_reg(s, rn);
8560 switch ((insn >> 6) & 3) {
8561 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8562 case 1: gen_rev16(tmp); break;
8563 case 3: gen_revsh(tmp); break;
8564 default: goto illegal_op;
8566 store_reg(s, rd, tmp);
8567 break;
8569 case 6: /* cps */
8570 ARCH(6);
8571 if (IS_USER(s))
8572 break;
8573 if (IS_M(env)) {
8574 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8575 /* PRIMASK */
8576 if (insn & 1) {
8577 addr = tcg_const_i32(16);
8578 gen_helper_v7m_msr(cpu_env, addr, tmp);
8580 /* FAULTMASK */
8581 if (insn & 2) {
8582 addr = tcg_const_i32(17);
8583 gen_helper_v7m_msr(cpu_env, addr, tmp);
8585 gen_lookup_tb(s);
8586 } else {
8587 if (insn & (1 << 4))
8588 shift = CPSR_A | CPSR_I | CPSR_F;
8589 else
8590 shift = 0;
8592 val = ((insn & 7) << 6) & shift;
8593 gen_op_movl_T0_im(val);
8594 gen_set_psr_T0(s, shift, 0);
8596 break;
8598 default:
8599 goto undef;
8601 break;
8603 case 12:
8604 /* load/store multiple */
8605 rn = (insn >> 8) & 0x7;
8606 addr = load_reg(s, rn);
8607 for (i = 0; i < 8; i++) {
8608 if (insn & (1 << i)) {
8609 if (insn & (1 << 11)) {
8610 /* load */
8611 tmp = gen_ld32(addr, IS_USER(s));
8612 store_reg(s, i, tmp);
8613 } else {
8614 /* store */
8615 tmp = load_reg(s, i);
8616 gen_st32(tmp, addr, IS_USER(s));
8618 /* advance to the next address */
8619 tcg_gen_addi_i32(addr, addr, 4);
8622 /* Base register writeback. */
8623 if ((insn & (1 << rn)) == 0) {
8624 store_reg(s, rn, addr);
8625 } else {
8626 dead_tmp(addr);
8628 break;
8630 case 13:
8631 /* conditional branch or swi */
8632 cond = (insn >> 8) & 0xf;
8633 if (cond == 0xe)
8634 goto undef;
8636 if (cond == 0xf) {
8637 /* swi */
8638 gen_set_condexec(s);
8639 gen_set_pc_im(s->pc);
8640 s->is_jmp = DISAS_SWI;
8641 break;
8643 /* generate a conditional jump to next instruction */
8644 s->condlabel = gen_new_label();
8645 gen_test_cc(cond ^ 1, s->condlabel);
8646 s->condjmp = 1;
8647 gen_movl_T1_reg(s, 15);
8649 /* jump to the offset */
8650 val = (uint32_t)s->pc + 2;
8651 offset = ((int32_t)insn << 24) >> 24;
8652 val += offset << 1;
8653 gen_jmp(s, val);
8654 break;
8656 case 14:
8657 if (insn & (1 << 11)) {
8658 if (disas_thumb2_insn(env, s, insn))
8659 goto undef32;
8660 break;
8662 /* unconditional branch */
8663 val = (uint32_t)s->pc;
8664 offset = ((int32_t)insn << 21) >> 21;
8665 val += (offset << 1) + 2;
8666 gen_jmp(s, val);
8667 break;
8669 case 15:
8670 if (disas_thumb2_insn(env, s, insn))
8671 goto undef32;
8672 break;
8674 return;
8675 undef32:
8676 gen_set_condexec(s);
8677 gen_set_pc_im(s->pc - 4);
8678 gen_exception(EXCP_UDEF);
8679 s->is_jmp = DISAS_JUMP;
8680 return;
8681 illegal_op:
8682 undef:
8683 gen_set_condexec(s);
8684 gen_set_pc_im(s->pc - 2);
8685 gen_exception(EXCP_UDEF);
8686 s->is_jmp = DISAS_JUMP;
8687 }
8689 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8690 basic block 'tb'. If search_pc is TRUE, also generate PC
8691 information for each intermediate instruction. */
8692 static inline void gen_intermediate_code_internal(CPUState *env,
8693 TranslationBlock *tb,
8694 int search_pc)
8695 {
8696 DisasContext dc1, *dc = &dc1;
8697 CPUBreakpoint *bp;
8698 uint16_t *gen_opc_end;
8699 int j, lj;
8700 target_ulong pc_start;
8701 uint32_t next_page_start;
8702 int num_insns;
8703 int max_insns;
8705 /* generate intermediate code */
8706 num_temps = 0;
8707 memset(temps, 0, sizeof(temps));
8709 pc_start = tb->pc;
8711 dc->tb = tb;
8713 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8715 dc->is_jmp = DISAS_NEXT;
8716 dc->pc = pc_start;
8717 dc->singlestep_enabled = env->singlestep_enabled;
8718 dc->condjmp = 0;
8719 dc->thumb = env->thumb;
8720 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8721 dc->condexec_cond = env->condexec_bits >> 4;
8722 #if !defined(CONFIG_USER_ONLY)
8723 if (IS_M(env)) {
8724 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8725 } else {
8726 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8728 #endif
8729 cpu_F0s = tcg_temp_new_i32();
8730 cpu_F1s = tcg_temp_new_i32();
8731 cpu_F0d = tcg_temp_new_i64();
8732 cpu_F1d = tcg_temp_new_i64();
8733 cpu_V0 = cpu_F0d;
8734 cpu_V1 = cpu_F1d;
8735 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8736 cpu_M0 = tcg_temp_new_i64();
8737 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8738 lj = -1;
8739 num_insns = 0;
8740 max_insns = tb->cflags & CF_COUNT_MASK;
8741 if (max_insns == 0)
8742 max_insns = CF_COUNT_MASK;
8744 gen_icount_start();
8745 /* Reset the conditional execution bits immediately. This avoids
8746 complications trying to do it at the end of the block. */
8747 if (env->condexec_bits)
8749 TCGv tmp = new_tmp();
8750 tcg_gen_movi_i32(tmp, 0);
8751 store_cpu_field(tmp, condexec_bits);
8753 do {
8754 #ifdef CONFIG_USER_ONLY
8755 /* Intercept jump to the magic kernel page. */
8756 if (dc->pc >= 0xffff0000) {
8757 /* We always get here via a jump, so we know we are not in a
8758 conditional execution block. */
8759 gen_exception(EXCP_KERNEL_TRAP);
8760 dc->is_jmp = DISAS_UPDATE;
8761 break;
8763 #else
8764 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8765 /* We always get here via a jump, so we know we are not in a
8766 conditional execution block. */
8767 gen_exception(EXCP_EXCEPTION_EXIT);
8768 dc->is_jmp = DISAS_UPDATE;
8769 break;
8771 #endif
8773 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8774 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
8775 if (bp->pc == dc->pc) {
8776 gen_set_condexec(dc);
8777 gen_set_pc_im(dc->pc);
8778 gen_exception(EXCP_DEBUG);
8779 dc->is_jmp = DISAS_JUMP;
8780 /* Advance PC so that clearing the breakpoint will
8781 invalidate this TB. */
8782 dc->pc += 2;
8783 goto done_generating;
8784 break;
8788 if (search_pc) {
8789 j = gen_opc_ptr - gen_opc_buf;
8790 if (lj < j) {
8791 lj++;
8792 while (lj < j)
8793 gen_opc_instr_start[lj++] = 0;
8795 gen_opc_pc[lj] = dc->pc;
8796 gen_opc_instr_start[lj] = 1;
8797 gen_opc_icount[lj] = num_insns;
8800 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8801 gen_io_start();
8803 if (env->thumb) {
8804 disas_thumb_insn(env, dc);
8805 if (dc->condexec_mask) {
8806 dc->condexec_cond = (dc->condexec_cond & 0xe)
8807 | ((dc->condexec_mask >> 4) & 1);
8808 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
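/* One IT-block step: shift the next then/else bit out of the mask
   and into the low bit of the condition. */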
8809 if (dc->condexec_mask == 0) {
8810 dc->condexec_cond = 0;
8813 } else {
8814 disas_arm_insn(env, dc);
8816 if (num_temps) {
8817 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8818 num_temps = 0;
8821 if (dc->condjmp && !dc->is_jmp) {
8822 gen_set_label(dc->condlabel);
8823 dc->condjmp = 0;
8825 /* Translation stops when a conditional branch is encountered.
8826 * Otherwise the subsequent code could get translated several times.
8827 * Also stop translation when a page boundary is reached. This
8828 * ensures prefetch aborts occur at the right place. */
8829 num_insns ++;
8830 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8831 !env->singlestep_enabled &&
8832 !singlestep &&
8833 dc->pc < next_page_start &&
8834 num_insns < max_insns);
8836 if (tb->cflags & CF_LAST_IO) {
8837 if (dc->condjmp) {
8838 /* FIXME: This can theoretically happen with self-modifying
8839 code. */
8840 cpu_abort(env, "IO on conditional branch instruction");
8842 gen_io_end();
8845 /* At this stage dc->condjmp will only be set when the skipped
8846 instruction was a conditional branch or trap, and the PC has
8847 already been written. */
8848 if (unlikely(env->singlestep_enabled)) {
8849 /* Make sure the pc is updated, and raise a debug exception. */
8850 if (dc->condjmp) {
8851 gen_set_condexec(dc);
8852 if (dc->is_jmp == DISAS_SWI) {
8853 gen_exception(EXCP_SWI);
8854 } else {
8855 gen_exception(EXCP_DEBUG);
8857 gen_set_label(dc->condlabel);
8859 if (dc->condjmp || !dc->is_jmp) {
8860 gen_set_pc_im(dc->pc);
8861 dc->condjmp = 0;
8863 gen_set_condexec(dc);
8864 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8865 gen_exception(EXCP_SWI);
8866 } else {
8867 /* FIXME: Single stepping a WFI insn will not halt
8868 the CPU. */
8869 gen_exception(EXCP_DEBUG);
8871 } else {
8872 /* While branches must always occur at the end of an IT block,
8873 there are a few other things that can cause us to terminate
8874 the TB in the middle of an IT block:
8875 - Exception generating instructions (bkpt, swi, undefined).
8876 - Page boundaries.
8877 - Hardware watchpoints.
8878 Hardware breakpoints have already been handled and skip this code.
8879 */
8880 gen_set_condexec(dc);
8881 switch(dc->is_jmp) {
8882 case DISAS_NEXT:
8883 gen_goto_tb(dc, 1, dc->pc);
8884 break;
8885 default:
8886 case DISAS_JUMP:
8887 case DISAS_UPDATE:
8888 /* indicate that the hash table must be used to find the next TB */
8889 tcg_gen_exit_tb(0);
8890 break;
8891 case DISAS_TB_JUMP:
8892 /* nothing more to generate */
8893 break;
8894 case DISAS_WFI:
8895 gen_helper_wfi();
8896 break;
8897 case DISAS_SWI:
8898 gen_exception(EXCP_SWI);
8899 break;
8901 if (dc->condjmp) {
8902 gen_set_label(dc->condlabel);
8903 gen_set_condexec(dc);
8904 gen_goto_tb(dc, 1, dc->pc);
8905 dc->condjmp = 0;
8909 done_generating:
8910 gen_icount_end(tb, num_insns);
8911 *gen_opc_ptr = INDEX_op_end;
8913 #ifdef DEBUG_DISAS
8914 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8915 qemu_log("----------------\n");
8916 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8917 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8918 qemu_log("\n");
8920 #endif
8921 if (search_pc) {
8922 j = gen_opc_ptr - gen_opc_buf;
8923 lj++;
8924 while (lj <= j)
8925 gen_opc_instr_start[lj++] = 0;
8926 } else {
8927 tb->size = dc->pc - pc_start;
8928 tb->icount = num_insns;
8929 }
8930 }
8932 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8933 {
8934 gen_intermediate_code_internal(env, tb, 0);
8935 }
8937 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8938 {
8939 gen_intermediate_code_internal(env, tb, 1);
8940 }
8942 static const char *cpu_mode_names[16] = {
8943 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8944 "???", "???", "???", "und", "???", "???", "???", "sys"
8947 void cpu_dump_state(CPUState *env, FILE *f,
8948 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8949 int flags)
8950 {
8951 int i;
8952 #if 0
8953 union {
8954 uint32_t i;
8955 float s;
8956 } s0, s1;
8957 CPU_DoubleU d;
8958 /* ??? This assumes float64 and double have the same layout.
8959 Oh well, it's only debug dumps. */
8960 union {
8961 float64 f64;
8962 double d;
8963 } d0;
8964 #endif
8965 uint32_t psr;
8967 for(i=0;i<16;i++) {
8968 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8969 if ((i % 4) == 3)
8970 cpu_fprintf(f, "\n");
8971 else
8972 cpu_fprintf(f, " ");
8974 psr = cpsr_read(env);
8975 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8976 psr,
8977 psr & (1 << 31) ? 'N' : '-',
8978 psr & (1 << 30) ? 'Z' : '-',
8979 psr & (1 << 29) ? 'C' : '-',
8980 psr & (1 << 28) ? 'V' : '-',
8981 psr & CPSR_T ? 'T' : 'A',
8982 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8984 #if 0
8985 for (i = 0; i < 16; i++) {
8986 d.d = env->vfp.regs[i];
8987 s0.i = d.l.lower;
8988 s1.i = d.l.upper;
8989 d0.f64 = d.d;
8990 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8991 i * 2, (int)s0.i, s0.s,
8992 i * 2 + 1, (int)s1.i, s1.s,
8993 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8994 d0.d);
8996 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8997 #endif
8998 }
9000 void gen_pc_load(CPUState *env, TranslationBlock *tb,
9001 unsigned long searched_pc, int pc_pos, void *puc)
9002 {
9003 env->regs[15] = gen_opc_pc[pc_pos];
9004 }