Convert NEON VZIP/VUZP/VTRN helper functions to pure TCG
[qemu/navara.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30 #include "tcg-op.h"
31 #include "qemu-log.h"
33 #include "helpers.h"
34 #define GEN_HELPER 1
35 #include "helpers.h"
37 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38 #define ENABLE_ARCH_5J 0
39 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
40 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
41 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
42 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
44 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
46 /* internal defines */
47 typedef struct DisasContext {
48 target_ulong pc;
49 int is_jmp;
50 /* Nonzero if this instruction has been conditionally skipped. */
51 int condjmp;
52 /* The label that will be jumped to when the instruction is skipped. */
53 int condlabel;
54 /* Thumb-2 conditional execution bits. */
55 int condexec_mask;
56 int condexec_cond;
57 struct TranslationBlock *tb;
58 int singlestep_enabled;
59 int thumb;
60 #if !defined(CONFIG_USER_ONLY)
61 int user;
62 #endif
63 } DisasContext;
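/* A DisasContext carries the per-instruction decoder state for one
   translation block.  Roughly how the main translation loop uses it
   (a sketch, not the actual code):

       DisasContext dc;
       dc.pc = tb->pc;
       do {
           disas_arm_insn(env, &dc);   // decodes one insn, advances dc.pc
       } while (dc.is_jmp == DISAS_NEXT);
*/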
65 #if defined(CONFIG_USER_ONLY)
66 #define IS_USER(s) 1
67 #else
68 #define IS_USER(s) (s->user)
69 #endif
71 /* These instructions trap after executing, so defer them until after the
72 conditional execution state has been updated. */
73 #define DISAS_WFI 4
74 #define DISAS_SWI 5
76 static TCGv_ptr cpu_env;
77 /* We reuse the same 64-bit temporaries for efficiency. */
78 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
79 static TCGv_i32 cpu_R[16];
81 /* FIXME: These should be removed. */
82 static TCGv cpu_T[2];
83 static TCGv cpu_F0s, cpu_F1s;
84 static TCGv_i64 cpu_F0d, cpu_F1d;
86 #define ICOUNT_TEMP cpu_T[0]
87 #include "gen-icount.h"
89 static const char *regnames[] =
90 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
91 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
93 /* initialize TCG globals. */
94 void arm_translate_init(void)
96 int i;
98 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
100 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
101 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
103 for (i = 0; i < 16; i++) {
104 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
105 offsetof(CPUState, regs[i]),
106 regnames[i]);
109 #define GEN_HELPER 2
110 #include "helpers.h"
113 static int num_temps;
115 /* Allocate a temporary variable. */
116 static TCGv_i32 new_tmp(void)
118 num_temps++;
119 return tcg_temp_new_i32();
122 /* Release a temporary variable. */
123 static void dead_tmp(TCGv tmp)
125 tcg_temp_free(tmp);
126 num_temps--;
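/* Every new_tmp() must be balanced by a dead_tmp() (or by a consumer
   such as store_reg/store_cpu_field) before the end of the insn;
   num_temps exists so the translator can check that no temporaries
   leak.  Typical pattern (sketch):

       TCGv tmp = new_tmp();
       tcg_gen_movi_i32(tmp, 0);
       ...
       dead_tmp(tmp);
*/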
129 static inline TCGv load_cpu_offset(int offset)
131 TCGv tmp = new_tmp();
132 tcg_gen_ld_i32(tmp, cpu_env, offset);
133 return tmp;
136 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
138 static inline void store_cpu_offset(TCGv var, int offset)
140 tcg_gen_st_i32(var, cpu_env, offset);
141 dead_tmp(var);
144 #define store_cpu_field(var, name) \
145 store_cpu_offset(var, offsetof(CPUState, name))
147 /* Set a variable to the value of a CPU register. */
148 static void load_reg_var(DisasContext *s, TCGv var, int reg)
150 if (reg == 15) {
151 uint32_t addr;
152 /* normally, since we have already updated PC, we need only add one insn */
153 if (s->thumb)
154 addr = (long)s->pc + 2;
155 else
156 addr = (long)s->pc + 4;
157 tcg_gen_movi_i32(var, addr);
158 } else {
159 tcg_gen_mov_i32(var, cpu_R[reg]);
163 /* Create a new temporary and set it to the value of a CPU register. */
164 static inline TCGv load_reg(DisasContext *s, int reg)
166 TCGv tmp = new_tmp();
167 load_reg_var(s, tmp, reg);
168 return tmp;
171 /* Set a CPU register. The source must be a temporary and will be
172 marked as dead. */
173 static void store_reg(DisasContext *s, int reg, TCGv var)
175 if (reg == 15) {
176 tcg_gen_andi_i32(var, var, ~1);
177 s->is_jmp = DISAS_JUMP;
179 tcg_gen_mov_i32(cpu_R[reg], var);
180 dead_tmp(var);
184 /* Basic operations. */
185 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
186 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
187 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
189 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
190 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
191 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
192 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
194 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
195 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
196 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
197 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
198 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
200 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
203 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
204 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
205 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
206 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
208 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
209 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
211 /* Value extensions. */
212 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
213 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
214 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
215 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
217 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
218 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
220 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
222 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
223 /* Set NZCV flags from the high 4 bits of var. */
224 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
226 static void gen_exception(int excp)
228 TCGv tmp = new_tmp();
229 tcg_gen_movi_i32(tmp, excp);
230 gen_helper_exception(tmp);
231 dead_tmp(tmp);
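/* Dual signed 16x16->32 multiply (used by the SMUAD/SMUSD family):
   on return, a holds low16(a) * low16(b) and b holds
   high16(a) * high16(b), both as signed 32-bit products. */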
234 static void gen_smul_dual(TCGv a, TCGv b)
236 TCGv tmp1 = new_tmp();
237 TCGv tmp2 = new_tmp();
238 tcg_gen_ext16s_i32(tmp1, a);
239 tcg_gen_ext16s_i32(tmp2, b);
240 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
241 dead_tmp(tmp2);
242 tcg_gen_sari_i32(a, a, 16);
243 tcg_gen_sari_i32(b, b, 16);
244 tcg_gen_mul_i32(b, b, a);
245 tcg_gen_mov_i32(a, tmp1);
246 dead_tmp(tmp1);
249 /* Byteswap each halfword. */
250 static void gen_rev16(TCGv var)
252 TCGv tmp = new_tmp();
253 tcg_gen_shri_i32(tmp, var, 8);
254 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
255 tcg_gen_shli_i32(var, var, 8);
256 tcg_gen_andi_i32(var, var, 0xff00ff00);
257 tcg_gen_or_i32(var, var, tmp);
258 dead_tmp(tmp);
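/* Worked example: 0xAABBCCDD -> 0xBBAADDCC; each 16-bit halfword is
   byte-swapped independently. */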
261 /* Byteswap low halfword and sign extend. */
262 static void gen_revsh(TCGv var)
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_ext8s_i32(var, var);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
273 /* Unsigned bitfield extract. */
274 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
276 if (shift)
277 tcg_gen_shri_i32(var, var, shift);
278 tcg_gen_andi_i32(var, var, mask);
281 /* Signed bitfield extract. */
282 static void gen_sbfx(TCGv var, int shift, int width)
284 uint32_t signbit;
286 if (shift)
287 tcg_gen_sari_i32(var, var, shift);
288 if (shift + width < 32) {
289 signbit = 1u << (width - 1);
290 tcg_gen_andi_i32(var, var, (1u << width) - 1);
291 tcg_gen_xori_i32(var, var, signbit);
292 tcg_gen_subi_i32(var, var, signbit);
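/* The xor/sub pair above is the usual branch-free sign extension,
   (x ^ signbit) - signbit: e.g. for width == 8 the extracted byte
   0xFF becomes (0xFF ^ 0x80) - 0x80 == -1. */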
296 /* Bitfield insertion. Insert val into base. Clobbers base and val. */
297 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
299 tcg_gen_andi_i32(val, val, mask);
300 tcg_gen_shli_i32(val, val, shift);
301 tcg_gen_andi_i32(base, base, ~(mask << shift));
302 tcg_gen_or_i32(dest, base, val);
305 /* Round the top 32 bits of a 64-bit value. */
306 static void gen_roundqd(TCGv a, TCGv b)
308 tcg_gen_shri_i32(a, a, 31);
309 tcg_gen_add_i32(a, a, b);
312 /* FIXME: Most targets have native widening multiplication.
313 It would be good to use that instead of a full wide multiply. */
314 /* 32x32->64 multiply. Marks inputs as dead. */
315 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
317 TCGv_i64 tmp1 = tcg_temp_new_i64();
318 TCGv_i64 tmp2 = tcg_temp_new_i64();
320 tcg_gen_extu_i32_i64(tmp1, a);
321 dead_tmp(a);
322 tcg_gen_extu_i32_i64(tmp2, b);
323 dead_tmp(b);
324 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
325 return tmp1;
328 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
330 TCGv_i64 tmp1 = tcg_temp_new_i64();
331 TCGv_i64 tmp2 = tcg_temp_new_i64();
333 tcg_gen_ext_i32_i64(tmp1, a);
334 dead_tmp(a);
335 tcg_gen_ext_i32_i64(tmp2, b);
336 dead_tmp(b);
337 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
338 return tmp1;
341 /* Unsigned 32x32->64 multiply. */
342 static void gen_op_mull_T0_T1(void)
344 TCGv_i64 tmp1 = tcg_temp_new_i64();
345 TCGv_i64 tmp2 = tcg_temp_new_i64();
347 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
348 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
351 tcg_gen_shri_i64(tmp1, tmp1, 32);
352 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
355 /* Signed 32x32->64 multiply. */
356 static void gen_imull(TCGv a, TCGv b)
358 TCGv_i64 tmp1 = tcg_temp_new_i64();
359 TCGv_i64 tmp2 = tcg_temp_new_i64();
361 tcg_gen_ext_i32_i64(tmp1, a);
362 tcg_gen_ext_i32_i64(tmp2, b);
363 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
364 tcg_gen_trunc_i64_i32(a, tmp1);
365 tcg_gen_shri_i64(tmp1, tmp1, 32);
366 tcg_gen_trunc_i64_i32(b, tmp1);
369 /* Swap low and high halfwords. */
370 static void gen_swap_half(TCGv var)
372 TCGv tmp = new_tmp();
373 tcg_gen_shri_i32(tmp, var, 16);
374 tcg_gen_shli_i32(var, var, 16);
375 tcg_gen_or_i32(var, var, tmp);
376 dead_tmp(tmp);
379 /* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
380 tmp = (t0 ^ t1) & 0x8000;
381 t0 &= ~0x8000;
382 t1 &= ~0x8000;
383 t0 = (t0 + t1) ^ tmp;
386 static void gen_add16(TCGv t0, TCGv t1)
388 TCGv tmp = new_tmp();
389 tcg_gen_xor_i32(tmp, t0, t1);
390 tcg_gen_andi_i32(tmp, tmp, 0x8000);
391 tcg_gen_andi_i32(t0, t0, ~0x8000);
392 tcg_gen_andi_i32(t1, t1, ~0x8000);
393 tcg_gen_add_i32(t0, t0, t1);
394 tcg_gen_xor_i32(t0, t0, tmp);
395 dead_tmp(tmp);
396 dead_tmp(t1);
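/* Clearing bit 15 of both addends stops the low halfword's carry from
   reaching the high halfword; xoring the saved bit-15 values back in
   then restores the correct top bit of the low 16-bit sum. */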
399 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
401 /* Set CF to the top bit of var. */
402 static void gen_set_CF_bit31(TCGv var)
404 TCGv tmp = new_tmp();
405 tcg_gen_shri_i32(tmp, var, 31);
406 gen_set_CF(tmp);
407 dead_tmp(tmp);
410 /* Set N and Z flags from var. */
411 static inline void gen_logic_CC(TCGv var)
413 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
414 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
417 /* T0 += T1 + CF. */
418 static void gen_adc_T0_T1(void)
420 TCGv tmp;
421 gen_op_addl_T0_T1();
422 tmp = load_cpu_field(CF);
423 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
424 dead_tmp(tmp);
427 /* dest = T0 + T1 + CF. */
428 static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
430 TCGv tmp;
431 tcg_gen_add_i32(dest, t0, t1);
432 tmp = load_cpu_field(CF);
433 tcg_gen_add_i32(dest, dest, tmp);
434 dead_tmp(tmp);
437 /* dest = T0 - T1 + CF - 1. */
438 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
440 TCGv tmp;
441 tcg_gen_sub_i32(dest, t0, t1);
442 tmp = load_cpu_field(CF);
443 tcg_gen_add_i32(dest, dest, tmp);
444 tcg_gen_subi_i32(dest, dest, 1);
445 dead_tmp(tmp);
448 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
449 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
451 /* T0 &= ~T1. Clobbers T1. */
452 /* FIXME: Implement bic natively. */
453 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
455 TCGv tmp = new_tmp();
456 tcg_gen_not_i32(tmp, t1);
457 tcg_gen_and_i32(dest, t0, tmp);
458 dead_tmp(tmp);
460 static inline void gen_op_bicl_T0_T1(void)
462 gen_op_notl_T1();
463 gen_op_andl_T0_T1();
466 /* FIXME: Implement this natively. */
467 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
469 /* FIXME: Implement this natively. */
470 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
472 TCGv tmp;
474 if (i == 0)
475 return;
477 tmp = new_tmp();
478 tcg_gen_shri_i32(tmp, t1, i);
479 tcg_gen_shli_i32(t1, t1, 32 - i);
480 tcg_gen_or_i32(t0, t1, tmp);
481 dead_tmp(tmp);
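/* Rotate right by immediate built from two shifts and an OR; note
   that t1 is clobbered.  E.g. 0x80000001 ror 1 == 0xC0000000.  The
   i == 0 early-out matters because TCG leaves 32-bit shifts by 32
   undefined. */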
484 static void shifter_out_im(TCGv var, int shift)
486 TCGv tmp = new_tmp();
487 if (shift == 0) {
488 tcg_gen_andi_i32(tmp, var, 1);
489 } else {
490 tcg_gen_shri_i32(tmp, var, shift);
491 if (shift != 31)
492 tcg_gen_andi_i32(tmp, tmp, 1);
494 gen_set_CF(tmp);
495 dead_tmp(tmp);
498 /* Shift by immediate. Includes special handling for shift == 0. */
499 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
501 switch (shiftop) {
502 case 0: /* LSL */
503 if (shift != 0) {
504 if (flags)
505 shifter_out_im(var, 32 - shift);
506 tcg_gen_shli_i32(var, var, shift);
508 break;
509 case 1: /* LSR */
510 if (shift == 0) {
511 if (flags) {
512 tcg_gen_shri_i32(var, var, 31);
513 gen_set_CF(var);
515 tcg_gen_movi_i32(var, 0);
516 } else {
517 if (flags)
518 shifter_out_im(var, shift - 1);
519 tcg_gen_shri_i32(var, var, shift);
521 break;
522 case 2: /* ASR */
523 if (shift == 0)
524 shift = 32;
525 if (flags)
526 shifter_out_im(var, shift - 1);
527 if (shift == 32)
528 shift = 31;
529 tcg_gen_sari_i32(var, var, shift);
530 break;
531 case 3: /* ROR/RRX */
532 if (shift != 0) {
533 if (flags)
534 shifter_out_im(var, shift - 1);
535 tcg_gen_rori_i32(var, var, shift); break;
536 } else {
537 TCGv tmp = load_cpu_field(CF);
538 if (flags)
539 shifter_out_im(var, 0);
540 tcg_gen_shri_i32(var, var, 1);
541 tcg_gen_shli_i32(tmp, tmp, 31);
542 tcg_gen_or_i32(var, var, tmp);
543 dead_tmp(tmp);
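/* The shift == 0 special cases above mirror the ARM immediate shift
   encoding: LSR #0 means LSR #32, ASR #0 means ASR #32, and ROR #0
   means RRX (rotate right by one through the carry flag). */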
548 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
549 TCGv shift, int flags)
551 if (flags) {
552 switch (shiftop) {
553 case 0: gen_helper_shl_cc(var, var, shift); break;
554 case 1: gen_helper_shr_cc(var, var, shift); break;
555 case 2: gen_helper_sar_cc(var, var, shift); break;
556 case 3: gen_helper_ror_cc(var, var, shift); break;
558 } else {
559 switch (shiftop) {
560 case 0: gen_helper_shl(var, var, shift); break;
561 case 1: gen_helper_shr(var, var, shift); break;
562 case 2: gen_helper_sar(var, var, shift); break;
563 case 3: gen_helper_ror(var, var, shift); break;
566 dead_tmp(shift);
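/* Register-specified shifts go through helpers because the amount is
   only known at run time and ARM defines results for amounts >= 32,
   which raw TCG shift ops do not. */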
569 #define PAS_OP(pfx) \
570 switch (op2) { \
571 case 0: gen_pas_helper(glue(pfx,add16)); break; \
572 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
573 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
574 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
575 case 4: gen_pas_helper(glue(pfx,add8)); break; \
576 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
578 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
580 TCGv_ptr tmp;
582 switch (op1) {
583 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
584 case 1:
585 tmp = tcg_temp_new_ptr();
586 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
587 PAS_OP(s)
588 break;
589 case 5:
590 tmp = tcg_temp_new_ptr();
591 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
592 PAS_OP(u)
593 break;
594 #undef gen_pas_helper
595 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
596 case 2:
597 PAS_OP(q);
598 break;
599 case 3:
600 PAS_OP(sh);
601 break;
602 case 6:
603 PAS_OP(uq);
604 break;
605 case 7:
606 PAS_OP(uh);
607 break;
608 #undef gen_pas_helper
611 #undef PAS_OP
613 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
614 #define PAS_OP(pfx) \
615 switch (op2) { \
616 case 0: gen_pas_helper(glue(pfx,add8)); break; \
617 case 1: gen_pas_helper(glue(pfx,add16)); break; \
618 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
619 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
620 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
621 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
623 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
625 TCGv_ptr tmp;
627 switch (op1) {
628 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
629 case 0:
630 tmp = tcg_temp_new_ptr();
631 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
632 PAS_OP(s)
633 break;
634 case 4:
635 tmp = tcg_temp_new_ptr();
636 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
637 PAS_OP(u)
638 break;
639 #undef gen_pas_helper
640 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
641 case 1:
642 PAS_OP(q);
643 break;
644 case 2:
645 PAS_OP(sh);
646 break;
647 case 5:
648 PAS_OP(uq);
649 break;
650 case 6:
651 PAS_OP(uh);
652 break;
653 #undef gen_pas_helper
656 #undef PAS_OP
658 static void gen_test_cc(int cc, int label)
660 TCGv tmp;
661 TCGv tmp2;
662 int inv;
664 switch (cc) {
665 case 0: /* eq: Z */
666 tmp = load_cpu_field(ZF);
667 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
668 break;
669 case 1: /* ne: !Z */
670 tmp = load_cpu_field(ZF);
671 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
672 break;
673 case 2: /* cs: C */
674 tmp = load_cpu_field(CF);
675 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
676 break;
677 case 3: /* cc: !C */
678 tmp = load_cpu_field(CF);
679 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
680 break;
681 case 4: /* mi: N */
682 tmp = load_cpu_field(NF);
683 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
684 break;
685 case 5: /* pl: !N */
686 tmp = load_cpu_field(NF);
687 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
688 break;
689 case 6: /* vs: V */
690 tmp = load_cpu_field(VF);
691 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
692 break;
693 case 7: /* vc: !V */
694 tmp = load_cpu_field(VF);
695 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
696 break;
697 case 8: /* hi: C && !Z */
698 inv = gen_new_label();
699 tmp = load_cpu_field(CF);
700 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
701 dead_tmp(tmp);
702 tmp = load_cpu_field(ZF);
703 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
704 gen_set_label(inv);
705 break;
706 case 9: /* ls: !C || Z */
707 tmp = load_cpu_field(CF);
708 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
709 dead_tmp(tmp);
710 tmp = load_cpu_field(ZF);
711 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
712 break;
713 case 10: /* ge: N == V -> N ^ V == 0 */
714 tmp = load_cpu_field(VF);
715 tmp2 = load_cpu_field(NF);
716 tcg_gen_xor_i32(tmp, tmp, tmp2);
717 dead_tmp(tmp2);
718 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
719 break;
720 case 11: /* lt: N != V -> N ^ V != 0 */
721 tmp = load_cpu_field(VF);
722 tmp2 = load_cpu_field(NF);
723 tcg_gen_xor_i32(tmp, tmp, tmp2);
724 dead_tmp(tmp2);
725 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
726 break;
727 case 12: /* gt: !Z && N == V */
728 inv = gen_new_label();
729 tmp = load_cpu_field(ZF);
730 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
731 dead_tmp(tmp);
732 tmp = load_cpu_field(VF);
733 tmp2 = load_cpu_field(NF);
734 tcg_gen_xor_i32(tmp, tmp, tmp2);
735 dead_tmp(tmp2);
736 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
737 gen_set_label(inv);
738 break;
739 case 13: /* le: Z || N != V */
740 tmp = load_cpu_field(ZF);
741 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
742 dead_tmp(tmp);
743 tmp = load_cpu_field(VF);
744 tmp2 = load_cpu_field(NF);
745 tcg_gen_xor_i32(tmp, tmp, tmp2);
746 dead_tmp(tmp2);
747 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
748 break;
749 default:
750 fprintf(stderr, "Bad condition code 0x%x\n", cc);
751 abort();
753 dead_tmp(tmp);
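/* Flag storage convention relied on above: CF holds 0 or 1, NF and VF
   hold their flag in bit 31 (hence the LT/GE compares against 0), and
   ZF holds a value that is zero iff the Z flag is set. */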
756 static const uint8_t table_logic_cc[16] = {
757 1, /* and */
758 1, /* xor */
759 0, /* sub */
760 0, /* rsb */
761 0, /* add */
762 0, /* adc */
763 0, /* sbc */
764 0, /* rsc */
765 1, /* andl */
766 1, /* xorl */
767 0, /* cmp */
768 0, /* cmn */
769 1, /* orr */
770 1, /* mov */
771 1, /* bic */
772 1, /* mvn */
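/* A 1 entry marks a logical data-processing op whose flags are set
   with gen_logic_CC (N and Z only); the 0 entries are arithmetic ops,
   which use the *_cc helpers and also compute C and V. */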
775 /* Set PC and Thumb state from an immediate address. */
776 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
778 TCGv tmp;
780 s->is_jmp = DISAS_UPDATE;
781 if (s->thumb != (addr & 1)) {
782 tmp = new_tmp();
783 tcg_gen_movi_i32(tmp, addr & 1);
784 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
785 dead_tmp(tmp);
787 tcg_gen_movi_i32(cpu_R[15], addr & ~1);  /* addr is a host constant */
790 /* Set PC and Thumb state from var. var is marked as dead. */
791 static inline void gen_bx(DisasContext *s, TCGv var)
793 s->is_jmp = DISAS_UPDATE;
794 tcg_gen_andi_i32(cpu_R[15], var, ~1);
795 tcg_gen_andi_i32(var, var, 1);
796 store_cpu_field(var, thumb);
799 /* Variant of store_reg which uses branch&exchange logic when storing
800 to r15 in ARM architecture v7 and above. The source must be a temporary
801 and will be marked as dead. */
802 static inline void store_reg_bx(CPUState *env, DisasContext *s,
803 int reg, TCGv var)
805 if (reg == 15 && ENABLE_ARCH_7) {
806 gen_bx(s, var);
807 } else {
808 store_reg(s, reg, var);
812 static inline TCGv gen_ld8s(TCGv addr, int index)
814 TCGv tmp = new_tmp();
815 tcg_gen_qemu_ld8s(tmp, addr, index);
816 return tmp;
818 static inline TCGv gen_ld8u(TCGv addr, int index)
820 TCGv tmp = new_tmp();
821 tcg_gen_qemu_ld8u(tmp, addr, index);
822 return tmp;
824 static inline TCGv gen_ld16s(TCGv addr, int index)
826 TCGv tmp = new_tmp();
827 tcg_gen_qemu_ld16s(tmp, addr, index);
828 return tmp;
830 static inline TCGv gen_ld16u(TCGv addr, int index)
832 TCGv tmp = new_tmp();
833 tcg_gen_qemu_ld16u(tmp, addr, index);
834 return tmp;
836 static inline TCGv gen_ld32(TCGv addr, int index)
838 TCGv tmp = new_tmp();
839 tcg_gen_qemu_ld32u(tmp, addr, index);
840 return tmp;
842 static inline void gen_st8(TCGv val, TCGv addr, int index)
844 tcg_gen_qemu_st8(val, addr, index);
845 dead_tmp(val);
847 static inline void gen_st16(TCGv val, TCGv addr, int index)
849 tcg_gen_qemu_st16(val, addr, index);
850 dead_tmp(val);
852 static inline void gen_st32(TCGv val, TCGv addr, int index)
854 tcg_gen_qemu_st32(val, addr, index);
855 dead_tmp(val);
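/* The index argument of these load/store wrappers is the MMU index
   (IS_USER(s)), so guest accesses are translated with the right
   privilege.  Usage sketch:

       tmp = gen_ld32(addr, IS_USER(s));   // returns a fresh temp
       gen_st32(tmp, addr, IS_USER(s));    // stores and frees tmp
*/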
858 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
860 load_reg_var(s, cpu_T[0], reg);
863 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
865 load_reg_var(s, cpu_T[1], reg);
868 static inline void gen_set_pc_im(uint32_t val)
870 tcg_gen_movi_i32(cpu_R[15], val);
873 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
875 TCGv tmp;
876 if (reg == 15) {
877 tmp = new_tmp();
878 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
879 } else {
880 tmp = cpu_T[t];
882 tcg_gen_mov_i32(cpu_R[reg], tmp);
883 if (reg == 15) {
884 dead_tmp(tmp);
885 s->is_jmp = DISAS_JUMP;
889 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
891 gen_movl_reg_TN(s, reg, 0);
894 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
896 gen_movl_reg_TN(s, reg, 1);
899 /* Force a TB lookup after an instruction that changes the CPU state. */
900 static inline void gen_lookup_tb(DisasContext *s)
902 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
903 s->is_jmp = DISAS_UPDATE;
906 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
907 TCGv var)
909 int val, rm, shift, shiftop;
910 TCGv offset;
912 if (!(insn & (1 << 25))) {
913 /* immediate */
914 val = insn & 0xfff;
915 if (!(insn & (1 << 23)))
916 val = -val;
917 if (val != 0)
918 tcg_gen_addi_i32(var, var, val);
919 } else {
920 /* shift/register */
921 rm = (insn) & 0xf;
922 shift = (insn >> 7) & 0x1f;
923 shiftop = (insn >> 5) & 3;
924 offset = load_reg(s, rm);
925 gen_arm_shift_im(offset, shiftop, shift, 0);
926 if (!(insn & (1 << 23)))
927 tcg_gen_sub_i32(var, var, offset);
928 else
929 tcg_gen_add_i32(var, var, offset);
930 dead_tmp(offset);
934 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
935 int extra, TCGv var)
937 int val, rm;
938 TCGv offset;
940 if (insn & (1 << 22)) {
941 /* immediate */
942 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
943 if (!(insn & (1 << 23)))
944 val = -val;
945 val += extra;
946 if (val != 0)
947 tcg_gen_addi_i32(var, var, val);
948 } else {
949 /* register */
950 if (extra)
951 tcg_gen_addi_i32(var, var, extra);
952 rm = (insn) & 0xf;
953 offset = load_reg(s, rm);
954 if (!(insn & (1 << 23)))
955 tcg_gen_sub_i32(var, var, offset);
956 else
957 tcg_gen_add_i32(var, var, offset);
958 dead_tmp(offset);
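/* In both address generators above, bit 23 of the instruction is the
   U (up/down) bit selecting add vs. subtract; the immediate vs.
   register form is chosen by bit 25 in gen_add_data_offset and by
   bit 22 in gen_add_datah_offset. */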
962 #define VFP_OP2(name) \
963 static inline void gen_vfp_##name(int dp) \
965 if (dp) \
966 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
967 else \
968 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
971 VFP_OP2(add)
972 VFP_OP2(sub)
973 VFP_OP2(mul)
974 VFP_OP2(div)
976 #undef VFP_OP2
978 static inline void gen_vfp_abs(int dp)
980 if (dp)
981 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
982 else
983 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
986 static inline void gen_vfp_neg(int dp)
988 if (dp)
989 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
990 else
991 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
994 static inline void gen_vfp_sqrt(int dp)
996 if (dp)
997 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
998 else
999 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1002 static inline void gen_vfp_cmp(int dp)
1004 if (dp)
1005 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1006 else
1007 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1010 static inline void gen_vfp_cmpe(int dp)
1012 if (dp)
1013 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1014 else
1015 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1018 static inline void gen_vfp_F1_ld0(int dp)
1020 if (dp)
1021 tcg_gen_movi_i64(cpu_F1d, 0);
1022 else
1023 tcg_gen_movi_i32(cpu_F1s, 0);
1026 static inline void gen_vfp_uito(int dp)
1028 if (dp)
1029 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1030 else
1031 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1034 static inline void gen_vfp_sito(int dp)
1036 if (dp)
1037 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
1038 else
1039 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
1042 static inline void gen_vfp_toui(int dp)
1044 if (dp)
1045 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1046 else
1047 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1050 static inline void gen_vfp_touiz(int dp)
1052 if (dp)
1053 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1054 else
1055 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1058 static inline void gen_vfp_tosi(int dp)
1060 if (dp)
1061 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1062 else
1063 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1066 static inline void gen_vfp_tosiz(int dp)
1068 if (dp)
1069 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
1070 else
1071 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1074 #define VFP_GEN_FIX(name) \
1075 static inline void gen_vfp_##name(int dp, int shift) \
1077 if (dp) \
1078 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1079 else \
1080 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
1082 VFP_GEN_FIX(tosh)
1083 VFP_GEN_FIX(tosl)
1084 VFP_GEN_FIX(touh)
1085 VFP_GEN_FIX(toul)
1086 VFP_GEN_FIX(shto)
1087 VFP_GEN_FIX(slto)
1088 VFP_GEN_FIX(uhto)
1089 VFP_GEN_FIX(ulto)
1090 #undef VFP_GEN_FIX
1092 static inline void gen_vfp_ld(DisasContext *s, int dp)
1094 if (dp)
1095 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
1096 else
1097 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
1100 static inline void gen_vfp_st(DisasContext *s, int dp)
1102 if (dp)
1103 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
1104 else
1105 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
1108 static inline long
1109 vfp_reg_offset (int dp, int reg)
1111 if (dp)
1112 return offsetof(CPUARMState, vfp.regs[reg]);
1113 else if (reg & 1) {
1114 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1115 + offsetof(CPU_DoubleU, l.upper);
1116 } else {
1117 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1118 + offsetof(CPU_DoubleU, l.lower);
1122 /* Return the offset of a 32-bit piece of a NEON register.
1123 zero is the least significant end of the register. */
1124 static inline long
1125 neon_reg_offset (int reg, int n)
1127 int sreg;
1128 sreg = reg * 2 + n;
1129 return vfp_reg_offset(0, sreg);
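/* Each NEON doubleword register d<n> aliases two 32-bit VFP singles,
   so sreg = reg * 2 + n reuses vfp_reg_offset(); n == 0 addresses the
   least significant half. */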
1132 /* FIXME: Remove these. */
1133 #define neon_T0 cpu_T[0]
1134 #define neon_T1 cpu_T[1]
1135 #define NEON_GET_REG(T, reg, n) \
1136 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1137 #define NEON_SET_REG(T, reg, n) \
1138 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1140 static TCGv neon_load_reg(int reg, int pass)
1142 TCGv tmp = new_tmp();
1143 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1144 return tmp;
1147 static void neon_store_reg(int reg, int pass, TCGv var)
1149 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1150 dead_tmp(var);
1153 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1155 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1158 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1160 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1163 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1164 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1165 #define tcg_gen_st_f32 tcg_gen_st_i32
1166 #define tcg_gen_st_f64 tcg_gen_st_i64
1168 static inline void gen_mov_F0_vreg(int dp, int reg)
1170 if (dp)
1171 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1172 else
1173 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1176 static inline void gen_mov_F1_vreg(int dp, int reg)
1178 if (dp)
1179 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1180 else
1181 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1184 static inline void gen_mov_vreg_F0(int dp, int reg)
1186 if (dp)
1187 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1188 else
1189 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1192 #define ARM_CP_RW_BIT (1 << 20)
1194 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1196 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1199 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1201 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1204 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1206 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1209 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1211 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1214 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1216 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1219 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1221 iwmmxt_store_reg(cpu_M0, rn);
1224 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1226 iwmmxt_load_reg(cpu_M0, rn);
1229 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1231 iwmmxt_load_reg(cpu_V1, rn);
1232 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1235 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1237 iwmmxt_load_reg(cpu_V1, rn);
1238 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1241 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1243 iwmmxt_load_reg(cpu_V1, rn);
1244 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1247 #define IWMMXT_OP(name) \
1248 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1250 iwmmxt_load_reg(cpu_V1, rn); \
1251 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1254 #define IWMMXT_OP_ENV(name) \
1255 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1257 iwmmxt_load_reg(cpu_V1, rn); \
1258 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1261 #define IWMMXT_OP_ENV_SIZE(name) \
1262 IWMMXT_OP_ENV(name##b) \
1263 IWMMXT_OP_ENV(name##w) \
1264 IWMMXT_OP_ENV(name##l)
1266 #define IWMMXT_OP_ENV1(name) \
1267 static inline void gen_op_iwmmxt_##name##_M0(void) \
1269 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1272 IWMMXT_OP(maddsq)
1273 IWMMXT_OP(madduq)
1274 IWMMXT_OP(sadb)
1275 IWMMXT_OP(sadw)
1276 IWMMXT_OP(mulslw)
1277 IWMMXT_OP(mulshw)
1278 IWMMXT_OP(mululw)
1279 IWMMXT_OP(muluhw)
1280 IWMMXT_OP(macsw)
1281 IWMMXT_OP(macuw)
1283 IWMMXT_OP_ENV_SIZE(unpackl)
1284 IWMMXT_OP_ENV_SIZE(unpackh)
1286 IWMMXT_OP_ENV1(unpacklub)
1287 IWMMXT_OP_ENV1(unpackluw)
1288 IWMMXT_OP_ENV1(unpacklul)
1289 IWMMXT_OP_ENV1(unpackhub)
1290 IWMMXT_OP_ENV1(unpackhuw)
1291 IWMMXT_OP_ENV1(unpackhul)
1292 IWMMXT_OP_ENV1(unpacklsb)
1293 IWMMXT_OP_ENV1(unpacklsw)
1294 IWMMXT_OP_ENV1(unpacklsl)
1295 IWMMXT_OP_ENV1(unpackhsb)
1296 IWMMXT_OP_ENV1(unpackhsw)
1297 IWMMXT_OP_ENV1(unpackhsl)
1299 IWMMXT_OP_ENV_SIZE(cmpeq)
1300 IWMMXT_OP_ENV_SIZE(cmpgtu)
1301 IWMMXT_OP_ENV_SIZE(cmpgts)
1303 IWMMXT_OP_ENV_SIZE(mins)
1304 IWMMXT_OP_ENV_SIZE(minu)
1305 IWMMXT_OP_ENV_SIZE(maxs)
1306 IWMMXT_OP_ENV_SIZE(maxu)
1308 IWMMXT_OP_ENV_SIZE(subn)
1309 IWMMXT_OP_ENV_SIZE(addn)
1310 IWMMXT_OP_ENV_SIZE(subu)
1311 IWMMXT_OP_ENV_SIZE(addu)
1312 IWMMXT_OP_ENV_SIZE(subs)
1313 IWMMXT_OP_ENV_SIZE(adds)
1315 IWMMXT_OP_ENV(avgb0)
1316 IWMMXT_OP_ENV(avgb1)
1317 IWMMXT_OP_ENV(avgw0)
1318 IWMMXT_OP_ENV(avgw1)
1320 IWMMXT_OP(msadb)
1322 IWMMXT_OP_ENV(packuw)
1323 IWMMXT_OP_ENV(packul)
1324 IWMMXT_OP_ENV(packuq)
1325 IWMMXT_OP_ENV(packsw)
1326 IWMMXT_OP_ENV(packsl)
1327 IWMMXT_OP_ENV(packsq)
1329 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1331 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1334 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1336 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1339 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1341 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1344 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1346 iwmmxt_load_reg(cpu_V1, rn);
1347 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1350 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1352 TCGv tmp = tcg_const_i32(shift);
1353 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1356 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1358 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1359 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1360 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1363 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1365 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1366 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1367 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1370 static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1372 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1373 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1374 if (mask != ~0u)
1375 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1378 static void gen_op_iwmmxt_set_mup(void)
1380 TCGv tmp;
1381 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1382 tcg_gen_ori_i32(tmp, tmp, 2);
1383 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1386 static void gen_op_iwmmxt_set_cup(void)
1388 TCGv tmp;
1389 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1390 tcg_gen_ori_i32(tmp, tmp, 1);
1391 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1394 static void gen_op_iwmmxt_setpsr_nz(void)
1396 TCGv tmp = new_tmp();
1397 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1398 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1401 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1403 iwmmxt_load_reg(cpu_V1, rn);
1404 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1405 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1409 static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1411 iwmmxt_load_reg(cpu_V0, rn);
1412 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1413 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1414 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1417 static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1419 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
1420 iwmmxt_store_reg(cpu_V0, rn);
1423 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1425 int rd;
1426 uint32_t offset;
1428 rd = (insn >> 16) & 0xf;
1429 gen_movl_T1_reg(s, rd);
1431 offset = (insn & 0xff) << ((insn >> 7) & 2);
1432 if (insn & (1 << 24)) {
1433 /* Pre indexed */
1434 if (insn & (1 << 23))
1435 gen_op_addl_T1_im(offset);
1436 else
1437 gen_op_addl_T1_im(-offset);
1439 if (insn & (1 << 21))
1440 gen_movl_reg_T1(s, rd);
1441 } else if (insn & (1 << 21)) {
1442 /* Post indexed */
1443 if (insn & (1 << 23))
1444 gen_op_movl_T0_im(offset);
1445 else
1446 gen_op_movl_T0_im(-offset);
1447 gen_op_addl_T0_T1();
1448 gen_movl_reg_T0(s, rd);
1449 } else if (!(insn & (1 << 23)))
1450 return 1;
1451 return 0;
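/* Decode above: bit 24 (P) selects pre- vs. post-indexed, bit 23 (U)
   add vs. subtract, bit 21 (W) writeback.  P == 0 with W == 0 is the
   unindexed form, which is only valid with U == 1, hence the final
   check. */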
1454 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1456 int rd = (insn >> 0) & 0xf;
1458 if (insn & (1 << 8)) {
1459 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1460 return 1;
1461 else
1462 gen_op_iwmmxt_movl_T0_wCx(rd);
1463 } else
1464 gen_iwmmxt_movl_T0_T1_wRn(rd);
1466 gen_op_movl_T1_im(mask);
1467 gen_op_andl_T0_T1();
1468 return 0;
1471 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
1472    (i.e. an undefined instruction). */
1473 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1475 int rd, wrd;
1476 int rdhi, rdlo, rd0, rd1, i;
1477 TCGv tmp;
1479 if ((insn & 0x0e000e00) == 0x0c000000) {
1480 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1481 wrd = insn & 0xf;
1482 rdlo = (insn >> 12) & 0xf;
1483 rdhi = (insn >> 16) & 0xf;
1484 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1485 gen_iwmmxt_movl_T0_T1_wRn(wrd);
1486 gen_movl_reg_T0(s, rdlo);
1487 gen_movl_reg_T1(s, rdhi);
1488 } else { /* TMCRR */
1489 gen_movl_T0_reg(s, rdlo);
1490 gen_movl_T1_reg(s, rdhi);
1491 gen_iwmmxt_movl_wRn_T0_T1(wrd);
1492 gen_op_iwmmxt_set_mup();
1494 return 0;
1497 wrd = (insn >> 12) & 0xf;
1498 if (gen_iwmmxt_address(s, insn))
1499 return 1;
1500 if (insn & ARM_CP_RW_BIT) {
1501 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1502 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1503 tcg_gen_mov_i32(cpu_T[0], tmp);
1504 dead_tmp(tmp);
1505 gen_op_iwmmxt_movl_wCx_T0(wrd);
1506 } else {
1507 i = 1;
1508 if (insn & (1 << 8)) {
1509 if (insn & (1 << 22)) { /* WLDRD */
1510 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1511 i = 0;
1512 } else { /* WLDRW wRd */
1513 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1515 } else {
1516 if (insn & (1 << 22)) { /* WLDRH */
1517 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1518 } else { /* WLDRB */
1519 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1522 if (i) {
1523 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1524 dead_tmp(tmp);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1528 } else {
1529 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1530 gen_op_iwmmxt_movl_T0_wCx(wrd);
1531 tmp = new_tmp();
1532 tcg_gen_mov_i32(tmp, cpu_T[0]);
1533 gen_st32(tmp, cpu_T[1], IS_USER(s));
1534 } else {
1535 gen_op_iwmmxt_movq_M0_wRn(wrd);
1536 tmp = new_tmp();
1537 if (insn & (1 << 8)) {
1538 if (insn & (1 << 22)) { /* WSTRD */
1539 dead_tmp(tmp);
1540 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1541 } else { /* WSTRW wRd */
1542 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1543 gen_st32(tmp, cpu_T[1], IS_USER(s));
1545 } else {
1546 if (insn & (1 << 22)) { /* WSTRH */
1547 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1548 gen_st16(tmp, cpu_T[1], IS_USER(s));
1549 } else { /* WSTRB */
1550 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1551 gen_st8(tmp, cpu_T[1], IS_USER(s));
1556 return 0;
1559 if ((insn & 0x0f000000) != 0x0e000000)
1560 return 1;
1562 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1563 case 0x000: /* WOR */
1564 wrd = (insn >> 12) & 0xf;
1565 rd0 = (insn >> 0) & 0xf;
1566 rd1 = (insn >> 16) & 0xf;
1567 gen_op_iwmmxt_movq_M0_wRn(rd0);
1568 gen_op_iwmmxt_orq_M0_wRn(rd1);
1569 gen_op_iwmmxt_setpsr_nz();
1570 gen_op_iwmmxt_movq_wRn_M0(wrd);
1571 gen_op_iwmmxt_set_mup();
1572 gen_op_iwmmxt_set_cup();
1573 break;
1574 case 0x011: /* TMCR */
1575 if (insn & 0xf)
1576 return 1;
1577 rd = (insn >> 12) & 0xf;
1578 wrd = (insn >> 16) & 0xf;
1579 switch (wrd) {
1580 case ARM_IWMMXT_wCID:
1581 case ARM_IWMMXT_wCASF:
1582 break;
1583 case ARM_IWMMXT_wCon:
1584 gen_op_iwmmxt_set_cup();
1585 /* Fall through. */
1586 case ARM_IWMMXT_wCSSF:
1587 gen_op_iwmmxt_movl_T0_wCx(wrd);
1588 gen_movl_T1_reg(s, rd);
1589 gen_op_bicl_T0_T1();
1590 gen_op_iwmmxt_movl_wCx_T0(wrd);
1591 break;
1592 case ARM_IWMMXT_wCGR0:
1593 case ARM_IWMMXT_wCGR1:
1594 case ARM_IWMMXT_wCGR2:
1595 case ARM_IWMMXT_wCGR3:
1596 gen_op_iwmmxt_set_cup();
1597 gen_movl_reg_T0(s, rd);
1598 gen_op_iwmmxt_movl_wCx_T0(wrd);
1599 break;
1600 default:
1601 return 1;
1603 break;
1604 case 0x100: /* WXOR */
1605 wrd = (insn >> 12) & 0xf;
1606 rd0 = (insn >> 0) & 0xf;
1607 rd1 = (insn >> 16) & 0xf;
1608 gen_op_iwmmxt_movq_M0_wRn(rd0);
1609 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1610 gen_op_iwmmxt_setpsr_nz();
1611 gen_op_iwmmxt_movq_wRn_M0(wrd);
1612 gen_op_iwmmxt_set_mup();
1613 gen_op_iwmmxt_set_cup();
1614 break;
1615 case 0x111: /* TMRC */
1616 if (insn & 0xf)
1617 return 1;
1618 rd = (insn >> 12) & 0xf;
1619 wrd = (insn >> 16) & 0xf;
1620 gen_op_iwmmxt_movl_T0_wCx(wrd);
1621 gen_movl_reg_T0(s, rd);
1622 break;
1623 case 0x300: /* WANDN */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
1628 tcg_gen_not_i64(cpu_M0, cpu_M0);  /* bitwise complement: WANDN is wRn & ~wRm */
1629 gen_op_iwmmxt_andq_M0_wRn(rd1);
1630 gen_op_iwmmxt_setpsr_nz();
1631 gen_op_iwmmxt_movq_wRn_M0(wrd);
1632 gen_op_iwmmxt_set_mup();
1633 gen_op_iwmmxt_set_cup();
1634 break;
1635 case 0x200: /* WAND */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 0) & 0xf;
1638 rd1 = (insn >> 16) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 gen_op_iwmmxt_andq_M0_wRn(rd1);
1641 gen_op_iwmmxt_setpsr_nz();
1642 gen_op_iwmmxt_movq_wRn_M0(wrd);
1643 gen_op_iwmmxt_set_mup();
1644 gen_op_iwmmxt_set_cup();
1645 break;
1646 case 0x810: case 0xa10: /* WMADD */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 0) & 0xf;
1649 rd1 = (insn >> 16) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 if (insn & (1 << 21))
1652 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1653 else
1654 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 break;
1658 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 16) & 0xf;
1661 rd1 = (insn >> 0) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
1663 switch ((insn >> 22) & 3) {
1664 case 0:
1665 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1666 break;
1667 case 1:
1668 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1669 break;
1670 case 2:
1671 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1672 break;
1673 case 3:
1674 return 1;
1676 gen_op_iwmmxt_movq_wRn_M0(wrd);
1677 gen_op_iwmmxt_set_mup();
1678 gen_op_iwmmxt_set_cup();
1679 break;
1680 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1681 wrd = (insn >> 12) & 0xf;
1682 rd0 = (insn >> 16) & 0xf;
1683 rd1 = (insn >> 0) & 0xf;
1684 gen_op_iwmmxt_movq_M0_wRn(rd0);
1685 switch ((insn >> 22) & 3) {
1686 case 0:
1687 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1688 break;
1689 case 1:
1690 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1691 break;
1692 case 2:
1693 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1694 break;
1695 case 3:
1696 return 1;
1698 gen_op_iwmmxt_movq_wRn_M0(wrd);
1699 gen_op_iwmmxt_set_mup();
1700 gen_op_iwmmxt_set_cup();
1701 break;
1702 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1703 wrd = (insn >> 12) & 0xf;
1704 rd0 = (insn >> 16) & 0xf;
1705 rd1 = (insn >> 0) & 0xf;
1706 gen_op_iwmmxt_movq_M0_wRn(rd0);
1707 if (insn & (1 << 22))
1708 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1709 else
1710 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1711 if (!(insn & (1 << 20)))
1712 gen_op_iwmmxt_addl_M0_wRn(wrd);
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 break;
1716 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1717 wrd = (insn >> 12) & 0xf;
1718 rd0 = (insn >> 16) & 0xf;
1719 rd1 = (insn >> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0);
1721 if (insn & (1 << 21)) {
1722 if (insn & (1 << 20))
1723 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1726 } else {
1727 if (insn & (1 << 20))
1728 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1729 else
1730 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1732 gen_op_iwmmxt_movq_wRn_M0(wrd);
1733 gen_op_iwmmxt_set_mup();
1734 break;
1735 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1736 wrd = (insn >> 12) & 0xf;
1737 rd0 = (insn >> 16) & 0xf;
1738 rd1 = (insn >> 0) & 0xf;
1739 gen_op_iwmmxt_movq_M0_wRn(rd0);
1740 if (insn & (1 << 21))
1741 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1742 else
1743 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1744 if (!(insn & (1 << 20))) {
1745 iwmmxt_load_reg(cpu_V1, wrd);
1746 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1748 gen_op_iwmmxt_movq_wRn_M0(wrd);
1749 gen_op_iwmmxt_set_mup();
1750 break;
1751 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 rd1 = (insn >> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0);
1756 switch ((insn >> 22) & 3) {
1757 case 0:
1758 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1759 break;
1760 case 1:
1761 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1762 break;
1763 case 2:
1764 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1765 break;
1766 case 3:
1767 return 1;
1769 gen_op_iwmmxt_movq_wRn_M0(wrd);
1770 gen_op_iwmmxt_set_mup();
1771 gen_op_iwmmxt_set_cup();
1772 break;
1773 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1774 wrd = (insn >> 12) & 0xf;
1775 rd0 = (insn >> 16) & 0xf;
1776 rd1 = (insn >> 0) & 0xf;
1777 gen_op_iwmmxt_movq_M0_wRn(rd0);
1778 if (insn & (1 << 22)) {
1779 if (insn & (1 << 20))
1780 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1781 else
1782 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1783 } else {
1784 if (insn & (1 << 20))
1785 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1786 else
1787 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1789 gen_op_iwmmxt_movq_wRn_M0(wrd);
1790 gen_op_iwmmxt_set_mup();
1791 gen_op_iwmmxt_set_cup();
1792 break;
1793 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1794 wrd = (insn >> 12) & 0xf;
1795 rd0 = (insn >> 16) & 0xf;
1796 rd1 = (insn >> 0) & 0xf;
1797 gen_op_iwmmxt_movq_M0_wRn(rd0);
1798 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1799 gen_op_movl_T1_im(7);
1800 gen_op_andl_T0_T1();
1801 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 break;
1805 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1806 rd = (insn >> 12) & 0xf;
1807 wrd = (insn >> 16) & 0xf;
1808 gen_movl_T0_reg(s, rd);
1809 gen_op_iwmmxt_movq_M0_wRn(wrd);
1810 switch ((insn >> 6) & 3) {
1811 case 0:
1812 gen_op_movl_T1_im(0xff);
1813 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1814 break;
1815 case 1:
1816 gen_op_movl_T1_im(0xffff);
1817 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1818 break;
1819 case 2:
1820 gen_op_movl_T1_im(0xffffffff);
1821 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1822 break;
1823 case 3:
1824 return 1;
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
1832 if (rd == 15)
1833 return 1;
1834 gen_op_iwmmxt_movq_M0_wRn(wrd);
1835 switch ((insn >> 22) & 3) {
1836 case 0:
1837 if (insn & 8)
1838 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1839 else {
1840 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
1842 break;
1843 case 1:
1844 if (insn & 8)
1845 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1846 else {
1847 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
1849 break;
1850 case 2:
1851 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
1852 break;
1853 case 3:
1854 return 1;
1856 gen_movl_reg_T0(s, rd);
1857 break;
1858 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1859 if ((insn & 0x000ff008) != 0x0003f000)
1860 return 1;
1861 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1862 switch ((insn >> 22) & 3) {
1863 case 0:
1864 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1865 break;
1866 case 1:
1867 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1868 break;
1869 case 2:
1870 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1871 break;
1872 case 3:
1873 return 1;
1875 gen_op_shll_T1_im(28);
1876 gen_set_nzcv(cpu_T[1]);
1877 break;
1878 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1879 rd = (insn >> 12) & 0xf;
1880 wrd = (insn >> 16) & 0xf;
1881 gen_movl_T0_reg(s, rd);
1882 switch ((insn >> 6) & 3) {
1883 case 0:
1884 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
1885 break;
1886 case 1:
1887 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
1888 break;
1889 case 2:
1890 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
1891 break;
1892 case 3:
1893 return 1;
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 break;
1898 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1899 if ((insn & 0x000ff00f) != 0x0003f000)
1900 return 1;
1901 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1902 switch ((insn >> 22) & 3) {
1903 case 0:
1904 for (i = 0; i < 7; i ++) {
1905 gen_op_shll_T1_im(4);
1906 gen_op_andl_T0_T1();
1908 break;
1909 case 1:
1910 for (i = 0; i < 3; i ++) {
1911 gen_op_shll_T1_im(8);
1912 gen_op_andl_T0_T1();
1914 break;
1915 case 2:
1916 gen_op_shll_T1_im(16);
1917 gen_op_andl_T0_T1();
1918 break;
1919 case 3:
1920 return 1;
1922 gen_set_nzcv(cpu_T[0]);
1923 break;
1924 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1925 wrd = (insn >> 12) & 0xf;
1926 rd0 = (insn >> 16) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
1928 switch ((insn >> 22) & 3) {
1929 case 0:
1930 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1931 break;
1932 case 1:
1933 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1934 break;
1935 case 2:
1936 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1937 break;
1938 case 3:
1939 return 1;
1941 gen_op_iwmmxt_movq_wRn_M0(wrd);
1942 gen_op_iwmmxt_set_mup();
1943 break;
1944 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1945 if ((insn & 0x000ff00f) != 0x0003f000)
1946 return 1;
1947 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1948 switch ((insn >> 22) & 3) {
1949 case 0:
1950 for (i = 0; i < 7; i ++) {
1951 gen_op_shll_T1_im(4);
1952 gen_op_orl_T0_T1();
1954 break;
1955 case 1:
1956 for (i = 0; i < 3; i ++) {
1957 gen_op_shll_T1_im(8);
1958 gen_op_orl_T0_T1();
1960 break;
1961 case 2:
1962 gen_op_shll_T1_im(16);
1963 gen_op_orl_T0_T1();
1964 break;
1965 case 3:
1966 return 1;
1968 gen_set_nzcv(cpu_T[0]);
1969 break;
1970 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1971 rd = (insn >> 12) & 0xf;
1972 rd0 = (insn >> 16) & 0xf;
1973 if ((insn & 0xf) != 0)
1974 return 1;
1975 gen_op_iwmmxt_movq_M0_wRn(rd0);
1976 switch ((insn >> 22) & 3) {
1977 case 0:
1978 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
1979 break;
1980 case 1:
1981 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
1982 break;
1983 case 2:
1984 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
1985 break;
1986 case 3:
1987 return 1;
1989 gen_movl_reg_T0(s, rd);
1990 break;
1991 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1992 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1993 wrd = (insn >> 12) & 0xf;
1994 rd0 = (insn >> 16) & 0xf;
1995 rd1 = (insn >> 0) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0);
1997 switch ((insn >> 22) & 3) {
1998 case 0:
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2003 break;
2004 case 1:
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2009 break;
2010 case 2:
2011 if (insn & (1 << 21))
2012 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2013 else
2014 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2015 break;
2016 case 3:
2017 return 1;
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2024 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2029 case 0:
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_unpacklsb_M0();
2032 else
2033 gen_op_iwmmxt_unpacklub_M0();
2034 break;
2035 case 1:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_unpacklsw_M0();
2038 else
2039 gen_op_iwmmxt_unpackluw_M0();
2040 break;
2041 case 2:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_unpacklsl_M0();
2044 else
2045 gen_op_iwmmxt_unpacklul_M0();
2046 break;
2047 case 3:
2048 return 1;
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2055 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2060 case 0:
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpackhsb_M0();
2063 else
2064 gen_op_iwmmxt_unpackhub_M0();
2065 break;
2066 case 1:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpackhsw_M0();
2069 else
2070 gen_op_iwmmxt_unpackhuw_M0();
2071 break;
2072 case 2:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpackhsl_M0();
2075 else
2076 gen_op_iwmmxt_unpackhul_M0();
2077 break;
2078 case 3:
2079 return 1;
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2086 case 0x214: case 0x614: case 0xa14: case 0xe14:
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 gen_op_iwmmxt_movq_M0_wRn(rd0);
2090 if (gen_iwmmxt_shift(insn, 0xff))
2091 return 1;
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 return 1;
2095 case 1:
2096 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2097 break;
2098 case 2:
2099 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2100 break;
2101 case 3:
2102 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2103 break;
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2110 case 0x014: case 0x414: case 0x814: case 0xc14:
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 if (gen_iwmmxt_shift(insn, 0xff))
2115 return 1;
2116 switch ((insn >> 22) & 3) {
2117 case 0:
2118 return 1;
2119 case 1:
2120 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2121 break;
2122 case 2:
2123 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2124 break;
2125 case 3:
2126 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2127 break;
2129 gen_op_iwmmxt_movq_wRn_M0(wrd);
2130 gen_op_iwmmxt_set_mup();
2131 gen_op_iwmmxt_set_cup();
2132 break;
2133 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2134 case 0x114: case 0x514: case 0x914: case 0xd14:
2135 wrd = (insn >> 12) & 0xf;
2136 rd0 = (insn >> 16) & 0xf;
2137 gen_op_iwmmxt_movq_M0_wRn(rd0);
2138 if (gen_iwmmxt_shift(insn, 0xff))
2139 return 1;
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 return 1;
2143 case 1:
2144 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2145 break;
2146 case 2:
2147 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2148 break;
2149 case 3:
2150 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2151 break;
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
2157 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2158 case 0x314: case 0x714: case 0xb14: case 0xf14:
2159 wrd = (insn >> 12) & 0xf;
2160 rd0 = (insn >> 16) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 switch ((insn >> 22) & 3) {
2163 case 0:
2164 return 1;
2165 case 1:
2166 if (gen_iwmmxt_shift(insn, 0xf))
2167 return 1;
2168 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2169 break;
2170 case 2:
2171 if (gen_iwmmxt_shift(insn, 0x1f))
2172 return 1;
2173 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2174 break;
2175 case 3:
2176 if (gen_iwmmxt_shift(insn, 0x3f))
2177 return 1;
2178 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2179 break;
2181 gen_op_iwmmxt_movq_wRn_M0(wrd);
2182 gen_op_iwmmxt_set_mup();
2183 gen_op_iwmmxt_set_cup();
2184 break;
2185 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2186 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2187 wrd = (insn >> 12) & 0xf;
2188 rd0 = (insn >> 16) & 0xf;
2189 rd1 = (insn >> 0) & 0xf;
2190 gen_op_iwmmxt_movq_M0_wRn(rd0);
2191 switch ((insn >> 22) & 3) {
2192 case 0:
2193 if (insn & (1 << 21))
2194 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2195 else
2196 gen_op_iwmmxt_minub_M0_wRn(rd1);
2197 break;
2198 case 1:
2199 if (insn & (1 << 21))
2200 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2201 else
2202 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2203 break;
2204 case 2:
2205 if (insn & (1 << 21))
2206 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2207 else
2208 gen_op_iwmmxt_minul_M0_wRn(rd1);
2209 break;
2210 case 3:
2211 return 1;
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 break;
2216 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2217 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 22) & 3) {
2223 case 0:
2224 if (insn & (1 << 21))
2225 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2228 break;
2229 case 1:
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2232 else
2233 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2234 break;
2235 case 2:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2240 break;
2241 case 3:
2242 return 1;
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2246 break;
2247 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2248 case 0x402: case 0x502: case 0x602: case 0x702:
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
2253 gen_op_movl_T0_im((insn >> 20) & 3);
2254 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2255 gen_op_iwmmxt_movq_wRn_M0(wrd);
2256 gen_op_iwmmxt_set_mup();
2257 break;
2258 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2259 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2260 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2261 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 rd1 = (insn >> 0) & 0xf;
2265 gen_op_iwmmxt_movq_M0_wRn(rd0);
2266 switch ((insn >> 20) & 0xf) {
2267 case 0x0:
2268 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2269 break;
2270 case 0x1:
2271 gen_op_iwmmxt_subub_M0_wRn(rd1);
2272 break;
2273 case 0x3:
2274 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2275 break;
2276 case 0x4:
2277 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2278 break;
2279 case 0x5:
2280 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2281 break;
2282 case 0x7:
2283 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2284 break;
2285 case 0x8:
2286 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2287 break;
2288 case 0x9:
2289 gen_op_iwmmxt_subul_M0_wRn(rd1);
2290 break;
2291 case 0xb:
2292 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2293 break;
2294 default:
2295 return 1;
2297 gen_op_iwmmxt_movq_wRn_M0(wrd);
2298 gen_op_iwmmxt_set_mup();
2299 gen_op_iwmmxt_set_cup();
2300 break;
2301 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2302 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2303 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2304 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2305 wrd = (insn >> 12) & 0xf;
2306 rd0 = (insn >> 16) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
2308 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2309 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2310 gen_op_iwmmxt_movq_wRn_M0(wrd);
2311 gen_op_iwmmxt_set_mup();
2312 gen_op_iwmmxt_set_cup();
2313 break;
2314 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2315 case 0x418: case 0x518: case 0x618: case 0x718:
2316 case 0x818: case 0x918: case 0xa18: case 0xb18:
2317 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 rd1 = (insn >> 0) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0);
2322 switch ((insn >> 20) & 0xf) {
2323 case 0x0:
2324 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2325 break;
2326 case 0x1:
2327 gen_op_iwmmxt_addub_M0_wRn(rd1);
2328 break;
2329 case 0x3:
2330 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2331 break;
2332 case 0x4:
2333 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2334 break;
2335 case 0x5:
2336 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2337 break;
2338 case 0x7:
2339 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2340 break;
2341 case 0x8:
2342 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2343 break;
2344 case 0x9:
2345 gen_op_iwmmxt_addul_M0_wRn(rd1);
2346 break;
2347 case 0xb:
2348 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2349 break;
2350 default:
2351 return 1;
2353 gen_op_iwmmxt_movq_wRn_M0(wrd);
2354 gen_op_iwmmxt_set_mup();
2355 gen_op_iwmmxt_set_cup();
2356 break;
2357 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2358 case 0x408: case 0x508: case 0x608: case 0x708:
2359 case 0x808: case 0x908: case 0xa08: case 0xb08:
2360 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2361 wrd = (insn >> 12) & 0xf;
2362 rd0 = (insn >> 16) & 0xf;
2363 rd1 = (insn >> 0) & 0xf;
2364 gen_op_iwmmxt_movq_M0_wRn(rd0);
2365 if (!(insn & (1 << 20)))
2366 return 1;
2367 switch ((insn >> 22) & 3) {
2368 case 0:
2369 return 1;
2370 case 1:
2371 if (insn & (1 << 21))
2372 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2373 else
2374 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2375 break;
2376 case 2:
2377 if (insn & (1 << 21))
2378 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2379 else
2380 gen_op_iwmmxt_packul_M0_wRn(rd1);
2381 break;
2382 case 3:
2383 if (insn & (1 << 21))
2384 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2385 else
2386 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2387 break;
2389 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2392 break;
2393 case 0x201: case 0x203: case 0x205: case 0x207:
2394 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2395 case 0x211: case 0x213: case 0x215: case 0x217:
2396 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2397 wrd = (insn >> 5) & 0xf;
2398 rd0 = (insn >> 12) & 0xf;
2399 rd1 = (insn >> 0) & 0xf;
2400 if (rd0 == 0xf || rd1 == 0xf)
2401 return 1;
2402 gen_op_iwmmxt_movq_M0_wRn(wrd);
2403 switch ((insn >> 16) & 0xf) {
2404 case 0x0: /* TMIA */
2405 gen_movl_T0_reg(s, rd0);
2406 gen_movl_T1_reg(s, rd1);
2407 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2408 break;
2409 case 0x8: /* TMIAPH */
2410 gen_movl_T0_reg(s, rd0);
2411 gen_movl_T1_reg(s, rd1);
2412 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2413 break;
2414 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2415 gen_movl_T1_reg(s, rd0);
2416 if (insn & (1 << 16))
2417 gen_op_shrl_T1_im(16);
2418 gen_op_movl_T0_T1();
2419 gen_movl_T1_reg(s, rd1);
2420 if (insn & (1 << 17))
2421 gen_op_shrl_T1_im(16);
2422 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2423 break;
2424 default:
2425 return 1;
2427 gen_op_iwmmxt_movq_wRn_M0(wrd);
2428 gen_op_iwmmxt_set_mup();
2429 break;
2430 default:
2431 return 1;
2434 return 0;
2437 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2438 (i.e. an undefined instruction). */
2439 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2441 int acc, rd0, rd1, rdhi, rdlo;
2443 if ((insn & 0x0ff00f10) == 0x0e200010) {
2444 /* Multiply with Internal Accumulate Format */
2445 rd0 = (insn >> 12) & 0xf;
2446 rd1 = insn & 0xf;
2447 acc = (insn >> 5) & 7;
2449 if (acc != 0)
2450 return 1;
2452 switch ((insn >> 16) & 0xf) {
2453 case 0x0: /* MIA */
2454 gen_movl_T0_reg(s, rd0);
2455 gen_movl_T1_reg(s, rd1);
2456 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2457 break;
2458 case 0x8: /* MIAPH */
2459 gen_movl_T0_reg(s, rd0);
2460 gen_movl_T1_reg(s, rd1);
2461 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2462 break;
2463 case 0xc: /* MIABB */
2464 case 0xd: /* MIABT */
2465 case 0xe: /* MIATB */
2466 case 0xf: /* MIATT */
2467 gen_movl_T1_reg(s, rd0);
2468 if (insn & (1 << 16))
2469 gen_op_shrl_T1_im(16);
2470 gen_op_movl_T0_T1();
2471 gen_movl_T1_reg(s, rd1);
2472 if (insn & (1 << 17))
2473 gen_op_shrl_T1_im(16);
2474 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2475 break;
2476 default:
2477 return 1;
2480 gen_op_iwmmxt_movq_wRn_M0(acc);
2481 return 0;
2484 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2485 /* Internal Accumulator Access Format */
2486 rdhi = (insn >> 16) & 0xf;
2487 rdlo = (insn >> 12) & 0xf;
2488 acc = insn & 7;
2490 if (acc != 0)
2491 return 1;
2493 if (insn & ARM_CP_RW_BIT) { /* MRA */
2494 gen_iwmmxt_movl_T0_T1_wRn(acc);
2495 gen_movl_reg_T0(s, rdlo);
2496 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2497 gen_op_andl_T0_T1();
2498 gen_movl_reg_T0(s, rdhi);
2499 } else { /* MAR */
2500 gen_movl_T0_reg(s, rdlo);
2501 gen_movl_T1_reg(s, rdhi);
2502 gen_iwmmxt_movl_wRn_T0_T1(acc);
2504 return 0;
2507 return 1;
2510 /* Disassemble system coprocessor instruction. Return nonzero if
2511 the instruction is not defined. */
2512 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2514 TCGv tmp;
2515 uint32_t rd = (insn >> 12) & 0xf;
2516 uint32_t cp = (insn >> 8) & 0xf;
2517 if (IS_USER(s)) {
2518 return 1;
2521 if (insn & ARM_CP_RW_BIT) {
2522 if (!env->cp[cp].cp_read)
2523 return 1;
2524 gen_set_pc_im(s->pc);
2525 tmp = new_tmp();
2526 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2527 store_reg(s, rd, tmp);
2528 } else {
2529 if (!env->cp[cp].cp_write)
2530 return 1;
2531 gen_set_pc_im(s->pc);
2532 tmp = load_reg(s, rd);
2533 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2534 dead_tmp(tmp);
2536 return 0;
2539 static int cp15_user_ok(uint32_t insn)
2541 int cpn = (insn >> 16) & 0xf;
2542 int cpm = insn & 0xf;
2543 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2545 if (cpn == 13 && cpm == 0) {
2546 /* TLS register. */
2547 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2548 return 1;
2550 if (cpn == 7) {
2551 /* ISB, DSB, DMB. */
2552 if ((cpm == 5 && op == 4)
2553 || (cpm == 10 && (op == 4 || op == 5)))
2554 return 1;
2556 return 0;
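/* Illustrative example (hypothetical encoding, not from the original
   source): MRC p15, 0, r0, c13, c0, 3 -- a read of the user-read-only
   TLS register -- decodes to cpn = 13, cpm = 0, op = 3 with the RW bit
   set, so cp15_user_ok() returns 1; the corresponding MCR (a write)
   from user mode returns 0 and the instruction is treated as
   undefined. */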
2559 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2560 the instruction is not defined. */
2561 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2563 uint32_t rd;
2564 TCGv tmp;
2566 /* M profile cores use memory mapped registers instead of cp15. */
2567 if (arm_feature(env, ARM_FEATURE_M) ||
2568 !arm_feature(env, ARM_FEATURE_CP15)) {
2569 return 1;
2572 if ((insn & (1 << 25)) == 0) {
2573 if (insn & (1 << 20)) {
2574 /* mrrc */
2575 return 1;
2577 /* mcrr. Used for block cache operations, so implement as no-op. */
2578 return 0;
2580 if ((insn & (1 << 4)) == 0) {
2581 /* cdp */
2582 return 1;
2584 if (IS_USER(s) && !cp15_user_ok(insn)) {
2585 return 1;
2587 if ((insn & 0x0fff0fff) == 0x0e070f90
2588 || (insn & 0x0fff0fff) == 0x0e070f58) {
2589 /* Wait for interrupt. */
2590 gen_set_pc_im(s->pc);
2591 s->is_jmp = DISAS_WFI;
2592 return 0;
2594 rd = (insn >> 12) & 0xf;
2595 if (insn & ARM_CP_RW_BIT) {
2596 tmp = new_tmp();
2597 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2598 /* If the destination register is r15 then the condition codes are set. */
2599 if (rd != 15)
2600 store_reg(s, rd, tmp);
2601 else
2602 dead_tmp(tmp);
2603 } else {
2604 tmp = load_reg(s, rd);
2605 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2606 dead_tmp(tmp);
2607 /* Normally we would always end the TB here, but Linux
2608 * arch/arm/mach-pxa/sleep.S expects two instructions following
2609 * an MMU enable to execute from cache. Imitate this behaviour. */
2610 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2611 (insn & 0x0fff0fff) != 0x0e010f10)
2612 gen_lookup_tb(s);
2614 return 0;
2617 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2618 #define VFP_SREG(insn, bigbit, smallbit) \
2619 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2620 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2621 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2622 reg = (((insn) >> (bigbit)) & 0x0f) \
2623 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2624 } else { \
2625 if (insn & (1 << (smallbit))) \
2626 return 1; \
2627 reg = ((insn) >> (bigbit)) & 0x0f; \
2628 }} while (0)
2630 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2631 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2632 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2633 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2634 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2635 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
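/* Worked example (illustrative): a single-precision register number is
   5 bits split across a 4-bit field plus one extra bit, so VFP_SREG_M
   evaluates to ((insn << 1) & 0x1e) | ((insn >> 5) & 1); e.g. a Vm
   field of 0b0110 with M = 1 selects s13.  For double-precision
   registers VFP_DREG only accepts the extra bit on VFP3, since VFP2
   has just 16 D registers. */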
2637 /* Move between integer and VFP cores. */
2638 static TCGv gen_vfp_mrs(void)
2640 TCGv tmp = new_tmp();
2641 tcg_gen_mov_i32(tmp, cpu_F0s);
2642 return tmp;
2645 static void gen_vfp_msr(TCGv tmp)
2647 tcg_gen_mov_i32(cpu_F0s, tmp);
2648 dead_tmp(tmp);
2651 static inline int
2652 vfp_enabled(CPUState * env)
2654 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
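/* Illustrative: bit 30 of FPEXC is the EN (enable) bit, so with
   FPEXC = 0x40000000 vfp_enabled() returns 1, while with FPEXC = 0 the
   decoder below rejects everything except FMXR/FMRX on a handful of
   control registers. */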
2657 static void gen_neon_dup_u8(TCGv var, int shift)
2659 TCGv tmp = new_tmp();
2660 if (shift)
2661 tcg_gen_shri_i32(var, var, shift);
2662 tcg_gen_ext8u_i32(var, var);
2663 tcg_gen_shli_i32(tmp, var, 8);
2664 tcg_gen_or_i32(var, var, tmp);
2665 tcg_gen_shli_i32(tmp, var, 16);
2666 tcg_gen_or_i32(var, var, tmp);
2667 dead_tmp(tmp);
2670 static void gen_neon_dup_low16(TCGv var)
2672 TCGv tmp = new_tmp();
2673 tcg_gen_ext16u_i32(var, var);
2674 tcg_gen_shli_i32(tmp, var, 16);
2675 tcg_gen_or_i32(var, var, tmp);
2676 dead_tmp(tmp);
2679 static void gen_neon_dup_high16(TCGv var)
2681 TCGv tmp = new_tmp();
2682 tcg_gen_andi_i32(var, var, 0xffff0000);
2683 tcg_gen_shri_i32(tmp, var, 16);
2684 tcg_gen_or_i32(var, var, tmp);
2685 dead_tmp(tmp);
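/* Worked example (illustrative values): with var = 0x1234abcd,
   gen_neon_dup_u8(var, 8)  -> 0xabababab (replicates the byte at
   bits [15:8]), gen_neon_dup_low16(var) -> 0xabcdabcd, and
   gen_neon_dup_high16(var) -> 0x12341234. */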
2688 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2689 (i.e. an undefined instruction). */
2690 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2692 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2693 int dp, veclen;
2694 TCGv tmp;
2695 TCGv tmp2;
2697 if (!arm_feature(env, ARM_FEATURE_VFP))
2698 return 1;
2700 if (!vfp_enabled(env)) {
2701 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2702 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2703 return 1;
2704 rn = (insn >> 16) & 0xf;
2705 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2706 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2707 return 1;
2709 dp = ((insn & 0xf00) == 0xb00);
2710 switch ((insn >> 24) & 0xf) {
2711 case 0xe:
2712 if (insn & (1 << 4)) {
2713 /* single register transfer */
2714 rd = (insn >> 12) & 0xf;
2715 if (dp) {
2716 int size;
2717 int pass;
2719 VFP_DREG_N(rn, insn);
2720 if (insn & 0xf)
2721 return 1;
2722 if (insn & 0x00c00060
2723 && !arm_feature(env, ARM_FEATURE_NEON))
2724 return 1;
2726 pass = (insn >> 21) & 1;
2727 if (insn & (1 << 22)) {
2728 size = 0;
2729 offset = ((insn >> 5) & 3) * 8;
2730 } else if (insn & (1 << 5)) {
2731 size = 1;
2732 offset = (insn & (1 << 6)) ? 16 : 0;
2733 } else {
2734 size = 2;
2735 offset = 0;
2737 if (insn & ARM_CP_RW_BIT) {
2738 /* vfp->arm */
2739 tmp = neon_load_reg(rn, pass);
2740 switch (size) {
2741 case 0:
2742 if (offset)
2743 tcg_gen_shri_i32(tmp, tmp, offset);
2744 if (insn & (1 << 23))
2745 gen_uxtb(tmp);
2746 else
2747 gen_sxtb(tmp);
2748 break;
2749 case 1:
2750 if (insn & (1 << 23)) {
2751 if (offset) {
2752 tcg_gen_shri_i32(tmp, tmp, 16);
2753 } else {
2754 gen_uxth(tmp);
2756 } else {
2757 if (offset) {
2758 tcg_gen_sari_i32(tmp, tmp, 16);
2759 } else {
2760 gen_sxth(tmp);
2763 break;
2764 case 2:
2765 break;
2767 store_reg(s, rd, tmp);
2768 } else {
2769 /* arm->vfp */
2770 tmp = load_reg(s, rd);
2771 if (insn & (1 << 23)) {
2772 /* VDUP */
2773 if (size == 0) {
2774 gen_neon_dup_u8(tmp, 0);
2775 } else if (size == 1) {
2776 gen_neon_dup_low16(tmp);
2778 for (n = 0; n <= pass * 2; n++) {
2779 tmp2 = new_tmp();
2780 tcg_gen_mov_i32(tmp2, tmp);
2781 neon_store_reg(rn, n, tmp2);
2783 neon_store_reg(rn, n, tmp);
2784 } else {
2785 /* VMOV */
2786 switch (size) {
2787 case 0:
2788 tmp2 = neon_load_reg(rn, pass);
2789 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2790 dead_tmp(tmp2);
2791 break;
2792 case 1:
2793 tmp2 = neon_load_reg(rn, pass);
2794 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2795 dead_tmp(tmp2);
2796 break;
2797 case 2:
2798 break;
2800 neon_store_reg(rn, pass, tmp);
2803 } else { /* !dp */
2804 if ((insn & 0x6f) != 0x00)
2805 return 1;
2806 rn = VFP_SREG_N(insn);
2807 if (insn & ARM_CP_RW_BIT) {
2808 /* vfp->arm */
2809 if (insn & (1 << 21)) {
2810 /* system register */
2811 rn >>= 1;
2813 switch (rn) {
2814 case ARM_VFP_FPSID:
2815 /* VFP2 allows access to FPSID from userspace.
2816 VFP3 restricts all id registers to privileged
2817 accesses. */
2818 if (IS_USER(s)
2819 && arm_feature(env, ARM_FEATURE_VFP3))
2820 return 1;
2821 tmp = load_cpu_field(vfp.xregs[rn]);
2822 break;
2823 case ARM_VFP_FPEXC:
2824 if (IS_USER(s))
2825 return 1;
2826 tmp = load_cpu_field(vfp.xregs[rn]);
2827 break;
2828 case ARM_VFP_FPINST:
2829 case ARM_VFP_FPINST2:
2830 /* Not present in VFP3. */
2831 if (IS_USER(s)
2832 || arm_feature(env, ARM_FEATURE_VFP3))
2833 return 1;
2834 tmp = load_cpu_field(vfp.xregs[rn]);
2835 break;
2836 case ARM_VFP_FPSCR:
2837 if (rd == 15) {
2838 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2839 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2840 } else {
2841 tmp = new_tmp();
2842 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2844 break;
2845 case ARM_VFP_MVFR0:
2846 case ARM_VFP_MVFR1:
2847 if (IS_USER(s)
2848 || !arm_feature(env, ARM_FEATURE_VFP3))
2849 return 1;
2850 tmp = load_cpu_field(vfp.xregs[rn]);
2851 break;
2852 default:
2853 return 1;
2855 } else {
2856 gen_mov_F0_vreg(0, rn);
2857 tmp = gen_vfp_mrs();
2859 if (rd == 15) {
2860 /* Set the 4 flag bits in the CPSR. */
2861 gen_set_nzcv(tmp);
2862 dead_tmp(tmp);
2863 } else {
2864 store_reg(s, rd, tmp);
2866 } else {
2867 /* arm->vfp */
2868 tmp = load_reg(s, rd);
2869 if (insn & (1 << 21)) {
2870 rn >>= 1;
2871 /* system register */
2872 switch (rn) {
2873 case ARM_VFP_FPSID:
2874 case ARM_VFP_MVFR0:
2875 case ARM_VFP_MVFR1:
2876 /* Writes are ignored. */
2877 break;
2878 case ARM_VFP_FPSCR:
2879 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2880 dead_tmp(tmp);
2881 gen_lookup_tb(s);
2882 break;
2883 case ARM_VFP_FPEXC:
2884 if (IS_USER(s))
2885 return 1;
2886 store_cpu_field(tmp, vfp.xregs[rn]);
2887 gen_lookup_tb(s);
2888 break;
2889 case ARM_VFP_FPINST:
2890 case ARM_VFP_FPINST2:
2891 store_cpu_field(tmp, vfp.xregs[rn]);
2892 break;
2893 default:
2894 return 1;
2896 } else {
2897 gen_vfp_msr(tmp);
2898 gen_mov_vreg_F0(0, rn);
2902 } else {
2903 /* data processing */
2904 /* The opcode is in bits 23, 21, 20 and 6. */
2905 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2906 if (dp) {
2907 if (op == 15) {
2908 /* rn is opcode */
2909 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2910 } else {
2911 /* rn is register number */
2912 VFP_DREG_N(rn, insn);
2915 if (op == 15 && (rn == 15 || rn > 17)) {
2916 /* Integer or single precision destination. */
2917 rd = VFP_SREG_D(insn);
2918 } else {
2919 VFP_DREG_D(rd, insn);
2922 if (op == 15 && (rn == 16 || rn == 17)) {
2923 /* Integer source. */
2924 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2925 } else {
2926 VFP_DREG_M(rm, insn);
2928 } else {
2929 rn = VFP_SREG_N(insn);
2930 if (op == 15 && rn == 15) {
2931 /* Double precision destination. */
2932 VFP_DREG_D(rd, insn);
2933 } else {
2934 rd = VFP_SREG_D(insn);
2936 rm = VFP_SREG_M(insn);
2939 veclen = env->vfp.vec_len;
2940 if (op == 15 && rn > 3)
2941 veclen = 0;
2943 /* Shut up compiler warnings. */
2944 delta_m = 0;
2945 delta_d = 0;
2946 bank_mask = 0;
2948 if (veclen > 0) {
2949 if (dp)
2950 bank_mask = 0xc;
2951 else
2952 bank_mask = 0x18;
2954 /* Figure out what type of vector operation this is. */
2955 if ((rd & bank_mask) == 0) {
2956 /* scalar */
2957 veclen = 0;
2958 } else {
2959 if (dp)
2960 delta_d = (env->vfp.vec_stride >> 1) + 1;
2961 else
2962 delta_d = env->vfp.vec_stride + 1;
2964 if ((rm & bank_mask) == 0) {
2965 /* mixed scalar/vector */
2966 delta_m = 0;
2967 } else {
2968 /* vector */
2969 delta_m = delta_d;
2974 /* Load the initial operands. */
2975 if (op == 15) {
2976 switch (rn) {
2977 case 16:
2978 case 17:
2979 /* Integer source */
2980 gen_mov_F0_vreg(0, rm);
2981 break;
2982 case 8:
2983 case 9:
2984 /* Compare */
2985 gen_mov_F0_vreg(dp, rd);
2986 gen_mov_F1_vreg(dp, rm);
2987 break;
2988 case 10:
2989 case 11:
2990 /* Compare with zero */
2991 gen_mov_F0_vreg(dp, rd);
2992 gen_vfp_F1_ld0(dp);
2993 break;
2994 case 20:
2995 case 21:
2996 case 22:
2997 case 23:
2998 case 28:
2999 case 29:
3000 case 30:
3001 case 31:
3002 /* Source and destination the same. */
3003 gen_mov_F0_vreg(dp, rd);
3004 break;
3005 default:
3006 /* One source operand. */
3007 gen_mov_F0_vreg(dp, rm);
3008 break;
3010 } else {
3011 /* Two source operands. */
3012 gen_mov_F0_vreg(dp, rn);
3013 gen_mov_F1_vreg(dp, rm);
3016 for (;;) {
3017 /* Perform the calculation. */
3018 switch (op) {
3019 case 0: /* mac: fd + (fn * fm) */
3020 gen_vfp_mul(dp);
3021 gen_mov_F1_vreg(dp, rd);
3022 gen_vfp_add(dp);
3023 break;
3024 case 1: /* nmac: fd - (fn * fm) */
3025 gen_vfp_mul(dp);
3026 gen_vfp_neg(dp);
3027 gen_mov_F1_vreg(dp, rd);
3028 gen_vfp_add(dp);
3029 break;
3030 case 2: /* msc: -fd + (fn * fm) */
3031 gen_vfp_mul(dp);
3032 gen_mov_F1_vreg(dp, rd);
3033 gen_vfp_sub(dp);
3034 break;
3035 case 3: /* nmsc: -fd - (fn * fm) */
3036 gen_vfp_mul(dp);
3037 gen_vfp_neg(dp);
3038 gen_mov_F1_vreg(dp, rd);
3039 gen_vfp_sub(dp);
3040 break;
3041 case 4: /* mul: fn * fm */
3042 gen_vfp_mul(dp);
3043 break;
3044 case 5: /* nmul: -(fn * fm) */
3045 gen_vfp_mul(dp);
3046 gen_vfp_neg(dp);
3047 break;
3048 case 6: /* add: fn + fm */
3049 gen_vfp_add(dp);
3050 break;
3051 case 7: /* sub: fn - fm */
3052 gen_vfp_sub(dp);
3053 break;
3054 case 8: /* div: fn / fm */
3055 gen_vfp_div(dp);
3056 break;
3057 case 14: /* fconst */
3058 if (!arm_feature(env, ARM_FEATURE_VFP3))
3059 return 1;
3061 n = (insn << 12) & 0x80000000;
3062 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3063 if (dp) {
3064 if (i & 0x40)
3065 i |= 0x3f80;
3066 else
3067 i |= 0x4000;
3068 n |= i << 16;
3069 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3070 } else {
3071 if (i & 0x40)
3072 i |= 0x780;
3073 else
3074 i |= 0x800;
3075 n |= i << 19;
3076 tcg_gen_movi_i32(cpu_F0s, n);
3078 break;
3079 case 15: /* extension space */
3080 switch (rn) {
3081 case 0: /* cpy */
3082 /* no-op */
3083 break;
3084 case 1: /* abs */
3085 gen_vfp_abs(dp);
3086 break;
3087 case 2: /* neg */
3088 gen_vfp_neg(dp);
3089 break;
3090 case 3: /* sqrt */
3091 gen_vfp_sqrt(dp);
3092 break;
3093 case 8: /* cmp */
3094 gen_vfp_cmp(dp);
3095 break;
3096 case 9: /* cmpe */
3097 gen_vfp_cmpe(dp);
3098 break;
3099 case 10: /* cmpz */
3100 gen_vfp_cmp(dp);
3101 break;
3102 case 11: /* cmpez */
3103 gen_vfp_F1_ld0(dp);
3104 gen_vfp_cmpe(dp);
3105 break;
3106 case 15: /* single<->double conversion */
3107 if (dp)
3108 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3109 else
3110 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3111 break;
3112 case 16: /* fuito */
3113 gen_vfp_uito(dp);
3114 break;
3115 case 17: /* fsito */
3116 gen_vfp_sito(dp);
3117 break;
3118 case 20: /* fshto */
3119 if (!arm_feature(env, ARM_FEATURE_VFP3))
3120 return 1;
3121 gen_vfp_shto(dp, 16 - rm);
3122 break;
3123 case 21: /* fslto */
3124 if (!arm_feature(env, ARM_FEATURE_VFP3))
3125 return 1;
3126 gen_vfp_slto(dp, 32 - rm);
3127 break;
3128 case 22: /* fuhto */
3129 if (!arm_feature(env, ARM_FEATURE_VFP3))
3130 return 1;
3131 gen_vfp_uhto(dp, 16 - rm);
3132 break;
3133 case 23: /* fulto */
3134 if (!arm_feature(env, ARM_FEATURE_VFP3))
3135 return 1;
3136 gen_vfp_ulto(dp, 32 - rm);
3137 break;
3138 case 24: /* ftoui */
3139 gen_vfp_toui(dp);
3140 break;
3141 case 25: /* ftouiz */
3142 gen_vfp_touiz(dp);
3143 break;
3144 case 26: /* ftosi */
3145 gen_vfp_tosi(dp);
3146 break;
3147 case 27: /* ftosiz */
3148 gen_vfp_tosiz(dp);
3149 break;
3150 case 28: /* ftosh */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
3153 gen_vfp_tosh(dp, 16 - rm);
3154 break;
3155 case 29: /* ftosl */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
3158 gen_vfp_tosl(dp, 32 - rm);
3159 break;
3160 case 30: /* ftouh */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
3163 gen_vfp_touh(dp, 16 - rm);
3164 break;
3165 case 31: /* ftoul */
3166 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 return 1;
3168 gen_vfp_toul(dp, 32 - rm);
3169 break;
3170 default: /* undefined */
3171 printf ("rn:%d\n", rn);
3172 return 1;
3174 break;
3175 default: /* undefined */
3176 printf ("op:%d\n", op);
3177 return 1;
3180 /* Write back the result. */
3181 if (op == 15 && (rn >= 8 && rn <= 11))
3182 ; /* Comparison, do nothing. */
3183 else if (op == 15 && rn > 17)
3184 /* Integer result. */
3185 gen_mov_vreg_F0(0, rd);
3186 else if (op == 15 && rn == 15)
3187 /* conversion */
3188 gen_mov_vreg_F0(!dp, rd);
3189 else
3190 gen_mov_vreg_F0(dp, rd);
3192 /* break out of the loop if we have finished */
3193 if (veclen == 0)
3194 break;
3196 if (op == 15 && delta_m == 0) {
3197 /* single source one-many */
3198 while (veclen--) {
3199 rd = ((rd + delta_d) & (bank_mask - 1))
3200 | (rd & bank_mask);
3201 gen_mov_vreg_F0(dp, rd);
3203 break;
3205 /* Set up the next operands. */
3206 veclen--;
3207 rd = ((rd + delta_d) & (bank_mask - 1))
3208 | (rd & bank_mask);
3210 if (op == 15) {
3211 /* One source operand. */
3212 rm = ((rm + delta_m) & (bank_mask - 1))
3213 | (rm & bank_mask);
3214 gen_mov_F0_vreg(dp, rm);
3215 } else {
3216 /* Two source operands. */
3217 rn = ((rn + delta_d) & (bank_mask - 1))
3218 | (rn & bank_mask);
3219 gen_mov_F0_vreg(dp, rn);
3220 if (delta_m) {
3221 rm = ((rm + delta_m) & (bank_mask - 1))
3222 | (rm & bank_mask);
3223 gen_mov_F1_vreg(dp, rm);
3228 break;
3229 case 0xc:
3230 case 0xd:
3231 if ((insn & 0x03e00000) == 0x00400000) {
3232 /* two-register transfer */
3233 rn = (insn >> 16) & 0xf;
3234 rd = (insn >> 12) & 0xf;
3235 if (dp) {
3236 VFP_DREG_M(rm, insn);
3237 } else {
3238 rm = VFP_SREG_M(insn);
3241 if (insn & ARM_CP_RW_BIT) {
3242 /* vfp->arm */
3243 if (dp) {
3244 gen_mov_F0_vreg(0, rm * 2);
3245 tmp = gen_vfp_mrs();
3246 store_reg(s, rd, tmp);
3247 gen_mov_F0_vreg(0, rm * 2 + 1);
3248 tmp = gen_vfp_mrs();
3249 store_reg(s, rn, tmp);
3250 } else {
3251 gen_mov_F0_vreg(0, rm);
3252 tmp = gen_vfp_mrs();
3253 store_reg(s, rn, tmp);
3254 gen_mov_F0_vreg(0, rm + 1);
3255 tmp = gen_vfp_mrs();
3256 store_reg(s, rd, tmp);
3258 } else {
3259 /* arm->vfp */
3260 if (dp) {
3261 tmp = load_reg(s, rd);
3262 gen_vfp_msr(tmp);
3263 gen_mov_vreg_F0(0, rm * 2);
3264 tmp = load_reg(s, rn);
3265 gen_vfp_msr(tmp);
3266 gen_mov_vreg_F0(0, rm * 2 + 1);
3267 } else {
3268 tmp = load_reg(s, rn);
3269 gen_vfp_msr(tmp);
3270 gen_mov_vreg_F0(0, rm);
3271 tmp = load_reg(s, rd);
3272 gen_vfp_msr(tmp);
3273 gen_mov_vreg_F0(0, rm + 1);
3276 } else {
3277 /* Load/store */
3278 rn = (insn >> 16) & 0xf;
3279 if (dp)
3280 VFP_DREG_D(rd, insn);
3281 else
3282 rd = VFP_SREG_D(insn);
3283 if (s->thumb && rn == 15) {
3284 gen_op_movl_T1_im(s->pc & ~2);
3285 } else {
3286 gen_movl_T1_reg(s, rn);
3288 if ((insn & 0x01200000) == 0x01000000) {
3289 /* Single load/store */
3290 offset = (insn & 0xff) << 2;
3291 if ((insn & (1 << 23)) == 0)
3292 offset = -offset;
3293 gen_op_addl_T1_im(offset);
3294 if (insn & (1 << 20)) {
3295 gen_vfp_ld(s, dp);
3296 gen_mov_vreg_F0(dp, rd);
3297 } else {
3298 gen_mov_F0_vreg(dp, rd);
3299 gen_vfp_st(s, dp);
3301 } else {
3302 /* load/store multiple */
3303 if (dp)
3304 n = (insn >> 1) & 0x7f;
3305 else
3306 n = insn & 0xff;
3308 if (insn & (1 << 24)) /* pre-decrement */
3309 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3311 if (dp)
3312 offset = 8;
3313 else
3314 offset = 4;
3315 for (i = 0; i < n; i++) {
3316 if (insn & ARM_CP_RW_BIT) {
3317 /* load */
3318 gen_vfp_ld(s, dp);
3319 gen_mov_vreg_F0(dp, rd + i);
3320 } else {
3321 /* store */
3322 gen_mov_F0_vreg(dp, rd + i);
3323 gen_vfp_st(s, dp);
3325 gen_op_addl_T1_im(offset);
3327 if (insn & (1 << 21)) {
3328 /* writeback */
3329 if (insn & (1 << 24))
3330 offset = -offset * n;
3331 else if (dp && (insn & 1))
3332 offset = 4;
3333 else
3334 offset = 0;
3336 if (offset != 0)
3337 gen_op_addl_T1_im(offset);
3338 gen_movl_reg_T1(s, rn);
3342 break;
3343 default:
3344 /* Should never happen. */
3345 return 1;
3347 return 0;
3350 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3352 TranslationBlock *tb;
3354 tb = s->tb;
3355 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3356 tcg_gen_goto_tb(n);
3357 gen_set_pc_im(dest);
3358 tcg_gen_exit_tb((long)tb + n);
3359 } else {
3360 gen_set_pc_im(dest);
3361 tcg_gen_exit_tb(0);
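/* Note: direct block chaining with tcg_gen_goto_tb() is only attempted
   when the destination stays on the same guest page as the current TB;
   a cross-page target exits with tcg_gen_exit_tb(0) so the main loop
   can look the destination up again after any mapping change. */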
3365 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3367 if (unlikely(s->singlestep_enabled)) {
3368 /* An indirect jump so that we still trigger the debug exception. */
3369 if (s->thumb)
3370 dest |= 1;
3371 gen_bx_im(s, dest);
3372 } else {
3373 gen_goto_tb(s, 0, dest);
3374 s->is_jmp = DISAS_TB_JUMP;
3378 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3380 if (x)
3381 tcg_gen_sari_i32(t0, t0, 16);
3382 else
3383 gen_sxth(t0);
3384 if (y)
3385 tcg_gen_sari_i32(t1, t1, 16);
3386 else
3387 gen_sxth(t1);
3388 tcg_gen_mul_i32(t0, t0, t1);
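/* Worked example (illustrative): gen_mulxy() implements the SMUL<x><y>
   halfword selection -- x and y pick the top (1) or bottom (0) 16 bits
   of t0 and t1, sign-extended.  With t0 = 0x7fff0001, t1 = 0x00030002:
   x = 0, y = 0 -> 1 * 2 = 2; x = 1, y = 1 -> 0x7fff * 3 = 0x17ffd. */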
3391 /* Return the mask of PSR bits set by a MSR instruction. */
3392 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3393 uint32_t mask;
3395 mask = 0;
3396 if (flags & (1 << 0))
3397 mask |= 0xff;
3398 if (flags & (1 << 1))
3399 mask |= 0xff00;
3400 if (flags & (1 << 2))
3401 mask |= 0xff0000;
3402 if (flags & (1 << 3))
3403 mask |= 0xff000000;
3405 /* Mask out undefined bits. */
3406 mask &= ~CPSR_RESERVED;
3407 if (!arm_feature(env, ARM_FEATURE_V6))
3408 mask &= ~(CPSR_E | CPSR_GE);
3409 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3410 mask &= ~CPSR_IT;
3411 /* Mask out execution state bits. */
3412 if (!spsr)
3413 mask &= ~CPSR_EXEC;
3414 /* Mask out privileged bits. */
3415 if (IS_USER(s))
3416 mask &= CPSR_USER;
3417 return mask;
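/* Worked example (illustrative): flags is the 4-bit MSR field mask, so
   MSR CPSR_fc, rN arrives with flags = 0b1001 and starts from
   mask = 0xff000000 | 0x000000ff, which the rest of the function then
   trims for missing features, execution state and user mode. */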
3420 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3421 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3423 TCGv tmp;
3424 if (spsr) {
3425 /* ??? This is also undefined in system mode. */
3426 if (IS_USER(s))
3427 return 1;
3429 tmp = load_cpu_field(spsr);
3430 tcg_gen_andi_i32(tmp, tmp, ~mask);
3431 tcg_gen_andi_i32(t0, t0, mask);
3432 tcg_gen_or_i32(tmp, tmp, t0);
3433 store_cpu_field(tmp, spsr);
3434 } else {
3435 gen_set_cpsr(t0, mask);
3437 dead_tmp(t0);
3438 gen_lookup_tb(s);
3439 return 0;
3442 /* Returns nonzero if access to the PSR is not permitted. */
3443 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3445 TCGv tmp;
3446 tmp = new_tmp();
3447 tcg_gen_movi_i32(tmp, val);
3448 return gen_set_psr(s, mask, spsr, tmp);
3451 /* Generate an old-style exception return. Marks pc as dead. */
3452 static void gen_exception_return(DisasContext *s, TCGv pc)
3454 TCGv tmp;
3455 store_reg(s, 15, pc);
3456 tmp = load_cpu_field(spsr);
3457 gen_set_cpsr(tmp, 0xffffffff);
3458 dead_tmp(tmp);
3459 s->is_jmp = DISAS_UPDATE;
3462 /* Generate a v6 exception return. Marks both values as dead. */
3463 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3465 gen_set_cpsr(cpsr, 0xffffffff);
3466 dead_tmp(cpsr);
3467 store_reg(s, 15, pc);
3468 s->is_jmp = DISAS_UPDATE;
3471 static inline void
3472 gen_set_condexec (DisasContext *s)
3474 if (s->condexec_mask) {
3475 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3476 TCGv tmp = new_tmp();
3477 tcg_gen_movi_i32(tmp, val);
3478 store_cpu_field(tmp, condexec_bits);
3482 static void gen_nop_hint(DisasContext *s, int val)
3484 switch (val) {
3485 case 3: /* wfi */
3486 gen_set_pc_im(s->pc);
3487 s->is_jmp = DISAS_WFI;
3488 break;
3489 case 2: /* wfe */
3490 case 4: /* sev */
3491 /* TODO: Implement SEV and WFE. May help SMP performance. */
3492 default: /* nop */
3493 break;
3497 /* These macros help make the code more readable when migrating from the
3498 old dyngen helpers. They should probably be removed when
3499 T0/T1 are removed. */
3500 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3501 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3503 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3505 static inline int gen_neon_add(int size)
3507 switch (size) {
3508 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3509 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3510 case 2: gen_op_addl_T0_T1(); break;
3511 default: return 1;
3513 return 0;
3516 static inline void gen_neon_rsb(int size)
3518 switch (size) {
3519 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3520 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3521 case 2: gen_op_rsbl_T0_T1(); break;
3522 default: return;
3526 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3527 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3528 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3529 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3530 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3532 /* FIXME: This is wrong. They set the wrong overflow bit. */
3533 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3534 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3535 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3536 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
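/* Note on the aliases above: the scalar saturating helpers update the
   CPSR Q flag, whereas these NEON ops should set the cumulative QC bit
   in FPSCR -- that is the wrong-overflow-bit problem the FIXME refers
   to; the unused 'e' (env) argument is simply dropped. */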
3538 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3539 switch ((size << 1) | u) { \
3540 case 0: \
3541 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3542 break; \
3543 case 1: \
3544 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3545 break; \
3546 case 2: \
3547 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3548 break; \
3549 case 3: \
3550 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3551 break; \
3552 case 4: \
3553 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3554 break; \
3555 case 5: \
3556 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3557 break; \
3558 default: return 1; \
3559 }} while (0)
3561 #define GEN_NEON_INTEGER_OP(name) do { \
3562 switch ((size << 1) | u) { \
3563 case 0: \
3564 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3565 break; \
3566 case 1: \
3567 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3568 break; \
3569 case 2: \
3570 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3571 break; \
3572 case 3: \
3573 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3574 break; \
3575 case 4: \
3576 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3577 break; \
3578 case 5: \
3579 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3580 break; \
3581 default: return 1; \
3582 }} while (0)
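/* Illustrative expansion: with size = 1 (16-bit elements) and u = 1,
   GEN_NEON_INTEGER_OP(hadd) selects case 3 and emits
   gen_helper_neon_hadd_u16(cpu_T[0], cpu_T[0], cpu_T[1]); size == 3
   (64-bit elements) hits the default and the insn is undefined.  The
   _ENV variant above is identical except that it also passes cpu_env,
   for helpers that must update saturation state. */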
3584 static inline void
3585 gen_neon_movl_scratch_T0(int scratch)
3587 uint32_t offset;
3589 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3590 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3593 static inline void
3594 gen_neon_movl_scratch_T1(int scratch)
3596 uint32_t offset;
3598 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3599 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3602 static inline void
3603 gen_neon_movl_T0_scratch(int scratch)
3605 uint32_t offset;
3607 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3608 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3611 static inline void
3612 gen_neon_movl_T1_scratch(int scratch)
3614 uint32_t offset;
3616 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3617 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3620 static inline void gen_neon_get_scalar(int size, int reg)
3622 if (size == 1) {
3623 NEON_GET_REG(T0, reg >> 1, reg & 1);
3624 } else {
3625 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3626 if (reg & 1)
3627 gen_neon_dup_low16(cpu_T[0]);
3628 else
3629 gen_neon_dup_high16(cpu_T[0]);
3633 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3635 TCGv rd, rm, tmp;
3637 rd = new_tmp();
3638 rm = new_tmp();
3639 tmp = new_tmp();
3641 tcg_gen_andi_i32(rd, t0, 0xff);
3642 tcg_gen_shri_i32(tmp, t0, 8);
3643 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3644 tcg_gen_or_i32(rd, rd, tmp);
3645 tcg_gen_shli_i32(tmp, t1, 16);
3646 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3647 tcg_gen_or_i32(rd, rd, tmp);
3648 tcg_gen_shli_i32(tmp, t1, 8);
3649 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3650 tcg_gen_or_i32(rd, rd, tmp);
3652 tcg_gen_shri_i32(rm, t0, 8);
3653 tcg_gen_andi_i32(rm, rm, 0xff);
3654 tcg_gen_shri_i32(tmp, t0, 16);
3655 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3656 tcg_gen_or_i32(rm, rm, tmp);
3657 tcg_gen_shli_i32(tmp, t1, 8);
3658 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3659 tcg_gen_or_i32(rm, rm, tmp);
3660 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3661 tcg_gen_or_i32(t1, rm, tmp);
3662 tcg_gen_mov_i32(t0, rd);
3664 dead_tmp(tmp);
3665 dead_tmp(rm);
3666 dead_tmp(rd);
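/* Worked example (illustrative values) for the byte unzip: with
   t0 = 0x03020100 and t1 = 0x07060504 the even-indexed bytes are
   gathered into t0 = 0x06040200 and the odd-indexed bytes into
   t1 = 0x07050301. */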
3669 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3671 TCGv rd, rm, tmp;
3673 rd = new_tmp();
3674 rm = new_tmp();
3675 tmp = new_tmp();
3677 tcg_gen_andi_i32(rd, t0, 0xff);
3678 tcg_gen_shli_i32(tmp, t1, 8);
3679 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3680 tcg_gen_or_i32(rd, rd, tmp);
3681 tcg_gen_shli_i32(tmp, t0, 16);
3682 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3683 tcg_gen_or_i32(rd, rd, tmp);
3684 tcg_gen_shli_i32(tmp, t1, 24);
3685 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3686 tcg_gen_or_i32(rd, rd, tmp);
3688 tcg_gen_andi_i32(rm, t1, 0xff000000);
3689 tcg_gen_shri_i32(tmp, t0, 8);
3690 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3691 tcg_gen_or_i32(rm, rm, tmp);
3692 tcg_gen_shri_i32(tmp, t1, 8);
3693 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3694 tcg_gen_or_i32(rm, rm, tmp);
3695 tcg_gen_shri_i32(tmp, t0, 16);
3696 tcg_gen_andi_i32(tmp, tmp, 0xff);
3697 tcg_gen_or_i32(t1, rm, tmp);
3698 tcg_gen_mov_i32(t0, rd);
3700 dead_tmp(tmp);
3701 dead_tmp(rm);
3702 dead_tmp(rd);
3705 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3707 TCGv tmp, tmp2;
3709 tmp = new_tmp();
3710 tmp2 = new_tmp();
3712 tcg_gen_andi_i32(tmp, t0, 0xffff);
3713 tcg_gen_shli_i32(tmp2, t1, 16);
3714 tcg_gen_or_i32(tmp, tmp, tmp2);
3715 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3716 tcg_gen_shri_i32(tmp2, t0, 16);
3717 tcg_gen_or_i32(t1, t1, tmp2);
3718 tcg_gen_mov_i32(t0, tmp);
3720 dead_tmp(tmp2);
3721 dead_tmp(tmp);
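/* Worked example (illustrative values) for the 16-bit zip: with
   t0 = 0x11110000 and t1 = 0x33332222 the interleave above produces
   t0 = 0x22220000 and t1 = 0x33331111, i.e. the two low halves paired
   in t0 and the two high halves paired in t1. */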
3724 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3726 int n;
3728 for (n = 0; n < q + 1; n += 2) {
3729 NEON_GET_REG(T0, reg, n);
3730 NEON_GET_REG(T1, reg, n + 1);
3731 switch (size) {
3732 case 0: gen_neon_unzip_u8(cpu_T[0], cpu_T[1]); break;
3733 case 1: gen_neon_zip_u16(cpu_T[0], cpu_T[1]); break; /* zip and unzip are the same. */
3734 case 2: /* no-op */; break;
3735 default: abort();
3737 gen_neon_movl_T0_scratch(tmp + n);
3738 gen_neon_movl_T1_scratch(tmp + n + 1);
3742 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3744 TCGv rd, tmp;
3746 rd = new_tmp();
3747 tmp = new_tmp();
3749 tcg_gen_shli_i32(rd, t0, 8);
3750 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3751 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3752 tcg_gen_or_i32(rd, rd, tmp);
3754 tcg_gen_shri_i32(t1, t1, 8);
3755 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3756 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3757 tcg_gen_or_i32(t1, t1, tmp);
3758 tcg_gen_mov_i32(t0, rd);
3760 dead_tmp(tmp);
3761 dead_tmp(rd);
3764 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3766 TCGv rd, tmp;
3768 rd = new_tmp();
3769 tmp = new_tmp();
3771 tcg_gen_shli_i32(rd, t0, 16);
3772 tcg_gen_andi_i32(tmp, t1, 0xffff);
3773 tcg_gen_or_i32(rd, rd, tmp);
3774 tcg_gen_shri_i32(t1, t1, 16);
3775 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3776 tcg_gen_or_i32(t1, t1, tmp);
3777 tcg_gen_mov_i32(t0, rd);
3779 dead_tmp(tmp);
3780 dead_tmp(rd);
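/* Worked example (illustrative values) for the 16-bit transpose: with
   t0 = 0x11110000 and t1 = 0x33332222 the code above produces
   t0 = 0x00002222 (the two low halves) and t1 = 0x11113333 (the two
   high halves) -- the per-chunk element exchange VTRN.16 needs. */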
3784 static struct {
3785 int nregs;
3786 int interleave;
3787 int spacing;
3788 } neon_ls_element_type[11] = {
3789 {4, 4, 1},
3790 {4, 4, 2},
3791 {4, 1, 1},
3792 {4, 2, 1},
3793 {3, 3, 1},
3794 {3, 3, 2},
3795 {3, 1, 1},
3796 {1, 1, 1},
3797 {2, 2, 1},
3798 {2, 2, 2},
3799 {2, 1, 1}
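/* The table is indexed by the op field of the instruction
   ((insn >> 8) & 0xf): nregs is the number of D registers transferred,
   interleave the element interleave factor and spacing the register
   stride.  Illustrative reading: op 7 ({1, 1, 1}) is the
   single-register VLD1/VST1 form, while op 0 ({4, 4, 1}) is the fully
   interleaved four-register VLD4/VST4 form. */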
3802 /* Translate a NEON load/store element instruction. Return nonzero if the
3803 instruction is invalid. */
3804 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3806 int rd, rn, rm;
3807 int op;
3808 int nregs;
3809 int interleave;
3810 int stride;
3811 int size;
3812 int reg;
3813 int pass;
3814 int load;
3815 int shift;
3816 int n;
3817 TCGv tmp;
3818 TCGv tmp2;
3820 if (!vfp_enabled(env))
3821 return 1;
3822 VFP_DREG_D(rd, insn);
3823 rn = (insn >> 16) & 0xf;
3824 rm = insn & 0xf;
3825 load = (insn & (1 << 21)) != 0;
3826 if ((insn & (1 << 23)) == 0) {
3827 /* Load store all elements. */
3828 op = (insn >> 8) & 0xf;
3829 size = (insn >> 6) & 3;
3830 if (op > 10 || size == 3)
3831 return 1;
3832 nregs = neon_ls_element_type[op].nregs;
3833 interleave = neon_ls_element_type[op].interleave;
3834 gen_movl_T1_reg(s, rn);
3835 stride = (1 << size) * interleave;
3836 for (reg = 0; reg < nregs; reg++) {
3837 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3838 gen_movl_T1_reg(s, rn);
3839 gen_op_addl_T1_im((1 << size) * reg);
3840 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3841 gen_movl_T1_reg(s, rn);
3842 gen_op_addl_T1_im(1 << size);
3844 for (pass = 0; pass < 2; pass++) {
3845 if (size == 2) {
3846 if (load) {
3847 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3848 neon_store_reg(rd, pass, tmp);
3849 } else {
3850 tmp = neon_load_reg(rd, pass);
3851 gen_st32(tmp, cpu_T[1], IS_USER(s));
3853 gen_op_addl_T1_im(stride);
3854 } else if (size == 1) {
3855 if (load) {
3856 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3857 gen_op_addl_T1_im(stride);
3858 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3859 gen_op_addl_T1_im(stride);
3860 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3861 dead_tmp(tmp2);
3862 neon_store_reg(rd, pass, tmp);
3863 } else {
3864 tmp = neon_load_reg(rd, pass);
3865 tmp2 = new_tmp();
3866 tcg_gen_shri_i32(tmp2, tmp, 16);
3867 gen_st16(tmp, cpu_T[1], IS_USER(s));
3868 gen_op_addl_T1_im(stride);
3869 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3870 gen_op_addl_T1_im(stride);
3872 } else /* size == 0 */ {
3873 if (load) {
3874 TCGV_UNUSED(tmp2);
3875 for (n = 0; n < 4; n++) {
3876 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3877 gen_op_addl_T1_im(stride);
3878 if (n == 0) {
3879 tmp2 = tmp;
3880 } else {
3881 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3882 dead_tmp(tmp);
3885 neon_store_reg(rd, pass, tmp2);
3886 } else {
3887 tmp2 = neon_load_reg(rd, pass);
3888 for (n = 0; n < 4; n++) {
3889 tmp = new_tmp();
3890 if (n == 0) {
3891 tcg_gen_mov_i32(tmp, tmp2);
3892 } else {
3893 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3895 gen_st8(tmp, cpu_T[1], IS_USER(s));
3896 gen_op_addl_T1_im(stride);
3898 dead_tmp(tmp2);
3902 rd += neon_ls_element_type[op].spacing;
3904 stride = nregs * 8;
3905 } else {
3906 size = (insn >> 10) & 3;
3907 if (size == 3) {
3908 /* Load single element to all lanes. */
3909 if (!load)
3910 return 1;
3911 size = (insn >> 6) & 3;
3912 nregs = ((insn >> 8) & 3) + 1;
3913 stride = (insn & (1 << 5)) ? 2 : 1;
3914 gen_movl_T1_reg(s, rn);
3915 for (reg = 0; reg < nregs; reg++) {
3916 switch (size) {
3917 case 0:
3918 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3919 gen_neon_dup_u8(tmp, 0);
3920 break;
3921 case 1:
3922 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3923 gen_neon_dup_low16(tmp);
3924 break;
3925 case 2:
3926 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3927 break;
3928 case 3:
3929 return 1;
3930 default: /* Avoid compiler warnings. */
3931 abort();
3933 gen_op_addl_T1_im(1 << size);
3934 tmp2 = new_tmp();
3935 tcg_gen_mov_i32(tmp2, tmp);
3936 neon_store_reg(rd, 0, tmp2);
3937 neon_store_reg(rd, 1, tmp);
3938 rd += stride;
3940 stride = (1 << size) * nregs;
3941 } else {
3942 /* Single element. */
3943 pass = (insn >> 7) & 1;
3944 switch (size) {
3945 case 0:
3946 shift = ((insn >> 5) & 3) * 8;
3947 stride = 1;
3948 break;
3949 case 1:
3950 shift = ((insn >> 6) & 1) * 16;
3951 stride = (insn & (1 << 5)) ? 2 : 1;
3952 break;
3953 case 2:
3954 shift = 0;
3955 stride = (insn & (1 << 6)) ? 2 : 1;
3956 break;
3957 default:
3958 abort();
3960 nregs = ((insn >> 8) & 3) + 1;
3961 gen_movl_T1_reg(s, rn);
3962 for (reg = 0; reg < nregs; reg++) {
3963 if (load) {
3964 switch (size) {
3965 case 0:
3966 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3967 break;
3968 case 1:
3969 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3970 break;
3971 case 2:
3972 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3973 break;
3974 default: /* Avoid compiler warnings. */
3975 abort();
3977 if (size != 2) {
3978 tmp2 = neon_load_reg(rd, pass);
3979 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3980 dead_tmp(tmp2);
3982 neon_store_reg(rd, pass, tmp);
3983 } else { /* Store */
3984 tmp = neon_load_reg(rd, pass);
3985 if (shift)
3986 tcg_gen_shri_i32(tmp, tmp, shift);
3987 switch (size) {
3988 case 0:
3989 gen_st8(tmp, cpu_T[1], IS_USER(s));
3990 break;
3991 case 1:
3992 gen_st16(tmp, cpu_T[1], IS_USER(s));
3993 break;
3994 case 2:
3995 gen_st32(tmp, cpu_T[1], IS_USER(s));
3996 break;
3999 rd += stride;
4000 gen_op_addl_T1_im(1 << size);
4002 stride = nregs * (1 << size);
4005 if (rm != 15) {
4006 TCGv base;
4008 base = load_reg(s, rn);
4009 if (rm == 13) {
4010 tcg_gen_addi_i32(base, base, stride);
4011 } else {
4012 TCGv index;
4013 index = load_reg(s, rm);
4014 tcg_gen_add_i32(base, base, index);
4015 dead_tmp(index);
4017 store_reg(s, rn, base);
4019 return 0;
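/* Note on the writeback handling above: rm == 15 means no writeback,
   rm == 13 encodes the "!" form (the base is advanced by the transfer
   size accumulated in stride), and any other rm is a register
   post-index, e.g. VLD1.32 {d0}, [r2], r3 adds r3 to r2 after the
   transfer. */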
4022 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4023 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4025 tcg_gen_and_i32(t, t, c);
4026 tcg_gen_bic_i32(f, f, c);
4027 tcg_gen_or_i32(dest, t, f);
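/* Worked example (illustrative values): with c = 0x0000ffff,
   t = 0x11112222 and f = 0x33334444 the select yields
   dest = 0x33332222 -- bits of t where c is set, bits of f
   elsewhere. */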
4030 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4032 switch (size) {
4033 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4034 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4035 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4036 default: abort();
4040 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4042 switch (size) {
4043 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4044 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4045 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4046 default: abort();
4050 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4052 switch (size) {
4053 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4054 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4055 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4056 default: abort();
4060 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4061 int q, int u)
4063 if (q) {
4064 if (u) {
4065 switch (size) {
4066 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4067 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4068 default: abort();
4070 } else {
4071 switch (size) {
4072 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4073 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4074 default: abort();
4077 } else {
4078 if (u) {
4079 switch (size) {
4080 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4081 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4082 default: abort();
4084 } else {
4085 switch (size) {
4086 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4087 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4088 default: abort();
4094 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4096 if (u) {
4097 switch (size) {
4098 case 0: gen_helper_neon_widen_u8(dest, src); break;
4099 case 1: gen_helper_neon_widen_u16(dest, src); break;
4100 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4101 default: abort();
4103 } else {
4104 switch (size) {
4105 case 0: gen_helper_neon_widen_s8(dest, src); break;
4106 case 1: gen_helper_neon_widen_s16(dest, src); break;
4107 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4108 default: abort();
4111 dead_tmp(src);
4114 static inline void gen_neon_addl(int size)
4116 switch (size) {
4117 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4118 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4119 case 2: tcg_gen_add_i64(CPU_V001); break;
4120 default: abort();
4124 static inline void gen_neon_subl(int size)
4126 switch (size) {
4127 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4128 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4129 case 2: tcg_gen_sub_i64(CPU_V001); break;
4130 default: abort();
4134 static inline void gen_neon_negl(TCGv_i64 var, int size)
4136 switch (size) {
4137 case 0: gen_helper_neon_negl_u16(var, var); break;
4138 case 1: gen_helper_neon_negl_u32(var, var); break;
4139 case 2: gen_helper_neon_negl_u64(var, var); break;
4140 default: abort();
4144 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4146 switch (size) {
4147 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4148 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4149 default: abort();
4153 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4155 TCGv_i64 tmp;
4157 switch ((size << 1) | u) {
4158 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4159 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4160 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4161 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4162 case 4:
4163 tmp = gen_muls_i64_i32(a, b);
4164 tcg_gen_mov_i64(dest, tmp);
4165 break;
4166 case 5:
4167 tmp = gen_mulu_i64_i32(a, b);
4168 tcg_gen_mov_i64(dest, tmp);
4169 break;
4170 default: abort();
4172 if (size < 2) {
4173 dead_tmp(b);
4174 dead_tmp(a);
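/* Note: for 32-bit sources (size == 2) the widening multiply is done
   inline via gen_muls_i64_i32/gen_mulu_i64_i32, which free their
   operands themselves -- hence the size < 2 guard on the dead_tmp()
   calls above. */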
4178 /* Translate a NEON data processing instruction. Return nonzero if the
4179 instruction is invalid.
4180 We process data in a mixture of 32-bit and 64-bit chunks.
4181 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4183 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4185 int op;
4186 int q;
4187 int rd, rn, rm;
4188 int size;
4189 int shift;
4190 int pass;
4191 int count;
4192 int pairwise;
4193 int u;
4194 int n;
4195 uint32_t imm;
4196 TCGv tmp;
4197 TCGv tmp2;
4198 TCGv tmp3;
4199 TCGv_i64 tmp64;
4201 if (!vfp_enabled(env))
4202 return 1;
4203 q = (insn & (1 << 6)) != 0;
4204 u = (insn >> 24) & 1;
4205 VFP_DREG_D(rd, insn);
4206 VFP_DREG_N(rn, insn);
4207 VFP_DREG_M(rm, insn);
4208 size = (insn >> 20) & 3;
4209 if ((insn & (1 << 23)) == 0) {
4210 /* Three register same length. */
4211 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4212 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4213 || op == 10 || op == 11 || op == 16)) {
4214 /* 64-bit element instructions. */
4215 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4216 neon_load_reg64(cpu_V0, rn + pass);
4217 neon_load_reg64(cpu_V1, rm + pass);
4218 switch (op) {
4219 case 1: /* VQADD */
4220 if (u) {
4221 gen_helper_neon_add_saturate_u64(CPU_V001);
4222 } else {
4223 gen_helper_neon_add_saturate_s64(CPU_V001);
4225 break;
4226 case 5: /* VQSUB */
4227 if (u) {
4228 gen_helper_neon_sub_saturate_u64(CPU_V001);
4229 } else {
4230 gen_helper_neon_sub_saturate_s64(CPU_V001);
4232 break;
4233 case 8: /* VSHL */
4234 if (u) {
4235 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4236 } else {
4237 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4239 break;
4240 case 9: /* VQSHL */
4241 if (u) {
4242 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4243 cpu_V1, cpu_V0);
4244 } else {
4245 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4246 cpu_V1, cpu_V0);
4248 break;
4249 case 10: /* VRSHL */
4250 if (u) {
4251 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4252 } else {
4253 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4255 break;
4256 case 11: /* VQRSHL */
4257 if (u) {
4258 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4259 cpu_V1, cpu_V0);
4260 } else {
4261 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4262 cpu_V1, cpu_V0);
4264 break;
4265 case 16:
4266 if (u) {
4267 tcg_gen_sub_i64(CPU_V001);
4268 } else {
4269 tcg_gen_add_i64(CPU_V001);
4271 break;
4272 default:
4273 abort();
4275 neon_store_reg64(cpu_V0, rd + pass);
4277 return 0;
4279 switch (op) {
4280 case 8: /* VSHL */
4281 case 9: /* VQSHL */
4282 case 10: /* VRSHL */
4283 case 11: /* VQRSHL */
4285 int rtmp;
4286 /* Shift instruction operands are reversed. */
4287 rtmp = rn;
4288 rn = rm;
4289 rm = rtmp;
4290 pairwise = 0;
4292 break;
4293 case 20: /* VPMAX */
4294 case 21: /* VPMIN */
4295 case 23: /* VPADD */
4296 pairwise = 1;
4297 break;
4298 case 26: /* VPADD (float) */
4299 pairwise = (u && size < 2);
4300 break;
4301 case 30: /* VPMIN/VPMAX (float) */
4302 pairwise = u;
4303 break;
4304 default:
4305 pairwise = 0;
4306 break;
4308 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4310 if (pairwise) {
4311 /* Pairwise. */
4312 if (q)
4313 n = (pass & 1) * 2;
4314 else
4315 n = 0;
4316 if (pass < q + 1) {
4317 NEON_GET_REG(T0, rn, n);
4318 NEON_GET_REG(T1, rn, n + 1);
4319 } else {
4320 NEON_GET_REG(T0, rm, n);
4321 NEON_GET_REG(T1, rm, n + 1);
4323 } else {
4324 /* Elementwise. */
4325 NEON_GET_REG(T0, rn, pass);
4326 NEON_GET_REG(T1, rm, pass);
4328 switch (op) {
4329 case 0: /* VHADD */
4330 GEN_NEON_INTEGER_OP(hadd);
4331 break;
4332 case 1: /* VQADD */
4333 GEN_NEON_INTEGER_OP_ENV(qadd);
4334 break;
4335 case 2: /* VRHADD */
4336 GEN_NEON_INTEGER_OP(rhadd);
4337 break;
4338 case 3: /* Logic ops. */
4339 switch ((u << 2) | size) {
4340 case 0: /* VAND */
4341 gen_op_andl_T0_T1();
4342 break;
4343 case 1: /* BIC */
4344 gen_op_bicl_T0_T1();
4345 break;
4346 case 2: /* VORR */
4347 gen_op_orl_T0_T1();
4348 break;
4349 case 3: /* VORN */
4350 gen_op_notl_T1();
4351 gen_op_orl_T0_T1();
4352 break;
4353 case 4: /* VEOR */
4354 gen_op_xorl_T0_T1();
4355 break;
4356 case 5: /* VBSL */
4357 tmp = neon_load_reg(rd, pass);
4358 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4359 dead_tmp(tmp);
4360 break;
4361 case 6: /* VBIT */
4362 tmp = neon_load_reg(rd, pass);
4363 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4364 dead_tmp(tmp);
4365 break;
4366 case 7: /* VBIF */
4367 tmp = neon_load_reg(rd, pass);
4368 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4369 dead_tmp(tmp);
4370 break;
4372 break;
4373 case 4: /* VHSUB */
4374 GEN_NEON_INTEGER_OP(hsub);
4375 break;
4376 case 5: /* VQSUB */
4377 GEN_NEON_INTEGER_OP_ENV(qsub);
4378 break;
4379 case 6: /* VCGT */
4380 GEN_NEON_INTEGER_OP(cgt);
4381 break;
4382 case 7: /* VCGE */
4383 GEN_NEON_INTEGER_OP(cge);
4384 break;
4385 case 8: /* VSHL */
4386 GEN_NEON_INTEGER_OP(shl);
4387 break;
4388 case 9: /* VQSHL */
4389 GEN_NEON_INTEGER_OP_ENV(qshl);
4390 break;
4391 case 10: /* VRSHL */
4392 GEN_NEON_INTEGER_OP(rshl);
4393 break;
4394 case 11: /* VQRSHL */
4395 GEN_NEON_INTEGER_OP_ENV(qrshl);
4396 break;
4397 case 12: /* VMAX */
4398 GEN_NEON_INTEGER_OP(max);
4399 break;
4400 case 13: /* VMIN */
4401 GEN_NEON_INTEGER_OP(min);
4402 break;
4403 case 14: /* VABD */
4404 GEN_NEON_INTEGER_OP(abd);
4405 break;
4406 case 15: /* VABA */
4407 GEN_NEON_INTEGER_OP(abd);
4408 NEON_GET_REG(T1, rd, pass);
4409 gen_neon_add(size);
4410 break;
4411 case 16:
4412 if (!u) { /* VADD */
4413 if (gen_neon_add(size))
4414 return 1;
4415 } else { /* VSUB */
4416 switch (size) {
4417 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4418 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4419 case 2: gen_op_subl_T0_T1(); break;
4420 default: return 1;
4423 break;
4424 case 17:
4425 if (!u) { /* VTST */
4426 switch (size) {
4427 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4428 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4429 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4430 default: return 1;
4432 } else { /* VCEQ */
4433 switch (size) {
4434 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4435 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4436 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4437 default: return 1;
4440 break;
4441 case 18: /* Multiply. */
4442 switch (size) {
4443 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4444 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4445 case 2: gen_op_mul_T0_T1(); break;
4446 default: return 1;
4448 NEON_GET_REG(T1, rd, pass);
4449 if (u) { /* VMLS */
4450 gen_neon_rsb(size);
4451 } else { /* VMLA */
4452 gen_neon_add(size);
4454 break;
4455 case 19: /* VMUL */
4456 if (u) { /* polynomial */
4457 gen_helper_neon_mul_p8(CPU_T001);
4458 } else { /* Integer */
4459 switch (size) {
4460 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4461 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4462 case 2: gen_op_mul_T0_T1(); break;
4463 default: return 1;
4466 break;
4467 case 20: /* VPMAX */
4468 GEN_NEON_INTEGER_OP(pmax);
4469 break;
4470 case 21: /* VPMIN */
4471 GEN_NEON_INTEGER_OP(pmin);
4472 break;
4473 case 22: /* Multiply high. */
4474 if (!u) { /* VQDMULH */
4475 switch (size) {
4476 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4477 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4478 default: return 1;
4480 } else { /* VQRDMULH */
4481 switch (size) {
4482 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4483 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4484 default: return 1;
4487 break;
4488 case 23: /* VPADD */
4489 if (u)
4490 return 1;
4491 switch (size) {
4492 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4493 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4494 case 2: gen_op_addl_T0_T1(); break;
4495 default: return 1;
4497 break;
4498 case 26: /* Floating point arithmetic. */
4499 switch ((u << 2) | size) {
4500 case 0: /* VADD */
4501 gen_helper_neon_add_f32(CPU_T001);
4502 break;
4503 case 2: /* VSUB */
4504 gen_helper_neon_sub_f32(CPU_T001);
4505 break;
4506 case 4: /* VPADD */
4507 gen_helper_neon_add_f32(CPU_T001);
4508 break;
4509 case 6: /* VABD */
4510 gen_helper_neon_abd_f32(CPU_T001);
4511 break;
4512 default:
4513 return 1;
4515 break;
4516 case 27: /* Float multiply. */
4517 gen_helper_neon_mul_f32(CPU_T001);
4518 if (!u) {
4519 NEON_GET_REG(T1, rd, pass);
4520 if (size == 0) {
4521 gen_helper_neon_add_f32(CPU_T001);
4522 } else {
4523 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4526 break;
4527 case 28: /* Float compare. */
4528 if (!u) {
4529 gen_helper_neon_ceq_f32(CPU_T001);
4530 } else {
4531 if (size == 0)
4532 gen_helper_neon_cge_f32(CPU_T001);
4533 else
4534 gen_helper_neon_cgt_f32(CPU_T001);
4536 break;
4537 case 29: /* Float compare absolute. */
4538 if (!u)
4539 return 1;
4540 if (size == 0)
4541 gen_helper_neon_acge_f32(CPU_T001);
4542 else
4543 gen_helper_neon_acgt_f32(CPU_T001);
4544 break;
4545 case 30: /* Float min/max. */
4546 if (size == 0)
4547 gen_helper_neon_max_f32(CPU_T001);
4548 else
4549 gen_helper_neon_min_f32(CPU_T001);
4550 break;
4551 case 31:
4552 if (size == 0)
4553 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4554 else
4555 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4556 break;
4557 default:
4558 abort();
4560 /* Save the result. For elementwise operations we can put it
4561 straight into the destination register. For pairwise operations
4562 we have to be careful to avoid clobbering the source operands. */
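/* Example: VPADD d0, d1, d0 has rd == rm, so writing the pass-0
   result straight to d0 would clobber the pass-1 source elements;
   staging the results in the scratch area avoids this. */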
4563 if (pairwise && rd == rm) {
4564 gen_neon_movl_scratch_T0(pass);
4565 } else {
4566 NEON_SET_REG(T0, rd, pass);
4569 } /* for pass */
4570 if (pairwise && rd == rm) {
4571 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4572 gen_neon_movl_T0_scratch(pass);
4573 NEON_SET_REG(T0, rd, pass);
4576 /* End of 3 register same size operations. */
4577 } else if (insn & (1 << 4)) {
4578 if ((insn & 0x00380080) != 0) {
4579 /* Two registers and shift. */
4580 op = (insn >> 8) & 0xf;
4581 if (insn & (1 << 7)) {
4582 /* 64-bit shift. */
4583 size = 3;
4584 } else {
4585 size = 2;
4586 while ((insn & (1 << (size + 19))) == 0)
4587 size--;
4589 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4590 /* To avoid excessive duplication of ops we implement shift
4591 by immediate using the variable shift operations. */
4592 if (op < 8) {
4593 /* Shift by immediate:
4594 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4595 /* Right shifts are encoded as N - shift, where N is the
4596 element size in bits. */
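/* Worked example: VSHR.S8 #3 encodes imm6 = 16 - 3 = 13, whose low
   three bits give shift = 5; 5 - 8 = -3, and a negative count makes
   the variable shift helpers shift right by 3. */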
4597 if (op <= 4)
4598 shift = shift - (1 << (size + 3));
4599 if (size == 3) {
4600 count = q + 1;
4601 } else {
4602 count = q ? 4: 2;
4604 switch (size) {
4605 case 0:
4606 imm = (uint8_t) shift;
4607 imm |= imm << 8;
4608 imm |= imm << 16;
4609 break;
4610 case 1:
4611 imm = (uint16_t) shift;
4612 imm |= imm << 16;
4613 break;
4614 case 2:
4615 case 3:
4616 imm = shift;
4617 break;
4618 default:
4619 abort();
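/* The (possibly negative) count is replicated into every lane so the
   packed helpers shift all lanes at once; e.g. shift = -3 at size 0
   gives imm = 0xfdfdfdfd. */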
4622 for (pass = 0; pass < count; pass++) {
4623 if (size == 3) {
4624 neon_load_reg64(cpu_V0, rm + pass);
4625 tcg_gen_movi_i64(cpu_V1, imm);
4626 switch (op) {
4627 case 0: /* VSHR */
4628 case 1: /* VSRA */
4629 if (u)
4630 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4631 else
4632 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4633 break;
4634 case 2: /* VRSHR */
4635 case 3: /* VRSRA */
4636 if (u)
4637 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4638 else
4639 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4640 break;
4641 case 4: /* VSRI */
4642 if (!u)
4643 return 1;
4644 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4645 break;
4646 case 5: /* VSHL, VSLI */
4647 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4648 break;
4649 case 6: /* VQSHL */
4650 if (u)
4651 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4652 else
4653 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4654 break;
4655 case 7: /* VQSHLU */
4656 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4657 break;
4659 if (op == 1 || op == 3) {
4660 /* Accumulate. */
4661 neon_load_reg64(cpu_V0, rd + pass);
4662 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4663 } else if (op == 4 || (op == 5 && u)) {
4664 /* Insert */
4665 cpu_abort(env, "VS[LR]I.64 not implemented");
4667 neon_store_reg64(cpu_V0, rd + pass);
4668 } else { /* size < 3 */
4669 /* Operands in T0 and T1. */
4670 gen_op_movl_T1_im(imm);
4671 NEON_GET_REG(T0, rm, pass);
4672 switch (op) {
4673 case 0: /* VSHR */
4674 case 1: /* VSRA */
4675 GEN_NEON_INTEGER_OP(shl);
4676 break;
4677 case 2: /* VRSHR */
4678 case 3: /* VRSRA */
4679 GEN_NEON_INTEGER_OP(rshl);
4680 break;
4681 case 4: /* VSRI */
4682 if (!u)
4683 return 1;
4684 GEN_NEON_INTEGER_OP(shl);
4685 break;
4686 case 5: /* VSHL, VSLI */
4687 switch (size) {
4688 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4689 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4690 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4691 default: return 1;
4693 break;
4694 case 6: /* VQSHL */
4695 GEN_NEON_INTEGER_OP_ENV(qshl);
4696 break;
4697 case 7: /* VQSHLU */
4698 switch (size) {
4699 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4700 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4701 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4702 default: return 1;
4704 break;
4707 if (op == 1 || op == 3) {
4708 /* Accumulate. */
4709 NEON_GET_REG(T1, rd, pass);
4710 gen_neon_add(size);
4711 } else if (op == 4 || (op == 5 && u)) {
4712 /* Insert */
4713 switch (size) {
4714 case 0:
4715 if (op == 4)
4716 imm = 0xff >> -shift;
4717 else
4718 imm = (uint8_t)(0xff << shift);
4719 imm |= imm << 8;
4720 imm |= imm << 16;
4721 break;
4722 case 1:
4723 if (op == 4)
4724 imm = 0xffff >> -shift;
4725 else
4726 imm = (uint16_t)(0xffff << shift);
4727 imm |= imm << 16;
4728 break;
4729 case 2:
4730 if (op == 4)
4731 imm = 0xffffffffu >> -shift;
4732 else
4733 imm = 0xffffffffu << shift;
4734 break;
4735 default:
4736 abort();
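/* imm is the per-lane mask of result bits contributed by the shifted
   value, e.g. VSRI.8 with shift = -3 gives 0xff >> 3 = 0x1f in every
   byte (0x1f1f1f1f); the destination keeps the remaining bits. */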
4738 tmp = neon_load_reg(rd, pass);
4739 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4740 tcg_gen_andi_i32(tmp, tmp, ~imm);
4741 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4743 NEON_SET_REG(T0, rd, pass);
4745 } /* for pass */
4746 } else if (op < 10) {
4747 /* Shift by immediate and narrow:
4748 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
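/* The shift is performed at the double-width element size (hence the
   size++ below) and the result is then narrowed, so a Q-register
   source produces a D-register destination. */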
4749 shift = shift - (1 << (size + 3));
4750 size++;
4751 switch (size) {
4752 case 1:
4753 imm = (uint16_t)shift;
4754 imm |= imm << 16;
4755 tmp2 = tcg_const_i32(imm);
4756 TCGV_UNUSED_I64(tmp64);
4757 break;
4758 case 2:
4759 imm = (uint32_t)shift;
4760 tmp2 = tcg_const_i32(imm);
4761 TCGV_UNUSED_I64(tmp64);
4762 break;
4763 case 3:
4764 tmp64 = tcg_const_i64(shift);
4765 TCGV_UNUSED(tmp2);
4766 break;
4767 default:
4768 abort();
4771 for (pass = 0; pass < 2; pass++) {
4772 if (size == 3) {
4773 neon_load_reg64(cpu_V0, rm + pass);
4774 if (q) {
4775 if (u)
4776 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4777 else
4778 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4779 } else {
4780 if (u)
4781 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4782 else
4783 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4785 } else {
4786 tmp = neon_load_reg(rm + pass, 0);
4787 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4788 tmp3 = neon_load_reg(rm + pass, 1);
4789 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4790 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4791 dead_tmp(tmp);
4792 dead_tmp(tmp3);
4794 tmp = new_tmp();
4795 if (op == 8 && !u) {
4796 gen_neon_narrow(size - 1, tmp, cpu_V0);
4797 } else {
4798 if (op == 8)
4799 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4800 else
4801 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4803 if (pass == 0) {
4804 tmp2 = tmp;
4805 } else {
4806 neon_store_reg(rd, 0, tmp2);
4807 neon_store_reg(rd, 1, tmp);
4809 } /* for pass */
4810 } else if (op == 10) {
4811 /* VSHLL */
4812 if (q || size == 3)
4813 return 1;
4814 tmp = neon_load_reg(rm, 0);
4815 tmp2 = neon_load_reg(rm, 1);
4816 for (pass = 0; pass < 2; pass++) {
4817 if (pass == 1)
4818 tmp = tmp2;
4820 gen_neon_widen(cpu_V0, tmp, size, u);
4822 if (shift != 0) {
4823 /* The shift is less than the width of the source
4824 type, so we can just shift the whole register. */
4825 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
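/* For sizes 0 and 1 two widened lanes share the 64-bit register, so
   the shift can spill the top bits of one lane (sign-extension bits
   for signed inputs) into the low bits of the lane above; the mask
   below clears those bits again. */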
4826 if (size < 2) {
4827 uint64_t imm64;
4828 if (size == 0) {
4829 imm = (0xffu >> (8 - shift));
4830 imm |= imm << 16;
4831 } else {
4832 imm = 0xffff >> (16 - shift);
4834 imm64 = imm | (((uint64_t)imm) << 32);
4835 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4838 neon_store_reg64(cpu_V0, rd + pass);
4840 } else if (op == 15 || op == 16) {
4841 /* VCVT fixed-point. */
4842 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4843 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4844 if (op & 1) {
4845 if (u)
4846 gen_vfp_ulto(0, shift);
4847 else
4848 gen_vfp_slto(0, shift);
4849 } else {
4850 if (u)
4851 gen_vfp_toul(0, shift);
4852 else
4853 gen_vfp_tosl(0, shift);
4855 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4857 } else {
4858 return 1;
4860 } else { /* (insn & 0x00380080) == 0 */
4861 int invert;
4863 op = (insn >> 8) & 0xf;
4864 /* One register and immediate. */
4865 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4866 invert = (insn & (1 << 5)) != 0;
4867 switch (op) {
4868 case 0: case 1:
4869 /* no-op */
4870 break;
4871 case 2: case 3:
4872 imm <<= 8;
4873 break;
4874 case 4: case 5:
4875 imm <<= 16;
4876 break;
4877 case 6: case 7:
4878 imm <<= 24;
4879 break;
4880 case 8: case 9:
4881 imm |= imm << 16;
4882 break;
4883 case 10: case 11:
4884 imm = (imm << 8) | (imm << 24);
4885 break;
4886 case 12:
4887 imm = (imm << 8) | 0xff;
4888 break;
4889 case 13:
4890 imm = (imm << 16) | 0xffff;
4891 break;
4892 case 14:
4893 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4894 if (invert)
4895 imm = ~imm;
4896 break;
4897 case 15:
4898 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4899 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4900 break;
4902 if (invert)
4903 imm = ~imm;
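/* Example expansions for imm8 = 0x1a (before the optional inversion
   above): op 4/5 -> 0x001a0000, op 8/9 -> 0x001a001a,
   op 12 -> 0x00001aff, op 14 -> 0x1a1a1a1a. */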
4905 if (op != 14 || !invert)
4906 gen_op_movl_T1_im(imm);
4908 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4909 if (op & 1 && op < 12) {
4910 tmp = neon_load_reg(rd, pass);
4911 if (invert) {
4912 /* The immediate value has already been inverted, so
4913 BIC becomes AND. */
4914 tcg_gen_andi_i32(tmp, tmp, imm);
4915 } else {
4916 tcg_gen_ori_i32(tmp, tmp, imm);
4918 } else {
4919 /* VMOV, VMVN. */
4920 tmp = new_tmp();
4921 if (op == 14 && invert) {
4922 uint32_t val;
4923 val = 0;
4924 for (n = 0; n < 4; n++) {
4925 if (imm & (1 << (n + (pass & 1) * 4)))
4926 val |= 0xff << (n * 8);
4928 tcg_gen_movi_i32(tmp, val);
4929 } else {
4930 tcg_gen_movi_i32(tmp, imm);
4933 neon_store_reg(rd, pass, tmp);
4936 } else { /* (insn & 0x00800010 == 0x00800000) */
4937 if (size != 3) {
4938 op = (insn >> 8) & 0xf;
4939 if ((insn & (1 << 6)) == 0) {
4940 /* Three registers of different lengths. */
4941 int src1_wide;
4942 int src2_wide;
4943 int prewiden;
4944 /* prewiden, src1_wide, src2_wide */
4945 static const int neon_3reg_wide[16][3] = {
4946 {1, 0, 0}, /* VADDL */
4947 {1, 1, 0}, /* VADDW */
4948 {1, 0, 0}, /* VSUBL */
4949 {1, 1, 0}, /* VSUBW */
4950 {0, 1, 1}, /* VADDHN */
4951 {0, 0, 0}, /* VABAL */
4952 {0, 1, 1}, /* VSUBHN */
4953 {0, 0, 0}, /* VABDL */
4954 {0, 0, 0}, /* VMLAL */
4955 {0, 0, 0}, /* VQDMLAL */
4956 {0, 0, 0}, /* VMLSL */
4957 {0, 0, 0}, /* VQDMLSL */
4958 {0, 0, 0}, /* Integer VMULL */
4959 {0, 0, 0}, /* VQDMULL */
4960 {0, 0, 0} /* Polynomial VMULL */
4963 prewiden = neon_3reg_wide[op][0];
4964 src1_wide = neon_3reg_wide[op][1];
4965 src2_wide = neon_3reg_wide[op][2];
4967 if (size == 0 && (op == 9 || op == 11 || op == 13))
4968 return 1;
4970 /* Avoid overlapping operands. Wide source operands are
4971 always aligned so will never overlap with wide
4972 destinations in problematic ways. */
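/* Example: VMULL q0, d0, d1 has rd == rn with a narrow first operand;
   the pass-0 store to d0 (the low half of q0) would clobber the upper
   part of d0 that pass 1 still reads, so that element is stashed in a
   scratch slot first. */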
4973 if (rd == rm && !src2_wide) {
4974 NEON_GET_REG(T0, rm, 1);
4975 gen_neon_movl_scratch_T0(2);
4976 } else if (rd == rn && !src1_wide) {
4977 NEON_GET_REG(T0, rn, 1);
4978 gen_neon_movl_scratch_T0(2);
4980 TCGV_UNUSED(tmp3);
4981 for (pass = 0; pass < 2; pass++) {
4982 if (src1_wide) {
4983 neon_load_reg64(cpu_V0, rn + pass);
4984 TCGV_UNUSED(tmp);
4985 } else {
4986 if (pass == 1 && rd == rn) {
4987 gen_neon_movl_T0_scratch(2);
4988 tmp = new_tmp();
4989 tcg_gen_mov_i32(tmp, cpu_T[0]);
4990 } else {
4991 tmp = neon_load_reg(rn, pass);
4993 if (prewiden) {
4994 gen_neon_widen(cpu_V0, tmp, size, u);
4997 if (src2_wide) {
4998 neon_load_reg64(cpu_V1, rm + pass);
4999 TCGV_UNUSED(tmp2);
5000 } else {
5001 if (pass == 1 && rd == rm) {
5002 gen_neon_movl_T0_scratch(2);
5003 tmp2 = new_tmp();
5004 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5005 } else {
5006 tmp2 = neon_load_reg(rm, pass);
5008 if (prewiden) {
5009 gen_neon_widen(cpu_V1, tmp2, size, u);
5012 switch (op) {
5013 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5014 gen_neon_addl(size);
5015 break;
5016 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5017 gen_neon_subl(size);
5018 break;
5019 case 5: case 7: /* VABAL, VABDL */
5020 switch ((size << 1) | u) {
5021 case 0:
5022 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5023 break;
5024 case 1:
5025 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5026 break;
5027 case 2:
5028 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5029 break;
5030 case 3:
5031 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5032 break;
5033 case 4:
5034 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5035 break;
5036 case 5:
5037 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5038 break;
5039 default: abort();
5041 dead_tmp(tmp2);
5042 dead_tmp(tmp);
5043 break;
5044 case 8: case 9: case 10: case 11: case 12: case 13:
5045 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5046 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5047 break;
5048 case 14: /* Polynomial VMULL */
5049 cpu_abort(env, "Polynomial VMULL not implemented");
5051 default: /* 15 is RESERVED. */
5052 return 1;
5054 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5055 /* Accumulate. */
5056 if (op == 10 || op == 11) {
5057 gen_neon_negl(cpu_V0, size);
5060 if (op != 13) {
5061 neon_load_reg64(cpu_V1, rd + pass);
5064 switch (op) {
5065 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5066 gen_neon_addl(size);
5067 break;
5068 case 9: case 11: /* VQDMLAL, VQDMLSL */
5069 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5070 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5071 break;
5073 case 13: /* VQDMULL */
5074 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5075 break;
5076 default:
5077 abort();
5079 neon_store_reg64(cpu_V0, rd + pass);
5080 } else if (op == 4 || op == 6) {
5081 /* Narrowing operation. */
5082 tmp = new_tmp();
5083 if (u) {
5084 switch (size) {
5085 case 0:
5086 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5087 break;
5088 case 1:
5089 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5090 break;
5091 case 2:
5092 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5093 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5094 break;
5095 default: abort();
5097 } else {
5098 switch (size) {
5099 case 0:
5100 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5101 break;
5102 case 1:
5103 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5104 break;
5105 case 2:
5106 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5107 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5108 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5109 break;
5110 default: abort();
5113 if (pass == 0) {
5114 tmp3 = tmp;
5115 } else {
5116 neon_store_reg(rd, 0, tmp3);
5117 neon_store_reg(rd, 1, tmp);
5119 } else {
5120 /* Write back the result. */
5121 neon_store_reg64(cpu_V0, rd + pass);
5124 } else {
5125 /* Two registers and a scalar. */
5126 switch (op) {
5127 case 0: /* Integer VMLA scalar */
5128 case 1: /* Float VMLA scalar */
5129 case 4: /* Integer VMLS scalar */
5130 case 5: /* Floating point VMLS scalar */
5131 case 8: /* Integer VMUL scalar */
5132 case 9: /* Floating point VMUL scalar */
5133 case 12: /* VQDMULH scalar */
5134 case 13: /* VQRDMULH scalar */
5135 gen_neon_get_scalar(size, rm);
5136 gen_neon_movl_scratch_T0(0);
5137 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5138 if (pass != 0)
5139 gen_neon_movl_T0_scratch(0);
5140 NEON_GET_REG(T1, rn, pass);
5141 if (op == 12) {
5142 if (size == 1) {
5143 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5144 } else {
5145 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5147 } else if (op == 13) {
5148 if (size == 1) {
5149 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5150 } else {
5151 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5153 } else if (op & 1) {
5154 gen_helper_neon_mul_f32(CPU_T001);
5155 } else {
5156 switch (size) {
5157 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5158 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5159 case 2: gen_op_mul_T0_T1(); break;
5160 default: return 1;
5163 if (op < 8) {
5164 /* Accumulate. */
5165 NEON_GET_REG(T1, rd, pass);
5166 switch (op) {
5167 case 0:
5168 gen_neon_add(size);
5169 break;
5170 case 1:
5171 gen_helper_neon_add_f32(CPU_T001);
5172 break;
5173 case 4:
5174 gen_neon_rsb(size);
5175 break;
5176 case 5:
5177 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5178 break;
5179 default:
5180 abort();
5183 NEON_SET_REG(T0, rd, pass);
5185 break;
5186 case 2: /* VMLAL scalar */
5187 case 3: /* VQDMLAL scalar */
5188 case 6: /* VMLSL scalar */
5189 case 7: /* VQDMLSL scalar */
5190 case 10: /* VMULL scalar */
5191 case 11: /* VQDMULL scalar */
5192 if (size == 0 && (op == 3 || op == 7 || op == 11))
5193 return 1;
5195 gen_neon_get_scalar(size, rm);
5196 NEON_GET_REG(T1, rn, 1);
5198 for (pass = 0; pass < 2; pass++) {
5199 if (pass == 0) {
5200 tmp = neon_load_reg(rn, 0);
5201 } else {
5202 tmp = new_tmp();
5203 tcg_gen_mov_i32(tmp, cpu_T[1]);
5205 tmp2 = new_tmp();
5206 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5207 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5208 if (op == 6 || op == 7) {
5209 gen_neon_negl(cpu_V0, size);
5211 if (op != 11) {
5212 neon_load_reg64(cpu_V1, rd + pass);
5214 switch (op) {
5215 case 2: case 6:
5216 gen_neon_addl(size);
5217 break;
5218 case 3: case 7:
5219 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5220 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5221 break;
5222 case 10:
5223 /* no-op */
5224 break;
5225 case 11:
5226 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5227 break;
5228 default:
5229 abort();
5231 neon_store_reg64(cpu_V0, rd + pass);
5233 break;
5234 default: /* 14 and 15 are RESERVED */
5235 return 1;
5238 } else { /* size == 3 */
5239 if (!u) {
5240 /* Extract. */
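/* VEXT selects a contiguous byte window from the Vm:Vn pair, e.g.
   VEXT.8 d0, d1, d2, #3 yields (d1 >> 24) | (d2 << 40). */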
5241 imm = (insn >> 8) & 0xf;
5242 count = q + 1;
5244 if (imm > 7 && !q)
5245 return 1;
5247 if (imm == 0) {
5248 neon_load_reg64(cpu_V0, rn);
5249 if (q) {
5250 neon_load_reg64(cpu_V1, rn + 1);
5252 } else if (imm == 8) {
5253 neon_load_reg64(cpu_V0, rn + 1);
5254 if (q) {
5255 neon_load_reg64(cpu_V1, rm);
5257 } else if (q) {
5258 tmp64 = tcg_temp_new_i64();
5259 if (imm < 8) {
5260 neon_load_reg64(cpu_V0, rn);
5261 neon_load_reg64(tmp64, rn + 1);
5262 } else {
5263 neon_load_reg64(cpu_V0, rn + 1);
5264 neon_load_reg64(tmp64, rm);
5266 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5267 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5268 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5269 if (imm < 8) {
5270 neon_load_reg64(cpu_V1, rm);
5271 } else {
5272 neon_load_reg64(cpu_V1, rm + 1);
5273 imm -= 8;
5275 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5276 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5277 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5278 } else {
5279 /* BUGFIX */
5280 neon_load_reg64(cpu_V0, rn);
5281 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5282 neon_load_reg64(cpu_V1, rm);
5283 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5284 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5286 neon_store_reg64(cpu_V0, rd);
5287 if (q) {
5288 neon_store_reg64(cpu_V1, rd + 1);
5290 } else if ((insn & (1 << 11)) == 0) {
5291 /* Two register misc. */
5292 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5293 size = (insn >> 18) & 3;
5294 switch (op) {
5295 case 0: /* VREV64 */
5296 if (size == 3)
5297 return 1;
5298 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5299 NEON_GET_REG(T0, rm, pass * 2);
5300 NEON_GET_REG(T1, rm, pass * 2 + 1);
5301 switch (size) {
5302 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5303 case 1: gen_swap_half(cpu_T[0]); break;
5304 case 2: /* no-op */ break;
5305 default: abort();
5307 NEON_SET_REG(T0, rd, pass * 2 + 1);
5308 if (size == 2) {
5309 NEON_SET_REG(T1, rd, pass * 2);
5310 } else {
5311 gen_op_movl_T0_T1();
5312 switch (size) {
5313 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5314 case 1: gen_swap_half(cpu_T[0]); break;
5315 default: abort();
5317 NEON_SET_REG(T0, rd, pass * 2);
5320 break;
5321 case 4: case 5: /* VPADDL */
5322 case 12: case 13: /* VPADAL */
5323 if (size == 3)
5324 return 1;
5325 for (pass = 0; pass < q + 1; pass++) {
5326 tmp = neon_load_reg(rm, pass * 2);
5327 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5328 tmp = neon_load_reg(rm, pass * 2 + 1);
5329 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5330 switch (size) {
5331 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5332 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5333 case 2: tcg_gen_add_i64(CPU_V001); break;
5334 default: abort();
5336 if (op >= 12) {
5337 /* Accumulate. */
5338 neon_load_reg64(cpu_V1, rd + pass);
5339 gen_neon_addl(size);
5341 neon_store_reg64(cpu_V0, rd + pass);
5343 break;
5344 case 33: /* VTRN */
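/* For size == 2 this is a plain cross-register element swap:
   Reg    Before       After
   Rd     A1 A0        B0 A0
   Rm     B1 B0        B1 A1 */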
5345 if (size == 2) {
5346 for (n = 0; n < (q ? 4 : 2); n += 2) {
5347 NEON_GET_REG(T0, rm, n);
5348 NEON_GET_REG(T1, rd, n + 1);
5349 NEON_SET_REG(T1, rm, n);
5350 NEON_SET_REG(T0, rd, n + 1);
5352 } else {
5353 goto elementwise;
5355 break;
5356 case 34: /* VUZP */
5357 /* Reg Before After
5358 Rd A3 A2 A1 A0 B2 B0 A2 A0
5359 Rm B3 B2 B1 B0 B3 B1 A3 A1 */
5361 if (size == 3)
5362 return 1;
5363 gen_neon_unzip(rd, q, 0, size);
5364 gen_neon_unzip(rm, q, 4, size);
5365 if (q) {
5366 static int unzip_order_q[8] =
5367 {0, 2, 4, 6, 1, 3, 5, 7};
5368 for (n = 0; n < 8; n++) {
5369 int reg = (n < 4) ? rd : rm;
5370 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5371 NEON_SET_REG(T0, reg, n % 4);
5373 } else {
5374 static int unzip_order[4] =
5375 {0, 4, 1, 5};
5376 for (n = 0; n < 4; n++) {
5377 int reg = (n < 2) ? rd : rm;
5378 gen_neon_movl_T0_scratch(unzip_order[n]);
5379 NEON_SET_REG(T0, reg, n % 2);
5382 break;
5383 case 35: /* VZIP */
5384 /* Reg Before After
5385 Rd A3 A2 A1 A0 B1 A1 B0 A0
5386 Rm B3 B2 B1 B0 B3 A3 B2 A2 */
5388 if (size == 3)
5389 return 1;
5390 count = (q ? 4 : 2);
5391 for (n = 0; n < count; n++) {
5392 NEON_GET_REG(T0, rd, n);
5393 NEON_GET_REG(T1, rm, n);
5394 switch (size) {
5395 case 0: gen_neon_zip_u8(cpu_T[0], cpu_T[1]); break;
5396 case 1: gen_neon_zip_u16(cpu_T[0], cpu_T[1]); break;
5397 case 2: /* no-op */; break;
5398 default: abort();
5400 gen_neon_movl_scratch_T0(n * 2);
5401 gen_neon_movl_scratch_T1(n * 2 + 1);
5403 for (n = 0; n < count * 2; n++) {
5404 int reg = (n < count) ? rd : rm;
5405 gen_neon_movl_T0_scratch(n);
5406 NEON_SET_REG(T0, reg, n % count);
5408 break;
5409 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5410 if (size == 3)
5411 return 1;
5412 TCGV_UNUSED(tmp2);
5413 for (pass = 0; pass < 2; pass++) {
5414 neon_load_reg64(cpu_V0, rm + pass);
5415 tmp = new_tmp();
5416 if (op == 36 && q == 0) {
5417 gen_neon_narrow(size, tmp, cpu_V0);
5418 } else if (q) {
5419 gen_neon_narrow_satu(size, tmp, cpu_V0);
5420 } else {
5421 gen_neon_narrow_sats(size, tmp, cpu_V0);
5423 if (pass == 0) {
5424 tmp2 = tmp;
5425 } else {
5426 neon_store_reg(rd, 0, tmp2);
5427 neon_store_reg(rd, 1, tmp);
5430 break;
5431 case 38: /* VSHLL */
5432 if (q || size == 3)
5433 return 1;
5434 tmp = neon_load_reg(rm, 0);
5435 tmp2 = neon_load_reg(rm, 1);
5436 for (pass = 0; pass < 2; pass++) {
5437 if (pass == 1)
5438 tmp = tmp2;
5439 gen_neon_widen(cpu_V0, tmp, size, 1);
5440 neon_store_reg64(cpu_V0, rd + pass);
5442 break;
5443 default:
5444 elementwise:
5445 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5446 if (op == 30 || op == 31 || op >= 58) {
5447 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5448 neon_reg_offset(rm, pass));
5449 } else {
5450 NEON_GET_REG(T0, rm, pass);
5452 switch (op) {
5453 case 1: /* VREV32 */
5454 switch (size) {
5455 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
5456 case 1: gen_swap_half(cpu_T[0]); break;
5457 default: return 1;
5459 break;
5460 case 2: /* VREV16 */
5461 if (size != 0)
5462 return 1;
5463 gen_rev16(cpu_T[0]);
5464 break;
5465 case 8: /* CLS */
5466 switch (size) {
5467 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5468 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5469 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5470 default: return 1;
5472 break;
5473 case 9: /* CLZ */
5474 switch (size) {
5475 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5476 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5477 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5478 default: return 1;
5480 break;
5481 case 10: /* CNT */
5482 if (size != 0)
5483 return 1;
5484 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5485 break;
5486 case 11: /* VNOT */
5487 if (size != 0)
5488 return 1;
5489 gen_op_notl_T0();
5490 break;
5491 case 14: /* VQABS */
5492 switch (size) {
5493 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5494 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5495 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5496 default: return 1;
5498 break;
5499 case 15: /* VQNEG */
5500 switch (size) {
5501 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5502 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5503 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5504 default: return 1;
5506 break;
5507 case 16: case 19: /* VCGT #0, VCLE #0 */
5508 gen_op_movl_T1_im(0);
5509 switch(size) {
5510 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5511 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5512 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5513 default: return 1;
5515 if (op == 19)
5516 gen_op_notl_T0();
5517 break;
5518 case 17: case 20: /* VCGE #0, VCLT #0 */
5519 gen_op_movl_T1_im(0);
5520 switch(size) {
5521 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5522 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5523 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5524 default: return 1;
5526 if (op == 20)
5527 gen_op_notl_T0();
5528 break;
5529 case 18: /* VCEQ #0 */
5530 gen_op_movl_T1_im(0);
5531 switch(size) {
5532 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5533 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5534 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5535 default: return 1;
5537 break;
5538 case 22: /* VABS */
5539 switch(size) {
5540 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5541 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5542 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5543 default: return 1;
5545 break;
5546 case 23: /* VNEG */
5547 gen_op_movl_T1_im(0);
5548 if (size == 3)
5549 return 1;
5550 gen_neon_rsb(size);
5551 break;
5552 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5553 gen_op_movl_T1_im(0);
5554 gen_helper_neon_cgt_f32(CPU_T001);
5555 if (op == 27)
5556 gen_op_notl_T0();
5557 break;
5558 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5559 gen_op_movl_T1_im(0);
5560 gen_helper_neon_cge_f32(CPU_T001);
5561 if (op == 28)
5562 gen_op_notl_T0();
5563 break;
5564 case 26: /* Float VCEQ #0 */
5565 gen_op_movl_T1_im(0);
5566 gen_helper_neon_ceq_f32(CPU_T001);
5567 break;
5568 case 30: /* Float VABS */
5569 gen_vfp_abs(0);
5570 break;
5571 case 31: /* Float VNEG */
5572 gen_vfp_neg(0);
5573 break;
5574 case 32: /* VSWP */
5575 NEON_GET_REG(T1, rd, pass);
5576 NEON_SET_REG(T1, rm, pass);
5577 break;
5578 case 33: /* VTRN */
5579 NEON_GET_REG(T1, rd, pass);
5580 switch (size) {
5581 case 0: gen_neon_trn_u8(cpu_T[0], cpu_T[1]); break;
5582 case 1: gen_neon_trn_u16(cpu_T[0], cpu_T[1]); break;
5583 case 2: abort();
5584 default: return 1;
5586 NEON_SET_REG(T1, rm, pass);
5587 break;
5588 case 56: /* Integer VRECPE */
5589 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5590 break;
5591 case 57: /* Integer VRSQRTE */
5592 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5593 break;
5594 case 58: /* Float VRECPE */
5595 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5596 break;
5597 case 59: /* Float VRSQRTE */
5598 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5599 break;
5600 case 60: /* VCVT.F32.S32 */
5601 gen_vfp_tosiz(0);
5602 break;
5603 case 61: /* VCVT.F32.U32 */
5604 gen_vfp_touiz(0);
5605 break;
5606 case 62: /* VCVT.S32.F32 */
5607 gen_vfp_sito(0);
5608 break;
5609 case 63: /* VCVT.U32.F32 */
5610 gen_vfp_uito(0);
5611 break;
5612 default:
5613 /* Reserved: 21, 29, 39-56 */
5614 return 1;
5616 if (op == 30 || op == 31 || op >= 58) {
5617 tcg_gen_st_f32(cpu_F0s, cpu_env,
5618 neon_reg_offset(rd, pass));
5619 } else {
5620 NEON_SET_REG(T0, rd, pass);
5623 break;
5625 } else if ((insn & (1 << 10)) == 0) {
5626 /* VTBL, VTBX. */
5627 n = ((insn >> 5) & 0x18) + 8;
5628 if (insn & (1 << 6)) {
5629 tmp = neon_load_reg(rd, 0);
5630 } else {
5631 tmp = new_tmp();
5632 tcg_gen_movi_i32(tmp, 0);
5634 tmp2 = neon_load_reg(rm, 0);
5635 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5636 tcg_const_i32(n));
5637 dead_tmp(tmp);
5638 if (insn & (1 << 6)) {
5639 tmp = neon_load_reg(rd, 1);
5640 } else {
5641 tmp = new_tmp();
5642 tcg_gen_movi_i32(tmp, 0);
5644 tmp3 = neon_load_reg(rm, 1);
5645 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5646 tcg_const_i32(n));
5647 neon_store_reg(rd, 0, tmp2);
5648 neon_store_reg(rd, 1, tmp3);
5649 dead_tmp(tmp);
5650 } else if ((insn & 0x380) == 0) {
5651 /* VDUP */
5652 if (insn & (1 << 19)) {
5653 NEON_GET_REG(T0, rm, 1);
5654 } else {
5655 NEON_GET_REG(T0, rm, 0);
5657 if (insn & (1 << 16)) {
5658 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5659 } else if (insn & (1 << 17)) {
5660 if ((insn >> 18) & 1)
5661 gen_neon_dup_high16(cpu_T[0]);
5662 else
5663 gen_neon_dup_low16(cpu_T[0]);
5665 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5666 NEON_SET_REG(T0, rd, pass);
5668 } else {
5669 return 1;
5673 return 0;
5676 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5678 int crn = (insn >> 16) & 0xf;
5679 int crm = insn & 0xf;
5680 int op1 = (insn >> 21) & 7;
5681 int op2 = (insn >> 5) & 7;
5682 int rt = (insn >> 12) & 0xf;
5683 TCGv tmp;
5685 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5686 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5687 /* TEECR */
5688 if (IS_USER(s))
5689 return 1;
5690 tmp = load_cpu_field(teecr);
5691 store_reg(s, rt, tmp);
5692 return 0;
5694 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5695 /* TEEHBR */
5696 if (IS_USER(s) && (env->teecr & 1))
5697 return 1;
5698 tmp = load_cpu_field(teehbr);
5699 store_reg(s, rt, tmp);
5700 return 0;
5703 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5704 op1, crn, crm, op2);
5705 return 1;
5708 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5710 int crn = (insn >> 16) & 0xf;
5711 int crm = insn & 0xf;
5712 int op1 = (insn >> 21) & 7;
5713 int op2 = (insn >> 5) & 7;
5714 int rt = (insn >> 12) & 0xf;
5715 TCGv tmp;
5717 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5718 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5719 /* TEECR */
5720 if (IS_USER(s))
5721 return 1;
5722 tmp = load_reg(s, rt);
5723 gen_helper_set_teecr(cpu_env, tmp);
5724 dead_tmp(tmp);
5725 return 0;
5727 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5728 /* TEEHBR */
5729 if (IS_USER(s) && (env->teecr & 1))
5730 return 1;
5731 tmp = load_reg(s, rt);
5732 store_cpu_field(tmp, teehbr);
5733 return 0;
5736 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5737 op1, crn, crm, op2);
5738 return 1;
5741 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5743 int cpnum;
5745 cpnum = (insn >> 8) & 0xf;
5746 if (arm_feature(env, ARM_FEATURE_XSCALE)
5747 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5748 return 1;
5750 switch (cpnum) {
5751 case 0:
5752 case 1:
5753 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5754 return disas_iwmmxt_insn(env, s, insn);
5755 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5756 return disas_dsp_insn(env, s, insn);
5758 return 1;
5759 case 10:
5760 case 11:
5761 return disas_vfp_insn (env, s, insn);
5762 case 14:
5763 /* Coprocessors 7-15 are architecturally reserved by ARM.
5764 Unfortunately Intel decided to ignore this. */
5765 if (arm_feature(env, ARM_FEATURE_XSCALE))
5766 goto board;
5767 if (insn & (1 << 20))
5768 return disas_cp14_read(env, s, insn);
5769 else
5770 return disas_cp14_write(env, s, insn);
5771 case 15:
5772 return disas_cp15_insn (env, s, insn);
5773 default:
5774 board:
5775 /* Unknown coprocessor. See if the board has hooked it. */
5776 return disas_cp_insn (env, s, insn);
5781 /* Store a 64-bit value to a register pair. Clobbers val. */
5782 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5784 TCGv tmp;
5785 tmp = new_tmp();
5786 tcg_gen_trunc_i64_i32(tmp, val);
5787 store_reg(s, rlow, tmp);
5788 tmp = new_tmp();
5789 tcg_gen_shri_i64(val, val, 32);
5790 tcg_gen_trunc_i64_i32(tmp, val);
5791 store_reg(s, rhigh, tmp);
5794 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5795 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5797 TCGv_i64 tmp;
5798 TCGv tmp2;
5800 /* Load value and extend to 64 bits. */
5801 tmp = tcg_temp_new_i64();
5802 tmp2 = load_reg(s, rlow);
5803 tcg_gen_extu_i32_i64(tmp, tmp2);
5804 dead_tmp(tmp2);
5805 tcg_gen_add_i64(val, val, tmp);
5808 /* load and add a 64-bit value from a register pair. */
5809 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5811 TCGv_i64 tmp;
5812 TCGv tmpl;
5813 TCGv tmph;
5815 /* Load 64-bit value rd:rn. */
5816 tmpl = load_reg(s, rlow);
5817 tmph = load_reg(s, rhigh);
5818 tmp = tcg_temp_new_i64();
5819 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5820 dead_tmp(tmpl);
5821 dead_tmp(tmph);
5822 tcg_gen_add_i64(val, val, tmp);
5825 /* Set N and Z flags from a 64-bit value. */
5826 static void gen_logicq_cc(TCGv_i64 val)
5828 TCGv tmp = new_tmp();
5829 gen_helper_logicq_cc(tmp, val);
5830 gen_logic_CC(tmp);
5831 dead_tmp(tmp);
5834 static void disas_arm_insn(CPUState * env, DisasContext *s)
5836 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5837 TCGv tmp;
5838 TCGv tmp2;
5839 TCGv tmp3;
5840 TCGv addr;
5841 TCGv_i64 tmp64;
5843 insn = ldl_code(s->pc);
5844 s->pc += 4;
5846 /* M variants do not implement ARM mode. */
5847 if (IS_M(env))
5848 goto illegal_op;
5849 cond = insn >> 28;
5850 if (cond == 0xf){
5851 /* Unconditional instructions. */
5852 if (((insn >> 25) & 7) == 1) {
5853 /* NEON Data processing. */
5854 if (!arm_feature(env, ARM_FEATURE_NEON))
5855 goto illegal_op;
5857 if (disas_neon_data_insn(env, s, insn))
5858 goto illegal_op;
5859 return;
5861 if ((insn & 0x0f100000) == 0x04000000) {
5862 /* NEON load/store. */
5863 if (!arm_feature(env, ARM_FEATURE_NEON))
5864 goto illegal_op;
5866 if (disas_neon_ls_insn(env, s, insn))
5867 goto illegal_op;
5868 return;
5870 if ((insn & 0x0d70f000) == 0x0550f000) {
5871 ARCH(5);
5872 return; /* PLD */
5873 } else if ((insn & 0x0ffffdff) == 0x01010000) {
5874 ARCH(6);
5875 /* setend */
5876 if (insn & (1 << 9)) {
5877 /* BE8 mode not implemented. */
5878 goto illegal_op;
5880 return;
5881 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5882 switch ((insn >> 4) & 0xf) {
5883 case 1: /* clrex */
5884 ARCH(6K);
5885 gen_helper_clrex(cpu_env);
5886 return;
5887 case 4: /* dsb */
5888 case 5: /* dmb */
5889 case 6: /* isb */
5890 ARCH(7);
5891 /* We don't emulate caches so these are a no-op. */
5892 return;
5893 default:
5894 goto illegal_op;
5896 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5897 /* srs */
5898 int32_t offset;
5899 if (IS_USER(s))
5900 goto illegal_op;
5901 ARCH(6);
5902 op1 = (insn & 0x1f);
5903 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5904 addr = load_reg(s, 13);
5905 } else {
5906 addr = new_tmp();
5907 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5909 i = (insn >> 23) & 3;
5910 switch (i) {
5911 case 0: offset = -4; break; /* DA */
5912 case 1: offset = 0; break; /* IA */
5913 case 2: offset = -8; break; /* DB */
5914 case 3: offset = 4; break; /* IB */
5915 default: abort();
5917 if (offset)
5918 tcg_gen_addi_i32(addr, addr, offset);
5919 tmp = load_reg(s, 14);
5920 gen_st32(tmp, addr, 0);
5921 tmp = load_cpu_field(spsr);
5922 tcg_gen_addi_i32(addr, addr, 4);
5923 gen_st32(tmp, addr, 0);
5924 if (insn & (1 << 21)) {
5925 /* Base writeback. */
5926 switch (i) {
5927 case 0: offset = -8; break;
5928 case 1: offset = 4; break;
5929 case 2: offset = -4; break;
5930 case 3: offset = 0; break;
5931 default: abort();
5933 if (offset)
5934 tcg_gen_addi_i32(addr, addr, offset);
5935 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5936 store_reg(s, 13, addr);
5937 } else {
5938 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
5939 dead_tmp(addr);
5941 } else {
5942 dead_tmp(addr);
5944 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5945 /* rfe */
5946 int32_t offset;
5947 if (IS_USER(s))
5948 goto illegal_op;
5949 ARCH(6);
5950 rn = (insn >> 16) & 0xf;
5951 addr = load_reg(s, rn);
5952 i = (insn >> 23) & 3;
5953 switch (i) {
5954 case 0: offset = -4; break; /* DA */
5955 case 1: offset = 0; break; /* IA */
5956 case 2: offset = -8; break; /* DB */
5957 case 3: offset = 4; break; /* IB */
5958 default: abort();
5960 if (offset)
5961 tcg_gen_addi_i32(addr, addr, offset);
5962 /* Load PC into tmp and CPSR into tmp2. */
5963 tmp = gen_ld32(addr, 0);
5964 tcg_gen_addi_i32(addr, addr, 4);
5965 tmp2 = gen_ld32(addr, 0);
5966 if (insn & (1 << 21)) {
5967 /* Base writeback. */
5968 switch (i) {
5969 case 0: offset = -8; break;
5970 case 1: offset = 4; break;
5971 case 2: offset = -4; break;
5972 case 3: offset = 0; break;
5973 default: abort();
5975 if (offset)
5976 tcg_gen_addi_i32(addr, addr, offset);
5977 store_reg(s, rn, addr);
5978 } else {
5979 dead_tmp(addr);
5981 gen_rfe(s, tmp, tmp2);
5982 return;
5983 } else if ((insn & 0x0e000000) == 0x0a000000) {
5984 /* branch link and change to thumb (blx <offset>) */
5985 int32_t offset;
5986 ARCH(5);
5987 val = (uint32_t)s->pc;
5988 tmp = new_tmp();
5989 tcg_gen_movi_i32(tmp, val);
5990 store_reg(s, 14, tmp);
5991 /* Sign-extend the 24-bit offset */
5992 offset = (((int32_t)insn) << 8) >> 8;
5993 /* offset * 4 + bit24 * 2 + (thumb bit) */
5994 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5995 /* pipeline offset */
5996 val += 4;
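/* Net effect: BLX <label> at address P targets
   P + 8 + (sign-extended imm24 << 2) + (H << 1), with bit 0 set so
   the branch enters Thumb state. */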
5997 gen_bx_im(s, val);
5998 return;
5999 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6000 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6001 /* iWMMXt register transfer. */
6002 if (env->cp15.c15_cpar & (1 << 1))
6003 if (!disas_iwmmxt_insn(env, s, insn))
6004 return;
6006 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6007 /* Coprocessor double register transfer. */
6008 ARCH(5);
6009 } else if ((insn & 0x0f000010) == 0x0e000010) {
6010 /* Additional coprocessor register transfer. */
6011 ARCH(5);
6012 } else if ((insn & 0x0ff10020) == 0x01000000) {
6013 uint32_t mask;
6014 uint32_t val;
6015 /* cps (privileged) */
6016 if (IS_USER(s))
6017 return;
6018 mask = val = 0;
6019 if (insn & (1 << 19)) {
6020 if (insn & (1 << 8))
6021 mask |= CPSR_A;
6022 if (insn & (1 << 7))
6023 mask |= CPSR_I;
6024 if (insn & (1 << 6))
6025 mask |= CPSR_F;
6026 if (insn & (1 << 18))
6027 val |= mask;
6029 if (insn & (1 << 17)) {
6030 mask |= CPSR_M;
6031 val |= (insn & 0x1f);
6033 if (mask) {
6034 gen_set_psr_im(s, mask, 0, val);
6036 return;
6038 goto illegal_op;
6040 if (cond != 0xe) {
6041 /* if not always executed, we generate a conditional jump to the
6042 next instruction */
6043 s->condlabel = gen_new_label();
6044 gen_test_cc(cond ^ 1, s->condlabel);
6045 s->condjmp = 1;
6047 if ((insn & 0x0f900000) == 0x03000000) {
6048 if ((insn & (1 << 21)) == 0) {
6049 ARCH(6T2);
6050 rd = (insn >> 12) & 0xf;
6051 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6052 if ((insn & (1 << 22)) == 0) {
6053 /* MOVW */
6054 tmp = new_tmp();
6055 tcg_gen_movi_i32(tmp, val);
6056 } else {
6057 /* MOVT */
6058 tmp = load_reg(s, rd);
6059 tcg_gen_ext16u_i32(tmp, tmp);
6060 tcg_gen_ori_i32(tmp, tmp, val << 16);
6062 store_reg(s, rd, tmp);
6063 } else {
6064 if (((insn >> 12) & 0xf) != 0xf)
6065 goto illegal_op;
6066 if (((insn >> 16) & 0xf) == 0) {
6067 gen_nop_hint(s, insn & 0xff);
6068 } else {
6069 /* CPSR = immediate */
6070 val = insn & 0xff;
6071 shift = ((insn >> 8) & 0xf) * 2;
6072 if (shift)
6073 val = (val >> shift) | (val << (32 - shift));
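/* Standard ARM immediate: imm8 rotated right by 2 * rot.  Example:
   imm8 = 0xff, rot = 14 -> (0xff >> 28) | (0xff << 4) = 0x00000ff0. */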
6074 i = ((insn & (1 << 22)) != 0);
6075 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6076 goto illegal_op;
6079 } else if ((insn & 0x0f900000) == 0x01000000
6080 && (insn & 0x00000090) != 0x00000090) {
6081 /* miscellaneous instructions */
6082 op1 = (insn >> 21) & 3;
6083 sh = (insn >> 4) & 0xf;
6084 rm = insn & 0xf;
6085 switch (sh) {
6086 case 0x0: /* move program status register */
6087 if (op1 & 1) {
6088 /* PSR = reg */
6089 tmp = load_reg(s, rm);
6090 i = ((op1 & 2) != 0);
6091 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6092 goto illegal_op;
6093 } else {
6094 /* reg = PSR */
6095 rd = (insn >> 12) & 0xf;
6096 if (op1 & 2) {
6097 if (IS_USER(s))
6098 goto illegal_op;
6099 tmp = load_cpu_field(spsr);
6100 } else {
6101 tmp = new_tmp();
6102 gen_helper_cpsr_read(tmp);
6104 store_reg(s, rd, tmp);
6106 break;
6107 case 0x1:
6108 if (op1 == 1) {
6109 /* branch/exchange thumb (bx). */
6110 tmp = load_reg(s, rm);
6111 gen_bx(s, tmp);
6112 } else if (op1 == 3) {
6113 /* clz */
6114 rd = (insn >> 12) & 0xf;
6115 tmp = load_reg(s, rm);
6116 gen_helper_clz(tmp, tmp);
6117 store_reg(s, rd, tmp);
6118 } else {
6119 goto illegal_op;
6121 break;
6122 case 0x2:
6123 if (op1 == 1) {
6124 ARCH(5J); /* bxj */
6125 /* Trivial implementation equivalent to bx. */
6126 tmp = load_reg(s, rm);
6127 gen_bx(s, tmp);
6128 } else {
6129 goto illegal_op;
6131 break;
6132 case 0x3:
6133 if (op1 != 1)
6134 goto illegal_op;
6135 ARCH(5);
6136 /* branch link/exchange thumb (blx) */
6137 tmp = load_reg(s, rm);
6138 tmp2 = new_tmp();
6139 tcg_gen_movi_i32(tmp2, s->pc);
6140 store_reg(s, 14, tmp2);
6141 gen_bx(s, tmp);
6142 break;
6143 case 0x5: /* saturating add/subtract */
6144 rd = (insn >> 12) & 0xf;
6145 rn = (insn >> 16) & 0xf;
6146 tmp = load_reg(s, rm);
6147 tmp2 = load_reg(s, rn);
6148 if (op1 & 2)
6149 gen_helper_double_saturate(tmp2, tmp2);
6150 if (op1 & 1)
6151 gen_helper_sub_saturate(tmp, tmp, tmp2);
6152 else
6153 gen_helper_add_saturate(tmp, tmp, tmp2);
6154 dead_tmp(tmp2);
6155 store_reg(s, rd, tmp);
6156 break;
6157 case 7: /* bkpt */
6158 ARCH(5);
6159 gen_set_condexec(s);
6160 gen_set_pc_im(s->pc - 4);
6161 gen_exception(EXCP_BKPT);
6162 s->is_jmp = DISAS_JUMP;
6163 break;
6164 case 0x8: /* signed multiply */
6165 case 0xa:
6166 case 0xc:
6167 case 0xe:
6168 rs = (insn >> 8) & 0xf;
6169 rn = (insn >> 12) & 0xf;
6170 rd = (insn >> 16) & 0xf;
6171 if (op1 == 1) {
6172 /* (32 * 16) >> 16 */
6173 tmp = load_reg(s, rm);
6174 tmp2 = load_reg(s, rs);
6175 if (sh & 4)
6176 tcg_gen_sari_i32(tmp2, tmp2, 16);
6177 else
6178 gen_sxth(tmp2);
6179 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6180 tcg_gen_shri_i64(tmp64, tmp64, 16);
6181 tmp = new_tmp();
6182 tcg_gen_trunc_i64_i32(tmp, tmp64);
6183 if ((sh & 2) == 0) {
6184 tmp2 = load_reg(s, rn);
6185 gen_helper_add_setq(tmp, tmp, tmp2);
6186 dead_tmp(tmp2);
6188 store_reg(s, rd, tmp);
6189 } else {
6190 /* 16 * 16 */
6191 tmp = load_reg(s, rm);
6192 tmp2 = load_reg(s, rs);
6193 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6194 dead_tmp(tmp2);
6195 if (op1 == 2) {
6196 tmp64 = tcg_temp_new_i64();
6197 tcg_gen_ext_i32_i64(tmp64, tmp);
6198 dead_tmp(tmp);
6199 gen_addq(s, tmp64, rn, rd);
6200 gen_storeq_reg(s, rn, rd, tmp64);
6201 } else {
6202 if (op1 == 0) {
6203 tmp2 = load_reg(s, rn);
6204 gen_helper_add_setq(tmp, tmp, tmp2);
6205 dead_tmp(tmp2);
6207 store_reg(s, rd, tmp);
6210 break;
6211 default:
6212 goto illegal_op;
6214 } else if (((insn & 0x0e000000) == 0 &&
6215 (insn & 0x00000090) != 0x90) ||
6216 ((insn & 0x0e000000) == (1 << 25))) {
6217 int set_cc, logic_cc, shiftop;
6219 op1 = (insn >> 21) & 0xf;
6220 set_cc = (insn >> 20) & 1;
6221 logic_cc = table_logic_cc[op1] & set_cc;
6223 /* data processing instruction */
6224 if (insn & (1 << 25)) {
6225 /* immediate operand */
6226 val = insn & 0xff;
6227 shift = ((insn >> 8) & 0xf) * 2;
6228 if (shift) {
6229 val = (val >> shift) | (val << (32 - shift));
6231 tmp2 = new_tmp();
6232 tcg_gen_movi_i32(tmp2, val);
6233 if (logic_cc && shift) {
6234 gen_set_CF_bit31(tmp2);
6236 } else {
6237 /* register */
6238 rm = (insn) & 0xf;
6239 tmp2 = load_reg(s, rm);
6240 shiftop = (insn >> 5) & 3;
6241 if (!(insn & (1 << 4))) {
6242 shift = (insn >> 7) & 0x1f;
6243 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6244 } else {
6245 rs = (insn >> 8) & 0xf;
6246 tmp = load_reg(s, rs);
6247 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6250 if (op1 != 0x0f && op1 != 0x0d) {
6251 rn = (insn >> 16) & 0xf;
6252 tmp = load_reg(s, rn);
6253 } else {
6254 TCGV_UNUSED(tmp);
6256 rd = (insn >> 12) & 0xf;
6257 switch(op1) {
6258 case 0x00:
6259 tcg_gen_and_i32(tmp, tmp, tmp2);
6260 if (logic_cc) {
6261 gen_logic_CC(tmp);
6263 store_reg_bx(env, s, rd, tmp);
6264 break;
6265 case 0x01:
6266 tcg_gen_xor_i32(tmp, tmp, tmp2);
6267 if (logic_cc) {
6268 gen_logic_CC(tmp);
6270 store_reg_bx(env, s, rd, tmp);
6271 break;
6272 case 0x02:
6273 if (set_cc && rd == 15) {
6274 /* SUBS r15, ... is used for exception return. */
6275 if (IS_USER(s)) {
6276 goto illegal_op;
6278 gen_helper_sub_cc(tmp, tmp, tmp2);
6279 gen_exception_return(s, tmp);
6280 } else {
6281 if (set_cc) {
6282 gen_helper_sub_cc(tmp, tmp, tmp2);
6283 } else {
6284 tcg_gen_sub_i32(tmp, tmp, tmp2);
6286 store_reg_bx(env, s, rd, tmp);
6288 break;
6289 case 0x03:
6290 if (set_cc) {
6291 gen_helper_sub_cc(tmp, tmp2, tmp);
6292 } else {
6293 tcg_gen_sub_i32(tmp, tmp2, tmp);
6295 store_reg_bx(env, s, rd, tmp);
6296 break;
6297 case 0x04:
6298 if (set_cc) {
6299 gen_helper_add_cc(tmp, tmp, tmp2);
6300 } else {
6301 tcg_gen_add_i32(tmp, tmp, tmp2);
6303 store_reg_bx(env, s, rd, tmp);
6304 break;
6305 case 0x05:
6306 if (set_cc) {
6307 gen_helper_adc_cc(tmp, tmp, tmp2);
6308 } else {
6309 gen_add_carry(tmp, tmp, tmp2);
6311 store_reg_bx(env, s, rd, tmp);
6312 break;
6313 case 0x06:
6314 if (set_cc) {
6315 gen_helper_sbc_cc(tmp, tmp, tmp2);
6316 } else {
6317 gen_sub_carry(tmp, tmp, tmp2);
6319 store_reg_bx(env, s, rd, tmp);
6320 break;
6321 case 0x07:
6322 if (set_cc) {
6323 gen_helper_sbc_cc(tmp, tmp2, tmp);
6324 } else {
6325 gen_sub_carry(tmp, tmp2, tmp);
6327 store_reg_bx(env, s, rd, tmp);
6328 break;
6329 case 0x08:
6330 if (set_cc) {
6331 tcg_gen_and_i32(tmp, tmp, tmp2);
6332 gen_logic_CC(tmp);
6334 dead_tmp(tmp);
6335 break;
6336 case 0x09:
6337 if (set_cc) {
6338 tcg_gen_xor_i32(tmp, tmp, tmp2);
6339 gen_logic_CC(tmp);
6341 dead_tmp(tmp);
6342 break;
6343 case 0x0a:
6344 if (set_cc) {
6345 gen_helper_sub_cc(tmp, tmp, tmp2);
6347 dead_tmp(tmp);
6348 break;
6349 case 0x0b:
6350 if (set_cc) {
6351 gen_helper_add_cc(tmp, tmp, tmp2);
6353 dead_tmp(tmp);
6354 break;
6355 case 0x0c:
6356 tcg_gen_or_i32(tmp, tmp, tmp2);
6357 if (logic_cc) {
6358 gen_logic_CC(tmp);
6360 store_reg_bx(env, s, rd, tmp);
6361 break;
6362 case 0x0d:
6363 if (logic_cc && rd == 15) {
6364 /* MOVS r15, ... is used for exception return. */
6365 if (IS_USER(s)) {
6366 goto illegal_op;
6368 gen_exception_return(s, tmp2);
6369 } else {
6370 if (logic_cc) {
6371 gen_logic_CC(tmp2);
6373 store_reg_bx(env, s, rd, tmp2);
6375 break;
6376 case 0x0e:
6377 tcg_gen_bic_i32(tmp, tmp, tmp2);
6378 if (logic_cc) {
6379 gen_logic_CC(tmp);
6381 store_reg_bx(env, s, rd, tmp);
6382 break;
6383 default:
6384 case 0x0f:
6385 tcg_gen_not_i32(tmp2, tmp2);
6386 if (logic_cc) {
6387 gen_logic_CC(tmp2);
6389 store_reg_bx(env, s, rd, tmp2);
6390 break;
6392 if (op1 != 0x0f && op1 != 0x0d) {
6393 dead_tmp(tmp2);
6395 } else {
6396 /* other instructions */
6397 op1 = (insn >> 24) & 0xf;
6398 switch(op1) {
6399 case 0x0:
6400 case 0x1:
6401 /* multiplies, extra load/stores */
6402 sh = (insn >> 5) & 3;
6403 if (sh == 0) {
6404 if (op1 == 0x0) {
6405 rd = (insn >> 16) & 0xf;
6406 rn = (insn >> 12) & 0xf;
6407 rs = (insn >> 8) & 0xf;
6408 rm = (insn) & 0xf;
6409 op1 = (insn >> 20) & 0xf;
6410 switch (op1) {
6411 case 0: case 1: case 2: case 3: case 6:
6412 /* 32 bit mul */
6413 tmp = load_reg(s, rs);
6414 tmp2 = load_reg(s, rm);
6415 tcg_gen_mul_i32(tmp, tmp, tmp2);
6416 dead_tmp(tmp2);
6417 if (insn & (1 << 22)) {
6418 /* Subtract (mls) */
6419 ARCH(6T2);
6420 tmp2 = load_reg(s, rn);
6421 tcg_gen_sub_i32(tmp, tmp2, tmp);
6422 dead_tmp(tmp2);
6423 } else if (insn & (1 << 21)) {
6424 /* Add */
6425 tmp2 = load_reg(s, rn);
6426 tcg_gen_add_i32(tmp, tmp, tmp2);
6427 dead_tmp(tmp2);
6429 if (insn & (1 << 20))
6430 gen_logic_CC(tmp);
6431 store_reg(s, rd, tmp);
6432 break;
6433 default:
6434 /* 64 bit mul */
6435 tmp = load_reg(s, rs);
6436 tmp2 = load_reg(s, rm);
6437 if (insn & (1 << 22))
6438 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6439 else
6440 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6441 if (insn & (1 << 21)) /* mult accumulate */
6442 gen_addq(s, tmp64, rn, rd);
6443 if (!(insn & (1 << 23))) { /* double accumulate */
6444 ARCH(6);
6445 gen_addq_lo(s, tmp64, rn);
6446 gen_addq_lo(s, tmp64, rd);
6448 if (insn & (1 << 20))
6449 gen_logicq_cc(tmp64);
6450 gen_storeq_reg(s, rn, rd, tmp64);
6451 break;
6453 } else {
6454 rn = (insn >> 16) & 0xf;
6455 rd = (insn >> 12) & 0xf;
6456 if (insn & (1 << 23)) {
6457 /* load/store exclusive */
6458 op1 = (insn >> 21) & 0x3;
6459 if (op1)
6460 ARCH(6K);
6461 else
6462 ARCH(6);
6463 gen_movl_T1_reg(s, rn);
6464 addr = cpu_T[1];
6465 if (insn & (1 << 20)) {
6466 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6467 switch (op1) {
6468 case 0: /* ldrex */
6469 tmp = gen_ld32(addr, IS_USER(s));
6470 break;
6471 case 1: /* ldrexd */
6472 tmp = gen_ld32(addr, IS_USER(s));
6473 store_reg(s, rd, tmp);
6474 tcg_gen_addi_i32(addr, addr, 4);
6475 tmp = gen_ld32(addr, IS_USER(s));
6476 rd++;
6477 break;
6478 case 2: /* ldrexb */
6479 tmp = gen_ld8u(addr, IS_USER(s));
6480 break;
6481 case 3: /* ldrexh */
6482 tmp = gen_ld16u(addr, IS_USER(s));
6483 break;
6484 default:
6485 abort();
6487 store_reg(s, rd, tmp);
6488 } else {
6489 int label = gen_new_label();
6490 rm = insn & 0xf;
6491 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6492 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6493 0, label);
6494 tmp = load_reg(s,rm);
6495 switch (op1) {
6496 case 0: /* strex */
6497 gen_st32(tmp, addr, IS_USER(s));
6498 break;
6499 case 1: /* strexd */
6500 gen_st32(tmp, addr, IS_USER(s));
6501 tcg_gen_addi_i32(addr, addr, 4);
6502 tmp = load_reg(s, rm + 1);
6503 gen_st32(tmp, addr, IS_USER(s));
6504 break;
6505 case 2: /* strexb */
6506 gen_st8(tmp, addr, IS_USER(s));
6507 break;
6508 case 3: /* strexh */
6509 gen_st16(tmp, addr, IS_USER(s));
6510 break;
6511 default:
6512 abort();
6514 gen_set_label(label);
6515 gen_movl_reg_T0(s, rd);
6517 } else {
6518 /* SWP instruction */
6519 rm = (insn) & 0xf;
6521 /* ??? This is not really atomic. However we know
6522 we never have multiple CPUs running in parallel,
6523 so it is good enough. */
6524 addr = load_reg(s, rn);
6525 tmp = load_reg(s, rm);
6526 if (insn & (1 << 22)) {
6527 tmp2 = gen_ld8u(addr, IS_USER(s));
6528 gen_st8(tmp, addr, IS_USER(s));
6529 } else {
6530 tmp2 = gen_ld32(addr, IS_USER(s));
6531 gen_st32(tmp, addr, IS_USER(s));
6533 dead_tmp(addr);
6534 store_reg(s, rd, tmp2);
6537 } else {
6538 int address_offset;
6539 int load;
6540 /* Misc load/store */
6541 rn = (insn >> 16) & 0xf;
6542 rd = (insn >> 12) & 0xf;
6543 addr = load_reg(s, rn);
6544 if (insn & (1 << 24))
6545 gen_add_datah_offset(s, insn, 0, addr);
6546 address_offset = 0;
6547 if (insn & (1 << 20)) {
6548 /* load */
6549 switch(sh) {
6550 case 1:
6551 tmp = gen_ld16u(addr, IS_USER(s));
6552 break;
6553 case 2:
6554 tmp = gen_ld8s(addr, IS_USER(s));
6555 break;
6556 default:
6557 case 3:
6558 tmp = gen_ld16s(addr, IS_USER(s));
6559 break;
6561 load = 1;
6562 } else if (sh & 2) {
6563 /* doubleword */
6564 if (sh & 1) {
6565 /* store */
6566 tmp = load_reg(s, rd);
6567 gen_st32(tmp, addr, IS_USER(s));
6568 tcg_gen_addi_i32(addr, addr, 4);
6569 tmp = load_reg(s, rd + 1);
6570 gen_st32(tmp, addr, IS_USER(s));
6571 load = 0;
6572 } else {
6573 /* load */
6574 tmp = gen_ld32(addr, IS_USER(s));
6575 store_reg(s, rd, tmp);
6576 tcg_gen_addi_i32(addr, addr, 4);
6577 tmp = gen_ld32(addr, IS_USER(s));
6578 rd++;
6579 load = 1;
6581 address_offset = -4;
6582 } else {
6583 /* store */
6584 tmp = load_reg(s, rd);
6585 gen_st16(tmp, addr, IS_USER(s));
6586 load = 0;
6588 /* Perform base writeback before the loaded value to
6589 ensure correct behavior with overlapping index registers.
6590 ldrd with base writeback is undefined if the
6591 destination and index registers overlap. */
6592 if (!(insn & (1 << 24))) {
6593 gen_add_datah_offset(s, insn, address_offset, addr);
6594 store_reg(s, rn, addr);
6595 } else if (insn & (1 << 21)) {
6596 if (address_offset)
6597 tcg_gen_addi_i32(addr, addr, address_offset);
6598 store_reg(s, rn, addr);
6599 } else {
6600 dead_tmp(addr);
6602 if (load) {
6603 /* Complete the load. */
6604 store_reg(s, rd, tmp);
6607 break;
6608 case 0x4:
6609 case 0x5:
6610 goto do_ldst;
6611 case 0x6:
6612 case 0x7:
6613 if (insn & (1 << 4)) {
6614 ARCH(6);
6615 /* Armv6 Media instructions. */
6616 rm = insn & 0xf;
6617 rn = (insn >> 16) & 0xf;
6618 rd = (insn >> 12) & 0xf;
6619 rs = (insn >> 8) & 0xf;
6620 switch ((insn >> 23) & 3) {
6621 case 0: /* Parallel add/subtract. */
6622 op1 = (insn >> 20) & 7;
6623 tmp = load_reg(s, rn);
6624 tmp2 = load_reg(s, rm);
6625 sh = (insn >> 5) & 7;
6626 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6627 goto illegal_op;
6628 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6629 dead_tmp(tmp2);
6630 store_reg(s, rd, tmp);
6631 break;
6632 case 1:
6633 if ((insn & 0x00700020) == 0) {
6634 /* Halfword pack. */
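/* PKHBT: Rd = (Rn & 0xffff) | ((Rm << shift) & 0xffff0000);
   PKHTB: Rd = (Rn & 0xffff0000) | ((Rm >> shift) & 0xffff)
   (arithmetic shift; an encoded shift of 0 means ASR #31). */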
6635 tmp = load_reg(s, rn);
6636 tmp2 = load_reg(s, rm);
6637 shift = (insn >> 7) & 0x1f;
6638 if (insn & (1 << 6)) {
6639 /* pkhtb */
6640 if (shift == 0)
6641 shift = 31;
6642 tcg_gen_sari_i32(tmp2, tmp2, shift);
6643 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6644 tcg_gen_ext16u_i32(tmp2, tmp2);
6645 } else {
6646 /* pkhbt */
6647 if (shift)
6648 tcg_gen_shli_i32(tmp2, tmp2, shift);
6649 tcg_gen_ext16u_i32(tmp, tmp);
6650 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6652 tcg_gen_or_i32(tmp, tmp, tmp2);
6653 dead_tmp(tmp2);
6654 store_reg(s, rd, tmp);
6655 } else if ((insn & 0x00200020) == 0x00200000) {
6656 /* [us]sat */
6657 tmp = load_reg(s, rm);
6658 shift = (insn >> 7) & 0x1f;
6659 if (insn & (1 << 6)) {
6660 if (shift == 0)
6661 shift = 31;
6662 tcg_gen_sari_i32(tmp, tmp, shift);
6663 } else {
6664 tcg_gen_shli_i32(tmp, tmp, shift);
6666 sh = (insn >> 16) & 0x1f;
6667 if (sh != 0) {
6668 if (insn & (1 << 22))
6669 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6670 else
6671 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6673 store_reg(s, rd, tmp);
6674 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6675 /* [us]sat16 */
6676 tmp = load_reg(s, rm);
6677 sh = (insn >> 16) & 0x1f;
6678 if (sh != 0) {
6679 if (insn & (1 << 22))
6680 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6681 else
6682 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6684 store_reg(s, rd, tmp);
6685 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6686 /* Select bytes. */
6687 tmp = load_reg(s, rn);
6688 tmp2 = load_reg(s, rm);
6689 tmp3 = new_tmp();
6690 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6691 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6692 dead_tmp(tmp3);
6693 dead_tmp(tmp2);
6694 store_reg(s, rd, tmp);
6695 } else if ((insn & 0x000003e0) == 0x00000060) {
6696 tmp = load_reg(s, rm);
6697 shift = (insn >> 10) & 3;
6698 /* ??? In many cases it's not necessary to do a
6699 rotate; a shift is sufficient. */
6700 if (shift != 0)
6701 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6702 op1 = (insn >> 20) & 7;
6703 switch (op1) {
6704 case 0: gen_sxtb16(tmp); break;
6705 case 2: gen_sxtb(tmp); break;
6706 case 3: gen_sxth(tmp); break;
6707 case 4: gen_uxtb16(tmp); break;
6708 case 6: gen_uxtb(tmp); break;
6709 case 7: gen_uxth(tmp); break;
6710 default: goto illegal_op;
6712 if (rn != 15) {
6713 tmp2 = load_reg(s, rn);
6714 if ((op1 & 3) == 0) {
6715 gen_add16(tmp, tmp2);
6716 } else {
6717 tcg_gen_add_i32(tmp, tmp, tmp2);
6718 dead_tmp(tmp2);
6721 store_reg(s, rd, tmp);
6722 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6723 /* rev */
6724 tmp = load_reg(s, rm);
6725 if (insn & (1 << 22)) {
6726 if (insn & (1 << 7)) {
6727 gen_revsh(tmp);
6728 } else {
6729 ARCH(6T2);
6730 gen_helper_rbit(tmp, tmp);
6732 } else {
6733 if (insn & (1 << 7))
6734 gen_rev16(tmp);
6735 else
6736 tcg_gen_bswap32_i32(tmp, tmp);
6738 store_reg(s, rd, tmp);
6739 } else {
6740 goto illegal_op;
6742 break;
6743 case 2: /* Multiplies (Type 3). */
6744 tmp = load_reg(s, rm);
6745 tmp2 = load_reg(s, rs);
6746 if (insn & (1 << 20)) {
6747 /* Signed multiply most significant [accumulate]. */
6748 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6749 if (insn & (1 << 5))
6750 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
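/* R bit set: bias by 0x80000000 so that dropping the
   low word below rounds the result to nearest. */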
6751 tcg_gen_shri_i64(tmp64, tmp64, 32);
6752 tmp = new_tmp();
6753 tcg_gen_trunc_i64_i32(tmp, tmp64);
6754 if (rd != 15) {
6755 tmp2 = load_reg(s, rd);
6756 if (insn & (1 << 6)) {
6757 tcg_gen_sub_i32(tmp, tmp, tmp2);
6758 } else {
6759 tcg_gen_add_i32(tmp, tmp, tmp2);
6761 dead_tmp(tmp2);
6763 store_reg(s, rn, tmp);
6764 } else {
6765 if (insn & (1 << 5))
6766 gen_swap_half(tmp2);
6767 gen_smul_dual(tmp, tmp2);
6768 /* This addition cannot overflow. */
6769 if (insn & (1 << 6)) {
6770 tcg_gen_sub_i32(tmp, tmp, tmp2);
6771 } else {
6772 tcg_gen_add_i32(tmp, tmp, tmp2);
6774 dead_tmp(tmp2);
6775 if (insn & (1 << 22)) {
6776 /* smlald, smlsld */
6777 tmp64 = tcg_temp_new_i64();
6778 tcg_gen_ext_i32_i64(tmp64, tmp);
6779 dead_tmp(tmp);
6780 gen_addq(s, tmp64, rd, rn);
6781 gen_storeq_reg(s, rd, rn, tmp64);
6782 } else {
6783 /* smuad, smusd, smlad, smlsd */
6784 if (rd != 15)
6786 tmp2 = load_reg(s, rd);
6787 gen_helper_add_setq(tmp, tmp, tmp2);
6788 dead_tmp(tmp2);
6790 store_reg(s, rn, tmp);
6793 break;
6794 case 3:
6795 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
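/* op1[5:3] = insn[22:20], op1[2:0] = insn[7:5]. */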
6796 switch (op1) {
6797 case 0: /* Unsigned sum of absolute differences. */
6798 ARCH(6);
6799 tmp = load_reg(s, rm);
6800 tmp2 = load_reg(s, rs);
6801 gen_helper_usad8(tmp, tmp, tmp2);
6802 dead_tmp(tmp2);
6803 if (rd != 15) {
6804 tmp2 = load_reg(s, rd);
6805 tcg_gen_add_i32(tmp, tmp, tmp2);
6806 dead_tmp(tmp2);
6808 store_reg(s, rn, tmp);
6809 break;
6810 case 0x20: case 0x24: case 0x28: case 0x2c:
6811 /* Bitfield insert/clear. */
6812 ARCH(6T2);
6813 shift = (insn >> 7) & 0x1f;
6814 i = (insn >> 16) & 0x1f;
6815 i = i + 1 - shift;
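/* bfi/bfc: shift = lsb (insn[11:7]), insn[20:16] = msb,
   so i is the field width in bits. */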
6816 if (rm == 15) {
6817 tmp = new_tmp();
6818 tcg_gen_movi_i32(tmp, 0);
6819 } else {
6820 tmp = load_reg(s, rm);
6822 if (i != 32) {
6823 tmp2 = load_reg(s, rd);
6824 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6825 dead_tmp(tmp2);
6827 store_reg(s, rd, tmp);
6828 break;
6829 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6830 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6831 ARCH(6T2);
6832 tmp = load_reg(s, rm);
6833 shift = (insn >> 7) & 0x1f;
6834 i = ((insn >> 16) & 0x1f) + 1;
6835 if (shift + i > 32)
6836 goto illegal_op;
6837 if (i < 32) {
6838 if (op1 & 0x20) {
6839 gen_ubfx(tmp, shift, (1u << i) - 1);
6840 } else {
6841 gen_sbfx(tmp, shift, i);
6844 store_reg(s, rd, tmp);
6845 break;
6846 default:
6847 goto illegal_op;
6849 break;
6851 break;
6853 do_ldst:
6854 /* Check for undefined extension instructions
6855 * per the ARM Bible, i.e.:
6856 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6858 sh = (0xf << 20) | (0xf << 4);
6859 if (op1 == 0x7 && ((insn & sh) == sh))
6861 goto illegal_op;
6863 /* load/store byte/word */
6864 rn = (insn >> 16) & 0xf;
6865 rd = (insn >> 12) & 0xf;
6866 tmp2 = load_reg(s, rn);
6867 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6868 if (insn & (1 << 24))
6869 gen_add_data_offset(s, insn, tmp2);
6870 if (insn & (1 << 20)) {
6871 /* load */
6872 if (insn & (1 << 22)) {
6873 tmp = gen_ld8u(tmp2, i);
6874 } else {
6875 tmp = gen_ld32(tmp2, i);
6877 } else {
6878 /* store */
6879 tmp = load_reg(s, rd);
6880 if (insn & (1 << 22))
6881 gen_st8(tmp, tmp2, i);
6882 else
6883 gen_st32(tmp, tmp2, i);
6885 if (!(insn & (1 << 24))) {
6886 gen_add_data_offset(s, insn, tmp2);
6887 store_reg(s, rn, tmp2);
6888 } else if (insn & (1 << 21)) {
6889 store_reg(s, rn, tmp2);
6890 } else {
6891 dead_tmp(tmp2);
6893 if (insn & (1 << 20)) {
6894 /* Complete the load. */
6895 if (rd == 15 && ENABLE_ARCH_5)
6896 gen_bx(s, tmp);
6897 else
6898 store_reg(s, rd, tmp);
6900 break;
6901 case 0x08:
6902 case 0x09:
6904 int j, n, user, loaded_base;
6905 int crement = 0;
6906 TCGv loaded_var;
6907 /* load/store multiple words */
6908 /* XXX: store correct base if write back */
6909 user = 0;
6910 if (insn & (1 << 22)) {
6911 if (IS_USER(s))
6912 goto illegal_op; /* only usable in supervisor mode */
6914 if ((insn & (1 << 15)) == 0)
6915 user = 1;
6917 rn = (insn >> 16) & 0xf;
6918 addr = load_reg(s, rn);
6920 /* compute total size */
6921 loaded_base = 0;
6922 TCGV_UNUSED(loaded_var);
6923 n = 0;
6924 for (i = 0; i < 16; i++) {
6925 if (insn & (1 << i))
6926 n++;
6928 /* XXX: test invalid n == 0 case ? */
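/* Pre-adjust the base so that the transfer loop below can
   always walk upwards: the lowest-numbered register is
   transferred at the lowest address in all four modes. */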
6929 if (insn & (1 << 23)) {
6930 if (insn & (1 << 24)) {
6931 /* pre increment */
6932 tcg_gen_addi_i32(addr, addr, 4);
6933 } else {
6934 /* post increment */
6936 } else {
6937 if (insn & (1 << 24)) {
6938 /* pre decrement */
6939 tcg_gen_addi_i32(addr, addr, -(n * 4));
6940 } else {
6941 /* post decrement */
6942 if (n != 1)
6943 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6947 if (insn & (1 << 21)) {
6948 /* write back */
6949 if (insn & (1 << 23)) {
6950 if (insn & (1 << 24)) {
6951 /* pre increment */
6952 } else {
6953 /* post increment */
6954 crement = 4;
6956 } else {
6957 if (insn & (1 << 24)) {
6958 /* pre decrement */
6959 if (n != 1) {
6960 crement = -((n - 1) * 4);
6962 } else {
6963 /* post decrement */
6964 crement = -(n * 4);
6967 if (arm_feature(env, ARM_FEATURE_ABORT_BU)) {
6968 /* base-updated abort model: update base register
6969 before an abort can happen */
6970 crement += (n - 1) * 4;
6971 tmp = new_tmp();
6972 tcg_gen_addi_i32(tmp, addr, crement);
6973 store_reg(s, rn, tmp);
6978 j = 0;
6979 for (i = 0; i < 16; i++) {
6980 if (insn & (1 << i)) {
6981 if (insn & (1 << 20)) {
6982 /* load */
6983 tmp = gen_ld32(addr, IS_USER(s));
6984 if (i == 15) {
6985 if (ENABLE_ARCH_5) {
6986 gen_bx(s, tmp);
6987 } else {
6988 store_reg(s, i, tmp);
6990 } else if (user) {
6991 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6992 dead_tmp(tmp);
6993 } else if (i == rn) {
6994 loaded_var = tmp;
6995 loaded_base = 1;
6996 } else {
6997 store_reg(s, i, tmp);
6999 } else {
7000 /* store */
7001 if (i == 15) {
7002 /* special case: r15 = PC + 8 */
7003 val = (long)s->pc + 4;
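/* s->pc already points 4 bytes past this insn, so this
   stores the architectural PC + 8. */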
7004 tmp = new_tmp();
7005 tcg_gen_movi_i32(tmp, val);
7006 } else if (user) {
7007 tmp = new_tmp();
7008 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
7009 } else {
7010 tmp = load_reg(s, i);
7012 gen_st32(tmp, addr, IS_USER(s));
7014 j++;
7015 /* no need to add after the last transfer */
7016 if (j != n)
7017 tcg_gen_addi_i32(addr, addr, 4);
7020 if (!arm_feature(env, ARM_FEATURE_ABORT_BU) && (insn & (1 << 21))) {
7021 tcg_gen_addi_i32(addr, addr, crement);
7022 store_reg(s, rn, addr);
7023 } else {
7024 dead_tmp(addr);
7026 if (loaded_base) {
7027 store_reg(s, rn, loaded_var);
7029 if ((insn & (1 << 22)) && !user) {
7030 /* Restore CPSR from SPSR. */
7031 tmp = load_cpu_field(spsr);
7032 gen_set_cpsr(tmp, 0xffffffff);
7033 dead_tmp(tmp);
7034 s->is_jmp = DISAS_UPDATE;
7037 break;
7038 case 0xa:
7039 case 0xb:
7041 int32_t offset;
7043 /* branch (and link) */
7044 val = (int32_t)s->pc;
7045 if (insn & (1 << 24)) {
7046 tmp = new_tmp();
7047 tcg_gen_movi_i32(tmp, val);
7048 store_reg(s, 14, tmp);
7050 offset = (((int32_t)insn << 8) >> 8);
7051 val += (offset << 2) + 4;
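/* Sign-extended 24-bit word offset; s->pc is already this
   insn + 4, so the extra 4 gives the architectural PC + 8 base. */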
7052 gen_jmp(s, val);
7054 break;
7055 case 0xc:
7056 case 0xd:
7057 case 0xe:
7058 /* Coprocessor. */
7059 if (disas_coproc_insn(env, s, insn))
7060 goto illegal_op;
7061 break;
7062 case 0xf:
7063 /* swi */
7064 gen_set_pc_im(s->pc);
7065 s->is_jmp = DISAS_SWI;
7066 break;
7067 default:
7068 illegal_op:
7069 gen_set_condexec(s);
7070 gen_set_pc_im(s->pc - 4);
7071 gen_exception(EXCP_UDEF);
7072 s->is_jmp = DISAS_JUMP;
7073 break;
7078 /* Return true if this is a Thumb-2 logical op. */
7079 static int
7080 thumb2_logic_op(int op)
7082 return (op < 8);
7085 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7086 then set condition code flags based on the result of the operation.
7087 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7088 to the high bit of T1.
7089 Returns zero if the opcode is valid. */
7091 static int
7092 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
7094 int logic_cc;
7096 logic_cc = 0;
7097 switch (op) {
7098 case 0: /* and */
7099 gen_op_andl_T0_T1();
7100 logic_cc = conds;
7101 break;
7102 case 1: /* bic */
7103 gen_op_bicl_T0_T1();
7104 logic_cc = conds;
7105 break;
7106 case 2: /* orr */
7107 gen_op_orl_T0_T1();
7108 logic_cc = conds;
7109 break;
7110 case 3: /* orn */
7111 gen_op_notl_T1();
7112 gen_op_orl_T0_T1();
7113 logic_cc = conds;
7114 break;
7115 case 4: /* eor */
7116 gen_op_xorl_T0_T1();
7117 logic_cc = conds;
7118 break;
7119 case 8: /* add */
7120 if (conds)
7121 gen_op_addl_T0_T1_cc();
7122 else
7123 gen_op_addl_T0_T1();
7124 break;
7125 case 10: /* adc */
7126 if (conds)
7127 gen_op_adcl_T0_T1_cc();
7128 else
7129 gen_adc_T0_T1();
7130 break;
7131 case 11: /* sbc */
7132 if (conds)
7133 gen_op_sbcl_T0_T1_cc();
7134 else
7135 gen_sbc_T0_T1();
7136 break;
7137 case 13: /* sub */
7138 if (conds)
7139 gen_op_subl_T0_T1_cc();
7140 else
7141 gen_op_subl_T0_T1();
7142 break;
7143 case 14: /* rsb */
7144 if (conds)
7145 gen_op_rsbl_T0_T1_cc();
7146 else
7147 gen_op_rsbl_T0_T1();
7148 break;
7149 default: /* 5, 6, 7, 9, 12, 15. */
7150 return 1;
7152 if (logic_cc) {
7153 gen_op_logic_T0_cc();
7154 if (shifter_out)
7155 gen_set_CF_bit31(cpu_T[1]);
7157 return 0;
7160 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7161 is not legal. */
7162 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7164 uint32_t insn, imm, shift, offset;
7165 uint32_t rd, rn, rm, rs;
7166 TCGv tmp;
7167 TCGv tmp2;
7168 TCGv tmp3;
7169 TCGv addr;
7170 TCGv_i64 tmp64;
7171 int op;
7172 int shiftop;
7173 int conds;
7174 int logic_cc;
7176 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7177 || arm_feature(env, ARM_FEATURE_M))) {
7178 /* Thumb-1 cores may need to treat bl and blx as a pair of
7179 16-bit instructions to get correct prefetch abort behavior. */
7180 insn = insn_hw1;
7181 if ((insn & (1 << 12)) == 0) {
7182 ARCH(5);
7183 /* Second half of blx. */
7184 offset = ((insn & 0x7ff) << 1);
7185 tmp = load_reg(s, 14);
7186 tcg_gen_addi_i32(tmp, tmp, offset);
7187 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7189 tmp2 = new_tmp();
7190 tcg_gen_movi_i32(tmp2, s->pc | 1);
7191 store_reg(s, 14, tmp2);
7192 gen_bx(s, tmp);
7193 return 0;
7195 if (insn & (1 << 11)) {
7196 /* Second half of bl. */
7197 offset = ((insn & 0x7ff) << 1) | 1;
7198 tmp = load_reg(s, 14);
7199 tcg_gen_addi_i32(tmp, tmp, offset);
7201 tmp2 = new_tmp();
7202 tcg_gen_movi_i32(tmp2, s->pc | 1);
7203 store_reg(s, 14, tmp2);
7204 gen_bx(s, tmp);
7205 return 0;
7207 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7208 /* Instruction spans a page boundary. Implement it as two
7209 16-bit instructions in case the second half causes a
7210 prefetch abort. */
7211 offset = ((int32_t)insn << 21) >> 9;
7212 gen_op_movl_T0_im(s->pc + 2 + offset);
7213 gen_movl_reg_T0(s, 14);
7214 return 0;
7216 /* Fall through to 32-bit decode. */
7219 insn = lduw_code(s->pc);
7220 s->pc += 2;
7221 insn |= (uint32_t)insn_hw1 << 16;
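/* Everything except the 32-bit bl/blx encodings requires Thumb-2. */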
7223 if ((insn & 0xf800e800) != 0xf000e800) {
7224 ARCH(6T2);
7227 rn = (insn >> 16) & 0xf;
7228 rs = (insn >> 12) & 0xf;
7229 rd = (insn >> 8) & 0xf;
7230 rm = insn & 0xf;
7231 switch ((insn >> 25) & 0xf) {
7232 case 0: case 1: case 2: case 3:
7233 /* 16-bit instructions. Should never happen. */
7234 abort();
7235 case 4:
7236 if (insn & (1 << 22)) {
7237 /* Other load/store, table branch. */
7238 if (insn & 0x01200000) {
7239 /* Load/store doubleword. */
7240 ARCH(5);
7241 if (rn == 15) {
7242 addr = new_tmp();
7243 tcg_gen_movi_i32(addr, s->pc & ~3);
7244 } else {
7245 addr = load_reg(s, rn);
7247 offset = (insn & 0xff) * 4;
7248 if ((insn & (1 << 23)) == 0)
7249 offset = -offset;
7250 if (insn & (1 << 24)) {
7251 tcg_gen_addi_i32(addr, addr, offset);
7252 offset = 0;
7254 if (insn & (1 << 20)) {
7255 /* ldrd */
7256 tmp = gen_ld32(addr, IS_USER(s));
7257 store_reg(s, rs, tmp);
7258 tcg_gen_addi_i32(addr, addr, 4);
7259 tmp = gen_ld32(addr, IS_USER(s));
7260 store_reg(s, rd, tmp);
7261 } else {
7262 /* strd */
7263 tmp = load_reg(s, rs);
7264 gen_st32(tmp, addr, IS_USER(s));
7265 tcg_gen_addi_i32(addr, addr, 4);
7266 tmp = load_reg(s, rd);
7267 gen_st32(tmp, addr, IS_USER(s));
7269 if (insn & (1 << 21)) {
7270 /* Base writeback. */
7271 if (rn == 15)
7272 goto illegal_op;
7273 tcg_gen_addi_i32(addr, addr, offset - 4);
7274 store_reg(s, rn, addr);
7275 } else {
7276 dead_tmp(addr);
7278 } else if ((insn & (1 << 23)) == 0) {
7279 /* Load/store exclusive word. */
7280 gen_movl_T1_reg(s, rn);
7281 addr = cpu_T[1];
7282 if (insn & (1 << 20)) {
7283 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7284 tmp = gen_ld32(addr, IS_USER(s));
7285 store_reg(s, rd, tmp);
7286 } else {
7287 int label = gen_new_label();
7288 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7289 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7290 0, label);
7291 tmp = load_reg(s, rs);
7292 gen_st32(tmp, cpu_T[1], IS_USER(s));
7293 gen_set_label(label);
7294 gen_movl_reg_T0(s, rd);
7296 } else if ((insn & (1 << 6)) == 0) {
7297 /* Table Branch. */
7298 if (rn == 15) {
7299 addr = new_tmp();
7300 tcg_gen_movi_i32(addr, s->pc);
7301 } else {
7302 addr = load_reg(s, rn);
7304 tmp = load_reg(s, rm);
7305 tcg_gen_add_i32(addr, addr, tmp);
7306 if (insn & (1 << 4)) {
7307 /* tbh */
7308 tcg_gen_add_i32(addr, addr, tmp);
7309 dead_tmp(tmp);
7310 tmp = gen_ld16u(addr, IS_USER(s));
7311 } else { /* tbb */
7312 dead_tmp(tmp);
7313 tmp = gen_ld8u(addr, IS_USER(s));
7315 dead_tmp(addr);
7316 tcg_gen_shli_i32(tmp, tmp, 1);
7317 tcg_gen_addi_i32(tmp, tmp, s->pc);
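/* Table entries are halfword counts: double them and add the
   PC (already this insn + 4) to form the branch target. */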
7318 store_reg(s, 15, tmp);
7319 } else {
7320 /* Load/store exclusive byte/halfword/doubleword. */
7321 /* ??? These are not really atomic. However, we know
7322 we never have multiple CPUs running in parallel,
7323 so it is good enough. */
7324 op = (insn >> 4) & 0x3;
7325 /* Must use a global reg for the address because we have
7326 a conditional branch in the store instruction. */
7327 gen_movl_T1_reg(s, rn);
7328 addr = cpu_T[1];
7329 if (insn & (1 << 20)) {
7330 gen_helper_mark_exclusive(cpu_env, addr);
7331 switch (op) {
7332 case 0:
7333 tmp = gen_ld8u(addr, IS_USER(s));
7334 break;
7335 case 1:
7336 tmp = gen_ld16u(addr, IS_USER(s));
7337 break;
7338 case 3:
7339 tmp = gen_ld32(addr, IS_USER(s));
7340 tcg_gen_addi_i32(addr, addr, 4);
7341 tmp2 = gen_ld32(addr, IS_USER(s));
7342 store_reg(s, rd, tmp2);
7343 break;
7344 default:
7345 goto illegal_op;
7347 store_reg(s, rs, tmp);
7348 } else {
7349 int label = gen_new_label();
7350 /* Must use a global that is not killed by the branch. */
7351 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7352 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7353 tmp = load_reg(s, rs);
7354 switch (op) {
7355 case 0:
7356 gen_st8(tmp, addr, IS_USER(s));
7357 break;
7358 case 1:
7359 gen_st16(tmp, addr, IS_USER(s));
7360 break;
7361 case 3:
7362 gen_st32(tmp, addr, IS_USER(s));
7363 tcg_gen_addi_i32(addr, addr, 4);
7364 tmp = load_reg(s, rd);
7365 gen_st32(tmp, addr, IS_USER(s));
7366 break;
7367 default:
7368 goto illegal_op;
7370 gen_set_label(label);
7371 gen_movl_reg_T0(s, rm);
7374 } else {
7375 /* Load/store multiple, RFE, SRS. */
7376 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7377 /* Not available in user mode. */
7378 if (IS_USER(s))
7379 goto illegal_op;
7380 if (insn & (1 << 20)) {
7381 /* rfe */
7382 addr = load_reg(s, rn);
7383 if ((insn & (1 << 24)) == 0)
7384 tcg_gen_addi_i32(addr, addr, -8);
7385 /* Load PC into tmp and CPSR into tmp2. */
7386 tmp = gen_ld32(addr, 0);
7387 tcg_gen_addi_i32(addr, addr, 4);
7388 tmp2 = gen_ld32(addr, 0);
7389 if (insn & (1 << 21)) {
7390 /* Base writeback. */
7391 if (insn & (1 << 24)) {
7392 tcg_gen_addi_i32(addr, addr, 4);
7393 } else {
7394 tcg_gen_addi_i32(addr, addr, -4);
7396 store_reg(s, rn, addr);
7397 } else {
7398 dead_tmp(addr);
7400 gen_rfe(s, tmp, tmp2);
7401 } else {
7402 /* srs */
7403 op = (insn & 0x1f);
7404 if (op == (env->uncached_cpsr & CPSR_M)) {
7405 addr = load_reg(s, 13);
7406 } else {
7407 addr = new_tmp();
7408 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7410 if ((insn & (1 << 24)) == 0) {
7411 tcg_gen_addi_i32(addr, addr, -8);
7413 tmp = load_reg(s, 14);
7414 gen_st32(tmp, addr, 0);
7415 tcg_gen_addi_i32(addr, addr, 4);
7416 tmp = new_tmp();
7417 gen_helper_cpsr_read(tmp);
7418 gen_st32(tmp, addr, 0);
7419 if (insn & (1 << 21)) {
7420 if ((insn & (1 << 24)) == 0) {
7421 tcg_gen_addi_i32(addr, addr, -4);
7422 } else {
7423 tcg_gen_addi_i32(addr, addr, 4);
7425 if (op == (env->uncached_cpsr & CPSR_M)) {
7426 store_reg(s, 13, addr);
7427 } else {
7428 gen_helper_set_r13_banked(cpu_env,
7429 tcg_const_i32(op), addr);
7431 } else {
7432 dead_tmp(addr);
7435 } else {
7436 int i;
7437 /* Load/store multiple. */
7438 addr = load_reg(s, rn);
7439 offset = 0;
7440 for (i = 0; i < 16; i++) {
7441 if (insn & (1 << i))
7442 offset += 4;
7444 if (insn & (1 << 24)) {
7445 tcg_gen_addi_i32(addr, addr, -offset);
7448 for (i = 0; i < 16; i++) {
7449 if ((insn & (1 << i)) == 0)
7450 continue;
7451 if (insn & (1 << 20)) {
7452 /* Load. */
7453 tmp = gen_ld32(addr, IS_USER(s));
7454 if (i == 15 && ENABLE_ARCH_5) {
7455 gen_bx(s, tmp);
7456 } else {
7457 store_reg(s, i, tmp);
7459 } else {
7460 /* Store. */
7461 tmp = load_reg(s, i);
7462 gen_st32(tmp, addr, IS_USER(s));
7464 tcg_gen_addi_i32(addr, addr, 4);
7466 if (insn & (1 << 21)) {
7467 /* Base register writeback. */
7468 if (insn & (1 << 24)) {
7469 tcg_gen_addi_i32(addr, addr, -offset);
7471 /* Fault if writeback register is in register list. */
7472 if (insn & (1 << rn))
7473 goto illegal_op;
7474 store_reg(s, rn, addr);
7475 } else {
7476 dead_tmp(addr);
7480 break;
7481 case 5: /* Data processing register constant shift. */
7482 if (rn == 15)
7483 gen_op_movl_T0_im(0);
7484 else
7485 gen_movl_T0_reg(s, rn);
7486 gen_movl_T1_reg(s, rm);
7487 op = (insn >> 21) & 0xf;
7488 shiftop = (insn >> 4) & 3;
7489 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7490 conds = (insn & (1 << 20)) != 0;
7491 logic_cc = (conds && thumb2_logic_op(op));
7492 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7493 if (gen_thumb2_data_op(s, op, conds, 0))
7494 goto illegal_op;
7495 if (rd != 15)
7496 gen_movl_reg_T0(s, rd);
7497 break;
7498 case 13: /* Misc data processing. */
7499 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7500 if (op < 4 && (insn & 0xf000) != 0xf000)
7501 goto illegal_op;
7502 switch (op) {
7503 case 0: /* Register controlled shift. */
7504 tmp = load_reg(s, rn);
7505 tmp2 = load_reg(s, rm);
7506 if ((insn & 0x70) != 0)
7507 goto illegal_op;
7508 op = (insn >> 21) & 3;
7509 logic_cc = (insn & (1 << 20)) != 0;
7510 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7511 if (logic_cc)
7512 gen_logic_CC(tmp);
7513 store_reg_bx(env, s, rd, tmp);
7514 break;
7515 case 1: /* Sign/zero extend. */
7516 tmp = load_reg(s, rm);
7517 shift = (insn >> 4) & 3;
7518 /* ??? In many cases it's not necessary to do a
7519 rotate; a shift is sufficient. */
7520 if (shift != 0)
7521 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7522 op = (insn >> 20) & 7;
7523 switch (op) {
7524 case 0: gen_sxth(tmp); break;
7525 case 1: gen_uxth(tmp); break;
7526 case 2: gen_sxtb16(tmp); break;
7527 case 3: gen_uxtb16(tmp); break;
7528 case 4: gen_sxtb(tmp); break;
7529 case 5: gen_uxtb(tmp); break;
7530 default: goto illegal_op;
7532 if (rn != 15) {
7533 tmp2 = load_reg(s, rn);
7534 if ((op >> 1) == 1) {
7535 gen_add16(tmp, tmp2);
7536 } else {
7537 tcg_gen_add_i32(tmp, tmp, tmp2);
7538 dead_tmp(tmp2);
7541 store_reg(s, rd, tmp);
7542 break;
7543 case 2: /* SIMD add/subtract. */
7544 op = (insn >> 20) & 7;
7545 shift = (insn >> 4) & 7;
7546 if ((op & 3) == 3 || (shift & 3) == 3)
7547 goto illegal_op;
7548 tmp = load_reg(s, rn);
7549 tmp2 = load_reg(s, rm);
7550 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7551 dead_tmp(tmp2);
7552 store_reg(s, rd, tmp);
7553 break;
7554 case 3: /* Other data processing. */
7555 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7556 if (op < 4) {
7557 /* Saturating add/subtract. */
7558 tmp = load_reg(s, rn);
7559 tmp2 = load_reg(s, rm);
7560 if (op & 2)
7561 gen_helper_double_saturate(tmp, tmp);
7562 if (op & 1)
7563 gen_helper_sub_saturate(tmp, tmp2, tmp);
7564 else
7565 gen_helper_add_saturate(tmp, tmp, tmp2);
7566 dead_tmp(tmp2);
7567 } else {
7568 tmp = load_reg(s, rn);
7569 switch (op) {
7570 case 0x0a: /* rbit */
7571 gen_helper_rbit(tmp, tmp);
7572 break;
7573 case 0x08: /* rev */
7574 tcg_gen_bswap32_i32(tmp, tmp);
7575 break;
7576 case 0x09: /* rev16 */
7577 gen_rev16(tmp);
7578 break;
7579 case 0x0b: /* revsh */
7580 gen_revsh(tmp);
7581 break;
7582 case 0x10: /* sel */
7583 tmp2 = load_reg(s, rm);
7584 tmp3 = new_tmp();
7585 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7586 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7587 dead_tmp(tmp3);
7588 dead_tmp(tmp2);
7589 break;
7590 case 0x18: /* clz */
7591 gen_helper_clz(tmp, tmp);
7592 break;
7593 default:
7594 goto illegal_op;
7597 store_reg(s, rd, tmp);
7598 break;
7599 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7600 op = (insn >> 4) & 0xf;
7601 tmp = load_reg(s, rn);
7602 tmp2 = load_reg(s, rm);
7603 switch ((insn >> 20) & 7) {
7604 case 0: /* 32 x 32 -> 32 */
7605 tcg_gen_mul_i32(tmp, tmp, tmp2);
7606 dead_tmp(tmp2);
7607 if (rs != 15) {
7608 tmp2 = load_reg(s, rs);
7609 if (op)
7610 tcg_gen_sub_i32(tmp, tmp2, tmp);
7611 else
7612 tcg_gen_add_i32(tmp, tmp, tmp2);
7613 dead_tmp(tmp2);
7615 break;
7616 case 1: /* 16 x 16 -> 32 */
7617 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7618 dead_tmp(tmp2);
7619 if (rs != 15) {
7620 tmp2 = load_reg(s, rs);
7621 gen_helper_add_setq(tmp, tmp, tmp2);
7622 dead_tmp(tmp2);
7624 break;
7625 case 2: /* Dual multiply add. */
7626 case 4: /* Dual multiply subtract. */
7627 if (op)
7628 gen_swap_half(tmp2);
7629 gen_smul_dual(tmp, tmp2);
7630 /* This addition cannot overflow. */
7631 if (insn & (1 << 22)) {
7632 tcg_gen_sub_i32(tmp, tmp, tmp2);
7633 } else {
7634 tcg_gen_add_i32(tmp, tmp, tmp2);
7636 dead_tmp(tmp2);
7637 if (rs != 15)
7639 tmp2 = load_reg(s, rs);
7640 gen_helper_add_setq(tmp, tmp, tmp2);
7641 dead_tmp(tmp2);
7643 break;
7644 case 3: /* 32 * 16 -> 32msb */
7645 if (op)
7646 tcg_gen_sari_i32(tmp2, tmp2, 16);
7647 else
7648 gen_sxth(tmp2);
7649 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7650 tcg_gen_shri_i64(tmp64, tmp64, 16);
7651 tmp = new_tmp();
7652 tcg_gen_trunc_i64_i32(tmp, tmp64);
7653 if (rs != 15)
7655 tmp2 = load_reg(s, rs);
7656 gen_helper_add_setq(tmp, tmp, tmp2);
7657 dead_tmp(tmp2);
7659 break;
7660 case 5: case 6: /* 32 * 32 -> 32msb */
7661 gen_imull(tmp, tmp2);
7662 if (insn & (1 << 5)) {
7663 gen_roundqd(tmp, tmp2);
7664 dead_tmp(tmp2);
7665 } else {
7666 dead_tmp(tmp);
7667 tmp = tmp2;
7669 if (rs != 15) {
7670 tmp2 = load_reg(s, rs);
7671 if (insn & (1 << 21)) {
7672 tcg_gen_add_i32(tmp, tmp, tmp2);
7673 } else {
7674 tcg_gen_sub_i32(tmp, tmp2, tmp);
7676 dead_tmp(tmp2);
7678 break;
7679 case 7: /* Unsigned sum of absolute differences. */
7680 gen_helper_usad8(tmp, tmp, tmp2);
7681 dead_tmp(tmp2);
7682 if (rs != 15) {
7683 tmp2 = load_reg(s, rs);
7684 tcg_gen_add_i32(tmp, tmp, tmp2);
7685 dead_tmp(tmp2);
7687 break;
7689 store_reg(s, rd, tmp);
7690 break;
7691 case 6: case 7: /* 64-bit multiply, Divide. */
7692 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7693 tmp = load_reg(s, rn);
7694 tmp2 = load_reg(s, rm);
7695 if ((op & 0x50) == 0x10) {
7696 /* sdiv, udiv */
7697 if (!arm_feature(env, ARM_FEATURE_DIV))
7698 goto illegal_op;
7699 if (op & 0x20)
7700 gen_helper_udiv(tmp, tmp, tmp2);
7701 else
7702 gen_helper_sdiv(tmp, tmp, tmp2);
7703 dead_tmp(tmp2);
7704 store_reg(s, rd, tmp);
7705 } else if ((op & 0xe) == 0xc) {
7706 /* Dual multiply accumulate long. */
7707 if (op & 1)
7708 gen_swap_half(tmp2);
7709 gen_smul_dual(tmp, tmp2);
7710 if (op & 0x10) {
7711 tcg_gen_sub_i32(tmp, tmp, tmp2);
7712 } else {
7713 tcg_gen_add_i32(tmp, tmp, tmp2);
7715 dead_tmp(tmp2);
7716 /* BUGFIX */
7717 tmp64 = tcg_temp_new_i64();
7718 tcg_gen_ext_i32_i64(tmp64, tmp);
7719 dead_tmp(tmp);
7720 gen_addq(s, tmp64, rs, rd);
7721 gen_storeq_reg(s, rs, rd, tmp64);
7722 } else {
7723 if (op & 0x20) {
7724 /* Unsigned 64-bit multiply */
7725 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7726 } else {
7727 if (op & 8) {
7728 /* smlalxy */
7729 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7730 dead_tmp(tmp2);
7731 tmp64 = tcg_temp_new_i64();
7732 tcg_gen_ext_i32_i64(tmp64, tmp);
7733 dead_tmp(tmp);
7734 } else {
7735 /* Signed 64-bit multiply */
7736 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7739 if (op & 4) {
7740 /* umaal */
7741 gen_addq_lo(s, tmp64, rs);
7742 gen_addq_lo(s, tmp64, rd);
7743 } else if (op & 0x40) {
7744 /* 64-bit accumulate. */
7745 gen_addq(s, tmp64, rs, rd);
7747 gen_storeq_reg(s, rs, rd, tmp64);
7749 break;
7751 break;
7752 case 6: case 7: case 14: case 15:
7753 /* Coprocessor. */
7754 if (((insn >> 24) & 3) == 3) {
7755 /* Translate into the equivalent ARM encoding. */
7756 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7757 if (disas_neon_data_insn(env, s, insn))
7758 goto illegal_op;
7759 } else {
7760 if (insn & (1 << 28))
7761 goto illegal_op;
7762 if (disas_coproc_insn(env, s, insn))
7763 goto illegal_op;
7765 break;
7766 case 8: case 9: case 10: case 11:
7767 if (insn & (1 << 15)) {
7768 /* Branches, misc control. */
7769 if (insn & 0x5000) {
7770 /* Unconditional branch. */
7771 /* signextend(hw1[10:0]) -> offset[:12]. */
7772 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7773 /* hw1[10:0] -> offset[11:1]. */
7774 offset |= (insn & 0x7ff) << 1;
7775 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7776 offset[24:22] already have the same value because of the
7777 sign extension above. */
7778 offset ^= ((~insn) & (1 << 13)) << 10;
7779 offset ^= ((~insn) & (1 << 11)) << 11;
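/* Together these compute I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S):
   the sign extension seeded offset[23:22] with S, and each XOR
   flips the bit when the corresponding J bit is clear. */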
7781 if (insn & (1 << 14)) {
7782 /* Branch and link. */
7783 gen_op_movl_T1_im(s->pc | 1);
7784 gen_movl_reg_T1(s, 14);
7787 offset += s->pc;
7788 if (insn & (1 << 12)) {
7789 /* b/bl */
7790 gen_jmp(s, offset);
7791 } else {
7792 /* blx */
7793 ARCH(5);
7794 offset &= ~(uint32_t)2;
7795 gen_bx_im(s, offset);
7797 } else if (((insn >> 23) & 7) == 7) {
7798 /* Misc control */
7799 if (insn & (1 << 13))
7800 goto illegal_op;
7802 if (insn & (1 << 26)) {
7803 /* Secure monitor call (v6Z) */
7804 goto illegal_op; /* not implemented. */
7805 } else {
7806 op = (insn >> 20) & 7;
7807 switch (op) {
7808 case 0: /* msr cpsr. */
7809 if (IS_M(env)) {
7810 tmp = load_reg(s, rn);
7811 addr = tcg_const_i32(insn & 0xff);
7812 gen_helper_v7m_msr(cpu_env, addr, tmp);
7813 gen_lookup_tb(s);
7814 break;
7816 /* fall through */
7817 case 1: /* msr spsr. */
7818 if (IS_M(env))
7819 goto illegal_op;
7820 tmp = load_reg(s, rn);
7821 if (gen_set_psr(s,
7822 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7823 op == 1, tmp))
7824 goto illegal_op;
7825 break;
7826 case 2: /* cps, nop-hint. */
7827 if (((insn >> 8) & 7) == 0) {
7828 gen_nop_hint(s, insn & 0xff);
7830 /* Implemented as NOP in user mode. */
7831 if (IS_USER(s))
7832 break;
7833 offset = 0;
7834 imm = 0;
7835 if (insn & (1 << 10)) {
7836 if (insn & (1 << 7))
7837 offset |= CPSR_A;
7838 if (insn & (1 << 6))
7839 offset |= CPSR_I;
7840 if (insn & (1 << 5))
7841 offset |= CPSR_F;
7842 if (insn & (1 << 9))
7843 imm = CPSR_A | CPSR_I | CPSR_F;
7845 if (insn & (1 << 8)) {
7846 offset |= 0x1f;
7847 imm |= (insn & 0x1f);
7849 if (offset) {
7850 gen_set_psr_im(s, offset, 0, imm);
7852 break;
7853 case 3: /* Special control operations. */
7854 op = (insn >> 4) & 0xf;
7855 switch (op) {
7856 case 2: /* clrex */
7857 gen_helper_clrex(cpu_env);
7858 break;
7859 case 4: /* dsb */
7860 case 5: /* dmb */
7861 case 6: /* isb */
7862 /* These execute as NOPs. */
7863 ARCH(7);
7864 break;
7865 default:
7866 goto illegal_op;
7868 break;
7869 case 4: /* bxj */
7870 /* Trivial implementation equivalent to bx. */
7871 tmp = load_reg(s, rn);
7872 gen_bx(s, tmp);
7873 break;
7874 case 5: /* Exception return. */
7875 /* Unpredictable in user mode. */
7876 goto illegal_op;
7877 case 6: /* mrs cpsr. */
7878 tmp = new_tmp();
7879 if (IS_M(env)) {
7880 addr = tcg_const_i32(insn & 0xff);
7881 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7882 } else {
7883 gen_helper_cpsr_read(tmp);
7885 store_reg(s, rd, tmp);
7886 break;
7887 case 7: /* mrs spsr. */
7888 /* Not accessible in user mode. */
7889 if (IS_USER(s) || IS_M(env))
7890 goto illegal_op;
7891 tmp = load_cpu_field(spsr);
7892 store_reg(s, rd, tmp);
7893 break;
7896 } else {
7897 /* Conditional branch. */
7898 op = (insn >> 22) & 0xf;
7899 /* Generate a conditional jump to next instruction. */
7900 s->condlabel = gen_new_label();
7901 gen_test_cc(op ^ 1, s->condlabel);
7902 s->condjmp = 1;
7904 /* offset[11:1] = insn[10:0] */
7905 offset = (insn & 0x7ff) << 1;
7906 /* offset[17:12] = insn[21:16]. */
7907 offset |= (insn & 0x003f0000) >> 4;
7908 /* offset[31:20] = insn[26]. */
7909 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7910 /* offset[18] = insn[13]. */
7911 offset |= (insn & (1 << 13)) << 5;
7912 /* offset[19] = insn[11]. */
7913 offset |= (insn & (1 << 11)) << 8;
7915 /* jump to the offset */
7916 gen_jmp(s, s->pc + offset);
7918 } else {
7919 /* Data processing immediate. */
7920 if (insn & (1 << 25)) {
7921 if (insn & (1 << 24)) {
7922 if (insn & (1 << 20))
7923 goto illegal_op;
7924 /* Bitfield/Saturate. */
7925 op = (insn >> 21) & 7;
7926 imm = insn & 0x1f;
7927 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7928 if (rn == 15) {
7929 tmp = new_tmp();
7930 tcg_gen_movi_i32(tmp, 0);
7931 } else {
7932 tmp = load_reg(s, rn);
7934 switch (op) {
7935 case 2: /* Signed bitfield extract. */
7936 imm++;
7937 if (shift + imm > 32)
7938 goto illegal_op;
7939 if (imm < 32)
7940 gen_sbfx(tmp, shift, imm);
7941 break;
7942 case 6: /* Unsigned bitfield extract. */
7943 imm++;
7944 if (shift + imm > 32)
7945 goto illegal_op;
7946 if (imm < 32)
7947 gen_ubfx(tmp, shift, (1u << imm) - 1);
7948 break;
7949 case 3: /* Bitfield insert/clear. */
7950 if (imm < shift)
7951 goto illegal_op;
7952 imm = imm + 1 - shift;
7953 if (imm != 32) {
7954 tmp2 = load_reg(s, rd);
7955 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7956 dead_tmp(tmp2);
7958 break;
7959 case 7:
7960 goto illegal_op;
7961 default: /* Saturate. */
7962 if (shift) {
7963 if (op & 1)
7964 tcg_gen_sari_i32(tmp, tmp, shift);
7965 else
7966 tcg_gen_shli_i32(tmp, tmp, shift);
7968 tmp2 = tcg_const_i32(imm);
7969 if (op & 4) {
7970 /* Unsigned. */
7971 if ((op & 1) && shift == 0)
7972 gen_helper_usat16(tmp, tmp, tmp2);
7973 else
7974 gen_helper_usat(tmp, tmp, tmp2);
7975 } else {
7976 /* Signed. */
7977 if ((op & 1) && shift == 0)
7978 gen_helper_ssat16(tmp, tmp, tmp2);
7979 else
7980 gen_helper_ssat(tmp, tmp, tmp2);
7982 break;
7984 store_reg(s, rd, tmp);
7985 } else {
7986 imm = ((insn & 0x04000000) >> 15)
7987 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7988 if (insn & (1 << 22)) {
7989 /* 16-bit immediate. */
7990 imm |= (insn >> 4) & 0xf000;
7991 if (insn & (1 << 23)) {
7992 /* movt */
7993 tmp = load_reg(s, rd);
7994 tcg_gen_ext16u_i32(tmp, tmp);
7995 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7996 } else {
7997 /* movw */
7998 tmp = new_tmp();
7999 tcg_gen_movi_i32(tmp, imm);
8001 } else {
8002 /* Add/sub 12-bit immediate. */
8003 if (rn == 15) {
8004 offset = s->pc & ~(uint32_t)3;
8005 if (insn & (1 << 23))
8006 offset -= imm;
8007 else
8008 offset += imm;
8009 tmp = new_tmp();
8010 tcg_gen_movi_i32(tmp, offset);
8011 } else {
8012 tmp = load_reg(s, rn);
8013 if (insn & (1 << 23))
8014 tcg_gen_subi_i32(tmp, tmp, imm);
8015 else
8016 tcg_gen_addi_i32(tmp, tmp, imm);
8019 store_reg(s, rd, tmp);
8021 } else {
8022 int shifter_out = 0;
8023 /* modified 12-bit immediate. */
8024 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8025 imm = (insn & 0xff);
8026 switch (shift) {
8027 case 0: /* XY */
8028 /* Nothing to do. */
8029 break;
8030 case 1: /* 00XY00XY */
8031 imm |= imm << 16;
8032 break;
8033 case 2: /* XY00XY00 */
8034 imm |= imm << 16;
8035 imm <<= 8;
8036 break;
8037 case 3: /* XYXYXYXY */
8038 imm |= imm << 16;
8039 imm |= imm << 8;
8040 break;
8041 default: /* Rotated constant. */
8042 shift = (shift << 1) | (imm >> 7);
8043 imm |= 0x80;
8044 imm = imm << (32 - shift);
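/* ROR(imm8 | 0x80, shift); shift is at least 8 here, so the
   rotate never wraps and reduces to a left shift. */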
8045 shifter_out = 1;
8046 break;
8048 gen_op_movl_T1_im(imm);
8049 rn = (insn >> 16) & 0xf;
8050 if (rn == 15)
8051 gen_op_movl_T0_im(0);
8052 else
8053 gen_movl_T0_reg(s, rn);
8054 op = (insn >> 21) & 0xf;
8055 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8056 shifter_out))
8057 goto illegal_op;
8058 rd = (insn >> 8) & 0xf;
8059 if (rd != 15) {
8060 gen_movl_reg_T0(s, rd);
8064 break;
8065 case 12: /* Load/store single data item. */
8067 int postinc = 0;
8068 int writeback = 0;
8069 int user;
8070 if ((insn & 0x01100000) == 0x01000000) {
8071 if (disas_neon_ls_insn(env, s, insn))
8072 goto illegal_op;
8073 break;
8075 user = IS_USER(s);
8076 if (rn == 15) {
8077 addr = new_tmp();
8078 /* PC relative. */
8079 /* s->pc has already been incremented by 4. */
8080 imm = s->pc & 0xfffffffc;
8081 if (insn & (1 << 23))
8082 imm += insn & 0xfff;
8083 else
8084 imm -= insn & 0xfff;
8085 tcg_gen_movi_i32(addr, imm);
8086 } else {
8087 addr = load_reg(s, rn);
8088 if (insn & (1 << 23)) {
8089 /* Positive offset. */
8090 imm = insn & 0xfff;
8091 tcg_gen_addi_i32(addr, addr, imm);
8092 } else {
8093 op = (insn >> 8) & 7;
8094 imm = insn & 0xff;
8095 switch (op) {
8096 case 0: case 8: /* Shifted Register. */
8097 shift = (insn >> 4) & 0xf;
8098 if (shift > 3)
8099 goto illegal_op;
8100 tmp = load_reg(s, rm);
8101 if (shift)
8102 tcg_gen_shli_i32(tmp, tmp, shift);
8103 tcg_gen_add_i32(addr, addr, tmp);
8104 dead_tmp(tmp);
8105 break;
8106 case 4: /* Negative offset. */
8107 tcg_gen_addi_i32(addr, addr, -imm);
8108 break;
8109 case 6: /* User privilege. */
8110 tcg_gen_addi_i32(addr, addr, imm);
8111 user = 1;
8112 break;
8113 case 1: /* Post-decrement. */
8114 imm = -imm;
8115 /* Fall through. */
8116 case 3: /* Post-increment. */
8117 postinc = 1;
8118 writeback = 1;
8119 break;
8120 case 5: /* Pre-decrement. */
8121 imm = -imm;
8122 /* Fall through. */
8123 case 7: /* Pre-increment. */
8124 tcg_gen_addi_i32(addr, addr, imm);
8125 writeback = 1;
8126 break;
8127 default:
8128 goto illegal_op;
8132 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
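/* op[1:0] = size from insn[22:21] (0 = byte, 1 = halfword,
   2 = word); op[2] = insn[24], sign-extension for loads. */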
8133 if (insn & (1 << 20)) {
8134 /* Load. */
8135 if (rs == 15 && op != 2) {
8136 if (op & 2)
8137 goto illegal_op;
8138 /* Memory hint. Implemented as NOP. */
8139 } else {
8140 switch (op) {
8141 case 0: tmp = gen_ld8u(addr, user); break;
8142 case 4: tmp = gen_ld8s(addr, user); break;
8143 case 1: tmp = gen_ld16u(addr, user); break;
8144 case 5: tmp = gen_ld16s(addr, user); break;
8145 case 2: tmp = gen_ld32(addr, user); break;
8146 default: goto illegal_op;
8148 if (rs == 15 && ENABLE_ARCH_5) {
8149 gen_bx(s, tmp);
8150 } else {
8151 store_reg(s, rs, tmp);
8154 } else {
8155 /* Store. */
8156 if (rs == 15)
8157 goto illegal_op;
8158 tmp = load_reg(s, rs);
8159 switch (op) {
8160 case 0: gen_st8(tmp, addr, user); break;
8161 case 1: gen_st16(tmp, addr, user); break;
8162 case 2: gen_st32(tmp, addr, user); break;
8163 default: goto illegal_op;
8166 if (postinc)
8167 tcg_gen_addi_i32(addr, addr, imm);
8168 if (writeback) {
8169 store_reg(s, rn, addr);
8170 } else {
8171 dead_tmp(addr);
8174 break;
8175 default:
8176 goto illegal_op;
8178 return 0;
8179 illegal_op:
8180 return 1;
8183 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8185 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8186 int32_t offset;
8187 int i;
8188 TCGv tmp;
8189 TCGv tmp2;
8190 TCGv addr;
8191 int crement;
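/* Inside an IT block every insn is predicated: emit a branch
   to condlabel that skips it when the condition fails. */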
8193 if (s->condexec_mask) {
8194 cond = s->condexec_cond;
8195 s->condlabel = gen_new_label();
8196 gen_test_cc(cond ^ 1, s->condlabel);
8197 s->condjmp = 1;
8200 insn = lduw_code(s->pc);
8201 s->pc += 2;
8203 switch (insn >> 12) {
8204 case 0: case 1:
8205 rd = insn & 7;
8206 op = (insn >> 11) & 3;
8207 if (op == 3) {
8208 /* add/subtract */
8209 rn = (insn >> 3) & 7;
8210 gen_movl_T0_reg(s, rn);
8211 if (insn & (1 << 10)) {
8212 /* immediate */
8213 gen_op_movl_T1_im((insn >> 6) & 7);
8214 } else {
8215 /* reg */
8216 rm = (insn >> 6) & 7;
8217 gen_movl_T1_reg(s, rm);
8219 if (insn & (1 << 9)) {
8220 if (s->condexec_mask)
8221 gen_op_subl_T0_T1();
8222 else
8223 gen_op_subl_T0_T1_cc();
8224 } else {
8225 if (s->condexec_mask)
8226 gen_op_addl_T0_T1();
8227 else
8228 gen_op_addl_T0_T1_cc();
8230 gen_movl_reg_T0(s, rd);
8231 } else {
8232 /* shift immediate */
8233 rm = (insn >> 3) & 7;
8234 shift = (insn >> 6) & 0x1f;
8235 tmp = load_reg(s, rm);
8236 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8237 if (!s->condexec_mask)
8238 gen_logic_CC(tmp);
8239 store_reg(s, rd, tmp);
8241 break;
8242 case 2: case 3:
8243 /* arithmetic large immediate */
8244 op = (insn >> 11) & 3;
8245 rd = (insn >> 8) & 0x7;
8246 if (op == 0) {
8247 gen_op_movl_T0_im(insn & 0xff);
8248 } else {
8249 gen_movl_T0_reg(s, rd);
8250 gen_op_movl_T1_im(insn & 0xff);
8252 switch (op) {
8253 case 0: /* mov */
8254 if (!s->condexec_mask)
8255 gen_op_logic_T0_cc();
8256 break;
8257 case 1: /* cmp */
8258 gen_op_subl_T0_T1_cc();
8259 break;
8260 case 2: /* add */
8261 if (s->condexec_mask)
8262 gen_op_addl_T0_T1();
8263 else
8264 gen_op_addl_T0_T1_cc();
8265 break;
8266 case 3: /* sub */
8267 if (s->condexec_mask)
8268 gen_op_subl_T0_T1();
8269 else
8270 gen_op_subl_T0_T1_cc();
8271 break;
8273 if (op != 1)
8274 gen_movl_reg_T0(s, rd);
8275 break;
8276 case 4:
8277 if (insn & (1 << 11)) {
8278 rd = (insn >> 8) & 7;
8279 /* load pc-relative. Bit 1 of PC is ignored. */
8280 val = s->pc + 2 + ((insn & 0xff) * 4);
8281 val &= ~(uint32_t)2;
8282 addr = new_tmp();
8283 tcg_gen_movi_i32(addr, val);
8284 tmp = gen_ld32(addr, IS_USER(s));
8285 dead_tmp(addr);
8286 store_reg(s, rd, tmp);
8287 break;
8289 if (insn & (1 << 10)) {
8290 /* data processing extended or blx */
8291 rd = (insn & 7) | ((insn >> 4) & 8);
8292 rm = (insn >> 3) & 0xf;
8293 op = (insn >> 8) & 3;
8294 switch (op) {
8295 case 0: /* add */
8296 gen_movl_T0_reg(s, rd);
8297 gen_movl_T1_reg(s, rm);
8298 gen_op_addl_T0_T1();
8299 gen_movl_reg_T0(s, rd);
8300 break;
8301 case 1: /* cmp */
8302 gen_movl_T0_reg(s, rd);
8303 gen_movl_T1_reg(s, rm);
8304 gen_op_subl_T0_T1_cc();
8305 break;
8306 case 2: /* mov/cpy */
8307 gen_movl_T0_reg(s, rm);
8308 gen_movl_reg_T0(s, rd);
8309 break;
8310 case 3: /* branch [and link] exchange thumb register */
8311 tmp = load_reg(s, rm);
8312 if (insn & (1 << 7)) {
8313 ARCH(5);
8314 val = (uint32_t)s->pc | 1;
8315 tmp2 = new_tmp();
8316 tcg_gen_movi_i32(tmp2, val);
8317 store_reg(s, 14, tmp2);
8319 gen_bx(s, tmp);
8320 break;
8322 break;
8325 /* data processing register */
8326 rd = insn & 7;
8327 rm = (insn >> 3) & 7;
8328 op = (insn >> 6) & 0xf;
8329 if (op == 2 || op == 3 || op == 4 || op == 7) {
8330 /* the shift/rotate ops want the operands backwards */
8331 val = rm;
8332 rm = rd;
8333 rd = val;
8334 val = 1;
8335 } else {
8336 val = 0;
8339 if (op == 9) /* neg */
8340 gen_op_movl_T0_im(0);
8341 else if (op != 0xf) /* mvn doesn't read its first operand */
8342 gen_movl_T0_reg(s, rd);
8344 gen_movl_T1_reg(s, rm);
8345 switch (op) {
8346 case 0x0: /* and */
8347 gen_op_andl_T0_T1();
8348 if (!s->condexec_mask)
8349 gen_op_logic_T0_cc();
8350 break;
8351 case 0x1: /* eor */
8352 gen_op_xorl_T0_T1();
8353 if (!s->condexec_mask)
8354 gen_op_logic_T0_cc();
8355 break;
8356 case 0x2: /* lsl */
8357 if (s->condexec_mask) {
8358 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8359 } else {
8360 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8361 gen_op_logic_T1_cc();
8363 break;
8364 case 0x3: /* lsr */
8365 if (s->condexec_mask) {
8366 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8367 } else {
8368 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8369 gen_op_logic_T1_cc();
8371 break;
8372 case 0x4: /* asr */
8373 if (s->condexec_mask) {
8374 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8375 } else {
8376 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8377 gen_op_logic_T1_cc();
8379 break;
8380 case 0x5: /* adc */
8381 if (s->condexec_mask)
8382 gen_adc_T0_T1();
8383 else
8384 gen_op_adcl_T0_T1_cc();
8385 break;
8386 case 0x6: /* sbc */
8387 if (s->condexec_mask)
8388 gen_sbc_T0_T1();
8389 else
8390 gen_op_sbcl_T0_T1_cc();
8391 break;
8392 case 0x7: /* ror */
8393 if (s->condexec_mask) {
8394 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8395 } else {
8396 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8397 gen_op_logic_T1_cc();
8399 break;
8400 case 0x8: /* tst */
8401 gen_op_andl_T0_T1();
8402 gen_op_logic_T0_cc();
8403 rd = 16;
8404 break;
8405 case 0x9: /* neg */
8406 if (s->condexec_mask)
8407 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8408 else
8409 gen_op_subl_T0_T1_cc();
8410 break;
8411 case 0xa: /* cmp */
8412 gen_op_subl_T0_T1_cc();
8413 rd = 16;
8414 break;
8415 case 0xb: /* cmn */
8416 gen_op_addl_T0_T1_cc();
8417 rd = 16;
8418 break;
8419 case 0xc: /* orr */
8420 gen_op_orl_T0_T1();
8421 if (!s->condexec_mask)
8422 gen_op_logic_T0_cc();
8423 break;
8424 case 0xd: /* mul */
8425 gen_op_mull_T0_T1();
8426 if (!s->condexec_mask)
8427 gen_op_logic_T0_cc();
8428 break;
8429 case 0xe: /* bic */
8430 gen_op_bicl_T0_T1();
8431 if (!s->condexec_mask)
8432 gen_op_logic_T0_cc();
8433 break;
8434 case 0xf: /* mvn */
8435 gen_op_notl_T1();
8436 if (!s->condexec_mask)
8437 gen_op_logic_T1_cc();
8438 val = 1;
8439 rm = rd;
8440 break;
8442 if (rd != 16) {
8443 if (val)
8444 gen_movl_reg_T1(s, rm);
8445 else
8446 gen_movl_reg_T0(s, rd);
8448 break;
8450 case 5:
8451 /* load/store register offset. */
8452 rd = insn & 7;
8453 rn = (insn >> 3) & 7;
8454 rm = (insn >> 6) & 7;
8455 op = (insn >> 9) & 7;
8456 addr = load_reg(s, rn);
8457 tmp = load_reg(s, rm);
8458 tcg_gen_add_i32(addr, addr, tmp);
8459 dead_tmp(tmp);
8461 if (op < 3) /* store */
8462 tmp = load_reg(s, rd);
8464 switch (op) {
8465 case 0: /* str */
8466 gen_st32(tmp, addr, IS_USER(s));
8467 break;
8468 case 1: /* strh */
8469 gen_st16(tmp, addr, IS_USER(s));
8470 break;
8471 case 2: /* strb */
8472 gen_st8(tmp, addr, IS_USER(s));
8473 break;
8474 case 3: /* ldrsb */
8475 tmp = gen_ld8s(addr, IS_USER(s));
8476 break;
8477 case 4: /* ldr */
8478 tmp = gen_ld32(addr, IS_USER(s));
8479 break;
8480 case 5: /* ldrh */
8481 tmp = gen_ld16u(addr, IS_USER(s));
8482 break;
8483 case 6: /* ldrb */
8484 tmp = gen_ld8u(addr, IS_USER(s));
8485 break;
8486 case 7: /* ldrsh */
8487 tmp = gen_ld16s(addr, IS_USER(s));
8488 break;
8490 if (op >= 3) /* load */
8491 store_reg(s, rd, tmp);
8492 dead_tmp(addr);
8493 break;
8495 case 6:
8496 /* load/store word immediate offset */
8497 rd = insn & 7;
8498 rn = (insn >> 3) & 7;
8499 addr = load_reg(s, rn);
8500 val = (insn >> 4) & 0x7c;
8501 tcg_gen_addi_i32(addr, addr, val);
8503 if (insn & (1 << 11)) {
8504 /* load */
8505 tmp = gen_ld32(addr, IS_USER(s));
8506 store_reg(s, rd, tmp);
8507 } else {
8508 /* store */
8509 tmp = load_reg(s, rd);
8510 gen_st32(tmp, addr, IS_USER(s));
8512 dead_tmp(addr);
8513 break;
8515 case 7:
8516 /* load/store byte immediate offset */
8517 rd = insn & 7;
8518 rn = (insn >> 3) & 7;
8519 addr = load_reg(s, rn);
8520 val = (insn >> 6) & 0x1f;
8521 tcg_gen_addi_i32(addr, addr, val);
8523 if (insn & (1 << 11)) {
8524 /* load */
8525 tmp = gen_ld8u(addr, IS_USER(s));
8526 store_reg(s, rd, tmp);
8527 } else {
8528 /* store */
8529 tmp = load_reg(s, rd);
8530 gen_st8(tmp, addr, IS_USER(s));
8532 dead_tmp(addr);
8533 break;
8535 case 8:
8536 /* load/store halfword immediate offset */
8537 rd = insn & 7;
8538 rn = (insn >> 3) & 7;
8539 addr = load_reg(s, rn);
8540 val = (insn >> 5) & 0x3e;
8541 tcg_gen_addi_i32(addr, addr, val);
8543 if (insn & (1 << 11)) {
8544 /* load */
8545 tmp = gen_ld16u(addr, IS_USER(s));
8546 store_reg(s, rd, tmp);
8547 } else {
8548 /* store */
8549 tmp = load_reg(s, rd);
8550 gen_st16(tmp, addr, IS_USER(s));
8552 dead_tmp(addr);
8553 break;
8555 case 9:
8556 /* load/store from stack */
8557 rd = (insn >> 8) & 7;
8558 addr = load_reg(s, 13);
8559 val = (insn & 0xff) * 4;
8560 tcg_gen_addi_i32(addr, addr, val);
8562 if (insn & (1 << 11)) {
8563 /* load */
8564 tmp = gen_ld32(addr, IS_USER(s));
8565 store_reg(s, rd, tmp);
8566 } else {
8567 /* store */
8568 tmp = load_reg(s, rd);
8569 gen_st32(tmp, addr, IS_USER(s));
8571 dead_tmp(addr);
8572 break;
8574 case 10:
8575 /* add to high reg */
8576 rd = (insn >> 8) & 7;
8577 if (insn & (1 << 11)) {
8578 /* SP */
8579 tmp = load_reg(s, 13);
8580 } else {
8581 /* PC. bit 1 is ignored. */
8582 tmp = new_tmp();
8583 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8585 val = (insn & 0xff) * 4;
8586 tcg_gen_addi_i32(tmp, tmp, val);
8587 store_reg(s, rd, tmp);
8588 break;
8590 case 11:
8591 /* misc */
8592 op = (insn >> 8) & 0xf;
8593 switch (op) {
8594 case 0:
8595 /* adjust stack pointer */
8596 tmp = load_reg(s, 13);
8597 val = (insn & 0x7f) * 4;
8598 if (insn & (1 << 7))
8599 val = -(int32_t)val;
8600 tcg_gen_addi_i32(tmp, tmp, val);
8601 store_reg(s, 13, tmp);
8602 break;
8604 case 2: /* sign/zero extend. */
8605 ARCH(6);
8606 rd = insn & 7;
8607 rm = (insn >> 3) & 7;
8608 tmp = load_reg(s, rm);
8609 switch ((insn >> 6) & 3) {
8610 case 0: gen_sxth(tmp); break;
8611 case 1: gen_sxtb(tmp); break;
8612 case 2: gen_uxth(tmp); break;
8613 case 3: gen_uxtb(tmp); break;
8615 store_reg(s, rd, tmp);
8616 break;
8617 case 4: case 5: case 0xc: case 0xd:
8618 /* push/pop */
8619 addr = load_reg(s, 13);
8620 if (insn & (1 << 8))
8621 offset = 4;
8622 else
8623 offset = 0;
8624 for (i = 0; i < 8; i++) {
8625 if (insn & (1 << i))
8626 offset += 4;
8628 if ((insn & (1 << 11)) == 0) {
8629 tcg_gen_addi_i32(addr, addr, -offset);
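/* Full-descending stack: for push, drop below the old SP
   first and then store upwards. */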
8631 for (i = 0; i < 8; i++) {
8632 if (insn & (1 << i)) {
8633 if (insn & (1 << 11)) {
8634 /* pop */
8635 tmp = gen_ld32(addr, IS_USER(s));
8636 store_reg(s, i, tmp);
8637 } else {
8638 /* push */
8639 tmp = load_reg(s, i);
8640 gen_st32(tmp, addr, IS_USER(s));
8642 /* advance to the next address. */
8643 tcg_gen_addi_i32(addr, addr, 4);
8646 TCGV_UNUSED(tmp);
8647 if (insn & (1 << 8)) {
8648 if (insn & (1 << 11)) {
8649 /* pop pc */
8650 tmp = gen_ld32(addr, IS_USER(s));
8651 /* don't set the pc until the rest of the instruction
8652 has completed */
8653 } else {
8654 /* push lr */
8655 tmp = load_reg(s, 14);
8656 gen_st32(tmp, addr, IS_USER(s));
8658 tcg_gen_addi_i32(addr, addr, 4);
8660 if ((insn & (1 << 11)) == 0) {
8661 tcg_gen_addi_i32(addr, addr, -offset);
8663 /* write back the new stack pointer */
8664 store_reg(s, 13, addr);
8665 /* set the new PC value */
8666 if ((insn & 0x0900) == 0x0900) {
8667 if (ENABLE_ARCH_5) {
8668 gen_bx(s, tmp);
8669 } else {
8670 store_reg(s, 15, tmp);
8673 break;
8675 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8676 rm = insn & 7;
8677 tmp = load_reg(s, rm);
8678 s->condlabel = gen_new_label();
8679 s->condjmp = 1;
8680 if (insn & (1 << 11))
8681 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8682 else
8683 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8684 dead_tmp(tmp);
8685 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
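/* cb{n}z offset: insn[7:3] -> offset[5:1], insn[9] -> offset[6]. */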
8686 val = (uint32_t)s->pc + 2;
8687 val += offset;
8688 gen_jmp(s, val);
8689 break;
8691 case 15: /* IT, nop-hint. */
8692 if ((insn & 0xf) == 0) {
8693 gen_nop_hint(s, (insn >> 4) & 0xf);
8694 break;
8696 /* If Then. */
8697 s->condexec_cond = (insn >> 4) & 0xe;
8698 s->condexec_mask = insn & 0x1f;
8699 /* No actual code generated for this insn, just setup state. */
8700 break;
8702 case 0xe: /* bkpt */
8703 gen_set_condexec(s);
8704 gen_set_pc_im(s->pc - 2);
8705 gen_exception(EXCP_BKPT);
8706 s->is_jmp = DISAS_JUMP;
8707 break;
8709 case 0xa: /* rev */
8710 ARCH(6);
8711 rn = (insn >> 3) & 0x7;
8712 rd = insn & 0x7;
8713 tmp = load_reg(s, rn);
8714 switch ((insn >> 6) & 3) {
8715 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8716 case 1: gen_rev16(tmp); break;
8717 case 3: gen_revsh(tmp); break;
8718 default: goto illegal_op;
8720 store_reg(s, rd, tmp);
8721 break;
8723 case 6: /* cps */
8724 ARCH(6);
8725 if (IS_USER(s))
8726 break;
8727 if (IS_M(env)) {
8728 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8729 /* PRIMASK */
8730 if (insn & 1) {
8731 addr = tcg_const_i32(16);
8732 gen_helper_v7m_msr(cpu_env, addr, tmp);
8734 /* FAULTMASK */
8735 if (insn & 2) {
8736 addr = tcg_const_i32(17);
8737 gen_helper_v7m_msr(cpu_env, addr, tmp);
8739 gen_lookup_tb(s);
8740 } else {
8741 if (insn & (1 << 4))
8742 shift = CPSR_A | CPSR_I | CPSR_F;
8743 else
8744 shift = 0;
8745 gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
8747 break;
8749 default:
8750 goto undef;
8752 break;
8754 case 12:
8755 /* load/store multiple */
8756 rn = (insn >> 8) & 0x7;
8757 addr = load_reg(s, rn);
8758 if (arm_feature(env, ARM_FEATURE_ABORT_BU) && (insn & (1 << rn)) == 0) {
8759 /* base-updated abort model: update base register
8760 before an abort can happen */
8761 crement = 0;
8762 for (i = 0; i < 8; i++) {
8763 if (insn & (1 << i)) {
8764 crement += 4;
8767 tmp = new_tmp();
8768 tcg_gen_addi_i32(tmp, addr, crement);
8769 store_reg(s, rn, tmp);
8771 for (i = 0; i < 8; i++) {
8772 if (insn & (1 << i)) {
8773 if (insn & (1 << 11)) {
8774 /* load */
8775 tmp = gen_ld32(addr, IS_USER(s));
8776 store_reg(s, i, tmp);
8777 } else {
8778 /* store */
8779 tmp = load_reg(s, i);
8780 gen_st32(tmp, addr, IS_USER(s));
8782 /* advance to the next address */
8783 tcg_gen_addi_i32(addr, addr, 4);
8786 /* Base register writeback. */
8787 if (!arm_feature(env, ARM_FEATURE_ABORT_BU) && (insn & (1 << rn)) == 0) {
8788 store_reg(s, rn, addr);
8789 } else {
8790 dead_tmp(addr);
8792 break;
8794 case 13:
8795 /* conditional branch or swi */
8796 cond = (insn >> 8) & 0xf;
8797 if (cond == 0xe)
8798 goto undef;
8800 if (cond == 0xf) {
8801 /* swi */
8802 gen_set_condexec(s);
8803 gen_set_pc_im(s->pc);
8804 s->is_jmp = DISAS_SWI;
8805 break;
8807 /* generate a conditional jump to next instruction */
8808 s->condlabel = gen_new_label();
8809 gen_test_cc(cond ^ 1, s->condlabel);
8810 s->condjmp = 1;
8812 /* jump to the offset */
8813 val = (uint32_t)s->pc + 2;
8814 offset = ((int32_t)insn << 24) >> 24;
8815 val += offset << 1;
8816 gen_jmp(s, val);
8817 break;
8819 case 14:
8820 if (insn & (1 << 11)) {
8821 if (disas_thumb2_insn(env, s, insn))
8822 goto undef32;
8823 break;
8825 /* unconditional branch */
8826 val = (uint32_t)s->pc;
8827 offset = ((int32_t)insn << 21) >> 21;
8828 val += (offset << 1) + 2;
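/* Sign-extended 11-bit halfword offset; s->pc is this insn + 2,
   so the extra 2 gives the PC + 4 base. */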
8829 gen_jmp(s, val);
8830 break;
8832 case 15:
8833 if (disas_thumb2_insn(env, s, insn))
8834 goto undef32;
8835 break;
8837 return;
8838 undef32:
8839 gen_set_condexec(s);
8840 gen_set_pc_im(s->pc - 4);
8841 gen_exception(EXCP_UDEF);
8842 s->is_jmp = DISAS_JUMP;
8843 return;
8844 illegal_op:
8845 undef:
8846 gen_set_condexec(s);
8847 gen_set_pc_im(s->pc - 2);
8848 gen_exception(EXCP_UDEF);
8849 s->is_jmp = DISAS_JUMP;
8852 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8853 basic block 'tb'. If search_pc is TRUE, also generate PC
8854 information for each intermediate instruction. */
8855 static inline void gen_intermediate_code_internal(CPUState *env,
8856 TranslationBlock *tb,
8857 int search_pc)
8859 DisasContext dc1, *dc = &dc1;
8860 CPUBreakpoint *bp;
8861 uint16_t *gen_opc_end;
8862 int j, lj;
8863 target_ulong pc_start;
8864 uint32_t next_page_start;
8865 int num_insns;
8866 int max_insns;
8868 /* generate intermediate code */
8869 num_temps = 0;
8871 pc_start = tb->pc;
8873 dc->tb = tb;
8875 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8877 dc->is_jmp = DISAS_NEXT;
8878 dc->pc = pc_start;
8879 dc->singlestep_enabled = env->singlestep_enabled;
8880 dc->condjmp = 0;
8881 dc->thumb = env->thumb;
8882 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8883 dc->condexec_cond = env->condexec_bits >> 4;
8884 #if !defined(CONFIG_USER_ONLY)
8885 if (IS_M(env)) {
8886 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8887 } else {
8888 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8890 #endif
8891 cpu_F0s = tcg_temp_new_i32();
8892 cpu_F1s = tcg_temp_new_i32();
8893 cpu_F0d = tcg_temp_new_i64();
8894 cpu_F1d = tcg_temp_new_i64();
8895 cpu_V0 = cpu_F0d;
8896 cpu_V1 = cpu_F1d;
8897 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8898 cpu_M0 = tcg_temp_new_i64();
8899 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8900 lj = -1;
8901 num_insns = 0;
8902 max_insns = tb->cflags & CF_COUNT_MASK;
8903 if (max_insns == 0)
8904 max_insns = CF_COUNT_MASK;
8906 gen_icount_start();
8907 /* Reset the conditional execution bits immediately. This avoids
8908 complications trying to do it at the end of the block. */
8909 if (env->condexec_bits)
8911 TCGv tmp = new_tmp();
8912 tcg_gen_movi_i32(tmp, 0);
8913 store_cpu_field(tmp, condexec_bits);
8915 do {
8916 #ifdef CONFIG_USER_ONLY
8917 /* Intercept jump to the magic kernel page. */
8918 if (dc->pc >= 0xffff0000) {
8919 /* We always get here via a jump, so we know we are not in a
8920 conditional execution block. */
8921 gen_exception(EXCP_KERNEL_TRAP);
8922 dc->is_jmp = DISAS_UPDATE;
8923 break;
8925 #else
8926 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8927 /* We always get here via a jump, so we know we are not in a
8928 conditional execution block. */
8929 gen_exception(EXCP_EXCEPTION_EXIT);
8930 dc->is_jmp = DISAS_UPDATE;
8931 break;
8933 #endif
8935 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8936 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
8937 if (bp->pc == dc->pc) {
8938 gen_set_condexec(dc);
8939 gen_set_pc_im(dc->pc);
8940 gen_exception(EXCP_DEBUG);
8941 dc->is_jmp = DISAS_JUMP;
8942 /* Advance PC so that clearing the breakpoint will
8943 invalidate this TB. */
8944 dc->pc += 2;
8945 goto done_generating;
8946 break;
8950 if (search_pc) {
8951 j = gen_opc_ptr - gen_opc_buf;
8952 if (lj < j) {
8953 lj++;
8954 while (lj < j)
8955 gen_opc_instr_start[lj++] = 0;
8957 gen_opc_pc[lj] = dc->pc;
8958 gen_opc_instr_start[lj] = 1;
8959 gen_opc_icount[lj] = num_insns;
8962 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8963 gen_io_start();
8965 if (env->thumb) {
8966 disas_thumb_insn(env, dc);
8967 if (dc->condexec_mask) {
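/* Advance the IT state: the next condition's low bit comes from
   the top of the mask, and the mask shifts left by one. Once
   only zeros remain the IT block has finished. */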
8968 dc->condexec_cond = (dc->condexec_cond & 0xe)
8969 | ((dc->condexec_mask >> 4) & 1);
8970 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8971 if (dc->condexec_mask == 0) {
8972 dc->condexec_cond = 0;
8975 } else {
8976 disas_arm_insn(env, dc);
8978 if (num_temps) {
8979 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8980 num_temps = 0;
8983 if (dc->condjmp && !dc->is_jmp) {
8984 gen_set_label(dc->condlabel);
8985 dc->condjmp = 0;
8987 /* Translation stops when a conditional branch is encountered.
8988 * Otherwise the subsequent code could get translated several times.
8989 * Also stop translation when a page boundary is reached. This
8990 * ensures prefetch aborts occur at the right place. */
8991 num_insns++;
8992 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8993 !env->singlestep_enabled &&
8994 !singlestep &&
8995 dc->pc < next_page_start &&
8996 num_insns < max_insns);
8998 if (tb->cflags & CF_LAST_IO) {
8999 if (dc->condjmp) {
9000 /* FIXME: This can theoretically happen with self-modifying
9001 code. */
9002 cpu_abort(env, "IO on conditional branch instruction");
9004 gen_io_end();

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this
           code.  */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            /* The condition failed: continue with the next instruction.  */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
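
/* All code generation paths, including the early breakpoint exit,
   converge here.  */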
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
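
/* Public entry points: generate code for a TB, either normally or, for
   the _pc variant, recording per-op PC information so CPU state can be
   restored after a fault inside the TB.  */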
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
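
/* Names for the CPSR mode field (indexed by M[3:0]); "???" marks
   encodings that are unused or unpredictable.  */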
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
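    /* Decode the CPSR: NZCV flags, Thumb vs. ARM state, the current mode
       name, and whether it is a 32-bit mode (M[4] set) or a legacy
       26-bit one.  */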
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
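
/* Restore the guest PC from the per-op table built in search_pc mode;
   invoked via cpu_restore_state() when a TB is interrupted mid-execution
   (e.g. by a fault).  */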
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}