/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
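
/*
 * Illustrative note (not part of the original source): ARCH(x) is used
 * inside the instruction decoders, so e.g. ARCH(6T2) expands to
 *     do { if (!arm_feature(env, ARM_FEATURE_THUMB2)) goto illegal_op; } while(0)
 * and bails out to the caller's illegal_op label when the feature is absent.
 */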
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    uint32_t condexec_mask;
    uint32_t condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];

/* FIXME:  These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
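
/*
 * Worked example (illustrative, not from the original source):
 * gen_sbfx(var, 8, 4) extracts bits [11:8] as a signed field.  After the
 * arithmetic shift the low 4 bits hold the field; the xor/sub pair with
 * signbit = 1u << 3 then sign-extends it.  For a field value of 0b1110:
 * (14 ^ 8) - 8 = -2, which is 0b1110 read as a signed 4-bit number.
 */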
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
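
/*
 * Worked example (illustrative): clearing bit 15 of both inputs before the
 * add suppresses the carry between the two halfwords, and the final xor
 * restores the correct bit 15.  With low halves 0x8000 and 0x0001:
 * tmp = 0x8000, the cleared sum is 0x0001, and 0x0001 ^ 0x8000 = 0x8001 --
 * the true 16-bit sum, with no carry leaking into the high halfword.
 */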
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
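
/*
 * Illustrative note: the rotate right by i is composed from two shifts,
 * t0 = (t1 >> i) | (t1 << (32 - i)).  For example, with i == 8 and
 * t1 == 0x12345678 this yields 0x78123456.  The i == 0 early exit avoids
 * the undefined full-width shift in the "<< (32 - i)" half.
 */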
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
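
/*
 * Expansion example (illustrative): gen_arm_parallel_addsub(1, 0, a, b)
 * takes the op1 == 1 arm of the switch, points tmp at the GE flags in
 * CPUState, and PAS_OP(s) with op2 == 0 expands to
 *     gen_helper_sadd16(a, a, b, tmp);
 * i.e. the signed, GE-setting SADD16 helper.
 */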
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
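
/*
 * Summary (illustrative): cc is the 4-bit ARM condition field, so the cases
 * above implement EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT and LE
 * in order; 14 (AL) is handled before this function is reached and 15 is
 * invalid, hence the abort in the default case.
 */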
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_mov_i32(cpu_R[reg], tmp);
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}

VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
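
/*
 * Worked example (illustrative): neon_reg_offset(5, 1) computes
 * sreg = 5 * 2 + 1 = 11, and vfp_reg_offset(0, 11) resolves to
 * vfp.regs[5] plus the offset of the upper word -- the most significant
 * half of d5, matching the "zero is the least significant end" rule.
 */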
/* FIXME: Remove these.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
    tcg_temp_free(tmp);
}
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
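
/*
 * Illustrative note: this decodes the standard coprocessor addressing
 * modes -- bit 24 selects pre- vs post-indexing, bit 23 the offset sign,
 * and bit 21 base writeback.  The immediate is (insn & 0xff), scaled by 4
 * when bit 8 is set via the "<< ((insn >> 7) & 2)" term; the effective
 * address is left in T1.
 */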
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8))
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    else
        gen_iwmmxt_movl_T0_T1_wRn(rd);

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                     /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else {                                        /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                      /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                 /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else {                                /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                 /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else {                                /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                      /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                 /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else {                                /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                 /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else {                                /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                             /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                             /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_T0_reg(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                             /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                             /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300:                                             /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                             /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                 /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:         /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:         /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:         /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:         /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:         /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:         /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:         /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:         /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:         /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:         /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
            }
            break;
        case 2:
            gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:         /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_set_nzcv(cpu_T[1]);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:         /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:         /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:         /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:         /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:         /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:         /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:         /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:         /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:         /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:         /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:         /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:         /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f))
                return 1;
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f))
                return 1;
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:         /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:         /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:         /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im((insn >> 20) & 3);
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:         /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:         /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:         /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:         /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (!(insn & (1 << 20)))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                           /* TMIA */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                           /* TMIAPH */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:             /* TMIAxy */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                           /* MIA */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                           /* MIAPH */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                           /* MIABB */
        case 0xd:                                           /* MIABT */
        case 0xe:                                           /* MIATB */
        case 0xf:                                           /* MIATT */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                         /* MRA */
            gen_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else {                                            /* MAR */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
    }
    return 0;
}
/* Return nonzero if a cp15 access from user mode is permitted.  */
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
            || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
/* Disassemble a system coprocessor (cp15) instruction.  Return nonzero
   if the instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
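/* For example, for the single precision Dd field the register number is
   split between bits [15:12] and bit 22, so VFP_SREG_D(insn) evaluates
   to ((insn >> 11) & 0x1e) | ((insn >> 22) & 1).  The VFP_DREG variants
   instead treat the extra bit as the top bit of the register number,
   which is only valid on VFP3 (registers D16-D31).  */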
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}
static inline int vfp_enabled(CPUState *env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
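/* E.g. gen_neon_dup_low16 turns 0x1234abcd into 0xabcdabcd, and
   gen_neon_dup_high16 turns it into 0x12341234; gen_neon_dup_u8 with
   shift 8 replicates byte 1 (0xab) into all four byte lanes, giving
   0xabababab.  */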
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen, pass, size;
    TCGv tmp, tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_N(rn, insn);
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16: case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8: case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10: case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 20: case 21: case 22: case 23:
                case 28: case 29: case 30: case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        printf("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
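/* Direct block chaining: tcg_gen_exit_tb((long)tb + n) encodes the
   source TB pointer and the jump slot index (n is 0 or 1) in the value
   returned to the execution loop, which can then patch this TB to jump
   straight to its successor on later executions.  Chaining is only
   done when source and destination lie in the same guest page, hence
   the TARGET_PAGE_MASK comparison above.  */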
/* Multiply two 16-bit operands, optionally taking the top half of each
   source register (x and y select top/bottom for t0 and t1).  */
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
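/* E.g. MSR CPSR_cf, rN sets field flag bits 0 and 3, giving a raw mask
   of 0xff0000ff before the feature- and mode-dependent bits are
   removed above.  */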
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.  */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    dead_tmp(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(CPU_T001); break;
    case 1: gen_helper_neon_add_u16(CPU_T001); break;
    case 2: gen_op_addl_T0_T1(); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 2: gen_op_rsbl_T0_T1(); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
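/* E.g. GEN_NEON_INTEGER_OP(hadd) with size == 1 and u == 0 expands to
   gen_helper_neon_hadd_s16(cpu_T[0], cpu_T[0], cpu_T[1]), i.e. the
   signed 16-bit halving add applied to the two operand temporaries.  */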
static inline void gen_neon_movl_scratch_T0(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
}

static inline void gen_neon_movl_scratch_T1(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
}

static inline void gen_neon_movl_T0_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
}

static inline void gen_neon_movl_T1_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
}
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1) {
            gen_neon_dup_low16(cpu_T[0]);
        } else {
            gen_neon_dup_high16(cpu_T[0]);
        }
    }
}
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;

    for (n = 0; n < q + 1; n += 2) {
        NEON_GET_REG(T0, reg, n);
        NEON_GET_REG(T1, reg, n + 1);
        switch (size) {
        case 0: gen_helper_neon_unzip_u8(); break;
        case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
        case 2: /* no-op */ break;
        default: abort();
        }
        gen_neon_movl_scratch_T0(tmp + n);
        gen_neon_movl_scratch_T1(tmp + n + 1);
    }
}

static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op, nregs, interleave, reg, stride, pass, size, load;
    int n, shift;
    TCGv tmp, tmp2, base, index;

    if (!vfp_enabled(env))
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    if (load) {
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                        neon_store_reg(rd, pass, tmp);
                    } else {
                        tmp = neon_load_reg(rd, pass);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    if (load) {
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
                        dead_tmp(tmp2);
                        neon_store_reg(rd, pass, tmp);
                    } else {
                        tmp = neon_load_reg(rd, pass);
                        tmp2 = new_tmp();
                        tcg_gen_shri_i32(tmp2, tmp, 16);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        gen_st16(tmp2, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    if (load) {
                        TCGV_UNUSED(tmp2);
                        for (n = 0; n < 4; n++) {
                            tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                tmp2 = tmp;
                            } else {
                                gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
                                dead_tmp(tmp);
                            }
                        }
                        neon_store_reg(rd, pass, tmp2);
                    } else {
                        tmp2 = neon_load_reg(rd, pass);
                        for (n = 0; n < 4; n++) {
                            tmp = new_tmp();
                            if (n == 0) {
                                tcg_gen_mov_i32(tmp, tmp2);
                            } else {
                                tcg_gen_shri_i32(tmp, tmp2, n * 8);
                            }
                            gen_st8(tmp, cpu_T[1], IS_USER(s));
                            gen_op_addl_T1_im(stride);
                        }
                        dead_tmp(tmp2);
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    gen_neon_dup_u8(tmp, 0);
                    break;
                case 1:
                    tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    gen_neon_dup_low16(tmp);
                    break;
                case 2:
                    tmp = gen_ld32(cpu_T[0], IS_USER(s));
                    break;
                case 3:
                    return 1;
                default: /* Avoid compiler warnings.  */
                    abort();
                }
                gen_op_addl_T1_im(1 << size);
                tmp2 = new_tmp();
                tcg_gen_mov_i32(tmp2, tmp);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        dead_tmp(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_bic_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
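/* Computes dest = (t & c) | (f & ~c): each result bit comes from t
   where the corresponding bit of c is set and from f where it is
   clear.  VBSL, VBIT and VBIF all reduce to this with the operands
   permuted.  */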
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
}
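/* The (size << 1) | u switch mirrors GEN_NEON_INTEGER_OP: element sizes
   0 and 1 use dedicated widening multiply helpers, while the
   32x32->64 cases (4 and 5) are built from the generic signed and
   unsigned 32-bit multiplies that return an i64.  */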
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int op, q, u, size, pass, count, pairwise, invert, n, shift;
    int rd, rn, rm;
    int prewiden, src1_wide, src2_wide;
    uint32_t imm, val;
    uint64_t imm64;
    TCGv tmp, tmp2, tmp3;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
                          || op == 10 || op == 11 || op == 16)) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case 1: /* VQADD */
                    if (u)
                        gen_helper_neon_add_saturate_u64(CPU_V001);
                    else
                        gen_helper_neon_add_saturate_s64(CPU_V001);
                    break;
                case 5: /* VQSUB */
                    if (u)
                        gen_helper_neon_sub_saturate_u64(CPU_V001);
                    else
                        gen_helper_neon_sub_saturate_s64(CPU_V001);
                    break;
                case 8: /* VSHL */
                    if (u)
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    else
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    break;
                case 9: /* VQSHL */
                    if (u)
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    else
                        gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
                                                 cpu_V1, cpu_V0);
                    break;
                case 10: /* VRSHL */
                    if (u)
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    else
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    break;
                case 11: /* VQRSHL */
                    if (u)
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    else
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    break;
                case 16: /* VADD, VSUB */
                    if (u)
                        tcg_gen_sub_i64(CPU_V001);
                    else
                        tcg_gen_add_i64(CPU_V001);
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        switch (op) {
        case 8: /* VSHL */
        case 9: /* VQSHL */
        case 10: /* VRSHL */
        case 11: /* VQRSHL */
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
                pairwise = 0;
            }
            break;
        case 20: /* VPMAX */
        case 21: /* VPMIN */
        case 23: /* VPADD */
            pairwise = 1;
            break;
        case 26: /* VPADD (float) */
            pairwise = (u && size < 2);
            break;
        case 30: /* VPMIN/VPMAX (float) */
            pairwise = u;
            break;
        default:
            pairwise = 0;
            break;
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {
            if (pairwise) {
                /* Pairwise.  */
                if (q)
                    n = (pass & 1) * 2;
                else
                    n = 0;
                if (pass < q + 1) {
                    NEON_GET_REG(T0, rn, n);
                    NEON_GET_REG(T1, rn, n + 1);
                } else {
                    NEON_GET_REG(T0, rm, n);
                    NEON_GET_REG(T1, rm, n + 1);
                }
            } else {
                /* Elementwise.  */
                NEON_GET_REG(T0, rn, pass);
                NEON_GET_REG(T1, rm, pass);
            }
            switch (op) {
            case 0: /* VHADD */
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case 1: /* VQADD */
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case 2: /* VRHADD */
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case 3: /* Logic ops.  */
                switch ((u << 2) | size) {
                case 0: /* VAND */
                    gen_op_andl_T0_T1();
                    break;
                case 1: /* BIC */
                    gen_op_bicl_T0_T1();
                    break;
                case 2: /* VORR */
                    gen_op_orl_T0_T1();
                    break;
                case 3: /* VORN */
                    gen_op_notl_T1();
                    gen_op_orl_T0_T1();
                    break;
                case 4: /* VEOR */
                    gen_op_xorl_T0_T1();
                    break;
                case 5: /* VBSL */
                    tmp = neon_load_reg(rd, pass);
                    gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
                    dead_tmp(tmp);
                    break;
                case 6: /* VBIT */
                    tmp = neon_load_reg(rd, pass);
                    gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
                    dead_tmp(tmp);
                    break;
                case 7: /* VBIF */
                    tmp = neon_load_reg(rd, pass);
                    gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
                    dead_tmp(tmp);
                    break;
                }
                break;
            case 4: /* VHSUB */
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case 5: /* VQSUB */
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case 6: /* VCGT */
                GEN_NEON_INTEGER_OP(cgt);
                break;
            case 7: /* VCGE */
                GEN_NEON_INTEGER_OP(cge);
                break;
            case 8: /* VSHL */
                GEN_NEON_INTEGER_OP(shl);
                break;
            case 9: /* VQSHL */
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case 10: /* VRSHL */
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case 11: /* VQRSHL */
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case 12: /* VMAX */
                GEN_NEON_INTEGER_OP(max);
                break;
            case 13: /* VMIN */
                GEN_NEON_INTEGER_OP(min);
                break;
            case 14: /* VABD */
                GEN_NEON_INTEGER_OP(abd);
                break;
            case 15: /* VABA */
                GEN_NEON_INTEGER_OP(abd);
                NEON_GET_REG(T1, rd, pass);
                gen_neon_add(size);
                break;
            case 16:
                if (!u) { /* VADD */
                    if (gen_neon_add(size))
                        return 1;
                } else { /* VSUB */
                    switch (size) {
                    case 0: gen_helper_neon_sub_u8(CPU_T001); break;
                    case 1: gen_helper_neon_sub_u16(CPU_T001); break;
                    case 2: gen_op_subl_T0_T1(); break;
                    default: return 1;
                    }
                }
                break;
            case 17:
                if (!u) { /* VTST */
                    switch (size) {
                    case 0: gen_helper_neon_tst_u8(CPU_T001); break;
                    case 1: gen_helper_neon_tst_u16(CPU_T001); break;
                    case 2: gen_helper_neon_tst_u32(CPU_T001); break;
                    default: return 1;
                    }
                } else { /* VCEQ */
                    switch (size) {
                    case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
                    case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
                    case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
                    default: return 1;
                    }
                }
                break;
            case 18: /* Multiply.  */
                switch (size) {
                case 0: gen_helper_neon_mul_u8(CPU_T001); break;
                case 1: gen_helper_neon_mul_u16(CPU_T001); break;
                case 2: gen_op_mul_T0_T1(); break;
                default: return 1;
                }
                NEON_GET_REG(T1, rd, pass);
                if (u) { /* VMLS */
                    gen_neon_rsb(size);
                } else { /* VMLA */
                    gen_neon_add(size);
                }
                break;
            case 19: /* VMUL */
                if (u) { /* polynomial */
                    gen_helper_neon_mul_p8(CPU_T001);
                } else { /* Integer */
                    switch (size) {
                    case 0: gen_helper_neon_mul_u8(CPU_T001); break;
                    case 1: gen_helper_neon_mul_u16(CPU_T001); break;
                    case 2: gen_op_mul_T0_T1(); break;
                    default: return 1;
                    }
                }
                break;
            case 20: /* VPMAX */
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case 21: /* VPMIN */
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case 22: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
                    case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
                    default: return 1;
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
                    case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
                    default: return 1;
                    }
                }
                break;
            case 23: /* VPADD */
                if (u)
                    return 1;
                switch (size) {
                case 0: gen_helper_neon_padd_u8(CPU_T001); break;
                case 1: gen_helper_neon_padd_u16(CPU_T001); break;
                case 2: gen_op_addl_T0_T1(); break;
                default: return 1;
                }
                break;
            case 26: /* Floating point arithmetic.  */
                switch ((u << 2) | size) {
                case 0: /* VADD */
                    gen_helper_neon_add_f32(CPU_T001);
                    break;
                case 2: /* VSUB */
                    gen_helper_neon_sub_f32(CPU_T001);
                    break;
                case 4: /* VPADD */
                    gen_helper_neon_add_f32(CPU_T001);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(CPU_T001);
                    break;
                default:
                    return 1;
                }
                break;
            case 27: /* Float multiply.  */
                gen_helper_neon_mul_f32(CPU_T001);
                if (!u) {
                    NEON_GET_REG(T1, rd, pass);
                    if (size == 0) {
                        gen_helper_neon_add_f32(CPU_T001);
                    } else {
                        gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
                    }
                }
                break;
            case 28: /* Float compare.  */
                if (!u) {
                    gen_helper_neon_ceq_f32(CPU_T001);
                } else {
                    if (size == 0)
                        gen_helper_neon_cge_f32(CPU_T001);
                    else
                        gen_helper_neon_cgt_f32(CPU_T001);
                }
                break;
            case 29: /* Float compare absolute.  */
                if (!u)
                    return 1;
                if (size == 0)
                    gen_helper_neon_acge_f32(CPU_T001);
                else
                    gen_helper_neon_acgt_f32(CPU_T001);
                break;
            case 30: /* Float min/max.  */
                if (size == 0)
                    gen_helper_neon_max_f32(CPU_T001);
                else
                    gen_helper_neon_min_f32(CPU_T001);
                break;
            case 31:
                if (size == 0)
                    gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
                else
                    gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
                break;
            default:
                abort();
            }
            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                gen_neon_movl_scratch_T0(pass);
            } else {
                NEON_SET_REG(T0, rd, pass);
            }
        }

        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                gen_neon_movl_T0_scratch(pass);
                NEON_SET_REG(T0, rd, pass);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            /* To avoid excessive duplication of ops we implement shift
               by immediate using the variable shift operations.  */
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4)
                    shift = shift - (1 << (size + 3));
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }
                switch (size) {
                case 0:
                    imm = (uint8_t) shift;
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                case 1:
                    imm = (uint16_t) shift;
                    imm |= imm << 16;
                    break;
                case 2:
                case 3:
                    imm = shift;
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 5: /* VSHL, VSLI */
                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHL */
                            if (u)
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHLU */
                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                            break;
                        }
                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            cpu_abort(env, "VS[LR]I.64 not implemented");
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        gen_op_movl_T1_im(imm);
                        NEON_GET_REG(T0, rm, pass);
                        switch (op) {
                        case 0: /* VSHR */
                        case 1: /* VSRA */
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 4: /* VSRI */
                            if (!u)
                                return 1;
                            GEN_NEON_INTEGER_OP(shl);
                            break;
                        case 5: /* VSHL, VSLI */
                            switch (size) {
                            case 0: gen_helper_neon_shl_u8(CPU_T001); break;
                            case 1: gen_helper_neon_shl_u16(CPU_T001); break;
                            case 2: gen_helper_neon_shl_u32(CPU_T001); break;
                            default: return 1;
                            }
                            break;
                        case 6: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        case 7: /* VQSHLU */
                            switch (size) {
                            case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
                            case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
                            case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
                            default: return 1;
                            }
                            break;
                        }

                        if (op == 1 || op == 3) {
                            /* Accumulate.  */
                            NEON_GET_REG(T1, rd, pass);
                            gen_neon_add(size);
                        } else if (op == 4 || (op == 5 && u)) {
                            /* Insert */
                            switch (size) {
                            case 0:
                                if (op == 4)
                                    imm = 0xff >> -shift;
                                else
                                    imm = (uint8_t)(0xff << shift);
                                imm |= imm << 8;
                                imm |= imm << 16;
                                break;
                            case 1:
                                if (op == 4)
                                    imm = 0xffff >> -shift;
                                else
                                    imm = (uint16_t)(0xffff << shift);
                                imm |= imm << 16;
                                break;
                            case 2:
                                if (op == 4)
                                    imm = 0xffffffffu >> -shift;
                                else
                                    imm = 0xffffffffu << shift;
                                break;
                            default:
                                abort();
                            }
                            tmp = neon_load_reg(rd, pass);
                            tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
                            tcg_gen_andi_i32(tmp, tmp, ~imm);
                            tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
                            dead_tmp(tmp);
                        }
                        NEON_SET_REG(T0, rd, pass);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                shift = shift - (1 << (size + 3));
                size++;
                switch (size) {
                case 1:
                    imm = (uint16_t)shift;
                    imm |= imm << 16;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 2:
                    imm = (uint32_t)shift;
                    tmp2 = tcg_const_i32(imm);
                    TCGV_UNUSED_I64(tmp64);
                    break;
                case 3:
                    tmp64 = tcg_const_i64(shift);
                    TCGV_UNUSED(tmp2);
                    break;
                default:
                    abort();
                }

                for (pass = 0; pass < 2; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        if (q) {
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
                        } else {
                            if (u)
                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
                            else
                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
                        }
                    } else {
                        tmp = neon_load_reg(rm + pass, 0);
                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
                        tmp3 = neon_load_reg(rm + pass, 1);
                        gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        dead_tmp(tmp);
                        dead_tmp(tmp3);
                    }
                    tmp = new_tmp();
                    if (op == 8 && !u) {
                        gen_neon_narrow(size - 1, tmp, cpu_V0);
                    } else {
                        if (op == 8)
                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
                        else
                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
                    }
                    if (pass == 0) {
                        tmp2 = tmp;
                    } else {
                        neon_store_reg(rd, 0, tmp2);
                        neon_store_reg(rd, 1, tmp);
                    }
                } /* for pass */
            } else if (op == 10) {
                /* VSHLL */
                if (q || size == 3)
                    return 1;
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;
                    gen_neon_widen(cpu_V0, tmp, size, u);
                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        if (size < 2 || !u) {
                            if (size == 0)
                                imm = (0xffu >> (8 - shift));
                            else
                                imm = 0xffff >> (16 - shift);
                            imm64 = imm | (((uint64_t)imm) << 32);
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op == 15 || op == 16) {
                /* VCVT fixed-point.  */
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (op & 1) {
                        if (u)
                            gen_vfp_ulto(0, shift);
                        else
                            gen_vfp_slto(0, shift);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift);
                        else
                            gen_vfp_tosl(0, shift);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert)
                imm = ~imm;

            if (op != 14 || !invert)
                gen_op_movl_T1_im(imm);

            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                if (op & 1 && op < 12) {
                    tmp = neon_load_reg(rd, pass);
                    if (invert) {
                        /* The immediate value has already been inverted, so
                           BIC becomes AND.  */
                        tcg_gen_andi_i32(tmp, tmp, imm);
                    } else {
                        tcg_gen_ori_i32(tmp, tmp, imm);
                    }
                } else {
                    /* VMOV, VMVN.  */
                    tmp = new_tmp();
                    if (op == 14 && invert) {
                        val = 0;
                        for (n = 0; n < 4; n++) {
                            if (imm & (1 << (n + (pass & 1) * 4)))
                                val |= 0xff << (n * 8);
                        }
                        tcg_gen_movi_i32(tmp, val);
                    } else {
                        tcg_gen_movi_i32(tmp, imm);
                    }
                }
                neon_store_reg(rd, pass, tmp);
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                /* prewiden, src1_wide, src2_wide */
                static const int neon_3reg_wide[16][3] = {
                    {1, 0, 0}, /* VADDL */
                    {1, 1, 0}, /* VADDW */
                    {1, 0, 0}, /* VSUBL */
                    {1, 1, 0}, /* VSUBW */
                    {0, 1, 1}, /* VADDHN */
                    {0, 0, 0}, /* VABAL */
                    {0, 1, 1}, /* VSUBHN */
                    {0, 0, 0}, /* VABDL */
                    {0, 0, 0}, /* VMLAL */
                    {0, 0, 0}, /* VQDMLAL */
                    {0, 0, 0}, /* VMLSL */
                    {0, 0, 0}, /* VQDMLSL */
                    {0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0}, /* VQDMULL */
                    {0, 0, 0} /* Polynomial VMULL */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];

                if (size == 0 && (op == 9 || op == 11 || op == 13))
                    return 1;

                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    NEON_GET_REG(T0, rm, 1);
                    gen_neon_movl_scratch_T0(2);
                } else if (rd == rn && !src1_wide) {
                    NEON_GET_REG(T0, rn, 1);
                    gen_neon_movl_scratch_T0(2);
                }
                TCGV_UNUSED(tmp3);
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        TCGV_UNUSED(tmp);
                    } else {
                        if (pass == 1 && rd == rn) {
                            gen_neon_movl_T0_scratch(2);
                            tmp = new_tmp();
                            tcg_gen_mov_i32(tmp, cpu_T[0]);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        TCGV_UNUSED(tmp2);
                    } else {
                        if (pass == 1 && rd == rm) {
                            gen_neon_movl_T0_scratch(2);
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, cpu_T[0]);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        dead_tmp(tmp2);
                        dead_tmp(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    case 14: /* Polynomial VMULL */
                        cpu_abort(env, "Polynomial VMULL not implemented");
                    default: /* 15 is RESERVED.  */
                        return 1;
                    }
                    if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        if (op == 10 || op == 11) {
                            gen_neon_negl(cpu_V0, size);
                        }
                        if (op != 13) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 13: /* VQDMULL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = new_tmp();
                        if (u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar.  */
                switch (op) {
                case 0: /* Integer VMLA scalar */
                case 1: /* Float VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 5: /* Floating point VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 9: /* Floating point VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    gen_neon_get_scalar(size, rm);
                    gen_neon_movl_scratch_T0(0);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        if (pass != 0)
                            gen_neon_movl_T0_scratch(0);
                        NEON_GET_REG(T1, rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(CPU_T0E01);
                            } else {
                                gen_helper_neon_qdmulh_s32(CPU_T0E01);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(CPU_T0E01);
                            } else {
                                gen_helper_neon_qrdmulh_s32(CPU_T0E01);
                            }
                        } else if (op & 1) {
                            gen_helper_neon_mul_f32(CPU_T001);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(CPU_T001); break;
                            case 1: gen_helper_neon_mul_u16(CPU_T001); break;
                            case 2: gen_op_mul_T0_T1(); break;
                            default: return 1;
                            }
                        }
                        if (op < 8) {
                            /* Accumulate.  */
                            NEON_GET_REG(T1, rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size);
                                break;
                            case 1:
                                gen_helper_neon_add_f32(CPU_T001);
                                break;
                            case 4:
                                gen_neon_rsb(size);
                                break;
                            case 5:
                                gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
                                break;
                            default:
                                abort();
                            }
                        }
                        NEON_SET_REG(T0, rd, pass);
                    }
                    break;
                case 2: /* VMLAL scalar */
                case 3: /* VQDMLAL scalar */
                case 6: /* VMLSL scalar */
                case 7: /* VQDMLSL scalar */
                case 10: /* VMULL scalar */
                case 11: /* VQDMULL scalar */
                    if (size == 0 && (op == 3 || op == 7 || op == 11))
                        return 1;

                    gen_neon_get_scalar(size, rm);
                    NEON_GET_REG(T1, rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = new_tmp();
                            tcg_gen_mov_i32(tmp, cpu_T[1]);
                        }
                        tmp2 = new_tmp();
                        tcg_gen_mov_i32(tmp2, cpu_T[0]);
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op == 6 || op == 7) {
                            gen_neon_negl(cpu_V0, size);
                        }
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 2: case 6:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default: /* 14 and 15 are RESERVED */
                    return 1;
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;
                count = q + 1;

                if (imm > 7 && !q)
                    return 1;

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                switch (op) {
                case 0: /* VREV64 */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        NEON_GET_REG(T0, rm, pass * 2);
                        NEON_GET_REG(T1, rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
                        case 1: gen_swap_half(cpu_T[0]); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        NEON_SET_REG(T0, rd, pass * 2 + 1);
                        if (size == 2) {
                            NEON_SET_REG(T1, rd, pass * 2);
                        } else {
                            gen_op_movl_T0_T1();
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
                            case 1: gen_swap_half(cpu_T[0]); break;
                            default: abort();
                            }
                            NEON_SET_REG(T0, rd, pass * 2);
                        }
                    }
                    break;
                case 4: case 5: /* VPADDL */
                case 12: case 13: /* VPADAL */
                    if (size == 3)
                        return 1;
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= 12) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 33: /* VTRN */
                    if (size == 2) {
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            NEON_GET_REG(T0, rm, n);
                            NEON_GET_REG(T1, rd, n + 1);
                            NEON_SET_REG(T1, rm, n);
                            NEON_SET_REG(T0, rd, n + 1);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case 34: /* VUZP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
                     */
                    if (size == 3)
                        return 1;
                    gen_neon_unzip(rd, q, 0, size);
                    gen_neon_unzip(rm, q, 4, size);
                    if (q) {
                        static int unzip_order_q[8] =
                            {0, 2, 4, 6, 1, 3, 5, 7};
                        for (n = 0; n < 8; n++) {
                            int reg = (n < 4) ? rd : rm;
                            gen_neon_movl_T0_scratch(unzip_order_q[n]);
                            NEON_SET_REG(T0, reg, n % 4);
                        }
                    } else {
                        static int unzip_order[4] =
                            {0, 4, 1, 5};
                        for (n = 0; n < 4; n++) {
                            int reg = (n < 2) ? rd : rm;
                            gen_neon_movl_T0_scratch(unzip_order[n]);
                            NEON_SET_REG(T0, reg, n % 2);
                        }
                    }
                    break;
                case 35: /* VZIP */
                    /* Reg  Before       After
                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
                     */
                    if (size == 3)
                        return 1;
                    count = (q ? 4 : 2);
                    for (n = 0; n < count; n++) {
                        NEON_GET_REG(T0, rd, n);
                        NEON_GET_REG(T1, rm, n);
                        switch (size) {
                        case 0: gen_helper_neon_zip_u8(); break;
                        case 1: gen_helper_neon_zip_u16(); break;
                        case 2: /* no-op */; break;
                        default: abort();
                        }
                        gen_neon_movl_scratch_T0(n * 2);
                        gen_neon_movl_scratch_T1(n * 2 + 1);
                    }
                    for (n = 0; n < count * 2; n++) {
                        int reg = (n < count) ? rd : rm;
                        gen_neon_movl_T0_scratch(n);
                        NEON_SET_REG(T0, reg, n % count);
                    }
                    break;
                case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
                    if (size == 3)
                        return 1;
                    TCGV_UNUSED(tmp2);
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = new_tmp();
                        if (op == 36 && q == 0) {
                            gen_neon_narrow(size, tmp, cpu_V0);
                        } else if (q) {
                            gen_neon_narrow_satu(size, tmp, cpu_V0);
                        } else {
                            gen_neon_narrow_sats(size, tmp, cpu_V0);
                        }
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case 38: /* VSHLL */
                    if (q || size == 3)
                        return 1;
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rm, pass));
                        } else {
                            NEON_GET_REG(T0, rm, pass);
                        }
                        switch (op) {
                        case 1: /* VREV32 */
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
                            case 1: gen_swap_half(cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 2: /* VREV16 */
                            if (size != 0)
                                return 1;
                            gen_rev16(cpu_T[0]);
                            break;
                        case 8: /* CLS */
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
                            case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
                            case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 9: /* CLZ */
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
                            case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
                            case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 10: /* CNT */
                            if (size != 0)
                                return 1;
                            gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
                            break;
                        case 11: /* VNOT */
                            if (size != 0)
                                return 1;
                            gen_op_notl_T0();
                            break;
                        case 14: /* VQABS */
                            switch (size) {
                            case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
                            case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
                            case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 15: /* VQNEG */
                            switch (size) {
                            case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
                            case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
                            case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 16: case 19: /* VCGT #0, VCLE #0 */
                            gen_op_movl_T1_im(0);
                            switch (size) {
                            case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
                            case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
                            case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
                            default: return 1;
                            }
                            if (op == 19)
                                gen_op_notl_T0();
                            break;
                        case 17: case 20: /* VCGE #0, VCLT #0 */
                            gen_op_movl_T1_im(0);
                            switch (size) {
                            case 0: gen_helper_neon_cge_s8(CPU_T001); break;
                            case 1: gen_helper_neon_cge_s16(CPU_T001); break;
                            case 2: gen_helper_neon_cge_s32(CPU_T001); break;
                            default: return 1;
                            }
                            if (op == 20)
                                gen_op_notl_T0();
                            break;
                        case 18: /* VCEQ #0 */
                            gen_op_movl_T1_im(0);
                            switch (size) {
                            case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
                            case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
                            case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
                            default: return 1;
                            }
                            break;
                        case 22: /* VABS */
                            switch (size) {
                            case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
                            case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
                            case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
                            default: return 1;
                            }
                            break;
                        case 23: /* VNEG */
                            gen_op_movl_T1_im(0);
                            if (size == 3)
                                return 1;
                            gen_neon_rsb(size);
                            break;
                        case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                            gen_op_movl_T1_im(0);
                            gen_helper_neon_cgt_f32(CPU_T001);
                            if (op == 27)
                                gen_op_notl_T0();
                            break;
                        case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                            gen_op_movl_T1_im(0);
                            gen_helper_neon_cge_f32(CPU_T001);
                            if (op == 28)
                                gen_op_notl_T0();
                            break;
                        case 26: /* Float VCEQ #0 */
                            gen_op_movl_T1_im(0);
                            gen_helper_neon_ceq_f32(CPU_T001);
                            break;
                        case 30: /* Float VABS */
                            gen_vfp_abs(0);
                            break;
                        case 31: /* Float VNEG */
                            gen_vfp_neg(0);
                            break;
                        case 32: /* VSWP */
                            NEON_GET_REG(T1, rd, pass);
                            NEON_SET_REG(T1, rm, pass);
                            break;
                        case 33: /* VTRN */
                            NEON_GET_REG(T1, rd, pass);
                            switch (size) {
                            case 0: gen_helper_neon_trn_u8(); break;
                            case 1: gen_helper_neon_trn_u16(); break;
                            case 2: abort();
                            default: return 1;
                            }
                            NEON_SET_REG(T1, rm, pass);
                            break;
                        case 56: /* Integer VRECPE */
                            gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
                            break;
                        case 57: /* Integer VRSQRTE */
                            gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
                            break;
                        case 58: /* Float VRECPE */
                            gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 59: /* Float VRSQRTE */
                            gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                            break;
                        case 60: /* VCVT.F32.S32 */
                            gen_vfp_sito(0);
                            break;
                        case 61: /* VCVT.F32.U32 */
                            gen_vfp_uito(0);
                            break;
                        case 62: /* VCVT.S32.F32 */
                            gen_vfp_tosiz(0);
                            break;
                        case 63: /* VCVT.U32.F32 */
                            gen_vfp_touiz(0);
                            break;
                        default:
                            /* Reserved: 21, 29, 39-56 */
                            return 1;
                        }
                        if (op == 30 || op == 31 || op >= 58) {
                            tcg_gen_st_f32(cpu_F0s, cpu_env,
                                           neon_reg_offset(rd, pass));
                        } else {
                            NEON_SET_REG(T0, rd, pass);
                        }
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                n = ((insn >> 5) & 0x18) + 8;
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
                                    tcg_const_i32(n));
                dead_tmp(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
                                    tcg_const_i32(n));
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                dead_tmp(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                if (insn & (1 << 19)) {
                    NEON_SET_REG(T0, rm, 1);
                } else {
                    NEON_SET_REG(T0, rm, 0);
                }
                if (insn & (1 << 16)) {
                    gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
                } else if (insn & (1 << 17)) {
                    if ((insn >> 18) & 1)
                        gen_neon_dup_high16(cpu_T[0]);
                    else
                        gen_neon_dup_low16(cpu_T[0]);
                }
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    NEON_SET_REG(T0, rd, pass);
                }
            } else {
                return 1;
            }
        }
    }
    return 0;
}
static int disas_cp14_read(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}

static int disas_cp14_write(CPUState *env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            dead_tmp(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
        && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
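/* The helpers that follow implement the RdLo/RdHi register-pair convention
   used by instructions with 64-bit operands or results (UMULL, UMLAL,
   SMLAL, UMAAL and the dual multiplies).  The 64-bit value travels through
   a TCGv_i64 temporary and is split or assembled with truncate, shift and
   concat operations.  */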
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;
    tmp = new_tmp();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = new_tmp();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    dead_tmp(tmp2);
    tcg_gen_add_i64(val, val, tmp);
}

/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    dead_tmp(tmpl);
    dead_tmp(tmph);
    tcg_gen_add_i64(val, val, tmp);
}

/* Set N and Z flags from a 64-bit value.  */
static void gen_logicq_cc(TCGv_i64 val)
{
    TCGv tmp = new_tmp();
    gen_helper_logicq_cc(tmp, val);
    gen_logic_CC(tmp);
    dead_tmp(tmp);
}
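/* Decode loop for one ARM-mode instruction.  The cond == 0xf space
   (unconditional instructions: NEON, PLD, SETEND, barriers, SRS/RFE, BLX
   immediate, CPS) is handled first; every other instruction gets a
   conditional jump over its body when its condition is not "always".  */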
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
    unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* M variants do not implement ARM mode.  */
    if (IS_M(env))
        goto illegal_op;
    cond = insn >> 28;
    if (cond == 0xf) {
        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_feature(env, ARM_FEATURE_NEON))
                goto illegal_op;

            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            return;
        }
        if ((insn & 0x0d70f000) == 0x0550f000)
            return; /* PLD */
        else if ((insn & 0x0ffffdff) == 0x01010000) {
            ARCH(6);
            /* setend */
            if (insn & (1 << 9)) {
                /* BE8 mode not implemented.  */
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0fffff00) == 0x057ff000) {
            switch ((insn >> 4) & 0xf) {
            case 1: /* clrex */
                ARCH(6K);
                gen_helper_clrex(cpu_env);
                return;
            case 4: /* dsb */
            case 5: /* dmb */
            case 6: /* isb */
                ARCH(7);
                /* We don't emulate caches so these are a no-op.  */
                return;
            default:
                goto illegal_op;
            }
        } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
            /* srs */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            op1 = (insn & 0x1f);
            if (op1 == (env->uncached_cpsr & CPSR_M)) {
                addr = load_reg(s, 13);
            } else {
                addr = new_tmp();
                gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
            }
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
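            /* SRS always transfers exactly two words (LR, then SPSR), so the
               table above picks the address of the *first* word for each
               addressing mode.  For example, with DB (decrement before) the
               pair occupies [SP-8] and [SP-4], hence the initial offset of
               -8; with IA (increment after) the first word sits at [SP],
               hence offset 0.  */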
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            tmp = load_reg(s, 14);
            gen_st32(tmp, addr, 0);
            tmp = load_cpu_field(spsr);
            tcg_gen_addi_i32(addr, addr, 4);
            gen_st32(tmp, addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                if (op1 == (env->uncached_cpsr & CPSR_M)) {
                    store_reg(s, 13, addr);
                } else {
                    gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
                    dead_tmp(addr);
                }
            } else {
                dead_tmp(addr);
            }
        } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
            /* rfe */
            int32_t offset;
            if (IS_USER(s))
                goto illegal_op;
            ARCH(6);
            rn = (insn >> 16) & 0xf;
            addr = load_reg(s, rn);
            i = (insn >> 23) & 3;
            switch (i) {
            case 0: offset = -4; break; /* DA */
            case 1: offset = 0; break; /* IA */
            case 2: offset = -8; break; /* DB */
            case 3: offset = 4; break; /* IB */
            default: abort();
            }
            if (offset)
                tcg_gen_addi_i32(addr, addr, offset);
            /* Load PC into tmp and CPSR into tmp2.  */
            tmp = gen_ld32(addr, 0);
            tcg_gen_addi_i32(addr, addr, 4);
            tmp2 = gen_ld32(addr, 0);
            if (insn & (1 << 21)) {
                /* Base writeback.  */
                switch (i) {
                case 0: offset = -8; break;
                case 1: offset = 4; break;
                case 2: offset = -4; break;
                case 3: offset = 0; break;
                default: abort();
                }
                if (offset)
                    tcg_gen_addi_i32(addr, addr, offset);
                store_reg(s, rn, addr);
            } else {
                dead_tmp(addr);
            }
            gen_rfe(s, tmp, tmp2);
        } else if ((insn & 0x0e000000) == 0x0a000000) {
            /* branch link and change to thumb (blx <offset>) */
            int32_t offset;

            val = (uint32_t)s->pc;
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, val);
            store_reg(s, 14, tmp);
            /* Sign-extend the 24-bit offset */
            offset = (((int32_t)insn) << 8) >> 8;
            /* offset * 4 + bit24 * 2 + (thumb bit) */
            val += (offset << 2) | ((insn >> 23) & 2) | 1;
            /* pipeline offset */
            val += 4;
            gen_bx_im(s, val);
            return;
        } else if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (env->cp15.c15_cpar & (1 << 1))
                    if (!disas_iwmmxt_insn(env, s, insn))
                        return;
            }
        } else if ((insn & 0x0fe00000) == 0x0c400000) {
            /* Coprocessor double register transfer.  */
        } else if ((insn & 0x0f000010) == 0x0e000010) {
            /* Additional coprocessor register transfer.  */
        } else if ((insn & 0x0ff10020) == 0x01000000) {
            uint32_t mask;
            uint32_t val;
            /* cps (privileged) */
            if (IS_USER(s))
                return;
            mask = val = 0;
            if (insn & (1 << 19)) {
                if (insn & (1 << 8))
                    mask |= CPSR_A;
                if (insn & (1 << 7))
                    mask |= CPSR_I;
                if (insn & (1 << 6))
                    mask |= CPSR_F;
                if (insn & (1 << 18))
                    val |= mask;
            }
            if (insn & (1 << 17)) {
                mask |= CPSR_M;
                val |= (insn & 0x1f);
            }
            if (mask) {
                gen_set_psr_im(s, mask, 0, val);
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }
    if ((insn & 0x0f900000) == 0x03000000) {
        if ((insn & (1 << 21)) == 0) {
            ARCH(6T2);
            rd = (insn >> 12) & 0xf;
            val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            if ((insn & (1 << 22)) == 0) {
                /* MOVW */
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, val);
            } else {
                /* MOVT */
                tmp = load_reg(s, rd);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_ori_i32(tmp, tmp, val << 16);
            }
            store_reg(s, rd, tmp);
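            /* MOVW/MOVT (ARMv6T2) synthesize a 32-bit constant in two
               instructions, e.g.:
                   movw r0, #0x5678    @ r0 = 0x00005678
                   movt r0, #0x1234    @ r0 = 0x12345678
               MOVW writes the zero-extended 16-bit immediate; MOVT keeps
               the low halfword (ext16u above) and ORs the immediate into
               the top half.  */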
        } else {
            if (((insn >> 12) & 0xf) != 0xf)
                goto illegal_op;
            if (((insn >> 16) & 0xf) == 0) {
                gen_nop_hint(s, insn & 0xff);
            } else {
                /* CPSR = immediate */
                val = insn & 0xff;
                shift = ((insn >> 8) & 0xf) * 2;
                if (shift)
                    val = (val >> shift) | (val << (32 - shift));
                i = ((insn & (1 << 22)) != 0);
                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
                    goto illegal_op;
            }
        }
    } else if ((insn & 0x0f900000) == 0x01000000
               && (insn & 0x00000090) != 0x00000090) {
        /* miscellaneous instructions */
        op1 = (insn >> 21) & 3;
        sh = (insn >> 4) & 0xf;
        rm = insn & 0xf;
        switch (sh) {
        case 0x0: /* move program status register */
            if (op1 & 1) {
                /* PSR = reg */
                tmp = load_reg(s, rm);
                i = ((op1 & 2) != 0);
                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
                    goto illegal_op;
            } else {
                /* reg = PSR */
                rd = (insn >> 12) & 0xf;
                if (op1 & 2) {
                    /* SPSR */
                    if (IS_USER(s))
                        goto illegal_op;
                    tmp = load_cpu_field(spsr);
                } else {
                    tmp = new_tmp();
                    gen_helper_cpsr_read(tmp);
                }
                store_reg(s, rd, tmp);
            }
            break;
        case 0x1: /* branch/exchange thumb (bx).  */
            if (op1 == 1) {
                tmp = load_reg(s, rm);
                gen_bx(s, tmp);
            } else if (op1 == 3) {
                /* clz */
                rd = (insn >> 12) & 0xf;
                tmp = load_reg(s, rm);
                gen_helper_clz(tmp, tmp);
                store_reg(s, rd, tmp);
            } else {
                goto illegal_op;
            }
            break;
        case 0x2: /* bxj */
            if (op1 != 1)
                goto illegal_op;
            ARCH(5J);
            /* Trivial implementation equivalent to bx.  */
            tmp = load_reg(s, rm);
            gen_bx(s, tmp);
            break;
        case 0x3: /* branch link/exchange thumb (blx) */
            if (op1 != 1)
                goto illegal_op;
            tmp = load_reg(s, rm);
            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        case 0x5: /* saturating add/subtract */
            rd = (insn >> 12) & 0xf;
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rm);
            tmp2 = load_reg(s, rn);
            if (op1 & 2)
                gen_helper_double_saturate(tmp2, tmp2);
            if (op1 & 1)
                gen_helper_sub_saturate(tmp, tmp, tmp2);
            else
                gen_helper_add_saturate(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
            break;
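        /* QADD/QSUB/QDADD/QDSUB: op1 bit 1 selects the "double" forms,
           which saturate 2*Rn before the add/subtract; op1 bit 0 selects
           subtract.  All four set the sticky Q (saturation) flag via the
           helpers.  */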
        case 7: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 4);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;
        case 0x8: /* signed multiply */
        case 0xa:
        case 0xc:
        case 0xe:
            rs = (insn >> 8) & 0xf;
            rn = (insn >> 12) & 0xf;
            rd = (insn >> 16) & 0xf;
            if (op1 == 1) {
                /* (32 * 16) >> 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                if (sh & 4)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = new_tmp();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                if ((sh & 2) == 0) {
                    tmp2 = load_reg(s, rn);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                store_reg(s, rd, tmp);
            } else {
                /* 16 * 16 */
                tmp = load_reg(s, rm);
                tmp2 = load_reg(s, rs);
                gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
                dead_tmp(tmp2);
                if (op1 == 2) {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ext_i32_i64(tmp64, tmp);
                    dead_tmp(tmp);
                    gen_addq(s, tmp64, rn, rd);
                    gen_storeq_reg(s, rn, rd, tmp64);
                } else {
                    if (op1 == 0) {
                        tmp2 = load_reg(s, rn);
                        gen_helper_add_setq(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                    }
                    store_reg(s, rd, tmp);
                }
            }
            break;
        default:
            goto illegal_op;
        }
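        /* Halfword multiplies: op1 == 1 is SMULW/SMLAW (32 x 16, keeping
           the top 32 bits of the 48-bit product), op1 == 2 is SMLALxy
           (16 x 16 accumulated into a 64-bit register pair), and
           op1 == 0/3 are SMLAxy/SMULxy, where sh selects the top or bottom
           halfword of each operand.  For example SMLABT multiplies the
           bottom half of Rm by the top half of Rs and accumulates into Rn,
           setting Q on overflow.  */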
    } else if (((insn & 0x0e000000) == 0 &&
                (insn & 0x00000090) != 0x90) ||
               ((insn & 0x0e000000) == (1 << 25))) {
        int set_cc, logic_cc, shiftop;

        op1 = (insn >> 21) & 0xf;
        set_cc = (insn >> 20) & 1;
        logic_cc = table_logic_cc[op1] & set_cc;

        /* data processing instruction */
        if (insn & (1 << 25)) {
            /* immediate operand */
            val = insn & 0xff;
            shift = ((insn >> 8) & 0xf) * 2;
            if (shift) {
                val = (val >> shift) | (val << (32 - shift));
            }
            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, val);
            if (logic_cc && shift) {
                gen_set_CF_bit31(tmp2);
            }
        } else {
            /* register */
            rm = (insn) & 0xf;
            tmp2 = load_reg(s, rm);
            shiftop = (insn >> 5) & 3;
            if (!(insn & (1 << 4))) {
                shift = (insn >> 7) & 0x1f;
                gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            } else {
                rs = (insn >> 8) & 0xf;
                tmp = load_reg(s, rs);
                gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
            }
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            rn = (insn >> 16) & 0xf;
            tmp = load_reg(s, rn);
        } else {
            TCGV_UNUSED(tmp);
        }
        rd = (insn >> 12) & 0xf;
        switch(op1) {
        case 0x00: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x01: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x02: /* sub */
            if (set_cc && rd == 15) {
                /* SUBS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_helper_sub_cc(tmp, tmp, tmp2);
                gen_exception_return(s, tmp);
            } else {
                if (set_cc) {
                    gen_helper_sub_cc(tmp, tmp, tmp2);
                } else {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                }
                store_reg_bx(env, s, rd, tmp);
            }
            break;
        case 0x03: /* rsb */
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp2, tmp);
            } else {
                tcg_gen_sub_i32(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x04: /* add */
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_add_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x05: /* adc */
            if (set_cc) {
                gen_helper_adc_cc(tmp, tmp, tmp2);
            } else {
                gen_add_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x06: /* sbc */
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            } else {
                gen_sub_carry(tmp, tmp, tmp2);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x07: /* rsc */
            if (set_cc) {
                gen_helper_sbc_cc(tmp, tmp2, tmp);
            } else {
                gen_sub_carry(tmp, tmp2, tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x08: /* tst */
            if (set_cc) {
                tcg_gen_and_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            dead_tmp(tmp);
            break;
        case 0x09: /* teq */
            if (set_cc) {
                tcg_gen_xor_i32(tmp, tmp, tmp2);
                gen_logic_CC(tmp);
            }
            dead_tmp(tmp);
            break;
        case 0x0a: /* cmp */
            if (set_cc) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            }
            dead_tmp(tmp);
            break;
        case 0x0b: /* cmn */
            if (set_cc) {
                gen_helper_add_cc(tmp, tmp, tmp2);
            }
            dead_tmp(tmp);
            break;
        case 0x0c: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        case 0x0d: /* mov */
            if (logic_cc && rd == 15) {
                /* MOVS r15, ... is used for exception return.  */
                if (IS_USER(s)) {
                    goto illegal_op;
                }
                gen_exception_return(s, tmp2);
            } else {
                if (logic_cc) {
                    gen_logic_CC(tmp2);
                }
                store_reg_bx(env, s, rd, tmp2);
            }
            break;
        case 0x0e: /* bic */
            tcg_gen_bic_i32(tmp, tmp, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp);
            }
            store_reg_bx(env, s, rd, tmp);
            break;
        default:
        case 0x0f: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(env, s, rd, tmp2);
            break;
        }
        if (op1 != 0x0f && op1 != 0x0d) {
            dead_tmp(tmp2);
        }
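        /* Note the two special op1 values threaded through this block:
           0x0d (MOV) and 0x0f (MVN) never read Rn, and 0x08-0x0b (TST,
           TEQ, CMP, CMN) only update the flags, so their results are
           discarded rather than written back to rd.  */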
    } else {
        /* other instructions */
        op1 = (insn >> 24) & 0xf;
        switch(op1) {
        case 0x0:
        case 0x1:
            /* multiplies, extra load/stores */
            sh = (insn >> 5) & 3;
            if (sh == 0) {
                if (op1 == 0x0) {
                    rd = (insn >> 16) & 0xf;
                    rn = (insn >> 12) & 0xf;
                    rs = (insn >> 8) & 0xf;
                    rm = (insn) & 0xf;
                    op1 = (insn >> 20) & 0xf;
                    switch (op1) {
                    case 0: case 1: case 2: case 3: case 6:
                        /* 32 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        tcg_gen_mul_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        if (insn & (1 << 22)) {
                            /* Subtract (mls) */
                            ARCH(6T2);
                            tmp2 = load_reg(s, rn);
                            tcg_gen_sub_i32(tmp, tmp2, tmp);
                            dead_tmp(tmp2);
                        } else if (insn & (1 << 21)) {
                            /* Add */
                            tmp2 = load_reg(s, rn);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        if (insn & (1 << 20))
                            gen_logic_CC(tmp);
                        store_reg(s, rd, tmp);
                        break;
                    default:
                        /* 64 bit mul */
                        tmp = load_reg(s, rs);
                        tmp2 = load_reg(s, rm);
                        if (insn & (1 << 22))
                            tmp64 = gen_muls_i64_i32(tmp, tmp2);
                        else
                            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                        if (insn & (1 << 21)) /* mult accumulate */
                            gen_addq(s, tmp64, rn, rd);
                        if (!(insn & (1 << 23))) { /* double accumulate */
                            ARCH(6);
                            gen_addq_lo(s, tmp64, rn);
                            gen_addq_lo(s, tmp64, rd);
                        }
                        if (insn & (1 << 20))
                            gen_logicq_cc(tmp64);
                        gen_storeq_reg(s, rn, rd, tmp64);
                        break;
                    }
                } else {
                    rn = (insn >> 16) & 0xf;
                    rd = (insn >> 12) & 0xf;
                    if (insn & (1 << 23)) {
                        /* load/store exclusive */
                        op1 = (insn >> 21) & 0x3;
                        if (op1)
                            ARCH(6K);
                        else
                            ARCH(6);
                        gen_movl_T1_reg(s, rn);
                        addr = cpu_T[1];
                        if (insn & (1 << 20)) {
                            gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
                            switch (op1) {
                            case 0: /* ldrex */
                                tmp = gen_ld32(addr, IS_USER(s));
                                break;
                            case 1: /* ldrexd */
                                tmp = gen_ld32(addr, IS_USER(s));
                                store_reg(s, rd, tmp);
                                tcg_gen_addi_i32(addr, addr, 4);
                                tmp = gen_ld32(addr, IS_USER(s));
                                rd++;
                                break;
                            case 2: /* ldrexb */
                                tmp = gen_ld8u(addr, IS_USER(s));
                                break;
                            case 3: /* ldrexh */
                                tmp = gen_ld16u(addr, IS_USER(s));
                                break;
                            default:
                                abort();
                            }
                            store_reg(s, rd, tmp);
                        } else {
                            int label = gen_new_label();
                            rm = insn & 0xf;
                            gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
                            tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
                                                0, label);
                            tmp = load_reg(s, rm);
                            switch (op1) {
                            case 0: /* strex */
                                gen_st32(tmp, addr, IS_USER(s));
                                break;
                            case 1: /* strexd */
                                gen_st32(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, 4);
                                tmp = load_reg(s, rm + 1);
                                gen_st32(tmp, addr, IS_USER(s));
                                break;
                            case 2: /* strexb */
                                gen_st8(tmp, addr, IS_USER(s));
                                break;
                            case 3: /* strexh */
                                gen_st16(tmp, addr, IS_USER(s));
                                break;
                            default:
                                abort();
                            }
                            gen_set_label(label);
                            gen_movl_reg_T0(s, rd);
                        }
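                        /* The exclusive monitor is modelled with two
                           helpers: mark_exclusive records the address on
                           LDREX, and test_exclusive checks it on STREX,
                           leaving the 0/1 status in T0 for the conditional
                           store above.  This is sufficient for a single
                           emulated CPU but is not a faithful multi-core
                           monitor.  */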
                    } else {
                        /* SWP instruction */
                        rm = (insn) & 0xf;

                        /* ??? This is not really atomic.  However we know
                           we never have multiple CPUs running in parallel,
                           so it is good enough.  */
                        addr = load_reg(s, rn);
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            tmp2 = gen_ld8u(addr, IS_USER(s));
                            gen_st8(tmp, addr, IS_USER(s));
                        } else {
                            tmp2 = gen_ld32(addr, IS_USER(s));
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        dead_tmp(addr);
                        store_reg(s, rd, tmp2);
                    }
                }
            } else {
                int address_offset;
                int load;
                /* Misc load/store */
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                addr = load_reg(s, rn);
                if (insn & (1 << 24))
                    gen_add_datah_offset(s, insn, 0, addr);
                address_offset = 0;
                if (insn & (1 << 20)) {
                    /* load */
                    switch(sh) {
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld8s(addr, IS_USER(s));
                        break;
                    default:
                    case 3:
                        tmp = gen_ld16s(addr, IS_USER(s));
                        break;
                    }
                    load = 1;
                } else if (sh & 2) {
                    /* doubleword */
                    if (sh & 1) {
                        /* store */
                        tmp = load_reg(s, rd);
                        gen_st32(tmp, addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd + 1);
                        gen_st32(tmp, addr, IS_USER(s));
                        load = 0;
                    } else {
                        /* load */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, rd, tmp);
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = gen_ld32(addr, IS_USER(s));
                        rd++;
                        load = 1;
                    }
                    address_offset = -4;
                } else {
                    /* store */
                    tmp = load_reg(s, rd);
                    gen_st16(tmp, addr, IS_USER(s));
                    load = 0;
                }
                /* Perform base writeback before the loaded value to
                   ensure correct behavior with overlapping index registers.
                   ldrd with base writeback is undefined if the
                   destination and index registers overlap.  */
                if (!(insn & (1 << 24))) {
                    gen_add_datah_offset(s, insn, address_offset, addr);
                    store_reg(s, rn, addr);
                } else if (insn & (1 << 21)) {
                    if (address_offset)
                        tcg_gen_addi_i32(addr, addr, address_offset);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
                if (load) {
                    /* Complete the load.  */
                    store_reg(s, rd, tmp);
                }
            }
            break;
        case 0x4:
        case 0x5:
            goto do_ldst;
        case 0x6:
        case 0x7:
            if (insn & (1 << 4)) {
                ARCH(6);
                /* Armv6 Media instructions.  */
                rm = insn & 0xf;
                rn = (insn >> 16) & 0xf;
                rd = (insn >> 12) & 0xf;
                rs = (insn >> 8) & 0xf;
                switch ((insn >> 23) & 3) {
                case 0: /* Parallel add/subtract.  */
                    op1 = (insn >> 20) & 7;
                    tmp = load_reg(s, rn);
                    tmp2 = load_reg(s, rm);
                    sh = (insn >> 5) & 7;
                    if ((op1 & 3) == 0 || sh == 5 || sh == 6)
                        goto illegal_op;
                    gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
                    dead_tmp(tmp2);
                    store_reg(s, rd, tmp);
                    break;
                case 1:
                    if ((insn & 0x00700020) == 0) {
                        /* Halfword pack.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            /* pkhtb */
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp2, tmp2, shift);
                            tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                            tcg_gen_ext16u_i32(tmp2, tmp2);
                        } else {
                            /* pkhbt */
                            if (shift)
                                tcg_gen_shli_i32(tmp2, tmp2, shift);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        }
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00200020) == 0x00200000) {
                        /* [us]sat */
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        if (insn & (1 << 6)) {
                            if (shift == 0)
                                shift = 31;
                            tcg_gen_sari_i32(tmp, tmp, shift);
                        } else {
                            tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        sh = (insn >> 16) & 0x1f;
                        if (sh != 0) {
                            if (insn & (1 << 22))
                                gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
                            else
                                gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00300fe0) == 0x00200f20) {
                        /* [us]sat16 */
                        tmp = load_reg(s, rm);
                        sh = (insn >> 16) & 0x1f;
                        if (sh != 0) {
                            if (insn & (1 << 22))
                                gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
                            else
                                gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x00700fe0) == 0x00000fa0) {
                        /* Select bytes.  */
                        tmp = load_reg(s, rn);
                        tmp2 = load_reg(s, rm);
                        tmp3 = new_tmp();
                        tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                        gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                        dead_tmp(tmp3);
                        dead_tmp(tmp2);
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x000003e0) == 0x00000060) {
                        tmp = load_reg(s, rm);
                        shift = (insn >> 10) & 3;
                        /* ??? In many cases it's not necessary to do a
                           rotate, a shift is sufficient.  */
                        if (shift != 0)
                            tcg_gen_rori_i32(tmp, tmp, shift * 8);
                        op1 = (insn >> 20) & 7;
                        switch (op1) {
                        case 0: gen_sxtb16(tmp);  break;
                        case 2: gen_sxtb(tmp);    break;
                        case 3: gen_sxth(tmp);    break;
                        case 4: gen_uxtb16(tmp);  break;
                        case 6: gen_uxtb(tmp);    break;
                        case 7: gen_uxth(tmp);    break;
                        default: goto illegal_op;
                        }
                        if (rn != 15) {
                            tmp2 = load_reg(s, rn);
                            if ((op1 & 3) == 0) {
                                gen_add16(tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                                dead_tmp(tmp2);
                            }
                        }
                        store_reg(s, rd, tmp);
                    } else if ((insn & 0x003f0f60) == 0x003f0f20) {
                        /* rev */
                        tmp = load_reg(s, rm);
                        if (insn & (1 << 22)) {
                            if (insn & (1 << 7)) {
                                gen_revsh(tmp);
                            } else {
                                ARCH(6T2);
                                gen_helper_rbit(tmp, tmp);
                            }
                        } else {
                            if (insn & (1 << 7))
                                gen_rev16(tmp);
                            else
                                tcg_gen_bswap32_i32(tmp, tmp);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        goto illegal_op;
                    }
                    break;
                case 2: /* Multiplies (Type 3).  */
                    tmp = load_reg(s, rm);
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        /* Signed multiply most significant [accumulate].  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                        if (insn & (1 << 5))
                            tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                        tcg_gen_shri_i64(tmp64, tmp64, 32);
                        tmp = new_tmp();
                        tcg_gen_trunc_i64_i32(tmp, tmp64);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            if (insn & (1 << 6)) {
                                tcg_gen_sub_i32(tmp, tmp, tmp2);
                            } else {
                                tcg_gen_add_i32(tmp, tmp, tmp2);
                            }
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rn, tmp);
                    } else {
                        if (insn & (1 << 5))
                            gen_swap_half(tmp2);
                        gen_smul_dual(tmp, tmp2);
                        /* This addition cannot overflow.  */
                        if (insn & (1 << 6)) {
                            tcg_gen_sub_i32(tmp, tmp, tmp2);
                        } else {
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                        }
                        dead_tmp(tmp2);
                        if (insn & (1 << 22)) {
                            /* smlald, smlsld */
                            tmp64 = tcg_temp_new_i64();
                            tcg_gen_ext_i32_i64(tmp64, tmp);
                            dead_tmp(tmp);
                            gen_addq(s, tmp64, rd, rn);
                            gen_storeq_reg(s, rd, rn, tmp64);
                        } else {
                            /* smuad, smusd, smlad, smlsd */
                            if (rd != 15) {
                                tmp2 = load_reg(s, rd);
                                gen_helper_add_setq(tmp, tmp, tmp2);
                                dead_tmp(tmp2);
                            }
                            store_reg(s, rn, tmp);
                        }
                    }
                    break;
                case 3:
                    op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
                    switch (op1) {
                    case 0: /* Unsigned sum of absolute differences.  */
                        ARCH(6);
                        tmp = load_reg(s, rm);
                        tmp2 = load_reg(s, rs);
                        gen_helper_usad8(tmp, tmp, tmp2);
                        dead_tmp(tmp2);
                        if (rd != 15) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_add_i32(tmp, tmp, tmp2);
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rn, tmp);
                        break;
                    case 0x20: case 0x24: case 0x28: case 0x2c:
                        /* Bitfield insert/clear.  */
                        ARCH(6T2);
                        shift = (insn >> 7) & 0x1f;
                        i = (insn >> 16) & 0x1f;
                        i = i + 1 - shift;
                        if (rm == 15) {
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, 0);
                        } else {
                            tmp = load_reg(s, rm);
                        }
                        if (i != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
                            dead_tmp(tmp2);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
                    case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
                        ARCH(6T2);
                        tmp = load_reg(s, rm);
                        shift = (insn >> 7) & 0x1f;
                        i = ((insn >> 16) & 0x1f) + 1;
                        if (shift + i > 32)
                            goto illegal_op;
                        if (i < 32) {
                            if (op1 & 0x20) {
                                gen_ubfx(tmp, shift, (1u << i) - 1);
                            } else {
                                gen_sbfx(tmp, shift, i);
                            }
                        }
                        store_reg(s, rd, tmp);
                        break;
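                    /* Bitfield encodings: for BFI/BFC, bits [11:7] give the
                       lsb and bits [20:16] the msb, so the width computed
                       above is msb + 1 - lsb; a width of 32 with lsb 0
                       copies the whole register.  For SBFX/UBFX bits
                       [20:16] hold widthminus1 instead, hence the "+ 1"
                       when extracting i.  */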
                    default:
                        goto illegal_op;
                    }
                    break;
                }
                break;
            }
        do_ldst:
            /* Check for undefined extension instructions
             * per the ARM Bible IE:
             * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
             */
            sh = (0xf << 20) | (0xf << 4);
            if (op1 == 0x7 && ((insn & sh) == sh))
            {
                goto illegal_op;
            }
            /* load/store byte/word */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            tmp2 = load_reg(s, rn);
            i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
            if (insn & (1 << 24))
                gen_add_data_offset(s, insn, tmp2);
            if (insn & (1 << 20)) {
                /* load */
                if (insn & (1 << 22)) {
                    tmp = gen_ld8u(tmp2, i);
                } else {
                    tmp = gen_ld32(tmp2, i);
                }
            } else {
                /* store */
                tmp = load_reg(s, rd);
                if (insn & (1 << 22))
                    gen_st8(tmp, tmp2, i);
                else
                    gen_st32(tmp, tmp2, i);
            }
            if (!(insn & (1 << 24))) {
                gen_add_data_offset(s, insn, tmp2);
                store_reg(s, rn, tmp2);
            } else if (insn & (1 << 21)) {
                store_reg(s, rn, tmp2);
            } else {
                dead_tmp(tmp2);
            }
            if (insn & (1 << 20)) {
                /* Complete the load.  */
                if (rd == 15)
                    gen_bx(s, tmp);
                else
                    store_reg(s, rd, tmp);
            }
            break;
        case 0x08:
        case 0x09:
            {
                int j, n, user, loaded_base;
                TCGv loaded_var;
                /* load/store multiple words */
                /* XXX: store correct base if write back */
                user = 0;
                if (insn & (1 << 22)) {
                    if (IS_USER(s))
                        goto illegal_op; /* only usable in supervisor mode */

                    if ((insn & (1 << 15)) == 0)
                        user = 1;
                }
                rn = (insn >> 16) & 0xf;
                addr = load_reg(s, rn);

                /* compute total size */
                loaded_base = 0;
                TCGV_UNUSED(loaded_var);
                n = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        n++;
                }
                /* XXX: test invalid n == 0 case ? */
                if (insn & (1 << 23)) {
                    if (insn & (1 << 24)) {
                        /* pre increment */
                        tcg_gen_addi_i32(addr, addr, 4);
                    } else {
                        /* post increment */
                    }
                } else {
                    if (insn & (1 << 24)) {
                        /* pre decrement */
                        tcg_gen_addi_i32(addr, addr, -(n * 4));
                    } else {
                        /* post decrement */
                        if (n != 1)
                            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                    }
                }
                j = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i)) {
                        if (insn & (1 << 20)) {
                            /* load */
                            tmp = gen_ld32(addr, IS_USER(s));
                            if (i == 15) {
                                gen_bx(s, tmp);
                            } else if (user) {
                                gen_helper_set_user_reg(tcg_const_i32(i), tmp);
                                dead_tmp(tmp);
                            } else if (i == rn) {
                                loaded_var = tmp;
                                loaded_base = 1;
                            } else {
                                store_reg(s, i, tmp);
                            }
                        } else {
                            /* store */
                            if (i == 15) {
                                /* special case: r15 = PC + 8 */
                                val = (long)s->pc + 4;
                                tmp = new_tmp();
                                tcg_gen_movi_i32(tmp, val);
                            } else if (user) {
                                tmp = new_tmp();
                                gen_helper_get_user_reg(tmp, tcg_const_i32(i));
                            } else {
                                tmp = load_reg(s, i);
                            }
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        j++;
                        /* no need to add after the last transfer */
                        if (j != n)
                            tcg_gen_addi_i32(addr, addr, 4);
                    }
                }
                if (insn & (1 << 21)) {
                    /* write back */
                    if (insn & (1 << 23)) {
                        if (insn & (1 << 24)) {
                            /* pre increment */
                        } else {
                            /* post increment */
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                    } else {
                        if (insn & (1 << 24)) {
                            /* pre decrement */
                            if (n != 1)
                                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                        } else {
                            /* post decrement */
                            tcg_gen_addi_i32(addr, addr, -(n * 4));
                        }
                    }
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if ((insn & (1 << 22)) && !user) {
                    /* Restore CPSR from SPSR.  */
                    tmp = load_cpu_field(spsr);
                    gen_set_cpsr(tmp, 0xffffffff);
                    dead_tmp(tmp);
                    s->is_jmp = DISAS_UPDATE;
                }
            }
            break;
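        /* LDM/STM with the S bit (bit 22) set accesses the User-mode
           register bank from a privileged mode, unless the register list
           of a load includes the PC, in which case it is the
           exception-return form that also restores CPSR from SPSR
           (handled at the end of the block above).  */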
        case 0xa:
        case 0xb:
            {
                int32_t offset;

                /* branch (and link) */
                val = (int32_t)s->pc;
                if (insn & (1 << 24)) {
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                    store_reg(s, 14, tmp);
                }
                offset = (((int32_t)insn << 8) >> 8);
                val += (offset << 2) + 4;
                gen_jmp(s, val);
            }
            break;
        case 0xc:
        case 0xd:
        case 0xe:
            /* Coprocessor.  */
            if (disas_coproc_insn(env, s, insn))
                goto illegal_op;
            break;
        case 0xf:
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        default:
        illegal_op:
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 4);
            gen_exception(EXCP_UDEF);
            s->is_jmp = DISAS_JUMP;
            break;
        }
    }
}
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        gen_op_andl_T0_T1();
        logic_cc = conds;
        break;
    case 1: /* bic */
        gen_op_bicl_T0_T1();
        logic_cc = conds;
        break;
    case 2: /* orr */
        gen_op_orl_T0_T1();
        logic_cc = conds;
        break;
    case 3: /* orn */
        gen_op_notl_T1();
        gen_op_orl_T0_T1();
        logic_cc = conds;
        break;
    case 4: /* eor */
        gen_op_xorl_T0_T1();
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_op_addl_T0_T1_cc();
        else
            gen_op_addl_T0_T1();
        break;
    case 10: /* adc */
        if (conds)
            gen_op_adcl_T0_T1_cc();
        else
            gen_adc_T0_T1();
        break;
    case 11: /* sbc */
        if (conds)
            gen_op_sbcl_T0_T1_cc();
        else
            gen_sbc_T0_T1();
        break;
    case 13: /* sub */
        if (conds)
            gen_op_subl_T0_T1_cc();
        else
            gen_op_subl_T0_T1();
        break;
    case 14: /* rsb */
        if (conds)
            gen_op_rsbl_T0_T1_cc();
        else
            gen_op_rsbl_T0_T1();
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_op_logic_T0_cc();
        if (shifter_out)
            gen_set_CF_bit31(cpu_T[1]);
    }
    return 0;
}
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = new_tmp();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes a
               prefetch abort.  */
            offset = ((int32_t)insn << 21) >> 9;
            gen_op_movl_T0_im(s->pc + 2 + offset);
            gen_movl_reg_T0(s, 14);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }

    insn = lduw_code(s->pc);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = new_tmp();
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, addr, IS_USER(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                gen_movl_T1_reg(s, rn);
                addr = cpu_T[1];
                if (insn & (1 << 20)) {
                    gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    int label = gen_new_label();
                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
                                        0, label);
                    tmp = load_reg(s, rs);
                    gen_st32(tmp, cpu_T[1], IS_USER(s));
                    gen_set_label(label);
                    gen_movl_reg_T0(s, rd);
                }
            } else if ((insn & (1 << 6)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = new_tmp();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    dead_tmp(tmp);
                    tmp = gen_ld16u(addr, IS_USER(s));
                } else {
                    /* tbb */
                    dead_tmp(tmp);
                    tmp = gen_ld8u(addr, IS_USER(s));
                }
                dead_tmp(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
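                /* Table Branch example: "tbb [rn, rm]" loads the byte at
                   rn + rm, while "tbh [rn, rm, lsl #1]" loads the halfword
                   at rn + rm*2 (hence rm is added twice above).  The
                   loaded value is a forward branch distance in halfwords,
                   so it is doubled and added to the PC.  */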
            } else {
                /* Load/store exclusive byte/halfword/doubleword.  */
                /* ??? These are not really atomic.  However we know
                   we never have multiple CPUs running in parallel,
                   so it is good enough.  */
                op = (insn >> 4) & 0x3;
                /* Must use a global reg for the address because we have
                   a conditional branch in the store instruction.  */
                gen_movl_T1_reg(s, rn);
                addr = cpu_T[1];
                if (insn & (1 << 20)) {
                    gen_helper_mark_exclusive(cpu_env, addr);
                    switch (op) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 3:
                        tmp = gen_ld32(addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp2 = gen_ld32(addr, IS_USER(s));
                        store_reg(s, rd, tmp2);
                        break;
                    default:
                        goto illegal_op;
                    }
                    store_reg(s, rs, tmp);
                } else {
                    int label = gen_new_label();
                    /* Must use a global that is not killed by the branch.  */
                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
                    tmp = load_reg(s, rs);
                    switch (op) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 3:
                        gen_st32(tmp, addr, IS_USER(s));
                        tcg_gen_addi_i32(addr, addr, 4);
                        tmp = load_reg(s, rd);
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    default:
                        goto illegal_op;
                    }
                    gen_set_label(label);
                    gen_movl_reg_T0(s, rm);
                }
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* Not available in user mode.  */
                if (IS_USER(s))
                    goto illegal_op;
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = gen_ld32(addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = gen_ld32(addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        dead_tmp(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    op = (insn & 0x1f);
                    if (op == (env->uncached_cpsr & CPSR_M)) {
                        addr = load_reg(s, 13);
                    } else {
                        addr = new_tmp();
                        gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
                    }
                    if ((insn & (1 << 24)) == 0) {
                        tcg_gen_addi_i32(addr, addr, -8);
                    }
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = new_tmp();
                    gen_helper_cpsr_read(tmp);
                    gen_st32(tmp, addr, 0);
                    if (insn & (1 << 21)) {
                        if ((insn & (1 << 24)) == 0) {
                            tcg_gen_addi_i32(addr, addr, -4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, 4);
                        }
                        if (op == (env->uncached_cpsr & CPSR_M)) {
                            store_reg(s, 13, addr);
                        } else {
                            gen_helper_set_r13_banked(cpu_env,
                                tcg_const_i32(op), addr);
                        }
                    } else {
                        dead_tmp(addr);
                    }
                }
            } else {
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = gen_ld32(addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    dead_tmp(addr);
                }
            }
        }
        break;
    case 5: /* Data processing register constant shift.  */
        if (rn == 15)
            gen_op_movl_T0_im(0);
        else
            gen_movl_T0_reg(s, rn);
        gen_movl_T1_reg(s, rm);
        op = (insn >> 21) & 0xf;
        shiftop = (insn >> 4) & 3;
        shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
        conds = (insn & (1 << 20)) != 0;
        logic_cc = (conds && thumb2_logic_op(op));
        gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
        if (gen_thumb2_data_op(s, op, conds, 0))
            goto illegal_op;
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rori_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            dead_tmp(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 2)
                    gen_helper_double_saturate(tmp, tmp);
                if (op & 1)
                    gen_helper_sub_saturate(tmp, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, tmp, tmp2);
                dead_tmp(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = new_tmp();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    dead_tmp(tmp3);
                    dead_tmp(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                /* This addition cannot overflow.  */
                if (insn & (1 << 22)) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = new_tmp();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb */
                gen_imull(tmp, tmp2);
                if (insn & (1 << 5)) {
                    gen_roundqd(tmp, tmp2);
                    dead_tmp(tmp2);
                } else {
                    dead_tmp(tmp);
                    tmp = tmp2;
                }
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (insn & (1 << 21)) {
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    } else {
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    }
                    dead_tmp(tmp2);
                }
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    dead_tmp(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_DIV))
                    goto illegal_op;
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                dead_tmp(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                dead_tmp(tmp2);
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                dead_tmp(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        dead_tmp(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        dead_tmp(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    gen_op_movl_T1_im(s->pc | 1);
                    gen_movl_reg_T1(s, 14);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                                op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_helper_clrex(cpu_env);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            ARCH(7);
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        /* Unpredictable in user mode.  */
                        goto illegal_op;
                    case 6: /* mrs cpsr.  */
                        tmp = new_tmp();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                        } else {
                            gen_helper_cpsr_read(tmp);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
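            /* The Thumb-2 conditional branch immediate is scattered across
               both halfwords: for instance a branch forward by 0x10 bytes
               encodes 0x008 in insn[10:0] (offset[11:1]).  The reassembly
               above collects insn[10:0], insn[21:16], insn[13], insn[11]
               and the sign bit insn[26] into a signed byte offset.  */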
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = new_tmp();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
                            dead_tmp(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, tmp, tmp2);
                        }
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = new_tmp();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
                gen_op_movl_T1_im(imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15)
                    gen_op_movl_T0_im(0);
                else
                    gen_movl_T0_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out))
                    goto illegal_op;
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    gen_movl_reg_T0(s, rd);
                }
            }
        }
        break;
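        /* Thumb-2 "modified immediate" example: an 8-bit pattern XY can
           encode 0x000000XY, 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY
           directly, while any other constant is expressed as
           0x80|imm[6:0] rotated right by the 5-bit count rebuilt above;
           shifter_out makes the rotated form update the carry flag for
           the flag-setting logical ops.  */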
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = new_tmp();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                op = (insn >> 8) & 7;
                imm = insn & 0xff;
                switch (op) {
                case 0: case 8: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3)
                        goto illegal_op;
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    dead_tmp(tmp);
                    break;
                case 4: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 6: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 1: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 3: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 5: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 7: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    goto illegal_op;
                }
            }
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (insn & (1 << 20)) {
            /* Load.  */
            if (rs == 15 && op != 2) {
                if (op & 2)
                    goto illegal_op;
                /* Memory hint.  Implemented as NOP.  */
            } else {
                switch (op) {
                case 0: tmp = gen_ld8u(addr, user); break;
                case 4: tmp = gen_ld8s(addr, user); break;
                case 1: tmp = gen_ld16u(addr, user); break;
                case 5: tmp = gen_ld16s(addr, user); break;
                case 2: tmp = gen_ld32(addr, user); break;
                default: goto illegal_op;
                }
                if (rs == 15) {
                    gen_bx(s, tmp);
                } else {
                    store_reg(s, rs, tmp);
                }
            }
        } else {
            /* Store.  */
            if (rs == 15)
                goto illegal_op;
            tmp = load_reg(s, rs);
            switch (op) {
            case 0: gen_st8(tmp, addr, user); break;
            case 1: gen_st16(tmp, addr, user); break;
            case 2: gen_st32(tmp, addr, user); break;
            default: goto illegal_op;
            }
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            gen_movl_T0_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                gen_op_movl_T1_im((insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                gen_movl_T1_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    gen_op_subl_T0_T1();
                else
                    gen_op_subl_T0_T1_cc();
            } else {
                if (s->condexec_mask)
                    gen_op_addl_T0_T1();
                else
                    gen_op_addl_T0_T1_cc();
            }
            gen_movl_reg_T0(s, rd);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) {
            /* mov */
            gen_op_movl_T0_im(insn & 0xff);
        } else {
            gen_movl_T0_reg(s, rd);
            gen_op_movl_T1_im(insn & 0xff);
        }
        switch (op) {
        case 0: /* mov */
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 1: /* cmp */
            gen_op_subl_T0_T1_cc();
            break;
        case 2: /* add */
            if (s->condexec_mask)
                gen_op_addl_T0_T1();
            else
                gen_op_addl_T0_T1_cc();
            break;
        case 3: /* sub */
            if (s->condexec_mask)
                gen_op_subl_T0_T1();
            else
                gen_op_subl_T0_T1_cc();
            break;
        }
        if (op != 1)
            gen_movl_reg_T0(s, rd);
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = new_tmp();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            dead_tmp(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
                break;
            case 1: /* cmp */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_subl_T0_T1_cc();
                break;
            case 2: /* mov/cpy */
                gen_movl_T0_reg(s, rm);
                gen_movl_reg_T0(s, rd);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = new_tmp();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }
        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) /* neg */
            gen_op_movl_T0_im(0);
        else if (op != 0xf) /* mvn doesn't read its first operand */
            gen_movl_T0_reg(s, rd);

        gen_movl_T1_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            gen_op_andl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x1: /* eor */
            gen_op_xorl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc_T0_T1();
            else
                gen_op_adcl_T0_T1_cc();
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sbc_T0_T1();
            else
                gen_op_sbcl_T0_T1_cc();
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x8: /* tst */
            gen_op_andl_T0_T1();
            gen_op_logic_T0_cc();
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
            else
                gen_op_subl_T0_T1_cc();
            break;
        case 0xa: /* cmp */
            gen_op_subl_T0_T1_cc();
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_op_addl_T0_T1_cc();
            rd = 16;
            break;
        case 0xc: /* orr */
            gen_op_orl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xd: /* mul */
            gen_op_mull_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xe: /* bic */
            gen_op_bicl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xf: /* mvn */
            gen_op_notl_T1();
            if (!s->condexec_mask)
                gen_op_logic_T1_cc();
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val)
                gen_movl_reg_T1(s, rm);
            else
                gen_movl_reg_T0(s, rd);
        }
        break;
    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;
    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;
8353 rn
= (insn
>> 3) & 7;
8354 addr
= load_reg(s
, rn
);
8355 val
= (insn
>> 6) & 0x1f;
8356 tcg_gen_addi_i32(addr
, addr
, val
);
8358 if (insn
& (1 << 11)) {
8360 tmp
= gen_ld8u(addr
, IS_USER(s
));
8361 store_reg(s
, rd
, tmp
);
8364 tmp
= load_reg(s
, rd
);
8365 gen_st8(tmp
, addr
, IS_USER(s
));
8371 /* load/store halfword immediate offset */
8373 rn
= (insn
>> 3) & 7;
8374 addr
= load_reg(s
, rn
);
8375 val
= (insn
>> 5) & 0x3e;
8376 tcg_gen_addi_i32(addr
, addr
, val
);
8378 if (insn
& (1 << 11)) {
8380 tmp
= gen_ld16u(addr
, IS_USER(s
));
8381 store_reg(s
, rd
, tmp
);
8384 tmp
= load_reg(s
, rd
);
8385 gen_st16(tmp
, addr
, IS_USER(s
));
8391 /* load/store from stack */
8392 rd
= (insn
>> 8) & 7;
8393 addr
= load_reg(s
, 13);
8394 val
= (insn
& 0xff) * 4;
8395 tcg_gen_addi_i32(addr
, addr
, val
);
8397 if (insn
& (1 << 11)) {
8399 tmp
= gen_ld32(addr
, IS_USER(s
));
8400 store_reg(s
, rd
, tmp
);
8403 tmp
= load_reg(s
, rd
);
8404 gen_st32(tmp
, addr
, IS_USER(s
));
8410 /* add to high reg */
8411 rd
= (insn
>> 8) & 7;
8412 if (insn
& (1 << 11)) {
8414 tmp
= load_reg(s
, 13);
8416 /* PC. bit 1 is ignored. */
8418 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
8420 val
= (insn
& 0xff) * 4;
8421 tcg_gen_addi_i32(tmp
, tmp
, val
);
8422 store_reg(s
, rd
, tmp
);
8427 op
= (insn
>> 8) & 0xf;
8430 /* adjust stack pointer */
8431 tmp
= load_reg(s
, 13);
8432 val
= (insn
& 0x7f) * 4;
8433 if (insn
& (1 << 7))
8434 val
= -(int32_t)val
;
8435 tcg_gen_addi_i32(tmp
, tmp
, val
);
8436 store_reg(s
, 13, tmp
);
8439 case 2: /* sign/zero extend. */
8442 rm
= (insn
>> 3) & 7;
8443 tmp
= load_reg(s
, rm
);
8444 switch ((insn
>> 6) & 3) {
8445 case 0: gen_sxth(tmp
); break;
8446 case 1: gen_sxtb(tmp
); break;
8447 case 2: gen_uxth(tmp
); break;
8448 case 3: gen_uxtb(tmp
); break;
8450 store_reg(s
, rd
, tmp
);
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;
        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                }
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
                gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
            }
            break;

        default:
            goto undef;
        }
        break;
    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback.  */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}
8669 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8670 basic block 'tb'. If search_pc is TRUE, also generate PC
8671 information for each intermediate instruction. */
8672 static inline void gen_intermediate_code_internal(CPUState
*env
,
8673 TranslationBlock
*tb
,
8676 DisasContext dc1
, *dc
= &dc1
;
8678 uint16_t *gen_opc_end
;
8680 target_ulong pc_start
;
8681 uint32_t next_page_start
;
    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
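    /* Note: max_insns caps the guest insns per TB; with icount
       disabled, CF_COUNT_MASK is effectively "no limit".  */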
    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
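        /* These side tables let a later retranslation map an opcode
           index back to the guest PC and icount (see gen_pc_load).  */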
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
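        /* e.g. after "IT EQ" (condexec_mask == 0x08) the first advance
           leaves cond == EQ and mask == 0x10 for the one conditional
           insn; the advance after that insn empties the mask and
           clears the condition, ending the IT block.  */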
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
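    /* Note: gen_goto_tb can chain directly to the next TB when the
       target stays on the same page; tcg_gen_exit_tb(0) instead
       returns to the execution loop so the next TB is found by hash
       lookup.  */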
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
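/* The _pc variant is used when retranslating a TB after a fault or
   exception: search_pc makes the gen_opc_* side tables valid so the
   guest PC of the offending opcode can be recovered (gen_pc_load).  */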
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
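    /* PSR bit 4 distinguishes the 32-bit modes from the legacy 26-bit
       ones, hence the trailing 32/26 after the mode name.  */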
#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}
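#if 0
/* Illustrative sketch only, not built into QEMU: it_trace() is a
   hypothetical standalone helper, not a real QEMU function.  It just
   prints how the condexec_cond/condexec_mask pair evolves for a given
   16-bit IT opcode, mirroring the per-insn advance performed in
   gen_intermediate_code_internal above.  */
#include <stdio.h>

static void it_trace(unsigned int insn)
{
    unsigned int cond = (insn >> 4) & 0xe;  /* high bits of firstcond */
    unsigned int mask = insn & 0x1f;        /* firstcond[0]:mask[3:0] */

    while (mask) {
        /* Same update as the translator performs after each insn.  */
        cond = (cond & 0xe) | ((mask >> 4) & 1);
        mask = (mask << 1) & 0x1f;
        if (mask == 0)
            cond = 0;
        printf("cond=%x mask=%02x\n", cond, mask);
    }
}
/* e.g. it_trace(0xbf08) walks a one-instruction "IT EQ" block.  */
#endif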