/*
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv cpu_T[2];

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
}
/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function.  */
static int num_temps;
static TCGv temps[MAX_TEMPS];

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}
/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
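
/* Illustrative sketch (not from the original source): temporaries taken
   from this cache are always released, either explicitly with dead_tmp()
   or by a consuming helper.  A minimal, assumed example built only from
   TCG ops already used in this file (rotate a value left by 8):  */
static inline void example_rol8(TCGv var)
{
    TCGv tmp = new_tmp();              /* allocate from the cache */
    tcg_gen_shri_i32(tmp, var, 24);    /* top byte moves to the bottom */
    tcg_gen_shli_i32(var, var, 8);     /* shift the rest up */
    tcg_gen_or_i32(var, var, tmp);     /* combine: rotate left by 8 */
    dead_tmp(tmp);                     /* return the temporary to the cache */
}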
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))
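
/* Illustrative note (not from the original source): load_cpu_field and
   store_cpu_field are typically paired for a read-modify-write of a
   CPUState field; the iwMMXt wCon helpers further below follow exactly
   this shape:

       tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
       tcg_gen_ori_i32(tmp, tmp, 1);
       store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);

   store_cpu_field (via store_cpu_offset) releases the temporary.  */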
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
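
/* Illustrative sketch (not from the original source): decoded instructions
   commonly follow a load_reg -> operate -> store_reg pattern.  A minimal,
   assumed example that bitwise-inverts register rd in place:  */
static inline void example_not_reg(DisasContext *s, int rd)
{
    TCGv tmp = load_reg(s, rd);       /* fresh temporary holding regs[rd] */
    tcg_gen_not_i32(tmp, tmp);        /* operate on the value */
    store_reg(s, rd, tmp);            /* write back; tmp is marked as dead */
}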
/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
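
/* Illustrative note (not from the original source): callers pass the
   pre-shift mask.  E.g. gen_bfi(dest, base, val, 8, 0xff) keeps the low
   byte of val, shifts it into bits 15:8, clears bits 15:8 of base, and
   ORs the two together.  */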
/* Round the top 32 bits of a 64-bit value.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}
/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
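
/* Illustrative note (not from the original source): the masking stops the
   carry out of bit 15 of the low halfword from reaching the high halfword,
   and the final XOR restores bit 15 of each half.  E.g. 0x0001ffff plus
   0x00000001 yields 0x00010000 (the low half wraps to 0x0000 without
   incrementing the high half, unlike a plain 32-bit add).  */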
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1.  Clobbers T1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}
/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME:  Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
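
/* Illustrative sketch (not from the original source): a decoder extracts the
   immediate-shift fields of an ARM data-processing operand and hands them to
   gen_arm_shift_im(); gen_add_data_offset() below does exactly this for
   addressing modes.  A minimal, assumed helper:  */
static inline void example_shifted_operand(unsigned int insn, TCGv var)
{
    int shift = (insn >> 7) & 0x1f;            /* imm5 shift amount */
    int shiftop = (insn >> 5) & 3;             /* LSL/LSR/ASR/ROR */
    gen_arm_shift_im(var, shiftop, shift, 0);  /* 0: don't update the flags */
}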
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q)
        break;
    case 3:
        PAS_OP(sh)
        break;
    case 6:
        PAS_OP(uq)
        break;
    case 7:
        PAS_OP(uh)
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
625 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv a
, TCGv b
)
630 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 tmp
= tcg_temp_new_ptr();
633 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
637 tmp
= tcg_temp_new_ptr();
638 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUState
, GE
));
641 #undef gen_pas_helper
642 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
655 #undef gen_pas_helper
660 static void gen_test_cc(int cc
, int label
)
668 tmp
= load_cpu_field(ZF
);
669 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
672 tmp
= load_cpu_field(ZF
);
673 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
676 tmp
= load_cpu_field(CF
);
677 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
680 tmp
= load_cpu_field(CF
);
681 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
684 tmp
= load_cpu_field(NF
);
685 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
688 tmp
= load_cpu_field(NF
);
689 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
692 tmp
= load_cpu_field(VF
);
693 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
696 tmp
= load_cpu_field(VF
);
697 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
699 case 8: /* hi: C && !Z */
700 inv
= gen_new_label();
701 tmp
= load_cpu_field(CF
);
702 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
704 tmp
= load_cpu_field(ZF
);
705 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, label
);
708 case 9: /* ls: !C || Z */
709 tmp
= load_cpu_field(CF
);
710 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
712 tmp
= load_cpu_field(ZF
);
713 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
715 case 10: /* ge: N == V -> N ^ V == 0 */
716 tmp
= load_cpu_field(VF
);
717 tmp2
= load_cpu_field(NF
);
718 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
720 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
722 case 11: /* lt: N != V -> N ^ V != 0 */
723 tmp
= load_cpu_field(VF
);
724 tmp2
= load_cpu_field(NF
);
725 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
727 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
729 case 12: /* gt: !Z && N == V */
730 inv
= gen_new_label();
731 tmp
= load_cpu_field(ZF
);
732 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, inv
);
734 tmp
= load_cpu_field(VF
);
735 tmp2
= load_cpu_field(NF
);
736 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
738 tcg_gen_brcondi_i32(TCG_COND_GE
, tmp
, 0, label
);
741 case 13: /* le: Z || N != V */
742 tmp
= load_cpu_field(ZF
);
743 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, label
);
745 tmp
= load_cpu_field(VF
);
746 tmp2
= load_cpu_field(NF
);
747 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
749 tcg_gen_brcondi_i32(TCG_COND_LT
, tmp
, 0, label
);
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
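
/* Illustrative sketch (not from the original source): gen_test_cc() is the
   branch-if-condition-fails primitive used to implement conditional
   execution.  A decoder typically allocates a label, branches to it when
   the inverse condition holds, emits the instruction body, and later sets
   the label.  Assumed usage, with condlabel/condjmp as bookkeeping fields:

       s->condlabel = gen_new_label();
       gen_test_cc(cond ^ 1, s->condlabel);   // skip body if cond is false
       s->condjmp = 1;
       ... generate the conditional instruction ...
       gen_set_label(s->condlabel);
*/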
758 static const uint8_t table_logic_cc
[16] = {
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}

/* TODO: This should be removed.  Use gen_bx instead.  */
static inline void gen_bx_T0(DisasContext *s)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_T[0]);
    gen_bx(s, tmp);
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    store_cpu_field(tmp, regs[15]);
}
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
#define VFP_OP2(name)                                                  \
static inline void gen_vfp_##name(int dp)                              \
{                                                                      \
    if (dp)                                                            \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env);  \
    else                                                               \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env);  \
}
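
/* Illustrative note (not from the original source): each VFP_OP2(op)
   instantiation defines a gen_vfp_<op>(dp) wrapper that dispatches on the
   precision flag; e.g. a hypothetical VFP_OP2(add) would expand to a
   gen_vfp_add(dp) calling gen_helper_vfp_addd or gen_helper_vfp_adds on
   the cpu_F0/cpu_F1 scratch registers.  */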
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1036 static inline void gen_vfp_uito(int dp
)
1039 gen_helper_vfp_uitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1041 gen_helper_vfp_uitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1044 static inline void gen_vfp_sito(int dp
)
1047 gen_helper_vfp_sitod(cpu_F0d
, cpu_F0s
, cpu_env
);
1049 gen_helper_vfp_sitos(cpu_F0s
, cpu_F0s
, cpu_env
);
1052 static inline void gen_vfp_toui(int dp
)
1055 gen_helper_vfp_touid(cpu_F0s
, cpu_F0d
, cpu_env
);
1057 gen_helper_vfp_touis(cpu_F0s
, cpu_F0s
, cpu_env
);
1060 static inline void gen_vfp_touiz(int dp
)
1063 gen_helper_vfp_touizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1065 gen_helper_vfp_touizs(cpu_F0s
, cpu_F0s
, cpu_env
);
1068 static inline void gen_vfp_tosi(int dp
)
1071 gen_helper_vfp_tosid(cpu_F0s
, cpu_F0d
, cpu_env
);
1073 gen_helper_vfp_tosis(cpu_F0s
, cpu_F0s
, cpu_env
);
1076 static inline void gen_vfp_tosiz(int dp
)
1079 gen_helper_vfp_tosizd(cpu_F0s
, cpu_F0d
, cpu_env
);
1081 gen_helper_vfp_tosizs(cpu_F0s
, cpu_F0s
, cpu_env
);
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
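
/* Illustrative note (not from the original source): with this mapping, n
   selects the low (n = 0) or high (n = 1) 32-bit half of double-precision
   register reg, so e.g. neon_reg_offset(1, 0) is the offset of the low word
   of D1, i.e. the same storage as single-precision register S2.  */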
1142 /* FIXME: Remove these. */
1143 #define neon_T0 cpu_T[0]
1144 #define neon_T1 cpu_T[1]
1145 #define NEON_GET_REG(T, reg, n) \
1146 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1147 #define NEON_SET_REG(T, reg, n) \
1148 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1150 static TCGv
neon_load_reg(int reg
, int pass
)
1152 TCGv tmp
= new_tmp();
1153 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1157 static void neon_store_reg(int reg
, int pass
, TCGv var
)
1159 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1163 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1165 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1168 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1170 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1173 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1174 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1175 #define tcg_gen_st_f32 tcg_gen_st_i32
1176 #define tcg_gen_st_f64 tcg_gen_st_i64
1178 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1181 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1183 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1186 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1189 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1191 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1194 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1197 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1199 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1202 #define ARM_CP_RW_BIT (1 << 20)
1204 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1206 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1209 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1211 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUState
, iwmmxt
.regs
[reg
]));
1214 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg
)
1216 tcg_gen_st_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1219 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg
)
1221 tcg_gen_ld_i32(cpu_T
[0], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1224 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg
)
1226 tcg_gen_ld_i32(cpu_T
[1], cpu_env
, offsetof(CPUState
, iwmmxt
.cregs
[reg
]));
1229 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1231 iwmmxt_store_reg(cpu_M0
, rn
);
1234 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1236 iwmmxt_load_reg(cpu_M0
, rn
);
1239 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1241 iwmmxt_load_reg(cpu_V1
, rn
);
1242 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1245 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1247 iwmmxt_load_reg(cpu_V1
, rn
);
1248 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1251 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1253 iwmmxt_load_reg(cpu_V1
, rn
);
1254 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1257 #define IWMMXT_OP(name) \
1258 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1260 iwmmxt_load_reg(cpu_V1, rn); \
1261 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1264 #define IWMMXT_OP_ENV(name) \
1265 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1267 iwmmxt_load_reg(cpu_V1, rn); \
1268 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1271 #define IWMMXT_OP_ENV_SIZE(name) \
1272 IWMMXT_OP_ENV(name##b) \
1273 IWMMXT_OP_ENV(name##w) \
1274 IWMMXT_OP_ENV(name##l)
1276 #define IWMMXT_OP_ENV1(name) \
1277 static inline void gen_op_iwmmxt_##name##_M0(void) \
1279 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1293 IWMMXT_OP_ENV_SIZE(unpackl
)
1294 IWMMXT_OP_ENV_SIZE(unpackh
)
1296 IWMMXT_OP_ENV1(unpacklub
)
1297 IWMMXT_OP_ENV1(unpackluw
)
1298 IWMMXT_OP_ENV1(unpacklul
)
1299 IWMMXT_OP_ENV1(unpackhub
)
1300 IWMMXT_OP_ENV1(unpackhuw
)
1301 IWMMXT_OP_ENV1(unpackhul
)
1302 IWMMXT_OP_ENV1(unpacklsb
)
1303 IWMMXT_OP_ENV1(unpacklsw
)
1304 IWMMXT_OP_ENV1(unpacklsl
)
1305 IWMMXT_OP_ENV1(unpackhsb
)
1306 IWMMXT_OP_ENV1(unpackhsw
)
1307 IWMMXT_OP_ENV1(unpackhsl
)
1309 IWMMXT_OP_ENV_SIZE(cmpeq
)
1310 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1311 IWMMXT_OP_ENV_SIZE(cmpgts
)
1313 IWMMXT_OP_ENV_SIZE(mins
)
1314 IWMMXT_OP_ENV_SIZE(minu
)
1315 IWMMXT_OP_ENV_SIZE(maxs
)
1316 IWMMXT_OP_ENV_SIZE(maxu
)
1318 IWMMXT_OP_ENV_SIZE(subn
)
1319 IWMMXT_OP_ENV_SIZE(addn
)
1320 IWMMXT_OP_ENV_SIZE(subu
)
1321 IWMMXT_OP_ENV_SIZE(addu
)
1322 IWMMXT_OP_ENV_SIZE(subs
)
1323 IWMMXT_OP_ENV_SIZE(adds
)
1325 IWMMXT_OP_ENV(avgb0
)
1326 IWMMXT_OP_ENV(avgb1
)
1327 IWMMXT_OP_ENV(avgw0
)
1328 IWMMXT_OP_ENV(avgw1
)
1332 IWMMXT_OP_ENV(packuw
)
1333 IWMMXT_OP_ENV(packul
)
1334 IWMMXT_OP_ENV(packuq
)
1335 IWMMXT_OP_ENV(packsw
)
1336 IWMMXT_OP_ENV(packsl
)
1337 IWMMXT_OP_ENV(packsq
)
1339 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1341 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1344 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1346 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1349 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1351 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1]);
1354 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn
)
1356 iwmmxt_load_reg(cpu_V1
, rn
);
1357 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, cpu_T
[0]);
1360 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift
)
1362 TCGv tmp
= tcg_const_i32(shift
);
1363 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, cpu_T
[0], cpu_T
[1], tmp
);
1366 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift
)
1368 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1369 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1370 tcg_gen_ext8s_i32(cpu_T
[0], cpu_T
[0]);
1373 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift
)
1375 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1376 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1377 tcg_gen_ext16s_i32(cpu_T
[0], cpu_T
[0]);
1380 static inline void gen_op_iwmmxt_extru_T0_M0(int shift
, uint32_t mask
)
1382 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, shift
);
1383 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_M0
);
1385 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], mask
);
1388 static void gen_op_iwmmxt_set_mup(void)
1391 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1392 tcg_gen_ori_i32(tmp
, tmp
, 2);
1393 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1396 static void gen_op_iwmmxt_set_cup(void)
1399 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1400 tcg_gen_ori_i32(tmp
, tmp
, 1);
1401 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1404 static void gen_op_iwmmxt_setpsr_nz(void)
1406 TCGv tmp
= new_tmp();
1407 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1408 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1411 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1413 iwmmxt_load_reg(cpu_V1
, rn
);
1414 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1415 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1419 static void gen_iwmmxt_movl_T0_T1_wRn(int rn
)
1421 iwmmxt_load_reg(cpu_V0
, rn
);
1422 tcg_gen_trunc_i64_i32(cpu_T
[0], cpu_V0
);
1423 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1424 tcg_gen_trunc_i64_i32(cpu_T
[1], cpu_V0
);
1427 static void gen_iwmmxt_movl_wRn_T0_T1(int rn
)
1429 tcg_gen_concat_i32_i64(cpu_V0
, cpu_T
[0], cpu_T
[1]);
1430 iwmmxt_store_reg(cpu_V0
, rn
);
1433 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
1438 rd
= (insn
>> 16) & 0xf;
1439 gen_movl_T1_reg(s
, rd
);
1441 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1442 if (insn
& (1 << 24)) {
1444 if (insn
& (1 << 23))
1445 gen_op_addl_T1_im(offset
);
1447 gen_op_addl_T1_im(-offset
);
1449 if (insn
& (1 << 21))
1450 gen_movl_reg_T1(s
, rd
);
1451 } else if (insn
& (1 << 21)) {
1453 if (insn
& (1 << 23))
1454 gen_op_movl_T0_im(offset
);
1456 gen_op_movl_T0_im(- offset
);
1457 gen_op_addl_T0_T1();
1458 gen_movl_reg_T0(s
, rd
);
1459 } else if (!(insn
& (1 << 23)))
1464 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
1466 int rd
= (insn
>> 0) & 0xf;
1468 if (insn
& (1 << 8))
1469 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
1472 gen_op_iwmmxt_movl_T0_wCx(rd
);
1474 gen_iwmmxt_movl_T0_T1_wRn(rd
);
1476 gen_op_movl_T1_im(mask
);
1477 gen_op_andl_T0_T1();
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
1483 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1486 int rdhi
, rdlo
, rd0
, rd1
, i
;
1489 if ((insn
& 0x0e000e00) == 0x0c000000) {
1490 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1492 rdlo
= (insn
>> 12) & 0xf;
1493 rdhi
= (insn
>> 16) & 0xf;
1494 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1495 gen_iwmmxt_movl_T0_T1_wRn(wrd
);
1496 gen_movl_reg_T0(s
, rdlo
);
1497 gen_movl_reg_T1(s
, rdhi
);
1498 } else { /* TMCRR */
1499 gen_movl_T0_reg(s
, rdlo
);
1500 gen_movl_T1_reg(s
, rdhi
);
1501 gen_iwmmxt_movl_wRn_T0_T1(wrd
);
1502 gen_op_iwmmxt_set_mup();
1507 wrd
= (insn
>> 12) & 0xf;
1508 if (gen_iwmmxt_address(s
, insn
))
1510 if (insn
& ARM_CP_RW_BIT
) {
1511 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1512 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1513 tcg_gen_mov_i32(cpu_T
[0], tmp
);
1515 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1518 if (insn
& (1 << 8)) {
1519 if (insn
& (1 << 22)) { /* WLDRD */
1520 tcg_gen_qemu_ld64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1522 } else { /* WLDRW wRd */
1523 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
1526 if (insn
& (1 << 22)) { /* WLDRH */
1527 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
1528 } else { /* WLDRB */
1529 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
1533 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1536 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1539 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1540 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1542 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
1543 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1545 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1547 if (insn
& (1 << 8)) {
1548 if (insn
& (1 << 22)) { /* WSTRD */
1550 tcg_gen_qemu_st64(cpu_M0
, cpu_T
[1], IS_USER(s
));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1553 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
1556 if (insn
& (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1558 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp
, cpu_M0
);
1561 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
1569 if ((insn
& 0x0f000000) != 0x0e000000)
1572 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd
= (insn
>> 12) & 0xf;
1575 rd0
= (insn
>> 0) & 0xf;
1576 rd1
= (insn
>> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1584 case 0x011: /* TMCR */
1587 rd
= (insn
>> 12) & 0xf;
1588 wrd
= (insn
>> 16) & 0xf;
1590 case ARM_IWMMXT_wCID
:
1591 case ARM_IWMMXT_wCASF
:
1593 case ARM_IWMMXT_wCon
:
1594 gen_op_iwmmxt_set_cup();
1596 case ARM_IWMMXT_wCSSF
:
1597 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1598 gen_movl_T1_reg(s
, rd
);
1599 gen_op_bicl_T0_T1();
1600 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1602 case ARM_IWMMXT_wCGR0
:
1603 case ARM_IWMMXT_wCGR1
:
1604 case ARM_IWMMXT_wCGR2
:
1605 case ARM_IWMMXT_wCGR3
:
1606 gen_op_iwmmxt_set_cup();
1607 gen_movl_reg_T0(s
, rd
);
1608 gen_op_iwmmxt_movl_wCx_T0(wrd
);
1614 case 0x100: /* WXOR */
1615 wrd
= (insn
>> 12) & 0xf;
1616 rd0
= (insn
>> 0) & 0xf;
1617 rd1
= (insn
>> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1625 case 0x111: /* TMRC */
1628 rd
= (insn
>> 12) & 0xf;
1629 wrd
= (insn
>> 16) & 0xf;
1630 gen_op_iwmmxt_movl_T0_wCx(wrd
);
1631 gen_movl_reg_T0(s
, rd
);
1633 case 0x300: /* WANDN */
1634 wrd
= (insn
>> 12) & 0xf;
1635 rd0
= (insn
>> 0) & 0xf;
1636 rd1
= (insn
>> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1638 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1639 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1645 case 0x200: /* WAND */
1646 wrd
= (insn
>> 12) & 0xf;
1647 rd0
= (insn
>> 0) & 0xf;
1648 rd1
= (insn
>> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd
= (insn
>> 12) & 0xf;
1658 rd0
= (insn
>> 0) & 0xf;
1659 rd1
= (insn
>> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1661 if (insn
& (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1666 gen_op_iwmmxt_set_mup();
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd
= (insn
>> 12) & 0xf;
1670 rd0
= (insn
>> 16) & 0xf;
1671 rd1
= (insn
>> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1673 switch ((insn
>> 22) & 3) {
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1686 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd
= (insn
>> 12) & 0xf;
1692 rd0
= (insn
>> 16) & 0xf;
1693 rd1
= (insn
>> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1695 switch ((insn
>> 22) & 3) {
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd
= (insn
>> 12) & 0xf;
1714 rd0
= (insn
>> 16) & 0xf;
1715 rd1
= (insn
>> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1717 if (insn
& (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1721 if (!(insn
& (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1724 gen_op_iwmmxt_set_mup();
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd
= (insn
>> 12) & 0xf;
1728 rd0
= (insn
>> 16) & 0xf;
1729 rd1
= (insn
>> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1731 if (insn
& (1 << 21)) {
1732 if (insn
& (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1737 if (insn
& (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1742 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1743 gen_op_iwmmxt_set_mup();
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd
= (insn
>> 12) & 0xf;
1747 rd0
= (insn
>> 16) & 0xf;
1748 rd1
= (insn
>> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1750 if (insn
& (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1754 if (!(insn
& (1 << 20))) {
1755 iwmmxt_load_reg(cpu_V1
, wrd
);
1756 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1758 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1759 gen_op_iwmmxt_set_mup();
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd
= (insn
>> 12) & 0xf;
1763 rd0
= (insn
>> 16) & 0xf;
1764 rd1
= (insn
>> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1766 switch ((insn
>> 22) & 3) {
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1779 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd
= (insn
>> 12) & 0xf;
1785 rd0
= (insn
>> 16) & 0xf;
1786 rd1
= (insn
>> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1788 if (insn
& (1 << 22)) {
1789 if (insn
& (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1794 if (insn
& (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1799 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd
= (insn
>> 12) & 0xf;
1805 rd0
= (insn
>> 16) & 0xf;
1806 rd1
= (insn
>> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1808 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1809 gen_op_movl_T1_im(7);
1810 gen_op_andl_T0_T1();
1811 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1813 gen_op_iwmmxt_set_mup();
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 rd
= (insn
>> 12) & 0xf;
1817 wrd
= (insn
>> 16) & 0xf;
1818 gen_movl_T0_reg(s
, rd
);
1819 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1820 switch ((insn
>> 6) & 3) {
1822 gen_op_movl_T1_im(0xff);
1823 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
1826 gen_op_movl_T1_im(0xffff);
1827 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
1830 gen_op_movl_T1_im(0xffffffff);
1831 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
1836 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1837 gen_op_iwmmxt_set_mup();
1839 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1840 rd
= (insn
>> 12) & 0xf;
1841 wrd
= (insn
>> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1845 switch ((insn
>> 22) & 3) {
1848 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
1850 gen_op_iwmmxt_extru_T0_M0((insn
& 7) << 3, 0xff);
1855 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
1857 gen_op_iwmmxt_extru_T0_M0((insn
& 3) << 4, 0xffff);
1861 gen_op_iwmmxt_extru_T0_M0((insn
& 1) << 5, ~0u);
1866 gen_movl_reg_T0(s
, rd
);
1868 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1869 if ((insn
& 0x000ff008) != 0x0003f000)
1871 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1872 switch ((insn
>> 22) & 3) {
1874 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
1877 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
1880 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
1885 gen_op_shll_T1_im(28);
1886 gen_set_nzcv(cpu_T
[1]);
1888 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1889 rd
= (insn
>> 12) & 0xf;
1890 wrd
= (insn
>> 16) & 0xf;
1891 gen_movl_T0_reg(s
, rd
);
1892 switch ((insn
>> 6) & 3) {
1894 gen_helper_iwmmxt_bcstb(cpu_M0
, cpu_T
[0]);
1897 gen_helper_iwmmxt_bcstw(cpu_M0
, cpu_T
[0]);
1900 gen_helper_iwmmxt_bcstl(cpu_M0
, cpu_T
[0]);
1905 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1906 gen_op_iwmmxt_set_mup();
1908 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1909 if ((insn
& 0x000ff00f) != 0x0003f000)
1911 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1912 switch ((insn
>> 22) & 3) {
1914 for (i
= 0; i
< 7; i
++) {
1915 gen_op_shll_T1_im(4);
1916 gen_op_andl_T0_T1();
1920 for (i
= 0; i
< 3; i
++) {
1921 gen_op_shll_T1_im(8);
1922 gen_op_andl_T0_T1();
1926 gen_op_shll_T1_im(16);
1927 gen_op_andl_T0_T1();
1932 gen_set_nzcv(cpu_T
[0]);
1934 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1935 wrd
= (insn
>> 12) & 0xf;
1936 rd0
= (insn
>> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1938 switch ((insn
>> 22) & 3) {
1940 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1943 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1946 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
1951 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1952 gen_op_iwmmxt_set_mup();
1954 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1955 if ((insn
& 0x000ff00f) != 0x0003f000)
1957 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
1958 switch ((insn
>> 22) & 3) {
1960 for (i
= 0; i
< 7; i
++) {
1961 gen_op_shll_T1_im(4);
1966 for (i
= 0; i
< 3; i
++) {
1967 gen_op_shll_T1_im(8);
1972 gen_op_shll_T1_im(16);
1978 gen_set_nzcv(cpu_T
[0]);
1980 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1981 rd
= (insn
>> 12) & 0xf;
1982 rd0
= (insn
>> 16) & 0xf;
1983 if ((insn
& 0xf) != 0)
1985 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1986 switch ((insn
>> 22) & 3) {
1988 gen_helper_iwmmxt_msbb(cpu_T
[0], cpu_M0
);
1991 gen_helper_iwmmxt_msbw(cpu_T
[0], cpu_M0
);
1994 gen_helper_iwmmxt_msbl(cpu_T
[0], cpu_M0
);
1999 gen_movl_reg_T0(s
, rd
);
2001 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2002 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2003 wrd
= (insn
>> 12) & 0xf;
2004 rd0
= (insn
>> 16) & 0xf;
2005 rd1
= (insn
>> 0) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2007 switch ((insn
>> 22) & 3) {
2009 if (insn
& (1 << 21))
2010 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2012 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2015 if (insn
& (1 << 21))
2016 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2018 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2021 if (insn
& (1 << 21))
2022 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2024 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2033 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2034 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2035 wrd
= (insn
>> 12) & 0xf;
2036 rd0
= (insn
>> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2038 switch ((insn
>> 22) & 3) {
2040 if (insn
& (1 << 21))
2041 gen_op_iwmmxt_unpacklsb_M0();
2043 gen_op_iwmmxt_unpacklub_M0();
2046 if (insn
& (1 << 21))
2047 gen_op_iwmmxt_unpacklsw_M0();
2049 gen_op_iwmmxt_unpackluw_M0();
2052 if (insn
& (1 << 21))
2053 gen_op_iwmmxt_unpacklsl_M0();
2055 gen_op_iwmmxt_unpacklul_M0();
2060 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2064 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2065 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2066 wrd
= (insn
>> 12) & 0xf;
2067 rd0
= (insn
>> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2069 switch ((insn
>> 22) & 3) {
2071 if (insn
& (1 << 21))
2072 gen_op_iwmmxt_unpackhsb_M0();
2074 gen_op_iwmmxt_unpackhub_M0();
2077 if (insn
& (1 << 21))
2078 gen_op_iwmmxt_unpackhsw_M0();
2080 gen_op_iwmmxt_unpackhuw_M0();
2083 if (insn
& (1 << 21))
2084 gen_op_iwmmxt_unpackhsl_M0();
2086 gen_op_iwmmxt_unpackhul_M0();
2091 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2092 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup();
2095 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2096 case 0x214: case 0x614: case 0xa14: case 0xe14:
2097 wrd
= (insn
>> 12) & 0xf;
2098 rd0
= (insn
>> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2100 if (gen_iwmmxt_shift(insn
, 0xff))
2102 switch ((insn
>> 22) & 3) {
2106 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2109 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2112 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2119 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2120 case 0x014: case 0x414: case 0x814: case 0xc14:
2121 wrd
= (insn
>> 12) & 0xf;
2122 rd0
= (insn
>> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2124 if (gen_iwmmxt_shift(insn
, 0xff))
2126 switch ((insn
>> 22) & 3) {
2130 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2133 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2136 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2143 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2144 case 0x114: case 0x514: case 0x914: case 0xd14:
2145 wrd
= (insn
>> 12) & 0xf;
2146 rd0
= (insn
>> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2148 if (gen_iwmmxt_shift(insn
, 0xff))
2150 switch ((insn
>> 22) & 3) {
2154 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2157 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2160 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2163 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 wrd
= (insn
>> 12) & 0xf;
2170 rd0
= (insn
>> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2172 switch ((insn
>> 22) & 3) {
2176 if (gen_iwmmxt_shift(insn
, 0xf))
2178 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2181 if (gen_iwmmxt_shift(insn
, 0x1f))
2183 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2186 if (gen_iwmmxt_shift(insn
, 0x3f))
2188 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2191 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd
= (insn
>> 12) & 0xf;
2198 rd0
= (insn
>> 16) & 0xf;
2199 rd1
= (insn
>> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2201 switch ((insn
>> 22) & 3) {
2203 if (insn
& (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2206 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2209 if (insn
& (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2215 if (insn
& (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2218 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2224 gen_op_iwmmxt_set_mup();
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd
= (insn
>> 12) & 0xf;
2229 rd0
= (insn
>> 16) & 0xf;
2230 rd1
= (insn
>> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2232 switch ((insn
>> 22) & 3) {
2234 if (insn
& (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2240 if (insn
& (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2246 if (insn
& (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2254 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2255 gen_op_iwmmxt_set_mup();
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd
= (insn
>> 12) & 0xf;
2260 rd0
= (insn
>> 16) & 0xf;
2261 rd1
= (insn
>> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2263 gen_op_movl_T0_im((insn
>> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2266 gen_op_iwmmxt_set_mup();
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd
= (insn
>> 12) & 0xf;
2273 rd0
= (insn
>> 16) & 0xf;
2274 rd1
= (insn
>> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2276 switch ((insn
>> 20) & 0xf) {
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2281 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2299 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2307 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd
= (insn
>> 12) & 0xf;
2316 rd0
= (insn
>> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2318 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2319 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, cpu_T
[0]);
2320 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd
= (insn
>> 12) & 0xf;
2329 rd0
= (insn
>> 16) & 0xf;
2330 rd1
= (insn
>> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2332 switch ((insn
>> 20) & 0xf) {
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2337 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2355 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd
= (insn
>> 12) & 0xf;
2372 rd0
= (insn
>> 16) & 0xf;
2373 rd1
= (insn
>> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2375 if (!(insn
& (1 << 20)))
2377 switch ((insn
>> 22) & 3) {
2381 if (insn
& (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2387 if (insn
& (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2390 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2393 if (insn
& (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2399 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd
= (insn
>> 5) & 0xf;
2408 rd0
= (insn
>> 12) & 0xf;
2409 rd1
= (insn
>> 0) & 0xf;
2410 if (rd0
== 0xf || rd1
== 0xf)
2412 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2413 switch ((insn
>> 16) & 0xf) {
2414 case 0x0: /* TMIA */
2415 gen_movl_T0_reg(s
, rd0
);
2416 gen_movl_T1_reg(s
, rd1
);
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2419 case 0x8: /* TMIAPH */
2420 gen_movl_T0_reg(s
, rd0
);
2421 gen_movl_T1_reg(s
, rd1
);
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2425 gen_movl_T1_reg(s
, rd0
);
2426 if (insn
& (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
2429 gen_movl_T1_reg(s
, rd1
);
2430 if (insn
& (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2437 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2438 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
2449 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
2451 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2453 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0
= (insn
>> 12) & 0xf;
2457 acc
= (insn
>> 5) & 7;
2462 switch ((insn
>> 16) & 0xf) {
2464 gen_movl_T0_reg(s
, rd0
);
2465 gen_movl_T1_reg(s
, rd1
);
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2468 case 0x8: /* MIAPH */
2469 gen_movl_T0_reg(s
, rd0
);
2470 gen_movl_T1_reg(s
, rd1
);
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
2477 gen_movl_T1_reg(s
, rd0
);
2478 if (insn
& (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
2481 gen_movl_T1_reg(s
, rd1
);
2482 if (insn
& (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2490 gen_op_iwmmxt_movq_wRn_M0(acc
);
2494 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi
= (insn
>> 16) & 0xf;
2497 rdlo
= (insn
>> 12) & 0xf;
2503 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2504 gen_iwmmxt_movl_T0_T1_wRn(acc
);
2505 gen_movl_reg_T0(s
, rdlo
);
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
2508 gen_movl_reg_T0(s
, rdhi
);
2510 gen_movl_T0_reg(s
, rdlo
);
2511 gen_movl_T1_reg(s
, rdhi
);
2512 gen_iwmmxt_movl_wRn_T0_T1(acc
);
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        if (!env->cp[cp].cp_read)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
    }
    return 0;
}
static int cp15_user_ok(uint32_t insn)
{
    int cpn = (insn >> 16) & 0xf;
    int cpm = insn & 0xf;
    int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);

    if (cpn == 13 && cpm == 0) {
        /* TLS register.  */
        if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
            return 1;
    }
    if (cpn == 7) {
        /* ISB, DSB, DMB.  */
        if ((cpm == 5 && op == 4)
                || (cpm == 10 && (op == 4 || op == 5)))
            return 1;
    }
    return 0;
}
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
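
/* A VFP register number is split between a 4-bit field and a single extra
   bit in the instruction.  For double-precision registers the extra bit is
   the top bit of the register number (only valid on VFP3); for
   single-precision registers it is the bottom bit.  The macros below
   reassemble the full register number from those two fields.  */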
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
/* Move between integer and VFP cores.  */
static TCGv gen_vfp_mrs(void)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}
static inline int vfp_enabled(CPUState * env)
{
    return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
}
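
/* Helpers that replicate an 8-bit or 16-bit value across a 32-bit word.
   Used when a single scalar element has to appear in every lane, e.g. for
   VDUP and the by-scalar forms of Neon operations.  */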
static void gen_neon_dup_u8(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_low16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

static void gen_neon_dup_high16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
2720 if (insn
& (1 << 4)) {
2721 /* single register transfer */
2722 rd
= (insn
>> 12) & 0xf;
2727 VFP_DREG_N(rn
, insn
);
2730 if (insn
& 0x00c00060
2731 && !arm_feature(env
, ARM_FEATURE_NEON
))
2734 pass
= (insn
>> 21) & 1;
2735 if (insn
& (1 << 22)) {
2737 offset
= ((insn
>> 5) & 3) * 8;
2738 } else if (insn
& (1 << 5)) {
2740 offset
= (insn
& (1 << 6)) ? 16 : 0;
2745 if (insn
& ARM_CP_RW_BIT
) {
2747 tmp
= neon_load_reg(rn
, pass
);
2751 tcg_gen_shri_i32(tmp
, tmp
, offset
);
2752 if (insn
& (1 << 23))
2758 if (insn
& (1 << 23)) {
2760 tcg_gen_shri_i32(tmp
, tmp
, 16);
2766 tcg_gen_sari_i32(tmp
, tmp
, 16);
2775 store_reg(s
, rd
, tmp
);
2778 tmp
= load_reg(s
, rd
);
2779 if (insn
& (1 << 23)) {
2782 gen_neon_dup_u8(tmp
, 0);
2783 } else if (size
== 1) {
2784 gen_neon_dup_low16(tmp
);
2787 tcg_gen_mov_i32(tmp2
, tmp
);
2788 neon_store_reg(rn
, 0, tmp2
);
2789 neon_store_reg(rn
, 1, tmp
);
2794 tmp2
= neon_load_reg(rn
, pass
);
2795 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xff);
2799 tmp2
= neon_load_reg(rn
, pass
);
2800 gen_bfi(tmp
, tmp2
, tmp
, offset
, 0xffff);
2806 neon_store_reg(rn
, pass
, tmp
);
2810 if ((insn
& 0x6f) != 0x00)
2812 rn
= VFP_SREG_N(insn
);
2813 if (insn
& ARM_CP_RW_BIT
) {
2815 if (insn
& (1 << 21)) {
2816 /* system register */
2821 /* VFP2 allows access to FSID from userspace.
2822 VFP3 restricts all id registers to privileged
2825 && arm_feature(env
, ARM_FEATURE_VFP3
))
2827 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2832 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2834 case ARM_VFP_FPINST
:
2835 case ARM_VFP_FPINST2
:
2836 /* Not present in VFP3. */
2838 || arm_feature(env
, ARM_FEATURE_VFP3
))
2840 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2844 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
2845 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
2848 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
2854 || !arm_feature(env
, ARM_FEATURE_VFP3
))
2856 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
2862 gen_mov_F0_vreg(0, rn
);
2863 tmp
= gen_vfp_mrs();
2866 /* Set the 4 flag bits in the CPSR. */
2870 store_reg(s
, rd
, tmp
);
2874 tmp
= load_reg(s
, rd
);
2875 if (insn
& (1 << 21)) {
2877 /* system register */
2882 /* Writes are ignored. */
2885 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
2892 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2895 case ARM_VFP_FPINST
:
2896 case ARM_VFP_FPINST2
:
2897 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
2904 gen_mov_vreg_F0(0, rn
);
2909 /* data processing */
2910 /* The opcode is in bits 23, 21, 20 and 6. */
2911 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
2915 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
2917 /* rn is register number */
2918 VFP_DREG_N(rn
, insn
);
2921 if (op
== 15 && (rn
== 15 || rn
> 17)) {
2922 /* Integer or single precision destination. */
2923 rd
= VFP_SREG_D(insn
);
2925 VFP_DREG_D(rd
, insn
);
2928 if (op
== 15 && (rn
== 16 || rn
== 17)) {
2929 /* Integer source. */
2930 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2932 VFP_DREG_M(rm
, insn
);
2935 rn
= VFP_SREG_N(insn
);
2936 if (op
== 15 && rn
== 15) {
2937 /* Double precision destination. */
2938 VFP_DREG_D(rd
, insn
);
2940 rd
= VFP_SREG_D(insn
);
2942 rm
= VFP_SREG_M(insn
);
2945 veclen
= env
->vfp
.vec_len
;
2946 if (op
== 15 && rn
> 3)
2949 /* Shut up compiler warnings. */
2960 /* Figure out what type of vector operation this is. */
2961 if ((rd
& bank_mask
) == 0) {
2966 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
2968 delta_d
= env
->vfp
.vec_stride
+ 1;
2970 if ((rm
& bank_mask
) == 0) {
2971 /* mixed scalar/vector */
2980 /* Load the initial operands. */
2985 /* Integer source */
2986 gen_mov_F0_vreg(0, rm
);
2991 gen_mov_F0_vreg(dp
, rd
);
2992 gen_mov_F1_vreg(dp
, rm
);
2996 /* Compare with zero */
2997 gen_mov_F0_vreg(dp
, rd
);
3008 /* Source and destination the same. */
3009 gen_mov_F0_vreg(dp
, rd
);
3012 /* One source operand. */
3013 gen_mov_F0_vreg(dp
, rm
);
3017 /* Two source operands. */
3018 gen_mov_F0_vreg(dp
, rn
);
3019 gen_mov_F1_vreg(dp
, rm
);
3023 /* Perform the calculation. */
3025 case 0: /* mac: fd + (fn * fm) */
3027 gen_mov_F1_vreg(dp
, rd
);
3030 case 1: /* nmac: fd - (fn * fm) */
3033 gen_mov_F1_vreg(dp
, rd
);
3036 case 2: /* msc: -fd + (fn * fm) */
3038 gen_mov_F1_vreg(dp
, rd
);
3041 case 3: /* nmsc: -fd - (fn * fm) */
3044 gen_mov_F1_vreg(dp
, rd
);
3047 case 4: /* mul: fn * fm */
3050 case 5: /* nmul: -(fn * fm) */
3054 case 6: /* add: fn + fm */
3057 case 7: /* sub: fn - fm */
3060 case 8: /* div: fn / fm */
3063 case 14: /* fconst */
3064 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3067 n
= (insn
<< 12) & 0x80000000;
3068 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3075 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3082 tcg_gen_movi_i32(cpu_F0s
, n
);
3085 case 15: /* extension space */
3108 case 11: /* cmpez */
3112 case 15: /* single<->double conversion */
3114 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3116 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3118 case 16: /* fuito */
3121 case 17: /* fsito */
3124 case 20: /* fshto */
3125 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3127 gen_vfp_shto(dp
, 16 - rm
);
3129 case 21: /* fslto */
3130 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3132 gen_vfp_slto(dp
, 32 - rm
);
3134 case 22: /* fuhto */
3135 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3137 gen_vfp_uhto(dp
, 16 - rm
);
3139 case 23: /* fulto */
3140 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3142 gen_vfp_ulto(dp
, 32 - rm
);
3144 case 24: /* ftoui */
3147 case 25: /* ftouiz */
3150 case 26: /* ftosi */
3153 case 27: /* ftosiz */
3156 case 28: /* ftosh */
3157 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3159 gen_vfp_tosh(dp
, 16 - rm
);
3161 case 29: /* ftosl */
3162 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3164 gen_vfp_tosl(dp
, 32 - rm
);
3166 case 30: /* ftouh */
3167 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3169 gen_vfp_touh(dp
, 16 - rm
);
3171 case 31: /* ftoul */
3172 if (!arm_feature(env
, ARM_FEATURE_VFP3
))
3174 gen_vfp_toul(dp
, 32 - rm
);
3176 default: /* undefined */
3177 printf ("rn:%d\n", rn
);
3181 default: /* undefined */
3182 printf ("op:%d\n", op
);
3186 /* Write back the result. */
3187 if (op
== 15 && (rn
>= 8 && rn
<= 11))
3188 ; /* Comparison, do nothing. */
3189 else if (op
== 15 && rn
> 17)
3190 /* Integer result. */
3191 gen_mov_vreg_F0(0, rd
);
3192 else if (op
== 15 && rn
== 15)
3194 gen_mov_vreg_F0(!dp
, rd
);
3196 gen_mov_vreg_F0(dp
, rd
);
3198 /* break out of the loop if we have finished */
3202 if (op
== 15 && delta_m
== 0) {
3203 /* single source one-many */
3205 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3207 gen_mov_vreg_F0(dp
, rd
);
3211 /* Setup the next operands. */
3213 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3217 /* One source operand. */
3218 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3220 gen_mov_F0_vreg(dp
, rm
);
3222 /* Two source operands. */
3223 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3225 gen_mov_F0_vreg(dp
, rn
);
3227 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3229 gen_mov_F1_vreg(dp
, rm
);
3237 if (dp
&& (insn
& 0x03e00000) == 0x00400000) {
3238 /* two-register transfer */
3239 rn
= (insn
>> 16) & 0xf;
3240 rd
= (insn
>> 12) & 0xf;
3242 VFP_DREG_M(rm
, insn
);
3244 rm
= VFP_SREG_M(insn
);
3247 if (insn
& ARM_CP_RW_BIT
) {
3250 gen_mov_F0_vreg(0, rm
* 2);
3251 tmp
= gen_vfp_mrs();
3252 store_reg(s
, rd
, tmp
);
3253 gen_mov_F0_vreg(0, rm
* 2 + 1);
3254 tmp
= gen_vfp_mrs();
3255 store_reg(s
, rn
, tmp
);
3257 gen_mov_F0_vreg(0, rm
);
3258 tmp
= gen_vfp_mrs();
3259 store_reg(s
, rn
, tmp
);
3260 gen_mov_F0_vreg(0, rm
+ 1);
3261 tmp
= gen_vfp_mrs();
3262 store_reg(s
, rd
, tmp
);
3267 tmp
= load_reg(s
, rd
);
3269 gen_mov_vreg_F0(0, rm
* 2);
3270 tmp
= load_reg(s
, rn
);
3272 gen_mov_vreg_F0(0, rm
* 2 + 1);
3274 tmp
= load_reg(s
, rn
);
3276 gen_mov_vreg_F0(0, rm
);
3277 tmp
= load_reg(s
, rd
);
3279 gen_mov_vreg_F0(0, rm
+ 1);
3284 rn
= (insn
>> 16) & 0xf;
3286 VFP_DREG_D(rd
, insn
);
3288 rd
= VFP_SREG_D(insn
);
3289 if (s
->thumb
&& rn
== 15) {
3290 gen_op_movl_T1_im(s
->pc
& ~2);
3292 gen_movl_T1_reg(s
, rn
);
3294 if ((insn
& 0x01200000) == 0x01000000) {
3295 /* Single load/store */
3296 offset
= (insn
& 0xff) << 2;
3297 if ((insn
& (1 << 23)) == 0)
3299 gen_op_addl_T1_im(offset
);
3300 if (insn
& (1 << 20)) {
3302 gen_mov_vreg_F0(dp
, rd
);
3304 gen_mov_F0_vreg(dp
, rd
);
3308 /* load/store multiple */
3310 n
= (insn
>> 1) & 0x7f;
3314 if (insn
& (1 << 24)) /* pre-decrement */
3315 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
3321 for (i
= 0; i
< n
; i
++) {
3322 if (insn
& ARM_CP_RW_BIT
) {
3325 gen_mov_vreg_F0(dp
, rd
+ i
);
3328 gen_mov_F0_vreg(dp
, rd
+ i
);
3331 gen_op_addl_T1_im(offset
);
3333 if (insn
& (1 << 21)) {
3335 if (insn
& (1 << 24))
3336 offset
= -offset
* n
;
3337 else if (dp
&& (insn
& 1))
3343 gen_op_addl_T1_im(offset
);
3344 gen_movl_reg_T1(s
, rn
);
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
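
/* Emit a jump to 'dest'.  When the destination lies on the same guest page
   as the current translation block we use goto_tb so the blocks can be
   chained directly; otherwise we set the PC and return to the main loop.  */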
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
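
/* Signed 16x16->32 multiply used by the halfword multiply instructions
   (SMLAxy and friends): x and y select the top or bottom half of each
   operand.  */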
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_feature(env, ARM_FEATURE_V6))
        mask &= ~(CPSR_E | CPSR_GE);
    if (!arm_feature(env, ARM_FEATURE_THUMB2))
        mask &= ~CPSR_IT;
    /* Mask out execution state bits.  */
    if (!spsr)
        mask &= ~CPSR_EXEC;
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(cpu_T[0], mask);
    }
    gen_lookup_tb(s);
    return 0;
}
/* Generate an old-style exception return.  */
static void gen_exception_return(DisasContext *s)
{
    TCGv tmp;
    gen_movl_reg_T0(s, 15);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
static inline void gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
    case 4: /* sev */
        /* TODO: Implement SEV and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(CPU_T001); break;
    case 1: gen_helper_neon_add_u16(CPU_T001); break;
    case 2: gen_op_addl_T0_T1(); break;
    default: return 1;
    }
    return 0;
}

static inline void gen_neon_rsb(int size)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 2: gen_op_rsbl_T0_T1(); break;
    default: return;
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
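
/* The helpers invoked by the macros below are selected on ((size << 1) | u):
   size gives the element width (0 = 8 bit, 1 = 16 bit, 2 = 32 bit) and u
   picks the unsigned variant, mirroring the instruction encoding.  */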
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 1: gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 2: gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 3: gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 4: gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    case 5: gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); break; \
    default: return 1; \
    }} while (0)
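
/* Spill T0/T1 to, or reload them from, the vfp.scratch[] array in the CPU
   state.  The Neon translation uses these slots to hold intermediate
   results when the destination would otherwise overwrite a source operand
   (pairwise operations, VZIP/VUZP and the like).  */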
static inline void gen_neon_movl_scratch_T0(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
}

static inline void gen_neon_movl_scratch_T1(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
}

static inline void gen_neon_movl_T0_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
}

static inline void gen_neon_movl_T1_scratch(int scratch)
{
    uint32_t offset;

    offset = offsetof(CPUARMState, vfp.scratch[scratch]);
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
}
3616 static inline void gen_neon_get_scalar(int size
, int reg
)
3619 NEON_GET_REG(T0
, reg
>> 1, reg
& 1);
3621 NEON_GET_REG(T0
, reg
>> 2, (reg
>> 1) & 1);
3623 gen_neon_dup_low16(cpu_T
[0]);
3625 gen_neon_dup_high16(cpu_T
[0]);
static void gen_neon_unzip(int reg, int q, int tmp, int size)
{
    int n;

    for (n = 0; n < q + 1; n += 2) {
        NEON_GET_REG(T0, reg, n);
        NEON_GET_REG(T1, reg, n + 1);
        switch (size) {
        case 0: gen_helper_neon_unzip_u8(); break;
        case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
        case 2: /* no-op */; break;
        default: abort();
        }
        gen_neon_movl_scratch_T0(tmp + n);
        gen_neon_movl_scratch_T1(tmp + n + 1);
    }
}
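
/* For each VLDn/VSTn 'type' encoding this table gives the number of
   registers transferred, the element interleave factor and the register
   spacing.  */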
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv tmp;
    TCGv tmp2;
3683 if (!vfp_enabled(env
))
3685 VFP_DREG_D(rd
, insn
);
3686 rn
= (insn
>> 16) & 0xf;
3688 load
= (insn
& (1 << 21)) != 0;
3689 if ((insn
& (1 << 23)) == 0) {
3690 /* Load store all elements. */
3691 op
= (insn
>> 8) & 0xf;
3692 size
= (insn
>> 6) & 3;
3693 if (op
> 10 || size
== 3)
3695 nregs
= neon_ls_element_type
[op
].nregs
;
3696 interleave
= neon_ls_element_type
[op
].interleave
;
3697 gen_movl_T1_reg(s
, rn
);
3698 stride
= (1 << size
) * interleave
;
3699 for (reg
= 0; reg
< nregs
; reg
++) {
3700 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
3701 gen_movl_T1_reg(s
, rn
);
3702 gen_op_addl_T1_im((1 << size
) * reg
);
3703 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
3704 gen_movl_T1_reg(s
, rn
);
3705 gen_op_addl_T1_im(1 << size
);
3707 for (pass
= 0; pass
< 2; pass
++) {
3710 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3711 neon_store_reg(rd
, pass
, tmp
);
3713 tmp
= neon_load_reg(rd
, pass
);
3714 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3716 gen_op_addl_T1_im(stride
);
3717 } else if (size
== 1) {
3719 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3720 gen_op_addl_T1_im(stride
);
3721 tmp2
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3722 gen_op_addl_T1_im(stride
);
3723 gen_bfi(tmp
, tmp
, tmp2
, 16, 0xffff);
3725 neon_store_reg(rd
, pass
, tmp
);
3727 tmp
= neon_load_reg(rd
, pass
);
3729 tcg_gen_shri_i32(tmp2
, tmp
, 16);
3730 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3731 gen_op_addl_T1_im(stride
);
3732 gen_st16(tmp2
, cpu_T
[1], IS_USER(s
));
3733 gen_op_addl_T1_im(stride
);
3735 } else /* size == 0 */ {
3738 for (n
= 0; n
< 4; n
++) {
3739 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3740 gen_op_addl_T1_im(stride
);
3744 gen_bfi(tmp2
, tmp2
, tmp
, n
* 8, 0xff);
3748 neon_store_reg(rd
, pass
, tmp2
);
3750 tmp2
= neon_load_reg(rd
, pass
);
3751 for (n
= 0; n
< 4; n
++) {
3754 tcg_gen_mov_i32(tmp
, tmp2
);
3756 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
3758 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3759 gen_op_addl_T1_im(stride
);
3765 rd
+= neon_ls_element_type
[op
].spacing
;
3769 size
= (insn
>> 10) & 3;
3771 /* Load single element to all lanes. */
3774 size
= (insn
>> 6) & 3;
3775 nregs
= ((insn
>> 8) & 3) + 1;
3776 stride
= (insn
& (1 << 5)) ? 2 : 1;
3777 gen_movl_T1_reg(s
, rn
);
3778 for (reg
= 0; reg
< nregs
; reg
++) {
3781 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3782 gen_neon_dup_u8(tmp
, 0);
3785 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3786 gen_neon_dup_low16(tmp
);
3789 tmp
= gen_ld32(cpu_T
[0], IS_USER(s
));
3793 default: /* Avoid compiler warnings. */
3796 gen_op_addl_T1_im(1 << size
);
3798 tcg_gen_mov_i32(tmp2
, tmp
);
3799 neon_store_reg(rd
, 0, tmp2
);
3800 neon_store_reg(rd
, 1, tmp
);
3803 stride
= (1 << size
) * nregs
;
3805 /* Single element. */
3806 pass
= (insn
>> 7) & 1;
3809 shift
= ((insn
>> 5) & 3) * 8;
3813 shift
= ((insn
>> 6) & 1) * 16;
3814 stride
= (insn
& (1 << 5)) ? 2 : 1;
3818 stride
= (insn
& (1 << 6)) ? 2 : 1;
3823 nregs
= ((insn
>> 8) & 3) + 1;
3824 gen_movl_T1_reg(s
, rn
);
3825 for (reg
= 0; reg
< nregs
; reg
++) {
3829 tmp
= gen_ld8u(cpu_T
[1], IS_USER(s
));
3832 tmp
= gen_ld16u(cpu_T
[1], IS_USER(s
));
3835 tmp
= gen_ld32(cpu_T
[1], IS_USER(s
));
3837 default: /* Avoid compiler warnings. */
3841 tmp2
= neon_load_reg(rd
, pass
);
3842 gen_bfi(tmp
, tmp2
, tmp
, shift
, size
? 0xffff : 0xff);
3845 neon_store_reg(rd
, pass
, tmp
);
3846 } else { /* Store */
3847 tmp
= neon_load_reg(rd
, pass
);
3849 tcg_gen_shri_i32(tmp
, tmp
, shift
);
3852 gen_st8(tmp
, cpu_T
[1], IS_USER(s
));
3855 gen_st16(tmp
, cpu_T
[1], IS_USER(s
));
3858 gen_st32(tmp
, cpu_T
[1], IS_USER(s
));
3863 gen_op_addl_T1_im(1 << size
);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_bic_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
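
/* Narrowing helpers: take a 64-bit vector and produce 32 bits of elements
   of half the original width.  The _sats/_satu variants saturate to the
   signed or unsigned range of the narrower type.  */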
static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    dead_tmp(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: gen_helper_neon_negl_u64(var, var); break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
    if (size < 2) {
        dead_tmp(b);
        dead_tmp(a);
    }
}
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int n;
    uint32_t imm;
    TCGv tmp;
    TCGv tmp2;
    TCGv tmp3;
    TCGv_i64 tmp64;

    if (!vfp_enabled(env))
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
4073 /* Three register same length. */
4074 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4075 if (size
== 3 && (op
== 1 || op
== 5 || op
== 8 || op
== 9
4076 || op
== 10 || op
== 11 || op
== 16)) {
4077 /* 64-bit element instructions. */
4078 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
4079 neon_load_reg64(cpu_V0
, rn
+ pass
);
4080 neon_load_reg64(cpu_V1
, rm
+ pass
);
4084 gen_helper_neon_add_saturate_u64(CPU_V001
);
4086 gen_helper_neon_add_saturate_s64(CPU_V001
);
4091 gen_helper_neon_sub_saturate_u64(CPU_V001
);
4093 gen_helper_neon_sub_saturate_s64(CPU_V001
);
4098 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4100 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4105 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
4108 gen_helper_neon_qshl_s64(cpu_V1
, cpu_env
,
4112 case 10: /* VRSHL */
4114 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
4116 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
4119 case 11: /* VQRSHL */
4121 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
4124 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
4130 tcg_gen_sub_i64(CPU_V001
);
4132 tcg_gen_add_i64(CPU_V001
);
4138 neon_store_reg64(cpu_V0
, rd
+ pass
);
4145 case 10: /* VRSHL */
4146 case 11: /* VQRSHL */
4149 /* Shift instruction operands are reversed. */
4156 case 20: /* VPMAX */
4157 case 21: /* VPMIN */
4158 case 23: /* VPADD */
4161 case 26: /* VPADD (float) */
4162 pairwise
= (u
&& size
< 2);
4164 case 30: /* VPMIN/VPMAX (float) */
4171 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4180 NEON_GET_REG(T0
, rn
, n
);
4181 NEON_GET_REG(T1
, rn
, n
+ 1);
4183 NEON_GET_REG(T0
, rm
, n
);
4184 NEON_GET_REG(T1
, rm
, n
+ 1);
4188 NEON_GET_REG(T0
, rn
, pass
);
4189 NEON_GET_REG(T1
, rm
, pass
);
4193 GEN_NEON_INTEGER_OP(hadd
);
4196 GEN_NEON_INTEGER_OP_ENV(qadd
);
4198 case 2: /* VRHADD */
4199 GEN_NEON_INTEGER_OP(rhadd
);
4201 case 3: /* Logic ops. */
4202 switch ((u
<< 2) | size
) {
4204 gen_op_andl_T0_T1();
4207 gen_op_bicl_T0_T1();
4217 gen_op_xorl_T0_T1();
4220 tmp
= neon_load_reg(rd
, pass
);
4221 gen_neon_bsl(cpu_T
[0], cpu_T
[0], cpu_T
[1], tmp
);
4225 tmp
= neon_load_reg(rd
, pass
);
4226 gen_neon_bsl(cpu_T
[0], cpu_T
[0], tmp
, cpu_T
[1]);
4230 tmp
= neon_load_reg(rd
, pass
);
4231 gen_neon_bsl(cpu_T
[0], tmp
, cpu_T
[0], cpu_T
[1]);
4237 GEN_NEON_INTEGER_OP(hsub
);
4240 GEN_NEON_INTEGER_OP_ENV(qsub
);
4243 GEN_NEON_INTEGER_OP(cgt
);
4246 GEN_NEON_INTEGER_OP(cge
);
4249 GEN_NEON_INTEGER_OP(shl
);
4252 GEN_NEON_INTEGER_OP_ENV(qshl
);
4254 case 10: /* VRSHL */
4255 GEN_NEON_INTEGER_OP(rshl
);
4257 case 11: /* VQRSHL */
4258 GEN_NEON_INTEGER_OP_ENV(qrshl
);
4261 GEN_NEON_INTEGER_OP(max
);
4264 GEN_NEON_INTEGER_OP(min
);
4267 GEN_NEON_INTEGER_OP(abd
);
4270 GEN_NEON_INTEGER_OP(abd
);
4271 NEON_GET_REG(T1
, rd
, pass
);
4275 if (!u
) { /* VADD */
4276 if (gen_neon_add(size
))
4280 case 0: gen_helper_neon_sub_u8(CPU_T001
); break;
4281 case 1: gen_helper_neon_sub_u16(CPU_T001
); break;
4282 case 2: gen_op_subl_T0_T1(); break;
4288 if (!u
) { /* VTST */
4290 case 0: gen_helper_neon_tst_u8(CPU_T001
); break;
4291 case 1: gen_helper_neon_tst_u16(CPU_T001
); break;
4292 case 2: gen_helper_neon_tst_u32(CPU_T001
); break;
4297 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
4298 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
4299 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
4304 case 18: /* Multiply. */
4306 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4307 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4308 case 2: gen_op_mul_T0_T1(); break;
4311 NEON_GET_REG(T1
, rd
, pass
);
4319 if (u
) { /* polynomial */
4320 gen_helper_neon_mul_p8(CPU_T001
);
4321 } else { /* Integer */
4323 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
4324 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
4325 case 2: gen_op_mul_T0_T1(); break;
4330 case 20: /* VPMAX */
4331 GEN_NEON_INTEGER_OP(pmax
);
4333 case 21: /* VPMIN */
4334 GEN_NEON_INTEGER_OP(pmin
);
4336 case 22: /* Hultiply high. */
4337 if (!u
) { /* VQDMULH */
4339 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01
); break;
4340 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01
); break;
4343 } else { /* VQRDHMUL */
4345 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01
); break;
4346 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01
); break;
4351 case 23: /* VPADD */
4355 case 0: gen_helper_neon_padd_u8(CPU_T001
); break;
4356 case 1: gen_helper_neon_padd_u16(CPU_T001
); break;
4357 case 2: gen_op_addl_T0_T1(); break;
4361 case 26: /* Floating point arithnetic. */
4362 switch ((u
<< 2) | size
) {
4364 gen_helper_neon_add_f32(CPU_T001
);
4367 gen_helper_neon_sub_f32(CPU_T001
);
4370 gen_helper_neon_add_f32(CPU_T001
);
4373 gen_helper_neon_abd_f32(CPU_T001
);
4379 case 27: /* Float multiply. */
4380 gen_helper_neon_mul_f32(CPU_T001
);
4382 NEON_GET_REG(T1
, rd
, pass
);
4384 gen_helper_neon_add_f32(CPU_T001
);
4386 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
4390 case 28: /* Float compare. */
4392 gen_helper_neon_ceq_f32(CPU_T001
);
4395 gen_helper_neon_cge_f32(CPU_T001
);
4397 gen_helper_neon_cgt_f32(CPU_T001
);
4400 case 29: /* Float compare absolute. */
4404 gen_helper_neon_acge_f32(CPU_T001
);
4406 gen_helper_neon_acgt_f32(CPU_T001
);
4408 case 30: /* Float min/max. */
4410 gen_helper_neon_max_f32(CPU_T001
);
4412 gen_helper_neon_min_f32(CPU_T001
);
4416 gen_helper_recps_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4418 gen_helper_rsqrts_f32(cpu_T
[0], cpu_T
[0], cpu_T
[1], cpu_env
);
4423 /* Save the result. For elementwise operations we can put it
4424 straight into the destination register. For pairwise operations
4425 we have to be careful to avoid clobbering the source operands. */
4426 if (pairwise
&& rd
== rm
) {
4427 gen_neon_movl_scratch_T0(pass
);
4429 NEON_SET_REG(T0
, rd
, pass
);
4433 if (pairwise
&& rd
== rm
) {
4434 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4435 gen_neon_movl_T0_scratch(pass
);
4436 NEON_SET_REG(T0
, rd
, pass
);
4439 /* End of 3 register same size operations. */
4440 } else if (insn
& (1 << 4)) {
4441 if ((insn
& 0x00380080) != 0) {
4442 /* Two registers and shift. */
4443 op
= (insn
>> 8) & 0xf;
4444 if (insn
& (1 << 7)) {
4449 while ((insn
& (1 << (size
+ 19))) == 0)
4452 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
4453 /* To avoid excessive dumplication of ops we implement shift
4454 by immediate using the variable shift operations. */
4456 /* Shift by immediate:
4457 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4458 /* Right shifts are encoded as N - shift, where N is the
4459 element size in bits. */
4461 shift
= shift
- (1 << (size
+ 3));
4469 imm
= (uint8_t) shift
;
4474 imm
= (uint16_t) shift
;
4485 for (pass
= 0; pass
< count
; pass
++) {
4487 neon_load_reg64(cpu_V0
, rm
+ pass
);
4488 tcg_gen_movi_i64(cpu_V1
, imm
);
4493 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4495 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4500 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4502 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
4507 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4509 case 5: /* VSHL, VSLI */
4510 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
4514 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4516 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4518 case 7: /* VQSHLU */
4519 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
, cpu_V0
, cpu_V1
);
4522 if (op
== 1 || op
== 3) {
4524 neon_load_reg64(cpu_V0
, rd
+ pass
);
4525 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
4526 } else if (op
== 4 || (op
== 5 && u
)) {
4528 cpu_abort(env
, "VS[LR]I.64 not implemented");
4530 neon_store_reg64(cpu_V0
, rd
+ pass
);
4531 } else { /* size < 3 */
4532 /* Operands in T0 and T1. */
4533 gen_op_movl_T1_im(imm
);
4534 NEON_GET_REG(T0
, rm
, pass
);
4538 GEN_NEON_INTEGER_OP(shl
);
4542 GEN_NEON_INTEGER_OP(rshl
);
4547 GEN_NEON_INTEGER_OP(shl
);
4549 case 5: /* VSHL, VSLI */
4551 case 0: gen_helper_neon_shl_u8(CPU_T001
); break;
4552 case 1: gen_helper_neon_shl_u16(CPU_T001
); break;
4553 case 2: gen_helper_neon_shl_u32(CPU_T001
); break;
4558 GEN_NEON_INTEGER_OP_ENV(qshl
);
4560 case 7: /* VQSHLU */
4562 case 0: gen_helper_neon_qshl_u8(CPU_T0E01
); break;
4563 case 1: gen_helper_neon_qshl_u16(CPU_T0E01
); break;
4564 case 2: gen_helper_neon_qshl_u32(CPU_T0E01
); break;
4570 if (op
== 1 || op
== 3) {
4572 NEON_GET_REG(T1
, rd
, pass
);
4574 } else if (op
== 4 || (op
== 5 && u
)) {
4579 imm
= 0xff >> -shift
;
4581 imm
= (uint8_t)(0xff << shift
);
4587 imm
= 0xffff >> -shift
;
4589 imm
= (uint16_t)(0xffff << shift
);
4594 imm
= 0xffffffffu
>> -shift
;
4596 imm
= 0xffffffffu
<< shift
;
4601 tmp
= neon_load_reg(rd
, pass
);
4602 tcg_gen_andi_i32(cpu_T
[0], cpu_T
[0], imm
);
4603 tcg_gen_andi_i32(tmp
, tmp
, ~imm
);
4604 tcg_gen_or_i32(cpu_T
[0], cpu_T
[0], tmp
);
4606 NEON_SET_REG(T0
, rd
, pass
);
4609 } else if (op
< 10) {
4610 /* Shift by immediate and narrow:
4611 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4612 shift
= shift
- (1 << (size
+ 3));
4616 imm
= (uint16_t)shift
;
4618 tmp2
= tcg_const_i32(imm
);
4619 TCGV_UNUSED_I64(tmp64
);
4622 imm
= (uint32_t)shift
;
4623 tmp2
= tcg_const_i32(imm
);
4624 TCGV_UNUSED_I64(tmp64
);
4627 tmp64
= tcg_const_i64(shift
);
4634 for (pass
= 0; pass
< 2; pass
++) {
4636 neon_load_reg64(cpu_V0
, rm
+ pass
);
4639 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, tmp64
);
4641 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, tmp64
);
4644 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, tmp64
);
4646 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, tmp64
);
4649 tmp
= neon_load_reg(rm
+ pass
, 0);
4650 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
, u
);
4651 tmp3
= neon_load_reg(rm
+ pass
, 1);
4652 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
, u
);
4653 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
4658 if (op
== 8 && !u
) {
4659 gen_neon_narrow(size
- 1, tmp
, cpu_V0
);
4662 gen_neon_narrow_sats(size
- 1, tmp
, cpu_V0
);
4664 gen_neon_narrow_satu(size
- 1, tmp
, cpu_V0
);
4669 neon_store_reg(rd
, 0, tmp2
);
4670 neon_store_reg(rd
, 1, tmp
);
4673 } else if (op
== 10) {
4677 tmp
= neon_load_reg(rm
, 0);
4678 tmp2
= neon_load_reg(rm
, 1);
4679 for (pass
= 0; pass
< 2; pass
++) {
4683 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4686 /* The shift is less than the width of the source
4687 type, so we can just shift the whole register. */
4688 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
4689 if (size
< 2 || !u
) {
4692 imm
= (0xffu
>> (8 - shift
));
4695 imm
= 0xffff >> (16 - shift
);
4697 imm64
= imm
| (((uint64_t)imm
) << 32);
4698 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, imm64
);
4701 neon_store_reg64(cpu_V0
, rd
+ pass
);
4703 } else if (op
== 15 || op
== 16) {
4704 /* VCVT fixed-point. */
4705 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4706 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
4709 gen_vfp_ulto(0, shift
);
4711 gen_vfp_slto(0, shift
);
4714 gen_vfp_toul(0, shift
);
4716 gen_vfp_tosl(0, shift
);
4718 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
4723 } else { /* (insn & 0x00380080) == 0 */
4726 op
= (insn
>> 8) & 0xf;
4727 /* One register and immediate. */
4728 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
4729 invert
= (insn
& (1 << 5)) != 0;
4747 imm
= (imm
<< 8) | (imm
<< 24);
4750 imm
= (imm
< 8) | 0xff;
4753 imm
= (imm
<< 16) | 0xffff;
4756 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
4761 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
4762 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
4768 if (op
!= 14 || !invert
)
4769 gen_op_movl_T1_im(imm
);
4771 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
4772 if (op
& 1 && op
< 12) {
4773 tmp
= neon_load_reg(rd
, pass
);
4775 /* The immediate value has already been inverted, so
4777 tcg_gen_andi_i32(tmp
, tmp
, imm
);
4779 tcg_gen_ori_i32(tmp
, tmp
, imm
);
4784 if (op
== 14 && invert
) {
4787 for (n
= 0; n
< 4; n
++) {
4788 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
4789 val
|= 0xff << (n
* 8);
4791 tcg_gen_movi_i32(tmp
, val
);
4793 tcg_gen_movi_i32(tmp
, imm
);
4796 neon_store_reg(rd
, pass
, tmp
);
4799 } else { /* (insn & 0x00800010 == 0x00800000) */
4801 op
= (insn
>> 8) & 0xf;
4802 if ((insn
& (1 << 6)) == 0) {
4803 /* Three registers of different lengths. */
            int src1_wide;
            int src2_wide;
            int prewiden;
            /* prewiden, src1_wide, src2_wide */
            static const int neon_3reg_wide[16][3] = {
                {1, 0, 0}, /* VADDL */
                {1, 1, 0}, /* VADDW */
                {1, 0, 0}, /* VSUBL */
                {1, 1, 0}, /* VSUBW */
                {0, 1, 1}, /* VADDHN */
                {0, 0, 0}, /* VABAL */
                {0, 1, 1}, /* VSUBHN */
                {0, 0, 0}, /* VABDL */
                {0, 0, 0}, /* VMLAL */
                {0, 0, 0}, /* VQDMLAL */
                {0, 0, 0}, /* VMLSL */
                {0, 0, 0}, /* VQDMLSL */
                {0, 0, 0}, /* Integer VMULL */
                {0, 0, 0}, /* VQDMULL */
                {0, 0, 0} /* Polynomial VMULL */
            };

            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];

            if (size == 0 && (op == 9 || op == 11 || op == 13))
                return 1;
4833 /* Avoid overlapping operands. Wide source operands are
4834 always aligned so will never overlap with wide
4835 destinations in problematic ways. */
4836 if (rd
== rm
&& !src2_wide
) {
4837 NEON_GET_REG(T0
, rm
, 1);
4838 gen_neon_movl_scratch_T0(2);
4839 } else if (rd
== rn
&& !src1_wide
) {
4840 NEON_GET_REG(T0
, rn
, 1);
4841 gen_neon_movl_scratch_T0(2);
4844 for (pass
= 0; pass
< 2; pass
++) {
4846 neon_load_reg64(cpu_V0
, rn
+ pass
);
4849 if (pass
== 1 && rd
== rn
) {
4850 gen_neon_movl_T0_scratch(2);
4852 tcg_gen_mov_i32(tmp
, cpu_T
[0]);
4854 tmp
= neon_load_reg(rn
, pass
);
4857 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
4861 neon_load_reg64(cpu_V1
, rm
+ pass
);
4864 if (pass
== 1 && rd
== rm
) {
4865 gen_neon_movl_T0_scratch(2);
4867 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
4869 tmp2
= neon_load_reg(rm
, pass
);
4872 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
4876 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4877 gen_neon_addl(size
);
4879 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4880 gen_neon_subl(size
);
4882 case 5: case 7: /* VABAL, VABDL */
4883 switch ((size
<< 1) | u
) {
4885 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
4888 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
4891 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
4894 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
4897 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
4900 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
4907 case 8: case 9: case 10: case 11: case 12: case 13:
4908 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4909 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
4911 case 14: /* Polynomial VMULL */
4912 cpu_abort(env
, "Polynomial VMULL not implemented");
4914 default: /* 15 is RESERVED. */
4917 if (op
== 5 || op
== 13 || (op
>= 8 && op
<= 11)) {
4919 if (op
== 10 || op
== 11) {
4920 gen_neon_negl(cpu_V0
, size
);
4924 neon_load_reg64(cpu_V1
, rd
+ pass
);
4928 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4929 gen_neon_addl(size
);
4931 case 9: case 11: /* VQDMLAL, VQDMLSL */
4932 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4933 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
4936 case 13: /* VQDMULL */
4937 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
4942 neon_store_reg64(cpu_V0
, rd
+ pass
);
4943 } else if (op
== 4 || op
== 6) {
4944 /* Narrowing operation. */
4949 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
4952 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
4955 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4956 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4963 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
4966 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
4969 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
4970 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
4971 tcg_gen_trunc_i64_i32(tmp
, cpu_V0
);
4979 neon_store_reg(rd
, 0, tmp3
);
4980 neon_store_reg(rd
, 1, tmp
);
4983 /* Write back the result. */
4984 neon_store_reg64(cpu_V0
, rd
+ pass
);
4988 /* Two registers and a scalar. */
4990 case 0: /* Integer VMLA scalar */
4991 case 1: /* Float VMLA scalar */
4992 case 4: /* Integer VMLS scalar */
4993 case 5: /* Floating point VMLS scalar */
4994 case 8: /* Integer VMUL scalar */
4995 case 9: /* Floating point VMUL scalar */
4996 case 12: /* VQDMULH scalar */
4997 case 13: /* VQRDMULH scalar */
4998 gen_neon_get_scalar(size
, rm
);
4999 gen_neon_movl_scratch_T0(0);
5000 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
5002 gen_neon_movl_T0_scratch(0);
5003 NEON_GET_REG(T1
, rn
, pass
);
5006 gen_helper_neon_qdmulh_s16(CPU_T0E01
);
5008 gen_helper_neon_qdmulh_s32(CPU_T0E01
);
5010 } else if (op
== 13) {
5012 gen_helper_neon_qrdmulh_s16(CPU_T0E01
);
5014 gen_helper_neon_qrdmulh_s32(CPU_T0E01
);
5016 } else if (op
& 1) {
5017 gen_helper_neon_mul_f32(CPU_T001
);
5020 case 0: gen_helper_neon_mul_u8(CPU_T001
); break;
5021 case 1: gen_helper_neon_mul_u16(CPU_T001
); break;
5022 case 2: gen_op_mul_T0_T1(); break;
5028 NEON_GET_REG(T1
, rd
, pass
);
5034 gen_helper_neon_add_f32(CPU_T001
);
5040 gen_helper_neon_sub_f32(cpu_T
[0], cpu_T
[1], cpu_T
[0]);
5046 NEON_SET_REG(T0
, rd
, pass
);
5049 case 2: /* VMLAL sclar */
5050 case 3: /* VQDMLAL scalar */
5051 case 6: /* VMLSL scalar */
5052 case 7: /* VQDMLSL scalar */
5053 case 10: /* VMULL scalar */
5054 case 11: /* VQDMULL scalar */
5055 if (size
== 0 && (op
== 3 || op
== 7 || op
== 11))
5058 gen_neon_get_scalar(size
, rm
);
5059 NEON_GET_REG(T1
, rn
, 1);
5061 for (pass
= 0; pass
< 2; pass
++) {
5063 tmp
= neon_load_reg(rn
, 0);
5066 tcg_gen_mov_i32(tmp
, cpu_T
[1]);
5069 tcg_gen_mov_i32(tmp2
, cpu_T
[0]);
5070 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5071 if (op
== 6 || op
== 7) {
5072 gen_neon_negl(cpu_V0
, size
);
5075 neon_load_reg64(cpu_V1
, rd
+ pass
);
5079 gen_neon_addl(size
);
5082 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5083 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5089 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5094 neon_store_reg64(cpu_V0
, rd
+ pass
);
5097 default: /* 14 and 15 are RESERVED */
5101 } else { /* size == 3 */
5104 imm
= (insn
>> 8) & 0xf;
5111 neon_load_reg64(cpu_V0
, rn
);
5113 neon_load_reg64(cpu_V1
, rn
+ 1);
5115 } else if (imm
== 8) {
5116 neon_load_reg64(cpu_V0
, rn
+ 1);
5118 neon_load_reg64(cpu_V1
, rm
);
5121 tmp64
= tcg_temp_new_i64();
5123 neon_load_reg64(cpu_V0
, rn
);
5124 neon_load_reg64(tmp64
, rn
+ 1);
5126 neon_load_reg64(cpu_V0
, rn
+ 1);
5127 neon_load_reg64(tmp64
, rm
);
5129 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
5130 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
5131 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5133 neon_load_reg64(cpu_V1
, rm
);
5135 neon_load_reg64(cpu_V1
, rm
+ 1);
5138 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5139 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
5140 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
5143 neon_load_reg64(cpu_V0
, rn
);
5144 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
5145 neon_load_reg64(cpu_V1
, rm
);
5146 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
5147 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5149 neon_store_reg64(cpu_V0
, rd
);
5151 neon_store_reg64(cpu_V1
, rd
+ 1);
5153 } else if ((insn
& (1 << 11)) == 0) {
5154 /* Two register misc. */
5155 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
5156 size
= (insn
>> 18) & 3;
5158 case 0: /* VREV64 */
5161 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5162 NEON_GET_REG(T0
, rm
, pass
* 2);
5163 NEON_GET_REG(T1
, rm
, pass
* 2 + 1);
5165 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5166 case 1: gen_swap_half(cpu_T
[0]); break;
5167 case 2: /* no-op */ break;
5170 NEON_SET_REG(T0
, rd
, pass
* 2 + 1);
5172 NEON_SET_REG(T1
, rd
, pass
* 2);
5174 gen_op_movl_T0_T1();
5176 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5177 case 1: gen_swap_half(cpu_T
[0]); break;
5180 NEON_SET_REG(T0
, rd
, pass
* 2);
5184 case 4: case 5: /* VPADDL */
5185 case 12: case 13: /* VPADAL */
5188 for (pass
= 0; pass
< q
+ 1; pass
++) {
5189 tmp
= neon_load_reg(rm
, pass
* 2);
5190 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
5191 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
5192 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
5194 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
5195 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
5196 case 2: tcg_gen_add_i64(CPU_V001
); break;
5201 neon_load_reg64(cpu_V1
, rd
+ pass
);
5202 gen_neon_addl(size
);
5204 neon_store_reg64(cpu_V0
, rd
+ pass
);
5209 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
5210 NEON_GET_REG(T0
, rm
, n
);
5211 NEON_GET_REG(T1
, rd
, n
+ 1);
5212 NEON_SET_REG(T1
, rm
, n
);
5213 NEON_SET_REG(T0
, rd
, n
+ 1);
5221 Rd A3 A2 A1 A0 B2 B0 A2 A0
5222 Rm B3 B2 B1 B0 B3 B1 A3 A1
5226 gen_neon_unzip(rd
, q
, 0, size
);
5227 gen_neon_unzip(rm
, q
, 4, size
);
5229 static int unzip_order_q
[8] =
5230 {0, 2, 4, 6, 1, 3, 5, 7};
5231 for (n
= 0; n
< 8; n
++) {
5232 int reg
= (n
< 4) ? rd
: rm
;
5233 gen_neon_movl_T0_scratch(unzip_order_q
[n
]);
5234 NEON_SET_REG(T0
, reg
, n
% 4);
5237 static int unzip_order
[4] =
5239 for (n
= 0; n
< 4; n
++) {
5240 int reg
= (n
< 2) ? rd
: rm
;
5241 gen_neon_movl_T0_scratch(unzip_order
[n
]);
5242 NEON_SET_REG(T0
, reg
, n
% 2);
5248 Rd A3 A2 A1 A0 B1 A1 B0 A0
5249 Rm B3 B2 B1 B0 B3 A3 B2 A2
5253 count
= (q
? 4 : 2);
5254 for (n
= 0; n
< count
; n
++) {
5255 NEON_GET_REG(T0
, rd
, n
);
5256 NEON_GET_REG(T1
, rd
, n
);
5258 case 0: gen_helper_neon_zip_u8(); break;
5259 case 1: gen_helper_neon_zip_u16(); break;
5260 case 2: /* no-op */; break;
5263 gen_neon_movl_scratch_T0(n
* 2);
5264 gen_neon_movl_scratch_T1(n
* 2 + 1);
5266 for (n
= 0; n
< count
* 2; n
++) {
5267 int reg
= (n
< count
) ? rd
: rm
;
5268 gen_neon_movl_T0_scratch(n
);
5269 NEON_SET_REG(T0
, reg
, n
% count
);
5272 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5276 for (pass
= 0; pass
< 2; pass
++) {
5277 neon_load_reg64(cpu_V0
, rm
+ pass
);
5279 if (op
== 36 && q
== 0) {
5280 gen_neon_narrow(size
, tmp
, cpu_V0
);
5282 gen_neon_narrow_satu(size
, tmp
, cpu_V0
);
5284 gen_neon_narrow_sats(size
, tmp
, cpu_V0
);
5289 neon_store_reg(rd
, 0, tmp2
);
5290 neon_store_reg(rd
, 1, tmp
);
5294 case 38: /* VSHLL */
5297 tmp
= neon_load_reg(rm
, 0);
5298 tmp2
= neon_load_reg(rm
, 1);
5299 for (pass
= 0; pass
< 2; pass
++) {
5302 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
5303 neon_store_reg64(cpu_V0
, rd
+ pass
);
5308 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5309 if (op
== 30 || op
== 31 || op
>= 58) {
5310 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
5311 neon_reg_offset(rm
, pass
));
5313 NEON_GET_REG(T0
, rm
, pass
);
5316 case 1: /* VREV32 */
5318 case 0: tcg_gen_bswap_i32(cpu_T
[0], cpu_T
[0]); break;
5319 case 1: gen_swap_half(cpu_T
[0]); break;
5323 case 2: /* VREV16 */
5326 gen_rev16(cpu_T
[0]);
5330 case 0: gen_helper_neon_cls_s8(cpu_T
[0], cpu_T
[0]); break;
5331 case 1: gen_helper_neon_cls_s16(cpu_T
[0], cpu_T
[0]); break;
5332 case 2: gen_helper_neon_cls_s32(cpu_T
[0], cpu_T
[0]); break;
5338 case 0: gen_helper_neon_clz_u8(cpu_T
[0], cpu_T
[0]); break;
5339 case 1: gen_helper_neon_clz_u16(cpu_T
[0], cpu_T
[0]); break;
5340 case 2: gen_helper_clz(cpu_T
[0], cpu_T
[0]); break;
5347 gen_helper_neon_cnt_u8(cpu_T
[0], cpu_T
[0]);
5354 case 14: /* VQABS */
5356 case 0: gen_helper_neon_qabs_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5357 case 1: gen_helper_neon_qabs_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5358 case 2: gen_helper_neon_qabs_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5362 case 15: /* VQNEG */
5364 case 0: gen_helper_neon_qneg_s8(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5365 case 1: gen_helper_neon_qneg_s16(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5366 case 2: gen_helper_neon_qneg_s32(cpu_T
[0], cpu_env
, cpu_T
[0]); break;
5370 case 16: case 19: /* VCGT #0, VCLE #0 */
5371 gen_op_movl_T1_im(0);
5373 case 0: gen_helper_neon_cgt_s8(CPU_T001
); break;
5374 case 1: gen_helper_neon_cgt_s16(CPU_T001
); break;
5375 case 2: gen_helper_neon_cgt_s32(CPU_T001
); break;
5381 case 17: case 20: /* VCGE #0, VCLT #0 */
5382 gen_op_movl_T1_im(0);
5384 case 0: gen_helper_neon_cge_s8(CPU_T001
); break;
5385 case 1: gen_helper_neon_cge_s16(CPU_T001
); break;
5386 case 2: gen_helper_neon_cge_s32(CPU_T001
); break;
5392 case 18: /* VCEQ #0 */
5393 gen_op_movl_T1_im(0);
5395 case 0: gen_helper_neon_ceq_u8(CPU_T001
); break;
5396 case 1: gen_helper_neon_ceq_u16(CPU_T001
); break;
5397 case 2: gen_helper_neon_ceq_u32(CPU_T001
); break;
5403 case 0: gen_helper_neon_abs_s8(cpu_T
[0], cpu_T
[0]); break;
5404 case 1: gen_helper_neon_abs_s16(cpu_T
[0], cpu_T
[0]); break;
5405 case 2: tcg_gen_abs_i32(cpu_T
[0], cpu_T
[0]); break;
5410 gen_op_movl_T1_im(0);
5415 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5416 gen_op_movl_T1_im(0);
5417 gen_helper_neon_cgt_f32(CPU_T001
);
5421 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5422 gen_op_movl_T1_im(0);
5423 gen_helper_neon_cge_f32(CPU_T001
);
5427 case 26: /* Float VCEQ #0 */
5428 gen_op_movl_T1_im(0);
5429 gen_helper_neon_ceq_f32(CPU_T001
);
5431 case 30: /* Float VABS */
5434 case 31: /* Float VNEG */
5438 NEON_GET_REG(T1
, rd
, pass
);
5439 NEON_SET_REG(T1
, rm
, pass
);
5442 NEON_GET_REG(T1
, rd
, pass
);
5444 case 0: gen_helper_neon_trn_u8(); break;
5445 case 1: gen_helper_neon_trn_u16(); break;
5449 NEON_SET_REG(T1
, rm
, pass
);
5451 case 56: /* Integer VRECPE */
5452 gen_helper_recpe_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5454 case 57: /* Integer VRSQRTE */
5455 gen_helper_rsqrte_u32(cpu_T
[0], cpu_T
[0], cpu_env
);
5457 case 58: /* Float VRECPE */
5458 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5460 case 59: /* Float VRSQRTE */
5461 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, cpu_env
);
5463 case 60: /* VCVT.F32.S32 */
5466 case 61: /* VCVT.F32.U32 */
5469 case 62: /* VCVT.S32.F32 */
5472 case 63: /* VCVT.U32.F32 */
5476 /* Reserved: 21, 29, 39-56 */
5479 if (op
== 30 || op
== 31 || op
>= 58) {
5480 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
5481 neon_reg_offset(rd
, pass
));
5483 NEON_SET_REG(T0
, rd
, pass
);
5488 } else if ((insn
& (1 << 10)) == 0) {
5490 n
= ((insn
>> 5) & 0x18) + 8;
5491 if (insn
& (1 << 6)) {
5492 tmp
= neon_load_reg(rd
, 0);
5495 tcg_gen_movi_i32(tmp
, 0);
5497 tmp2
= neon_load_reg(rm
, 0);
5498 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, tcg_const_i32(rn
),
5501 if (insn
& (1 << 6)) {
5502 tmp
= neon_load_reg(rd
, 1);
5505 tcg_gen_movi_i32(tmp
, 0);
5507 tmp3
= neon_load_reg(rm
, 1);
5508 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, tcg_const_i32(rn
),
5510 neon_store_reg(rd
, 0, tmp2
);
5511 neon_store_reg(rd
, 1, tmp3
);
5513 } else if ((insn
& 0x380) == 0) {
5515 if (insn
& (1 << 19)) {
5516 NEON_SET_REG(T0
, rm
, 1);
5518 NEON_SET_REG(T0
, rm
, 0);
5520 if (insn
& (1 << 16)) {
5521 gen_neon_dup_u8(cpu_T
[0], ((insn
>> 17) & 3) * 8);
5522 } else if (insn
& (1 << 17)) {
5523 if ((insn
>> 18) & 1)
5524 gen_neon_dup_high16(cpu_T
[0]);
5526 gen_neon_dup_low16(cpu_T
[0]);
5528 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5529 NEON_SET_REG(T0
, rd
, pass
);
static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_cpu_field(teecr);
            store_reg(s, rt, tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_cpu_field(teehbr);
            store_reg(s, rt, tmp);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
{
    int crn = (insn >> 16) & 0xf;
    int crm = insn & 0xf;
    int op1 = (insn >> 21) & 7;
    int op2 = (insn >> 5) & 7;
    int rt = (insn >> 12) & 0xf;
    TCGv tmp;

    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
            /* TEECR */
            if (IS_USER(s))
                return 1;
            tmp = load_reg(s, rt);
            gen_helper_set_teecr(cpu_env, tmp);
            dead_tmp(tmp);
            return 0;
        }
        if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
            /* TEEHBR */
            if (IS_USER(s) && (env->teecr & 1))
                return 1;
            tmp = load_reg(s, rt);
            store_cpu_field(tmp, teehbr);
            return 0;
        }
    }
    fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
            op1, crn, crm, op2);
    return 1;
}
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int cpnum;

    cpnum = (insn >> 8) & 0xf;
    if (arm_feature(env, ARM_FEATURE_XSCALE)
            && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
        return 1;

    switch (cpnum) {
    case 0:
    case 1:
        if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(env, s, insn);
        } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(env, s, insn);
        }
        return 1;
    case 10:
    case 11:
        return disas_vfp_insn (env, s, insn);
    case 14:
        /* Coprocessors 7-15 are architecturally reserved by ARM.
           Unfortunately Intel decided to ignore this.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            goto board;
        if (insn & (1 << 20))
            return disas_cp14_read(env, s, insn);
        else
            return disas_cp14_write(env, s, insn);
    case 15:
        return disas_cp15_insn (env, s, insn);
    default:
    board:
        /* Unknown coprocessor.  See if the board has hooked it.  */
        return disas_cp_insn (env, s, insn);
    }
}
5644 /* Store a 64-bit value to a register pair. Clobbers val. */
5645 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5649 tcg_gen_trunc_i64_i32(tmp, val);
5650 store_reg(s, rlow, tmp);
5652 tcg_gen_shri_i64(val, val, 32);
5653 tcg_gen_trunc_i64_i32(tmp, val);
5654 store_reg(s, rhigh, tmp);
5657 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5658 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5663 /* Load value and extend to 64 bits. */
5664 tmp = tcg_temp_new_i64();
5665 tmp2 = load_reg(s, rlow);
5666 tcg_gen_extu_i32_i64(tmp, tmp2);
5668 tcg_gen_add_i64(val, val, tmp);
5671 /* load and add a 64-bit value from a register pair. */
5672 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5678 /* Load 64-bit value rd:rn. */
5679 tmpl = load_reg(s, rlow);
5680 tmph = load_reg(s, rhigh);
5681 tmp = tcg_temp_new_i64();
5682 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5685 tcg_gen_add_i64(val, val, tmp);
5688 /* Set N and Z flags from a 64-bit value. */
5689 static void gen_logicq_cc(TCGv_i64 val)
5691 TCGv tmp = new_tmp();
5692 gen_helper_logicq_cc(tmp, val);
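/* Illustrative sketch (added; not part of the original source): the helpers
   above combine as follows for a 64-bit multiply-accumulate such as
   umlal rdlo, rdhi, rm, rs.  rdlo/rdhi are placeholder register numbers;
   the real decode appears later in disas_arm_insn. */
#if 0
    tmp = load_reg(s, rs);
    tmp2 = load_reg(s, rm);
    tmp64 = gen_mulu_i64_i32(tmp, tmp2);    /* unsigned 32x32 -> 64 product */
    gen_addq(s, tmp64, rdlo, rdhi);         /* accumulate the rdhi:rdlo pair */
    gen_storeq_reg(s, rdlo, rdhi, tmp64);   /* write the pair back */
#endif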
5697 static void disas_arm_insn(CPUState * env, DisasContext *s)
5699 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5706 insn = ldl_code(s->pc);
5709 /* M variants do not implement ARM mode. */
5714 /* Unconditional instructions. */
5715 if (((insn
>> 25) & 7) == 1) {
5716 /* NEON Data processing. */
5717 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5720 if (disas_neon_data_insn(env
, s
, insn
))
5724 if ((insn
& 0x0f100000) == 0x04000000) {
5725 /* NEON load/store. */
5726 if (!arm_feature(env
, ARM_FEATURE_NEON
))
5729 if (disas_neon_ls_insn(env
, s
, insn
))
5733 if ((insn
& 0x0d70f000) == 0x0550f000)
5735 else if ((insn
& 0x0ffffdff) == 0x01010000) {
5738 if (insn
& (1 << 9)) {
5739 /* BE8 mode not implemented. */
5743 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
5744 switch ((insn
>> 4) & 0xf) {
5747 gen_helper_clrex(cpu_env
);
5753 /* We don't emulate caches so these are a no-op. */
5758 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
5764 op1
= (insn
& 0x1f);
5765 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5766 addr
= load_reg(s
, 13);
5769 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op1
));
5771 i
= (insn
>> 23) & 3;
5773 case 0: offset
= -4; break; /* DA */
5774 case 1: offset
= -8; break; /* DB */
5775 case 2: offset
= 0; break; /* IA */
5776 case 3: offset
= 4; break; /* IB */
5780 tcg_gen_addi_i32(addr
, addr
, offset
);
5781 tmp
= load_reg(s
, 14);
5782 gen_st32(tmp
, addr
, 0);
5784 gen_helper_cpsr_read(tmp
);
5785 tcg_gen_addi_i32(addr
, addr
, 4);
5786 gen_st32(tmp
, addr
, 0);
5787 if (insn
& (1 << 21)) {
5788 /* Base writeback. */
5790 case 0: offset
= -8; break;
5791 case 1: offset
= -4; break;
5792 case 2: offset
= 4; break;
5793 case 3: offset
= 0; break;
5797 tcg_gen_addi_i32(addr
, tmp
, offset
);
5798 if (op1
== (env
->uncached_cpsr
& CPSR_M
)) {
5799 gen_movl_reg_T1(s
, 13);
5801 gen_helper_set_r13_banked(cpu_env
, tcg_const_i32(op1
), cpu_T
[1]);
5806 } else if ((insn
& 0x0e5fffe0) == 0x081d0a00) {
5812 rn
= (insn
>> 16) & 0xf;
5813 addr
= load_reg(s
, rn
);
5814 i
= (insn
>> 23) & 3;
5816 case 0: offset
= -4; break; /* DA */
5817 case 1: offset
= -8; break; /* DB */
5818 case 2: offset
= 0; break; /* IA */
5819 case 3: offset
= 4; break; /* IB */
5823 tcg_gen_addi_i32(addr
, addr
, offset
);
5824 /* Load PC into tmp and CPSR into tmp2. */
5825 tmp
= gen_ld32(addr
, 0);
5826 tcg_gen_addi_i32(addr
, addr
, 4);
5827 tmp2
= gen_ld32(addr
, 0);
5828 if (insn
& (1 << 21)) {
5829 /* Base writeback. */
5831 case 0: offset
= -8; break;
5832 case 1: offset
= -4; break;
5833 case 2: offset
= 4; break;
5834 case 3: offset
= 0; break;
5838 tcg_gen_addi_i32(addr
, addr
, offset
);
5839 store_reg(s
, rn
, addr
);
5843 gen_rfe(s
, tmp
, tmp2
);
5844 } else if ((insn & 0x0e000000) == 0x0a000000) {
5845 /* branch link and change to thumb (blx <offset>) */
5848 val = (uint32_t)s->pc;
5850 tcg_gen_movi_i32(tmp, val);
5851 store_reg(s, 14, tmp);
5852 /* Sign-extend the 24-bit offset */
5853 offset = (((int32_t)insn) << 8) >> 8;
5854 /* offset * 4 + bit24 * 2 + (thumb bit) */
5855 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5856 /* pipeline offset */
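/* Added illustrative example (not part of the original source): for a BLX
   immediate with the H bit (insn bit 24) clear and imm24 = 4, offset
   sign-extends to 4, so val gains (4 << 2) = 16 bytes, bit 1 stays clear,
   and bit 0 is set so the later bx switches to Thumb state.  The pipeline
   adjustment handled by the elided lines is not shown here. */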
5860 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
5861 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
5862 /* iWMMXt register transfer. */
5863 if (env
->cp15
.c15_cpar
& (1 << 1))
5864 if (!disas_iwmmxt_insn(env
, s
, insn
))
5867 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
5868 /* Coprocessor double register transfer. */
5869 } else if ((insn
& 0x0f000010) == 0x0e000010) {
5870 /* Additional coprocessor register transfer. */
5871 } else if ((insn
& 0x0ff10020) == 0x01000000) {
5874 /* cps (privileged) */
5878 if (insn
& (1 << 19)) {
5879 if (insn
& (1 << 8))
5881 if (insn
& (1 << 7))
5883 if (insn
& (1 << 6))
5885 if (insn
& (1 << 18))
5888 if (insn
& (1 << 17)) {
5890 val
|= (insn
& 0x1f);
5893 gen_op_movl_T0_im(val
);
5894 gen_set_psr_T0(s
, mask
, 0);
5901 /* if not always execute, we generate a conditional jump to
5903 s
->condlabel
= gen_new_label();
5904 gen_test_cc(cond
^ 1, s
->condlabel
);
5907 if ((insn
& 0x0f900000) == 0x03000000) {
5908 if ((insn
& (1 << 21)) == 0) {
5910 rd
= (insn
>> 12) & 0xf;
5911 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
5912 if ((insn
& (1 << 22)) == 0) {
5915 tcg_gen_movi_i32(tmp
, val
);
5918 tmp
= load_reg(s
, rd
);
5919 tcg_gen_ext16u_i32(tmp
, tmp
);
5920 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
5922 store_reg(s
, rd
, tmp
);
5924 if (((insn
>> 12) & 0xf) != 0xf)
5926 if (((insn
>> 16) & 0xf) == 0) {
5927 gen_nop_hint(s
, insn
& 0xff);
5929 /* CPSR = immediate */
5931 shift
= ((insn
>> 8) & 0xf) * 2;
5933 val
= (val
>> shift
) | (val
<< (32 - shift
));
5934 gen_op_movl_T0_im(val
);
5935 i
= ((insn
& (1 << 22)) != 0);
5936 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
5940 } else if ((insn
& 0x0f900000) == 0x01000000
5941 && (insn
& 0x00000090) != 0x00000090) {
5942 /* miscellaneous instructions */
5943 op1
= (insn
>> 21) & 3;
5944 sh
= (insn
>> 4) & 0xf;
5947 case 0x0: /* move program status register */
5950 gen_movl_T0_reg(s
, rm
);
5951 i
= ((op1
& 2) != 0);
5952 if (gen_set_psr_T0(s
, msr_mask(env
, s
, (insn
>> 16) & 0xf, i
), i
))
5956 rd
= (insn
>> 12) & 0xf;
5960 tmp
= load_cpu_field(spsr
);
5963 gen_helper_cpsr_read(tmp
);
5965 store_reg(s
, rd
, tmp
);
5970 /* branch/exchange thumb (bx). */
5971 tmp
= load_reg(s
, rm
);
5973 } else if (op1
== 3) {
5975 rd
= (insn
>> 12) & 0xf;
5976 tmp
= load_reg(s
, rm
);
5977 gen_helper_clz(tmp
, tmp
);
5978 store_reg(s
, rd
, tmp
);
5986 /* Trivial implementation equivalent to bx. */
5987 tmp
= load_reg(s
, rm
);
5997 /* branch link/exchange thumb (blx) */
5998 tmp
= load_reg(s
, rm
);
6000 tcg_gen_movi_i32(tmp2
, s
->pc
);
6001 store_reg(s
, 14, tmp2
);
6004 case 0x5: /* saturating add/subtract */
6005 rd
= (insn
>> 12) & 0xf;
6006 rn
= (insn
>> 16) & 0xf;
6007 tmp
= load_reg(s
, rm
);
6008 tmp2
= load_reg(s
, rn
);
6010 gen_helper_double_saturate(tmp2
, tmp2
);
6012 gen_helper_sub_saturate(tmp
, tmp
, tmp2
);
6014 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
6016 store_reg(s
, rd
, tmp
);
6019 gen_set_condexec(s
);
6020 gen_set_pc_im(s
->pc
- 4);
6021 gen_exception(EXCP_BKPT
);
6022 s
->is_jmp
= DISAS_JUMP
;
6024 case 0x8: /* signed multiply */
6028 rs
= (insn
>> 8) & 0xf;
6029 rn
= (insn
>> 12) & 0xf;
6030 rd
= (insn
>> 16) & 0xf;
6032 /* (32 * 16) >> 16 */
6033 tmp
= load_reg(s
, rm
);
6034 tmp2
= load_reg(s
, rs
);
6036 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
6039 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6040 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
6042 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6043 if ((sh
& 2) == 0) {
6044 tmp2
= load_reg(s
, rn
);
6045 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6048 store_reg(s
, rd
, tmp
);
6051 tmp
= load_reg(s
, rm
);
6052 tmp2
= load_reg(s
, rs
);
6053 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
6056 tmp64
= tcg_temp_new_i64();
6057 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6059 gen_addq(s
, tmp64
, rn
, rd
);
6060 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6063 tmp2
= load_reg(s
, rn
);
6064 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6067 store_reg(s
, rd
, tmp
);
6074 } else if (((insn
& 0x0e000000) == 0 &&
6075 (insn
& 0x00000090) != 0x90) ||
6076 ((insn
& 0x0e000000) == (1 << 25))) {
6077 int set_cc
, logic_cc
, shiftop
;
6079 op1
= (insn
>> 21) & 0xf;
6080 set_cc
= (insn
>> 20) & 1;
6081 logic_cc
= table_logic_cc
[op1
] & set_cc
;
6083 /* data processing instruction */
6084 if (insn
& (1 << 25)) {
6085 /* immediate operand */
6087 shift
= ((insn
>> 8) & 0xf) * 2;
6089 val
= (val
>> shift
) | (val
<< (32 - shift
));
6090 gen_op_movl_T1_im(val
);
6091 if (logic_cc
&& shift
)
6092 gen_set_CF_bit31(cpu_T
[1]);
6096 gen_movl_T1_reg(s
, rm
);
6097 shiftop
= (insn
>> 5) & 3;
6098 if (!(insn
& (1 << 4))) {
6099 shift
= (insn
>> 7) & 0x1f;
6100 gen_arm_shift_im(cpu_T
[1], shiftop
, shift
, logic_cc
);
6102 rs
= (insn
>> 8) & 0xf;
6103 tmp
= load_reg(s
, rs
);
6104 gen_arm_shift_reg(cpu_T
[1], shiftop
, tmp
, logic_cc
);
6107 if (op1
!= 0x0f && op1
!= 0x0d) {
6108 rn
= (insn
>> 16) & 0xf;
6109 gen_movl_T0_reg(s
, rn
);
6111 rd
= (insn
>> 12) & 0xf;
6114 gen_op_andl_T0_T1();
6115 gen_movl_reg_T0(s
, rd
);
6117 gen_op_logic_T0_cc();
6120 gen_op_xorl_T0_T1();
6121 gen_movl_reg_T0(s
, rd
);
6123 gen_op_logic_T0_cc();
6126 if (set_cc
&& rd
== 15) {
6127 /* SUBS r15, ... is used for exception return. */
6130 gen_op_subl_T0_T1_cc();
6131 gen_exception_return(s
);
6134 gen_op_subl_T0_T1_cc();
6136 gen_op_subl_T0_T1();
6137 gen_movl_reg_T0(s
, rd
);
6142 gen_op_rsbl_T0_T1_cc();
6144 gen_op_rsbl_T0_T1();
6145 gen_movl_reg_T0(s
, rd
);
6149 gen_op_addl_T0_T1_cc();
6151 gen_op_addl_T0_T1();
6152 gen_movl_reg_T0(s
, rd
);
6156 gen_op_adcl_T0_T1_cc();
6159 gen_movl_reg_T0(s
, rd
);
6163 gen_op_sbcl_T0_T1_cc();
6166 gen_movl_reg_T0(s
, rd
);
6170 gen_op_rscl_T0_T1_cc();
6173 gen_movl_reg_T0(s
, rd
);
6177 gen_op_andl_T0_T1();
6178 gen_op_logic_T0_cc();
6183 gen_op_xorl_T0_T1();
6184 gen_op_logic_T0_cc();
6189 gen_op_subl_T0_T1_cc();
6194 gen_op_addl_T0_T1_cc();
6199 gen_movl_reg_T0(s
, rd
);
6201 gen_op_logic_T0_cc();
6204 if (logic_cc
&& rd
== 15) {
6205 /* MOVS r15, ... is used for exception return. */
6208 gen_op_movl_T0_T1();
6209 gen_exception_return(s
);
6211 gen_movl_reg_T1(s
, rd
);
6213 gen_op_logic_T1_cc();
6217 gen_op_bicl_T0_T1();
6218 gen_movl_reg_T0(s
, rd
);
6220 gen_op_logic_T0_cc();
6225 gen_movl_reg_T1(s
, rd
);
6227 gen_op_logic_T1_cc();
6231 /* other instructions */
6232 op1
= (insn
>> 24) & 0xf;
6236 /* multiplies, extra load/stores */
6237 sh
= (insn
>> 5) & 3;
6240 rd
= (insn
>> 16) & 0xf;
6241 rn
= (insn
>> 12) & 0xf;
6242 rs
= (insn
>> 8) & 0xf;
6244 op1
= (insn
>> 20) & 0xf;
6246 case 0: case 1: case 2: case 3: case 6:
6248 tmp
= load_reg(s
, rs
);
6249 tmp2
= load_reg(s
, rm
);
6250 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
6252 if (insn
& (1 << 22)) {
6253 /* Subtract (mls) */
6255 tmp2
= load_reg(s
, rn
);
6256 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
6258 } else if (insn
& (1 << 21)) {
6260 tmp2
= load_reg(s
, rn
);
6261 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6264 if (insn
& (1 << 20))
6266 store_reg(s
, rd
, tmp
);
6270 tmp
= load_reg(s
, rs
);
6271 tmp2
= load_reg(s
, rm
);
6272 if (insn
& (1 << 22))
6273 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6275 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
6276 if (insn
& (1 << 21)) /* mult accumulate */
6277 gen_addq(s
, tmp64
, rn
, rd
);
6278 if (!(insn
& (1 << 23))) { /* double accumulate */
6280 gen_addq_lo(s
, tmp64
, rn
);
6281 gen_addq_lo(s
, tmp64
, rd
);
6283 if (insn
& (1 << 20))
6284 gen_logicq_cc(tmp64
);
6285 gen_storeq_reg(s
, rn
, rd
, tmp64
);
6289 rn
= (insn
>> 16) & 0xf;
6290 rd
= (insn
>> 12) & 0xf;
6291 if (insn
& (1 << 23)) {
6292 /* load/store exclusive */
6293 op1
= (insn
>> 21) & 0x3;
6298 gen_movl_T1_reg(s
, rn
);
6300 if (insn
& (1 << 20)) {
6301 gen_helper_mark_exclusive(cpu_env
, cpu_T
[1]);
6304 tmp
= gen_ld32(addr
, IS_USER(s
));
6306 case 1: /* ldrexd */
6307 tmp
= gen_ld32(addr
, IS_USER(s
));
6308 store_reg(s
, rd
, tmp
);
6309 tcg_gen_addi_i32(addr
, addr
, 4);
6310 tmp
= gen_ld32(addr
, IS_USER(s
));
6313 case 2: /* ldrexb */
6314 tmp
= gen_ld8u(addr
, IS_USER(s
));
6316 case 3: /* ldrexh */
6317 tmp
= gen_ld16u(addr
, IS_USER(s
));
6322 store_reg(s
, rd
, tmp
);
6324 int label
= gen_new_label();
6326 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
6327 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0],
6329 tmp
= load_reg(s
,rm
);
6332 gen_st32(tmp
, addr
, IS_USER(s
));
6334 case 1: /* strexd */
6335 gen_st32(tmp
, addr
, IS_USER(s
));
6336 tcg_gen_addi_i32(addr
, addr
, 4);
6337 tmp
= load_reg(s
, rm
+ 1);
6338 gen_st32(tmp
, addr
, IS_USER(s
));
6340 case 2: /* strexb */
6341 gen_st8(tmp
, addr
, IS_USER(s
));
6343 case 3: /* strexh */
6344 gen_st16(tmp
, addr
, IS_USER(s
));
6349 gen_set_label(label
);
6350 gen_movl_reg_T0(s
, rd
);
6353 /* SWP instruction */
6356 /* ??? This is not really atomic. However we know
6357 we never have multiple CPUs running in parallel,
6358 so it is good enough. */
6359 addr
= load_reg(s
, rn
);
6360 tmp
= load_reg(s
, rm
);
6361 if (insn
& (1 << 22)) {
6362 tmp2
= gen_ld8u(addr
, IS_USER(s
));
6363 gen_st8(tmp
, addr
, IS_USER(s
));
6365 tmp2
= gen_ld32(addr
, IS_USER(s
));
6366 gen_st32(tmp
, addr
, IS_USER(s
));
6369 store_reg(s
, rd
, tmp2
);
6375 /* Misc load/store */
6376 rn
= (insn
>> 16) & 0xf;
6377 rd
= (insn
>> 12) & 0xf;
6378 addr
= load_reg(s
, rn
);
6379 if (insn
& (1 << 24))
6380 gen_add_datah_offset(s
, insn
, 0, addr
);
6382 if (insn
& (1 << 20)) {
6386 tmp
= gen_ld16u(addr
, IS_USER(s
));
6389 tmp
= gen_ld8s(addr
, IS_USER(s
));
6393 tmp
= gen_ld16s(addr
, IS_USER(s
));
6397 } else if (sh
& 2) {
6401 tmp
= load_reg(s
, rd
);
6402 gen_st32(tmp
, addr
, IS_USER(s
));
6403 tcg_gen_addi_i32(addr
, addr
, 4);
6404 tmp
= load_reg(s
, rd
+ 1);
6405 gen_st32(tmp
, addr
, IS_USER(s
));
6409 tmp
= gen_ld32(addr
, IS_USER(s
));
6410 store_reg(s
, rd
, tmp
);
6411 tcg_gen_addi_i32(addr
, addr
, 4);
6412 tmp
= gen_ld32(addr
, IS_USER(s
));
6416 address_offset
= -4;
6419 tmp
= load_reg(s
, rd
);
6420 gen_st16(tmp
, addr
, IS_USER(s
));
6423 /* Perform base writeback before the loaded value to
6424 ensure correct behavior with overlapping index registers.
6425 ldrd with base writeback is undefined if the
6426 destination and index registers overlap. */
6427 if (!(insn
& (1 << 24))) {
6428 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
6429 store_reg(s
, rn
, addr
);
6430 } else if (insn
& (1 << 21)) {
6432 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6433 store_reg(s
, rn
, addr
);
6438 /* Complete the load. */
6439 store_reg(s
, rd
, tmp
);
6448 if (insn
& (1 << 4)) {
6450 /* Armv6 Media instructions. */
6452 rn
= (insn
>> 16) & 0xf;
6453 rd
= (insn
>> 12) & 0xf;
6454 rs
= (insn
>> 8) & 0xf;
6455 switch ((insn
>> 23) & 3) {
6456 case 0: /* Parallel add/subtract. */
6457 op1
= (insn
>> 20) & 7;
6458 tmp
= load_reg(s
, rn
);
6459 tmp2
= load_reg(s
, rm
);
6460 sh
= (insn
>> 5) & 7;
6461 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
6463 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
6465 store_reg(s
, rd
, tmp
);
6468 if ((insn
& 0x00700020) == 0) {
6469 /* Halfword pack. */
6470 tmp
= load_reg(s
, rn
);
6471 tmp2
= load_reg(s
, rm
);
6472 shift
= (insn
>> 7) & 0x1f;
6473 if (insn
& (1 << 6)) {
6477 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
6478 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
6479 tcg_gen_ext16u_i32(tmp2
, tmp2
);
6483 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
6484 tcg_gen_ext16u_i32(tmp
, tmp
);
6485 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
6487 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6489 store_reg(s
, rd
, tmp
);
6490 } else if ((insn
& 0x00200020) == 0x00200000) {
6492 tmp
= load_reg(s
, rm
);
6493 shift
= (insn
>> 7) & 0x1f;
6494 if (insn
& (1 << 6)) {
6497 tcg_gen_sari_i32(tmp
, tmp
, shift
);
6499 tcg_gen_shli_i32(tmp
, tmp
, shift
);
6501 sh
= (insn
>> 16) & 0x1f;
6503 if (insn
& (1 << 22))
6504 gen_helper_usat(tmp
, tmp
, tcg_const_i32(sh
));
6506 gen_helper_ssat(tmp
, tmp
, tcg_const_i32(sh
));
6508 store_reg(s
, rd
, tmp
);
6509 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
6511 tmp
= load_reg(s
, rm
);
6512 sh
= (insn
>> 16) & 0x1f;
6514 if (insn
& (1 << 22))
6515 gen_helper_usat16(tmp
, tmp
, tcg_const_i32(sh
));
6517 gen_helper_ssat16(tmp
, tmp
, tcg_const_i32(sh
));
6519 store_reg(s
, rd
, tmp
);
6520 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
6522 tmp
= load_reg(s
, rn
);
6523 tmp2
= load_reg(s
, rm
);
6525 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
6526 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
6529 store_reg(s
, rd
, tmp
);
6530 } else if ((insn
& 0x000003e0) == 0x00000060) {
6531 tmp
= load_reg(s
, rm
);
6532 shift
= (insn
>> 10) & 3;
6533 /* ??? In many cases it's not necessary to do a
6534 rotate, a shift is sufficient. */
6536 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
6537 op1
= (insn
>> 20) & 7;
6539 case 0: gen_sxtb16(tmp
); break;
6540 case 2: gen_sxtb(tmp
); break;
6541 case 3: gen_sxth(tmp
); break;
6542 case 4: gen_uxtb16(tmp
); break;
6543 case 6: gen_uxtb(tmp
); break;
6544 case 7: gen_uxth(tmp
); break;
6545 default: goto illegal_op
;
6548 tmp2
= load_reg(s
, rn
);
6549 if ((op1
& 3) == 0) {
6550 gen_add16(tmp
, tmp2
);
6552 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6556 store_reg(s
, rd
, tmp
);
6557 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
6559 tmp
= load_reg(s
, rm
);
6560 if (insn
& (1 << 22)) {
6561 if (insn
& (1 << 7)) {
6565 gen_helper_rbit(tmp
, tmp
);
6568 if (insn
& (1 << 7))
6571 tcg_gen_bswap_i32(tmp
, tmp
);
6573 store_reg(s
, rd
, tmp
);
6578 case 2: /* Multiplies (Type 3). */
6579 tmp
= load_reg(s
, rm
);
6580 tmp2
= load_reg(s
, rs
);
6581 if (insn
& (1 << 20)) {
6582 /* Signed multiply most significant [accumulate]. */
6583 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
6584 if (insn
& (1 << 5))
6585 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
6586 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
6588 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
6590 tmp2
= load_reg(s
, rd
);
6591 if (insn
& (1 << 6)) {
6592 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6594 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6598 store_reg(s
, rn
, tmp
);
6600 if (insn
& (1 << 5))
6601 gen_swap_half(tmp2
);
6602 gen_smul_dual(tmp
, tmp2
);
6603 /* This addition cannot overflow. */
6604 if (insn
& (1 << 6)) {
6605 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
6607 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6610 if (insn
& (1 << 22)) {
6611 /* smlald, smlsld */
6612 tmp64
= tcg_temp_new_i64();
6613 tcg_gen_ext_i32_i64(tmp64
, tmp
);
6615 gen_addq(s
, tmp64
, rd
, rn
);
6616 gen_storeq_reg(s
, rd
, rn
, tmp64
);
6618 /* smuad, smusd, smlad, smlsd */
6621 tmp2
= load_reg(s
, rd
);
6622 gen_helper_add_setq(tmp
, tmp
, tmp2
);
6625 store_reg(s
, rn
, tmp
);
6630 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
6632 case 0: /* Unsigned sum of absolute differences. */
6634 tmp
= load_reg(s
, rm
);
6635 tmp2
= load_reg(s
, rs
);
6636 gen_helper_usad8(tmp
, tmp
, tmp2
);
6639 tmp2
= load_reg(s
, rd
);
6640 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
6643 store_reg(s
, rn
, tmp
);
6645 case 0x20: case 0x24: case 0x28: case 0x2c:
6646 /* Bitfield insert/clear. */
6648 shift
= (insn
>> 7) & 0x1f;
6649 i
= (insn
>> 16) & 0x1f;
6653 tcg_gen_movi_i32(tmp
, 0);
6655 tmp
= load_reg(s
, rm
);
6658 tmp2
= load_reg(s
, rd
);
6659 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << i
) - 1);
6662 store_reg(s
, rd
, tmp
);
6664 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6665 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6667 tmp
= load_reg(s
, rm
);
6668 shift
= (insn
>> 7) & 0x1f;
6669 i
= ((insn
>> 16) & 0x1f) + 1;
6674 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
6676 gen_sbfx(tmp
, shift
, i
);
6679 store_reg(s
, rd
, tmp
);
6689 /* Check for undefined extension instructions
6690 * per the ARM Bible IE:
6691 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6693 sh
= (0xf << 20) | (0xf << 4);
6694 if (op1
== 0x7 && ((insn
& sh
) == sh
))
6698 /* load/store byte/word */
6699 rn
= (insn
>> 16) & 0xf;
6700 rd
= (insn
>> 12) & 0xf;
6701 tmp2
= load_reg(s
, rn
);
6702 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
6703 if (insn
& (1 << 24))
6704 gen_add_data_offset(s
, insn
, tmp2
);
6705 if (insn
& (1 << 20)) {
6707 if (insn
& (1 << 22)) {
6708 tmp
= gen_ld8u(tmp2
, i
);
6710 tmp
= gen_ld32(tmp2
, i
);
6714 tmp
= load_reg(s
, rd
);
6715 if (insn
& (1 << 22))
6716 gen_st8(tmp
, tmp2
, i
);
6718 gen_st32(tmp
, tmp2
, i
);
6720 if (!(insn
& (1 << 24))) {
6721 gen_add_data_offset(s
, insn
, tmp2
);
6722 store_reg(s
, rn
, tmp2
);
6723 } else if (insn
& (1 << 21)) {
6724 store_reg(s
, rn
, tmp2
);
6728 if (insn
& (1 << 20)) {
6729 /* Complete the load. */
6733 store_reg(s
, rd
, tmp
);
6739 int j
, n
, user
, loaded_base
;
6741 /* load/store multiple words */
6742 /* XXX: store correct base if write back */
6744 if (insn
& (1 << 22)) {
6746 goto illegal_op
; /* only usable in supervisor mode */
6748 if ((insn
& (1 << 15)) == 0)
6751 rn
= (insn
>> 16) & 0xf;
6752 addr
= load_reg(s
, rn
);
6754 /* compute total size */
6756 TCGV_UNUSED(loaded_var
);
6759 if (insn
& (1 << i
))
6762 /* XXX: test invalid n == 0 case ? */
6763 if (insn
& (1 << 23)) {
6764 if (insn
& (1 << 24)) {
6766 tcg_gen_addi_i32(addr
, addr
, 4);
6768 /* post increment */
6771 if (insn
& (1 << 24)) {
6773 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6775 /* post decrement */
6777 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6782 if (insn
& (1 << i
)) {
6783 if (insn
& (1 << 20)) {
6785 tmp
= gen_ld32(addr
, IS_USER(s
));
6789 gen_helper_set_user_reg(tcg_const_i32(i
), tmp
);
6791 } else if (i
== rn
) {
6795 store_reg(s
, i
, tmp
);
6800 /* special case: r15 = PC + 8 */
6801 val
= (long)s
->pc
+ 4;
6803 tcg_gen_movi_i32(tmp
, val
);
6806 gen_helper_get_user_reg(tmp
, tcg_const_i32(i
));
6808 tmp
= load_reg(s
, i
);
6810 gen_st32(tmp
, addr
, IS_USER(s
));
6813 /* no need to add after the last transfer */
6815 tcg_gen_addi_i32(addr
, addr
, 4);
6818 if (insn
& (1 << 21)) {
6820 if (insn
& (1 << 23)) {
6821 if (insn
& (1 << 24)) {
6824 /* post increment */
6825 tcg_gen_addi_i32(addr
, addr
, 4);
6828 if (insn
& (1 << 24)) {
6831 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
6833 /* post decrement */
6834 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
6837 store_reg(s
, rn
, addr
);
6842 store_reg(s
, rn
, loaded_var
);
6844 if ((insn
& (1 << 22)) && !user
) {
6845 /* Restore CPSR from SPSR. */
6846 tmp
= load_cpu_field(spsr
);
6847 gen_set_cpsr(tmp
, 0xffffffff);
6849 s
->is_jmp
= DISAS_UPDATE
;
6858 /* branch (and link) */
6859 val
= (int32_t)s
->pc
;
6860 if (insn
& (1 << 24)) {
6862 tcg_gen_movi_i32(tmp
, val
);
6863 store_reg(s
, 14, tmp
);
6865 offset
= (((int32_t)insn
<< 8) >> 8);
6866 val
+= (offset
<< 2) + 4;
6874 if (disas_coproc_insn(env
, s
, insn
))
6879 gen_set_pc_im(s
->pc
);
6880 s
->is_jmp
= DISAS_SWI
;
6884 gen_set_condexec(s
);
6885 gen_set_pc_im(s
->pc
- 4);
6886 gen_exception(EXCP_UDEF
);
6887 s
->is_jmp
= DISAS_JUMP
;
6893 /* Return true if this is a Thumb-2 logical op. */
6895 thumb2_logic_op(int op)
6900 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6901 then set condition code flags based on the result of the operation.
6902 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6903 to the high bit of T1.
6904 Returns zero if the opcode is valid. */
6907 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6914 gen_op_andl_T0_T1();
6918 gen_op_bicl_T0_T1();
6931 gen_op_xorl_T0_T1();
6936 gen_op_addl_T0_T1_cc();
6938 gen_op_addl_T0_T1();
6942 gen_op_adcl_T0_T1_cc();
6948 gen_op_sbcl_T0_T1_cc();
6954 gen_op_subl_T0_T1_cc();
6956 gen_op_subl_T0_T1();
6960 gen_op_rsbl_T0_T1_cc();
6962 gen_op_rsbl_T0_T1();
6964 default: /* 5, 6, 7, 9, 12, 15. */
6968 gen_op_logic_T0_cc();
6970 gen_set_CF_bit31(cpu_T[1]);
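/* Usage sketch (added for illustration, not part of the original source):
   this mirrors the "data processing register constant shift" caller further
   below.  Operands are staged in T0/T1, the op is generated, and the result
   is written back from T0; a nonzero return means the opcode is invalid. */
#if 0
    gen_movl_T0_reg(s, rn);
    gen_movl_T1_reg(s, rm);
    gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
    if (gen_thumb2_data_op(s, op, conds, 0))
        goto illegal_op;
    gen_movl_reg_T0(s, rd);
#endif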
6975 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6977 static int disas_thumb2_insn(CPUState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
6979 uint32_t insn
, imm
, shift
, offset
;
6980 uint32_t rd
, rn
, rm
, rs
;
6991 if (!(arm_feature(env
, ARM_FEATURE_THUMB2
)
6992 || arm_feature (env
, ARM_FEATURE_M
))) {
6993 /* Thumb-1 cores may need to treat bl and blx as a pair of
6994 16-bit instructions to get correct prefetch abort behavior. */
6996 if ((insn
& (1 << 12)) == 0) {
6997 /* Second half of blx. */
6998 offset
= ((insn
& 0x7ff) << 1);
6999 tmp
= load_reg(s
, 14);
7000 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7001 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
7004 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7005 store_reg(s
, 14, tmp2
);
7009 if (insn
& (1 << 11)) {
7010 /* Second half of bl. */
7011 offset
= ((insn
& 0x7ff) << 1) | 1;
7012 tmp
= load_reg(s
, 14);
7013 tcg_gen_addi_i32(tmp
, tmp
, offset
);
7016 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
7017 store_reg(s
, 14, tmp2
);
7021 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
7022 /* Instruction spans a page boundary. Implement it as two
7023 16-bit instructions in case the second half causes an
7025 offset
= ((int32_t)insn
<< 21) >> 9;
7026 gen_op_movl_T0_im(s
->pc
+ 2 + offset
);
7027 gen_movl_reg_T0(s
, 14);
7030 /* Fall through to 32-bit decode. */
7033 insn
= lduw_code(s
->pc
);
7035 insn
|= (uint32_t)insn_hw1
<< 16;
7037 if ((insn
& 0xf800e800) != 0xf000e800) {
7041 rn
= (insn
>> 16) & 0xf;
7042 rs
= (insn
>> 12) & 0xf;
7043 rd
= (insn
>> 8) & 0xf;
7045 switch ((insn
>> 25) & 0xf) {
7046 case 0: case 1: case 2: case 3:
7047 /* 16-bit instructions. Should never happen. */
7050 if (insn
& (1 << 22)) {
7051 /* Other load/store, table branch. */
7052 if (insn
& 0x01200000) {
7053 /* Load/store doubleword. */
7056 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
7058 addr
= load_reg(s
, rn
);
7060 offset
= (insn
& 0xff) * 4;
7061 if ((insn
& (1 << 23)) == 0)
7063 if (insn
& (1 << 24)) {
7064 tcg_gen_addi_i32(addr
, addr
, offset
);
7067 if (insn
& (1 << 20)) {
7069 tmp
= gen_ld32(addr
, IS_USER(s
));
7070 store_reg(s
, rs
, tmp
);
7071 tcg_gen_addi_i32(addr
, addr
, 4);
7072 tmp
= gen_ld32(addr
, IS_USER(s
));
7073 store_reg(s
, rd
, tmp
);
7076 tmp
= load_reg(s
, rs
);
7077 gen_st32(tmp
, addr
, IS_USER(s
));
7078 tcg_gen_addi_i32(addr
, addr
, 4);
7079 tmp
= load_reg(s
, rd
);
7080 gen_st32(tmp
, addr
, IS_USER(s
));
7082 if (insn
& (1 << 21)) {
7083 /* Base writeback. */
7086 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
7087 store_reg(s
, rn
, addr
);
7091 } else if ((insn & (1 << 23)) == 0) {
7092 /* Load/store exclusive word. */
7093 gen_movl_T1_reg(s, rn);
7095 if (insn & (1 << 20)) {
7096 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7097 tmp = gen_ld32(addr, IS_USER(s));
7098 store_reg(s, rd, tmp);
7100 int label = gen_new_label();
7101 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7102 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7104 tmp = load_reg(s, rs);
7105 gen_st32(tmp, cpu_T[1], IS_USER(s));
7106 gen_set_label(label);
7107 gen_movl_reg_T0(s, rd);
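/* Added note: ldrex marks the address as exclusive via
   gen_helper_mark_exclusive; strex calls gen_helper_test_exclusive, skips
   the store over the conditional branch when the monitor check fails, and
   finally writes the status value left in T0 back to rd. */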
7109 } else if ((insn & (1 << 6)) == 0) {
7113 tcg_gen_movi_i32(addr, s->pc);
7115 addr = load_reg(s, rn);
7117 tmp = load_reg(s, rm);
7118 tcg_gen_add_i32(addr, addr, tmp);
7119 if (insn & (1 << 4)) {
7121 tcg_gen_add_i32(addr, addr, tmp);
7123 tmp = gen_ld16u(addr, IS_USER(s));
7126 tmp = gen_ld8u(addr, IS_USER(s));
7129 tcg_gen_shli_i32(tmp, tmp, 1);
7130 tcg_gen_addi_i32(tmp, tmp, s->pc);
7131 store_reg(s, 15, tmp);
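/* Added note: this is the table branch case (tbb/tbh).  A byte or halfword
   entry is loaded from the table at Rn + Rm (Rm added twice for tbh via the
   extra add above), doubled, and added to the pc to form the new pc. */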
7133 /* Load/store exclusive byte/halfword/doubleword. */
7134 /* ??? These are not really atomic. However we know
7135 we never have multiple CPUs running in parallel,
7136 so it is good enough. */
7137 op
= (insn
>> 4) & 0x3;
7138 /* Must use a global reg for the address because we have
7139 a conditional branch in the store instruction. */
7140 gen_movl_T1_reg(s
, rn
);
7142 if (insn
& (1 << 20)) {
7143 gen_helper_mark_exclusive(cpu_env
, addr
);
7146 tmp
= gen_ld8u(addr
, IS_USER(s
));
7149 tmp
= gen_ld16u(addr
, IS_USER(s
));
7152 tmp
= gen_ld32(addr
, IS_USER(s
));
7153 tcg_gen_addi_i32(addr
, addr
, 4);
7154 tmp2
= gen_ld32(addr
, IS_USER(s
));
7155 store_reg(s
, rd
, tmp2
);
7160 store_reg(s
, rs
, tmp
);
7162 int label
= gen_new_label();
7163 /* Must use a global that is not killed by the branch. */
7164 gen_helper_test_exclusive(cpu_T
[0], cpu_env
, addr
);
7165 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_T
[0], 0, label
);
7166 tmp
= load_reg(s
, rs
);
7169 gen_st8(tmp
, addr
, IS_USER(s
));
7172 gen_st16(tmp
, addr
, IS_USER(s
));
7175 gen_st32(tmp
, addr
, IS_USER(s
));
7176 tcg_gen_addi_i32(addr
, addr
, 4);
7177 tmp
= load_reg(s
, rd
);
7178 gen_st32(tmp
, addr
, IS_USER(s
));
7183 gen_set_label(label
);
7184 gen_movl_reg_T0(s
, rm
);
7188 /* Load/store multiple, RFE, SRS. */
7189 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
7190 /* Not available in user mode. */
7193 if (insn
& (1 << 20)) {
7195 addr
= load_reg(s
, rn
);
7196 if ((insn
& (1 << 24)) == 0)
7197 tcg_gen_addi_i32(addr
, addr
, -8);
7198 /* Load PC into tmp and CPSR into tmp2. */
7199 tmp
= gen_ld32(addr
, 0);
7200 tcg_gen_addi_i32(addr
, addr
, 4);
7201 tmp2
= gen_ld32(addr
, 0);
7202 if (insn
& (1 << 21)) {
7203 /* Base writeback. */
7204 if (insn
& (1 << 24)) {
7205 tcg_gen_addi_i32(addr
, addr
, 4);
7207 tcg_gen_addi_i32(addr
, addr
, -4);
7209 store_reg(s
, rn
, addr
);
7213 gen_rfe(s
, tmp
, tmp2
);
7217 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7218 addr
= load_reg(s
, 13);
7221 gen_helper_get_r13_banked(addr
, cpu_env
, tcg_const_i32(op
));
7223 if ((insn
& (1 << 24)) == 0) {
7224 tcg_gen_addi_i32(addr
, addr
, -8);
7226 tmp
= load_reg(s
, 14);
7227 gen_st32(tmp
, addr
, 0);
7228 tcg_gen_addi_i32(addr
, addr
, 4);
7230 gen_helper_cpsr_read(tmp
);
7231 gen_st32(tmp
, addr
, 0);
7232 if (insn
& (1 << 21)) {
7233 if ((insn
& (1 << 24)) == 0) {
7234 tcg_gen_addi_i32(addr
, addr
, -4);
7236 tcg_gen_addi_i32(addr
, addr
, 4);
7238 if (op
== (env
->uncached_cpsr
& CPSR_M
)) {
7239 store_reg(s
, 13, addr
);
7241 gen_helper_set_r13_banked(cpu_env
,
7242 tcg_const_i32(op
), addr
);
7250 /* Load/store multiple. */
7251 addr
= load_reg(s
, rn
);
7253 for (i
= 0; i
< 16; i
++) {
7254 if (insn
& (1 << i
))
7257 if (insn
& (1 << 24)) {
7258 tcg_gen_addi_i32(addr
, addr
, -offset
);
7261 for (i
= 0; i
< 16; i
++) {
7262 if ((insn
& (1 << i
)) == 0)
7264 if (insn
& (1 << 20)) {
7266 tmp
= gen_ld32(addr
, IS_USER(s
));
7270 store_reg(s
, i
, tmp
);
7274 tmp
= load_reg(s
, i
);
7275 gen_st32(tmp
, addr
, IS_USER(s
));
7277 tcg_gen_addi_i32(addr
, addr
, 4);
7279 if (insn
& (1 << 21)) {
7280 /* Base register writeback. */
7281 if (insn
& (1 << 24)) {
7282 tcg_gen_addi_i32(addr
, addr
, -offset
);
7284 /* Fault if writeback register is in register list. */
7285 if (insn
& (1 << rn
))
7287 store_reg(s
, rn
, addr
);
7294 case 5: /* Data processing register constant shift. */
7296 gen_op_movl_T0_im(0);
7298 gen_movl_T0_reg(s
, rn
);
7299 gen_movl_T1_reg(s
, rm
);
7300 op
= (insn
>> 21) & 0xf;
7301 shiftop
= (insn
>> 4) & 3;
7302 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7303 conds
= (insn
& (1 << 20)) != 0;
7304 logic_cc
= (conds
&& thumb2_logic_op(op
));
7305 gen_arm_shift_im(cpu_T
[1], shiftop
, shift
, logic_cc
);
7306 if (gen_thumb2_data_op(s
, op
, conds
, 0))
7309 gen_movl_reg_T0(s
, rd
);
7311 case 13: /* Misc data processing. */
7312 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
7313 if (op
< 4 && (insn
& 0xf000) != 0xf000)
7316 case 0: /* Register controlled shift. */
7317 tmp
= load_reg(s
, rn
);
7318 tmp2
= load_reg(s
, rm
);
7319 if ((insn
& 0x70) != 0)
7321 op
= (insn
>> 21) & 3;
7322 logic_cc
= (insn
& (1 << 20)) != 0;
7323 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
7326 store_reg(s
, rd
, tmp
);
7328 case 1: /* Sign/zero extend. */
7329 tmp
= load_reg(s
, rm
);
7330 shift
= (insn
>> 4) & 3;
7331 /* ??? In many cases it's not necessary to do a
7332 rotate, a shift is sufficient. */
7334 tcg_gen_rori_i32(tmp
, tmp
, shift
* 8);
7335 op
= (insn
>> 20) & 7;
7337 case 0: gen_sxth(tmp
); break;
7338 case 1: gen_uxth(tmp
); break;
7339 case 2: gen_sxtb16(tmp
); break;
7340 case 3: gen_uxtb16(tmp
); break;
7341 case 4: gen_sxtb(tmp
); break;
7342 case 5: gen_uxtb(tmp
); break;
7343 default: goto illegal_op
;
7346 tmp2
= load_reg(s
, rn
);
7347 if ((op
>> 1) == 1) {
7348 gen_add16(tmp
, tmp2
);
7350 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7354 store_reg(s
, rd
, tmp
);
7356 case 2: /* SIMD add/subtract. */
7357 op
= (insn
>> 20) & 7;
7358 shift
= (insn
>> 4) & 7;
7359 if ((op
& 3) == 3 || (shift
& 3) == 3)
7361 tmp
= load_reg(s
, rn
);
7362 tmp2
= load_reg(s
, rm
);
7363 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
7365 store_reg(s
, rd
, tmp
);
7367 case 3: /* Other data processing. */
7368 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
7370 /* Saturating add/subtract. */
7371 tmp
= load_reg(s
, rn
);
7372 tmp2
= load_reg(s
, rm
);
7374 gen_helper_double_saturate(tmp
, tmp
);
7376 gen_helper_sub_saturate(tmp
, tmp2
, tmp
);
7378 gen_helper_add_saturate(tmp
, tmp
, tmp2
);
7381 tmp
= load_reg(s
, rn
);
7383 case 0x0a: /* rbit */
7384 gen_helper_rbit(tmp
, tmp
);
7386 case 0x08: /* rev */
7387 tcg_gen_bswap_i32(tmp
, tmp
);
7389 case 0x09: /* rev16 */
7392 case 0x0b: /* revsh */
7395 case 0x10: /* sel */
7396 tmp2
= load_reg(s
, rm
);
7398 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUState
, GE
));
7399 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
7403 case 0x18: /* clz */
7404 gen_helper_clz(tmp
, tmp
);
7410 store_reg(s
, rd
, tmp
);
7412 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7413 op
= (insn
>> 4) & 0xf;
7414 tmp
= load_reg(s
, rn
);
7415 tmp2
= load_reg(s
, rm
);
7416 switch ((insn
>> 20) & 7) {
7417 case 0: /* 32 x 32 -> 32 */
7418 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
7421 tmp2
= load_reg(s
, rs
);
7423 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7425 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7429 case 1: /* 16 x 16 -> 32 */
7430 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7433 tmp2
= load_reg(s
, rs
);
7434 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7438 case 2: /* Dual multiply add. */
7439 case 4: /* Dual multiply subtract. */
7441 gen_swap_half(tmp2
);
7442 gen_smul_dual(tmp
, tmp2
);
7443 /* This addition cannot overflow. */
7444 if (insn
& (1 << 22)) {
7445 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7447 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7452 tmp2
= load_reg(s
, rs
);
7453 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7457 case 3: /* 32 * 16 -> 32msb */
7459 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
7462 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7463 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
7465 tcg_gen_trunc_i64_i32(tmp
, tmp64
);
7468 tmp2
= load_reg(s
, rs
);
7469 gen_helper_add_setq(tmp
, tmp
, tmp2
);
7473 case 5: case 6: /* 32 * 32 -> 32msb */
7474 gen_imull(tmp
, tmp2
);
7475 if (insn
& (1 << 5)) {
7476 gen_roundqd(tmp
, tmp2
);
7483 tmp2
= load_reg(s
, rs
);
7484 if (insn
& (1 << 21)) {
7485 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7487 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
7492 case 7: /* Unsigned sum of absolute differences. */
7493 gen_helper_usad8(tmp
, tmp
, tmp2
);
7496 tmp2
= load_reg(s
, rs
);
7497 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7502 store_reg(s
, rd
, tmp
);
7504 case 6: case 7: /* 64-bit multiply, Divide. */
7505 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
7506 tmp
= load_reg(s
, rn
);
7507 tmp2
= load_reg(s
, rm
);
7508 if ((op
& 0x50) == 0x10) {
7510 if (!arm_feature(env
, ARM_FEATURE_DIV
))
7513 gen_helper_udiv(tmp
, tmp
, tmp2
);
7515 gen_helper_sdiv(tmp
, tmp
, tmp2
);
7517 store_reg(s
, rd
, tmp
);
7518 } else if ((op
& 0xe) == 0xc) {
7519 /* Dual multiply accumulate long. */
7521 gen_swap_half(tmp2
);
7522 gen_smul_dual(tmp
, tmp2
);
7524 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
7526 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
7530 tmp64
= tcg_temp_new_i64();
7531 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7533 gen_addq(s
, tmp64
, rs
, rd
);
7534 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7537 /* Unsigned 64-bit multiply */
7538 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
7542 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
7544 tmp64
= tcg_temp_new_i64();
7545 tcg_gen_ext_i32_i64(tmp64
, tmp
);
7548 /* Signed 64-bit multiply */
7549 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
7554 gen_addq_lo(s
, tmp64
, rs
);
7555 gen_addq_lo(s
, tmp64
, rd
);
7556 } else if (op
& 0x40) {
7557 /* 64-bit accumulate. */
7558 gen_addq(s
, tmp64
, rs
, rd
);
7560 gen_storeq_reg(s
, rs
, rd
, tmp64
);
7565 case 6: case 7: case 14: case 15:
7567 if (((insn
>> 24) & 3) == 3) {
7568 /* Translate into the equivalent ARM encoding. */
7569 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4);
7570 if (disas_neon_data_insn(env
, s
, insn
))
7573 if (insn
& (1 << 28))
7575 if (disas_coproc_insn (env
, s
, insn
))
7579 case 8: case 9: case 10: case 11:
7580 if (insn
& (1 << 15)) {
7581 /* Branches, misc control. */
7582 if (insn
& 0x5000) {
7583 /* Unconditional branch. */
7584 /* signextend(hw1[10:0]) -> offset[:12]. */
7585 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
7586 /* hw1[10:0] -> offset[11:1]. */
7587 offset
|= (insn
& 0x7ff) << 1;
7588 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7589 offset[24:22] already have the same value because of the
7590 sign extension above. */
7591 offset
^= ((~insn
) & (1 << 13)) << 10;
7592 offset
^= ((~insn
) & (1 << 11)) << 11;
7594 if (insn
& (1 << 14)) {
7595 /* Branch and link. */
7596 gen_op_movl_T1_im(s
->pc
| 1);
7597 gen_movl_reg_T1(s
, 14);
7601 if (insn
& (1 << 12)) {
7606 offset
&= ~(uint32_t)2;
7607 gen_bx_im(s
, offset
);
7609 } else if (((insn
>> 23) & 7) == 7) {
7611 if (insn
& (1 << 13))
7614 if (insn
& (1 << 26)) {
7615 /* Secure monitor call (v6Z) */
7616 goto illegal_op
; /* not implemented. */
7618 op
= (insn
>> 20) & 7;
7620 case 0: /* msr cpsr. */
7622 tmp
= load_reg(s
, rn
);
7623 addr
= tcg_const_i32(insn
& 0xff);
7624 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
7629 case 1: /* msr spsr. */
7632 gen_movl_T0_reg(s
, rn
);
7633 if (gen_set_psr_T0(s
,
7634 msr_mask(env
, s
, (insn
>> 8) & 0xf, op
== 1),
7638 case 2: /* cps, nop-hint. */
7639 if (((insn
>> 8) & 7) == 0) {
7640 gen_nop_hint(s
, insn
& 0xff);
7642 /* Implemented as NOP in user mode. */
7647 if (insn
& (1 << 10)) {
7648 if (insn
& (1 << 7))
7650 if (insn
& (1 << 6))
7652 if (insn
& (1 << 5))
7654 if (insn
& (1 << 9))
7655 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
7657 if (insn
& (1 << 8)) {
7659 imm
|= (insn
& 0x1f);
7662 gen_op_movl_T0_im(imm
);
7663 gen_set_psr_T0(s
, offset
, 0);
7666 case 3: /* Special control operations. */
7667 op
= (insn
>> 4) & 0xf;
7670 gen_helper_clrex(cpu_env
);
7675 /* These execute as NOPs. */
7683 /* Trivial implementation equivalent to bx. */
7684 tmp
= load_reg(s
, rn
);
7687 case 5: /* Exception return. */
7688 /* Unpredictable in user mode. */
7690 case 6: /* mrs cpsr. */
7693 addr
= tcg_const_i32(insn
& 0xff);
7694 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
7696 gen_helper_cpsr_read(tmp
);
7698 store_reg(s
, rd
, tmp
);
7700 case 7: /* mrs spsr. */
7701 /* Not accessible in user mode. */
7702 if (IS_USER(s
) || IS_M(env
))
7704 tmp
= load_cpu_field(spsr
);
7705 store_reg(s
, rd
, tmp
);
7710 /* Conditional branch. */
7711 op
= (insn
>> 22) & 0xf;
7712 /* Generate a conditional jump to next instruction. */
7713 s
->condlabel
= gen_new_label();
7714 gen_test_cc(op
^ 1, s
->condlabel
);
7717 /* offset[11:1] = insn[10:0] */
7718 offset
= (insn
& 0x7ff) << 1;
7719 /* offset[17:12] = insn[21:16]. */
7720 offset
|= (insn
& 0x003f0000) >> 4;
7721 /* offset[31:20] = insn[26]. */
7722 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
7723 /* offset[18] = insn[13]. */
7724 offset
|= (insn
& (1 << 13)) << 5;
7725 /* offset[19] = insn[11]. */
7726 offset
|= (insn
& (1 << 11)) << 8;
7728 /* jump to the offset */
7729 gen_jmp(s
, s
->pc
+ offset
);
7732 /* Data processing immediate. */
7733 if (insn
& (1 << 25)) {
7734 if (insn
& (1 << 24)) {
7735 if (insn
& (1 << 20))
7737 /* Bitfield/Saturate. */
7738 op
= (insn
>> 21) & 7;
7740 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
7743 tcg_gen_movi_i32(tmp
, 0);
7745 tmp
= load_reg(s
, rn
);
7748 case 2: /* Signed bitfield extract. */
7750 if (shift
+ imm
> 32)
7753 gen_sbfx(tmp
, shift
, imm
);
7755 case 6: /* Unsigned bitfield extract. */
7757 if (shift
+ imm
> 32)
7760 gen_ubfx(tmp
, shift
, (1u << imm
) - 1);
7762 case 3: /* Bitfield insert/clear. */
7765 imm
= imm
+ 1 - shift
;
7767 tmp2
= load_reg(s
, rd
);
7768 gen_bfi(tmp
, tmp2
, tmp
, shift
, (1u << imm
) - 1);
7774 default: /* Saturate. */
7777 tcg_gen_sari_i32(tmp
, tmp
, shift
);
7779 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7781 tmp2
= tcg_const_i32(imm
);
7784 if ((op
& 1) && shift
== 0)
7785 gen_helper_usat16(tmp
, tmp
, tmp2
);
7787 gen_helper_usat(tmp
, tmp
, tmp2
);
7790 if ((op
& 1) && shift
== 0)
7791 gen_helper_ssat16(tmp
, tmp
, tmp2
);
7793 gen_helper_ssat(tmp
, tmp
, tmp2
);
7797 store_reg(s
, rd
, tmp
);
7799 imm
= ((insn
& 0x04000000) >> 15)
7800 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
7801 if (insn
& (1 << 22)) {
7802 /* 16-bit immediate. */
7803 imm
|= (insn
>> 4) & 0xf000;
7804 if (insn
& (1 << 23)) {
7806 tmp
= load_reg(s
, rd
);
7807 tcg_gen_ext16u_i32(tmp
, tmp
);
7808 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
7812 tcg_gen_movi_i32(tmp
, imm
);
7815 /* Add/sub 12-bit immediate. */
7817 offset
= s
->pc
& ~(uint32_t)3;
7818 if (insn
& (1 << 23))
7823 tcg_gen_movi_i32(tmp
, offset
);
7825 tmp
= load_reg(s
, rn
);
7826 if (insn
& (1 << 23))
7827 tcg_gen_subi_i32(tmp
, tmp
, imm
);
7829 tcg_gen_addi_i32(tmp
, tmp
, imm
);
7832 store_reg(s
, rd
, tmp
);
7835 int shifter_out = 0;
7836 /* modified 12-bit immediate. */
7837 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7838 imm = (insn & 0xff);
7841 /* Nothing to do. */
7843 case 1: /* 00XY00XY */
7846 case 2: /* XY00XY00 */
7850 case 3: /* XYXYXYXY */
7854 default: /* Rotated constant. */
7855 shift = (shift << 1) | (imm >> 7);
7857 imm = imm << (32 - shift);
7861 gen_op_movl_T1_im(imm);
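/* Added worked example (illustrative): with type 1 and imm8 = 0xXY the
   constant becomes 0x00XY00XY, type 2 gives 0xXY00XY00, and type 3 gives
   0xXYXYXYXY, as the case labels above indicate.  For the rotated-constant
   case, bit 7 of imm8 becomes the low bit of the rotation amount and the
   elided line presumably ORs in the fixed top bit (0x80) before the shift;
   e.g. a rotation of 8 with imm8 = 0x60 would yield 0xE0 << 24 =
   0xE0000000. */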
7862 rn
= (insn
>> 16) & 0xf;
7864 gen_op_movl_T0_im(0);
7866 gen_movl_T0_reg(s
, rn
);
7867 op
= (insn
>> 21) & 0xf;
7868 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
7871 rd
= (insn
>> 8) & 0xf;
7873 gen_movl_reg_T0(s
, rd
);
7878 case 12: /* Load/store single data item. */
7883 if ((insn
& 0x01100000) == 0x01000000) {
7884 if (disas_neon_ls_insn(env
, s
, insn
))
7892 /* s->pc has already been incremented by 4. */
7893 imm
= s
->pc
& 0xfffffffc;
7894 if (insn
& (1 << 23))
7895 imm
+= insn
& 0xfff;
7897 imm
-= insn
& 0xfff;
7898 tcg_gen_movi_i32(addr
, imm
);
7900 addr
= load_reg(s
, rn
);
7901 if (insn
& (1 << 23)) {
7902 /* Positive offset. */
7904 tcg_gen_addi_i32(addr
, addr
, imm
);
7906 op
= (insn
>> 8) & 7;
7909 case 0: case 8: /* Shifted Register. */
7910 shift
= (insn
>> 4) & 0xf;
7913 tmp
= load_reg(s
, rm
);
7915 tcg_gen_shli_i32(tmp
, tmp
, shift
);
7916 tcg_gen_add_i32(addr
, addr
, tmp
);
7919 case 4: /* Negative offset. */
7920 tcg_gen_addi_i32(addr
, addr
, -imm
);
7922 case 6: /* User privilege. */
7923 tcg_gen_addi_i32(addr
, addr
, imm
);
7926 case 1: /* Post-decrement. */
7929 case 3: /* Post-increment. */
7933 case 5: /* Pre-decrement. */
7936 case 7: /* Pre-increment. */
7937 tcg_gen_addi_i32(addr
, addr
, imm
);
7945 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
7946 if (insn
& (1 << 20)) {
7948 if (rs
== 15 && op
!= 2) {
7951 /* Memory hint. Implemented as NOP. */
7954 case 0: tmp
= gen_ld8u(addr
, user
); break;
7955 case 4: tmp
= gen_ld8s(addr
, user
); break;
7956 case 1: tmp
= gen_ld16u(addr
, user
); break;
7957 case 5: tmp
= gen_ld16s(addr
, user
); break;
7958 case 2: tmp
= gen_ld32(addr
, user
); break;
7959 default: goto illegal_op
;
7964 store_reg(s
, rs
, tmp
);
7971 tmp
= load_reg(s
, rs
);
7973 case 0: gen_st8(tmp
, addr
, user
); break;
7974 case 1: gen_st16(tmp
, addr
, user
); break;
7975 case 2: gen_st32(tmp
, addr
, user
); break;
7976 default: goto illegal_op
;
7980 tcg_gen_addi_i32(addr
, addr
, imm
);
7982 store_reg(s
, rn
, addr
);
7996 static void disas_thumb_insn(CPUState
*env
, DisasContext
*s
)
7998 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
8005 if (s
->condexec_mask
) {
8006 cond
= s
->condexec_cond
;
8007 s
->condlabel
= gen_new_label();
8008 gen_test_cc(cond
^ 1, s
->condlabel
);
8012 insn
= lduw_code(s
->pc
);
8015 switch (insn
>> 12) {
8018 op
= (insn
>> 11) & 3;
8021 rn
= (insn
>> 3) & 7;
8022 gen_movl_T0_reg(s
, rn
);
8023 if (insn
& (1 << 10)) {
8025 gen_op_movl_T1_im((insn
>> 6) & 7);
8028 rm
= (insn
>> 6) & 7;
8029 gen_movl_T1_reg(s
, rm
);
8031 if (insn
& (1 << 9)) {
8032 if (s
->condexec_mask
)
8033 gen_op_subl_T0_T1();
8035 gen_op_subl_T0_T1_cc();
8037 if (s
->condexec_mask
)
8038 gen_op_addl_T0_T1();
8040 gen_op_addl_T0_T1_cc();
8042 gen_movl_reg_T0(s
, rd
);
8044 /* shift immediate */
8045 rm
= (insn
>> 3) & 7;
8046 shift
= (insn
>> 6) & 0x1f;
8047 tmp
= load_reg(s
, rm
);
8048 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
8049 if (!s
->condexec_mask
)
8051 store_reg(s
, rd
, tmp
);
8055 /* arithmetic large immediate */
8056 op
= (insn
>> 11) & 3;
8057 rd
= (insn
>> 8) & 0x7;
8059 gen_op_movl_T0_im(insn
& 0xff);
8061 gen_movl_T0_reg(s
, rd
);
8062 gen_op_movl_T1_im(insn
& 0xff);
8066 if (!s
->condexec_mask
)
8067 gen_op_logic_T0_cc();
8070 gen_op_subl_T0_T1_cc();
8073 if (s
->condexec_mask
)
8074 gen_op_addl_T0_T1();
8076 gen_op_addl_T0_T1_cc();
8079 if (s
->condexec_mask
)
8080 gen_op_subl_T0_T1();
8082 gen_op_subl_T0_T1_cc();
8086 gen_movl_reg_T0(s
, rd
);
8089 if (insn
& (1 << 11)) {
8090 rd
= (insn
>> 8) & 7;
8091 /* load pc-relative. Bit 1 of PC is ignored. */
8092 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
8093 val
&= ~(uint32_t)2;
8095 tcg_gen_movi_i32(addr
, val
);
8096 tmp
= gen_ld32(addr
, IS_USER(s
));
8098 store_reg(s
, rd
, tmp
);
8101 if (insn
& (1 << 10)) {
8102 /* data processing extended or blx */
8103 rd
= (insn
& 7) | ((insn
>> 4) & 8);
8104 rm
= (insn
>> 3) & 0xf;
8105 op
= (insn
>> 8) & 3;
8108 gen_movl_T0_reg(s
, rd
);
8109 gen_movl_T1_reg(s
, rm
);
8110 gen_op_addl_T0_T1();
8111 gen_movl_reg_T0(s
, rd
);
8114 gen_movl_T0_reg(s
, rd
);
8115 gen_movl_T1_reg(s
, rm
);
8116 gen_op_subl_T0_T1_cc();
8118 case 2: /* mov/cpy */
8119 gen_movl_T0_reg(s
, rm
);
8120 gen_movl_reg_T0(s
, rd
);
8122 case 3:/* branch [and link] exchange thumb register */
8123 tmp
= load_reg(s
, rm
);
8124 if (insn
& (1 << 7)) {
8125 val
= (uint32_t)s
->pc
| 1;
8127 tcg_gen_movi_i32(tmp2
, val
);
8128 store_reg(s
, 14, tmp2
);
8136 /* data processing register */
8138 rm
= (insn
>> 3) & 7;
8139 op
= (insn
>> 6) & 0xf;
8140 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
8141 /* the shift/rotate ops want the operands backwards */
8150 if (op
== 9) /* neg */
8151 gen_op_movl_T0_im(0);
8152 else if (op
!= 0xf) /* mvn doesn't read its first operand */
8153 gen_movl_T0_reg(s
, rd
);
8155 gen_movl_T1_reg(s
, rm
);
8158 gen_op_andl_T0_T1();
8159 if (!s
->condexec_mask
)
8160 gen_op_logic_T0_cc();
8163 gen_op_xorl_T0_T1();
8164 if (!s
->condexec_mask
)
8165 gen_op_logic_T0_cc();
8168 if (s
->condexec_mask
) {
8169 gen_helper_shl(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8171 gen_helper_shl_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8172 gen_op_logic_T1_cc();
8176 if (s
->condexec_mask
) {
8177 gen_helper_shr(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8179 gen_helper_shr_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8180 gen_op_logic_T1_cc();
8184 if (s
->condexec_mask
) {
8185 gen_helper_sar(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8187 gen_helper_sar_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8188 gen_op_logic_T1_cc();
8192 if (s
->condexec_mask
)
8195 gen_op_adcl_T0_T1_cc();
8198 if (s
->condexec_mask
)
8201 gen_op_sbcl_T0_T1_cc();
8204 if (s
->condexec_mask
) {
8205 gen_helper_ror(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8207 gen_helper_ror_cc(cpu_T
[1], cpu_T
[1], cpu_T
[0]);
8208 gen_op_logic_T1_cc();
8212 gen_op_andl_T0_T1();
8213 gen_op_logic_T0_cc();
8217 if (s
->condexec_mask
)
8218 tcg_gen_neg_i32(cpu_T
[0], cpu_T
[1]);
8220 gen_op_subl_T0_T1_cc();
8223 gen_op_subl_T0_T1_cc();
8227 gen_op_addl_T0_T1_cc();
8232 if (!s
->condexec_mask
)
8233 gen_op_logic_T0_cc();
8236 gen_op_mull_T0_T1();
8237 if (!s
->condexec_mask
)
8238 gen_op_logic_T0_cc();
8241 gen_op_bicl_T0_T1();
8242 if (!s
->condexec_mask
)
8243 gen_op_logic_T0_cc();
8247 if (!s
->condexec_mask
)
8248 gen_op_logic_T1_cc();
8255 gen_movl_reg_T1(s
, rm
);
8257 gen_movl_reg_T0(s
, rd
);
8262 /* load/store register offset. */
8264 rn
= (insn
>> 3) & 7;
8265 rm
= (insn
>> 6) & 7;
8266 op
= (insn
>> 9) & 7;
8267 addr
= load_reg(s
, rn
);
8268 tmp
= load_reg(s
, rm
);
8269 tcg_gen_add_i32(addr
, addr
, tmp
);
8272 if (op
< 3) /* store */
8273 tmp
= load_reg(s
, rd
);
8277 gen_st32(tmp
, addr
, IS_USER(s
));
8280 gen_st16(tmp
, addr
, IS_USER(s
));
8283 gen_st8(tmp
, addr
, IS_USER(s
));
8286 tmp
= gen_ld8s(addr
, IS_USER(s
));
8289 tmp
= gen_ld32(addr
, IS_USER(s
));
8292 tmp
= gen_ld16u(addr
, IS_USER(s
));
8295 tmp
= gen_ld8u(addr
, IS_USER(s
));
8298 tmp
= gen_ld16s(addr
, IS_USER(s
));
8301 if (op
>= 3) /* load */
8302 store_reg(s
, rd
, tmp
);
8307 /* load/store word immediate offset */
8309 rn
= (insn
>> 3) & 7;
8310 addr
= load_reg(s
, rn
);
8311 val
= (insn
>> 4) & 0x7c;
8312 tcg_gen_addi_i32(addr
, addr
, val
);
8314 if (insn
& (1 << 11)) {
8316 tmp
= gen_ld32(addr
, IS_USER(s
));
8317 store_reg(s
, rd
, tmp
);
8320 tmp
= load_reg(s
, rd
);
8321 gen_st32(tmp
, addr
, IS_USER(s
));
8327 /* load/store byte immediate offset */
8329 rn
= (insn
>> 3) & 7;
8330 addr
= load_reg(s
, rn
);
8331 val
= (insn
>> 6) & 0x1f;
8332 tcg_gen_addi_i32(addr
, addr
, val
);
8334 if (insn
& (1 << 11)) {
8336 tmp
= gen_ld8u(addr
, IS_USER(s
));
8337 store_reg(s
, rd
, tmp
);
8340 tmp
= load_reg(s
, rd
);
8341 gen_st8(tmp
, addr
, IS_USER(s
));
8347 /* load/store halfword immediate offset */
8349 rn
= (insn
>> 3) & 7;
8350 addr
= load_reg(s
, rn
);
8351 val
= (insn
>> 5) & 0x3e;
8352 tcg_gen_addi_i32(addr
, addr
, val
);
8354 if (insn
& (1 << 11)) {
8356 tmp
= gen_ld16u(addr
, IS_USER(s
));
8357 store_reg(s
, rd
, tmp
);
8360 tmp
= load_reg(s
, rd
);
8361 gen_st16(tmp
, addr
, IS_USER(s
));
8367 /* load/store from stack */
8368 rd
= (insn
>> 8) & 7;
8369 addr
= load_reg(s
, 13);
8370 val
= (insn
& 0xff) * 4;
8371 tcg_gen_addi_i32(addr
, addr
, val
);
8373 if (insn
& (1 << 11)) {
8375 tmp
= gen_ld32(addr
, IS_USER(s
));
8376 store_reg(s
, rd
, tmp
);
8379 tmp
= load_reg(s
, rd
);
8380 gen_st32(tmp
, addr
, IS_USER(s
));
8386 /* add to high reg */
8387 rd
= (insn
>> 8) & 7;
8388 if (insn
& (1 << 11)) {
8390 tmp
= load_reg(s
, 13);
8392 /* PC. bit 1 is ignored. */
8394 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
8396 val
= (insn
& 0xff) * 4;
8397 tcg_gen_addi_i32(tmp
, tmp
, val
);
8398 store_reg(s
, rd
, tmp
);
8403 op
= (insn
>> 8) & 0xf;
8406 /* adjust stack pointer */
8407 tmp
= load_reg(s
, 13);
8408 val
= (insn
& 0x7f) * 4;
8409 if (insn
& (1 << 7))
8410 val
= -(int32_t)val
;
8411 tcg_gen_addi_i32(tmp
, tmp
, val
);
8412 store_reg(s
, 13, tmp
);
8415 case 2: /* sign/zero extend. */
8418 rm
= (insn
>> 3) & 7;
8419 tmp
= load_reg(s
, rm
);
8420 switch ((insn
>> 6) & 3) {
8421 case 0: gen_sxth(tmp
); break;
8422 case 1: gen_sxtb(tmp
); break;
8423 case 2: gen_uxth(tmp
); break;
8424 case 3: gen_uxtb(tmp
); break;
8426 store_reg(s
, rd
, tmp
);
8428 case 4: case 5: case 0xc: case 0xd:
8430 addr
= load_reg(s
, 13);
8431 if (insn
& (1 << 8))
8435 for (i
= 0; i
< 8; i
++) {
8436 if (insn
& (1 << i
))
8439 if ((insn
& (1 << 11)) == 0) {
8440 tcg_gen_addi_i32(addr
, addr
, -offset
);
8442 for (i
= 0; i
< 8; i
++) {
8443 if (insn
& (1 << i
)) {
8444 if (insn
& (1 << 11)) {
8446 tmp
= gen_ld32(addr
, IS_USER(s
));
8447 store_reg(s
, i
, tmp
);
8450 tmp
= load_reg(s
, i
);
8451 gen_st32(tmp
, addr
, IS_USER(s
));
8453 /* advance to the next address. */
8454 tcg_gen_addi_i32(addr
, addr
, 4);
8458 if (insn
& (1 << 8)) {
8459 if (insn
& (1 << 11)) {
8461 tmp
= gen_ld32(addr
, IS_USER(s
));
8462 /* don't set the pc until the rest of the instruction
8466 tmp
= load_reg(s
, 14);
8467 gen_st32(tmp
, addr
, IS_USER(s
));
8469 tcg_gen_addi_i32(addr
, addr
, 4);
8471 if ((insn
& (1 << 11)) == 0) {
8472 tcg_gen_addi_i32(addr
, addr
, -offset
);
8474 /* write back the new stack pointer */
8475 store_reg(s
, 13, addr
);
8476 /* set the new PC value */
8477 if ((insn
& 0x0900) == 0x0900)
8481 case 1: case 3: case 9: case 11: /* czb */
8483 tmp = load_reg(s, rm);
8484 s->condlabel = gen_new_label();
8486 if (insn & (1 << 11))
8487 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8489 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8491 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8492 val = (uint32_t)s->pc + 2;
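/* Added note: these are the compare-and-branch-on-(non)zero encodings
   (cbz/cbnz).  Bit 11 selects cbnz, so the conditional branch generated
   above falls through to the jump only when the register is nonzero (cbnz)
   or zero (cbz).  The offset assembled here is already in bytes (i:imm5
   doubled); the elided lines add it to val and emit the jump. */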
8497 case 15: /* IT, nop-hint. */
8498 if ((insn
& 0xf) == 0) {
8499 gen_nop_hint(s
, (insn
>> 4) & 0xf);
8503 s
->condexec_cond
= (insn
>> 4) & 0xe;
8504 s
->condexec_mask
= insn
& 0x1f;
8505 /* No actual code generated for this insn, just setup state. */
8508 case 0xe: /* bkpt */
8509 gen_set_condexec(s
);
8510 gen_set_pc_im(s
->pc
- 2);
8511 gen_exception(EXCP_BKPT
);
8512 s
->is_jmp
= DISAS_JUMP
;
8517 rn
= (insn
>> 3) & 0x7;
8519 tmp
= load_reg(s
, rn
);
8520 switch ((insn
>> 6) & 3) {
8521 case 0: tcg_gen_bswap_i32(tmp
, tmp
); break;
8522 case 1: gen_rev16(tmp
); break;
8523 case 3: gen_revsh(tmp
); break;
8524 default: goto illegal_op
;
8526 store_reg(s
, rd
, tmp
);
8534 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
8537 addr
= tcg_const_i32(16);
8538 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8542 addr
= tcg_const_i32(17);
8543 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8547 if (insn
& (1 << 4))
8548 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
8552 val
= ((insn
& 7) << 6) & shift
;
8553 gen_op_movl_T0_im(val
);
8554 gen_set_psr_T0(s
, shift
, 0);
8564 /* load/store multiple */
8565 rn
= (insn
>> 8) & 0x7;
8566 addr
= load_reg(s
, rn
);
8567 for (i
= 0; i
< 8; i
++) {
8568 if (insn
& (1 << i
)) {
8569 if (insn
& (1 << 11)) {
8571 tmp
= gen_ld32(addr
, IS_USER(s
));
8572 store_reg(s
, i
, tmp
);
8575 tmp
= load_reg(s
, i
);
8576 gen_st32(tmp
, addr
, IS_USER(s
));
8578 /* advance to the next address */
8579 tcg_gen_addi_i32(addr
, addr
, 4);
8582 /* Base register writeback. */
8583 if ((insn
& (1 << rn
)) == 0) {
8584 store_reg(s
, rn
, addr
);
8591 /* conditional branch or swi */
8592 cond
= (insn
>> 8) & 0xf;
8598 gen_set_condexec(s
);
8599 gen_set_pc_im(s
->pc
);
8600 s
->is_jmp
= DISAS_SWI
;
8603 /* generate a conditional jump to next instruction */
8604 s
->condlabel
= gen_new_label();
8605 gen_test_cc(cond
^ 1, s
->condlabel
);
8607 gen_movl_T1_reg(s
, 15);
8609 /* jump to the offset */
8610 val
= (uint32_t)s
->pc
+ 2;
8611 offset
= ((int32_t)insn
<< 24) >> 24;
8617 if (insn
& (1 << 11)) {
8618 if (disas_thumb2_insn(env
, s
, insn
))
8622 /* unconditional branch */
8623 val
= (uint32_t)s
->pc
;
8624 offset
= ((int32_t)insn
<< 21) >> 21;
8625 val
+= (offset
<< 1) + 2;
8630 if (disas_thumb2_insn(env
, s
, insn
))
8636 gen_set_condexec(s
);
8637 gen_set_pc_im(s
->pc
- 4);
8638 gen_exception(EXCP_UDEF
);
8639 s
->is_jmp
= DISAS_JUMP
;
8643 gen_set_condexec(s
);
8644 gen_set_pc_im(s
->pc
- 2);
8645 gen_exception(EXCP_UDEF
);
8646 s
->is_jmp
= DISAS_JUMP
;
8649 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8650 basic block 'tb'. If search_pc is TRUE, also generate PC
8651 information for each intermediate instruction. */
8652 static inline void gen_intermediate_code_internal(CPUState
*env
,
8653 TranslationBlock
*tb
,
8656 DisasContext dc1
, *dc
= &dc1
;
8658 uint16_t *gen_opc_end
;
8660 target_ulong pc_start
;
8661 uint32_t next_page_start
;
    /* generate intermediate code */
    memset(temps, 0, sizeof(temps));

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
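    /* Two magic address ranges are intercepted before normal decode: in
       user-only mode, jumps into the high "kernel page" at 0xffff0000 (where
       the Linux kernel exposes its user helpers) raise EXCP_KERNEL_TRAP; on
       M-profile cores, addresses at or above 0xfffffff0 are the special
       exception-return values loaded into the PC and raise
       EXCP_EXCEPTION_EXIT instead of being executed. */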
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
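        /* When search_pc is set, record the guest PC and instruction count
           for each op generated below; the restore path (see gen_pc_load at
           the end of this file) uses these tables to map a position inside
           the TB back to the guest PC that produced it. */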
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
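        /* Inside a Thumb IT block each decoded instruction consumes one bit
           of the mask: bit 4 of condexec_mask supplies the low bit of the
           condition for the next insn, the mask then shifts left one place,
           and once it reaches zero the IT block has ended and the condition
           is cleared. */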
        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
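        /* Normal (non-singlestep) TB epilogue: dc->is_jmp selects how the
           block ends.  A plain fall-through chains straight to the next TB
           with gen_goto_tb; computed jumps, exceptions and state changes
           instead return to the main loop so the next TB is looked up in the
           hash table; a pending SWI raises EXCP_SWI here.  If a conditional
           instruction was skipped, its "not taken" path is closed off last by
           branching to the following PC. */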
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
    }
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
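/* gen_intermediate_code() is the normal translation entry point;
   gen_intermediate_code_pc() retranslates an existing TB with search_pc set
   so that the gen_opc_pc[] / gen_opc_instr_start[] tables are rebuilt, which
   is what allows the guest PC to be recovered at a given point in the TB. */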
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
    uint32_t psr;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
}
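/* Called on the retranslation path (search_pc set): restore the guest PC for
   the op at pc_pos.  For ARM the only per-op state recorded is the PC, so
   r15 is the only register written back here. */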
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}