[qemu/ar7.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "internals.h"
29 #include "disas/disas.h"
30 #include "tcg-op.h"
31 #include "qemu/log.h"
32 #include "qemu/bitops.h"
33 #include "arm_ldst.h"
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
38 #include "trace-tcg.h"
40 #define CONFIG_ALIGNMENT_EXCEPTIONS 1
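/* When defined, gen_alignment_check() below emits an inline alignment test
 * and raises an alignment exception on misaligned addresses; without it the
 * check compiles to nothing. */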
42 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
43 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
44 /* currently all emulated v5 cores are also v5TE, so don't bother */
45 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
46 #define ENABLE_ARCH_5J 0
47 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
48 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
49 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
50 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
51 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
53 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
55 #include "translate.h"
57 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) 1
59 #else
60 #define IS_USER(s) (s->user)
61 #endif
63 TCGv_ptr cpu_env;
64 /* We reuse the same 64-bit temporaries for efficiency. */
65 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
66 static TCGv_i32 cpu_R[16];
67 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
68 TCGv_i64 cpu_exclusive_addr;
69 TCGv_i64 cpu_exclusive_val;
70 #ifdef CONFIG_USER_ONLY
71 TCGv_i64 cpu_exclusive_test;
72 TCGv_i32 cpu_exclusive_info;
73 #endif
75 /* FIXME: These should be removed. */
76 static TCGv_i32 cpu_F0s, cpu_F1s;
77 static TCGv_i64 cpu_F0d, cpu_F1d;
79 #include "exec/gen-icount.h"
81 static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
85 /* initialize TCG globals. */
86 void arm_translate_init(void)
88 int i;
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
94 offsetof(CPUARMState, regs[i]),
95 regnames[i]);
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
106 #ifdef CONFIG_USER_ONLY
107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
111 #endif
113 a64_translate_init();
116 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
118 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
119 * insns:
120 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
121 * otherwise, access as if at PL0.
123 switch (s->mmu_idx) {
124 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
125 case ARMMMUIdx_S12NSE0:
126 case ARMMMUIdx_S12NSE1:
127 return ARMMMUIdx_S12NSE0;
128 case ARMMMUIdx_S1E3:
129 case ARMMMUIdx_S1SE0:
130 case ARMMMUIdx_S1SE1:
131 return ARMMMUIdx_S1SE0;
132 case ARMMMUIdx_S2NS:
133 default:
134 g_assert_not_reached();
138 static inline TCGv_i32 load_cpu_offset(int offset)
140 TCGv_i32 tmp = tcg_temp_new_i32();
141 tcg_gen_ld_i32(tmp, cpu_env, offset);
142 return tmp;
145 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
147 static inline void store_cpu_offset(TCGv_i32 var, int offset)
149 tcg_gen_st_i32(var, cpu_env, offset);
150 tcg_temp_free_i32(var);
153 #define store_cpu_field(var, name) \
154 store_cpu_offset(var, offsetof(CPUARMState, name))
156 /* Set a variable to the value of a CPU register. */
157 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
159 if (reg == 15) {
160 uint32_t addr;
161 /* normally, since s->pc has already been advanced past this insn, we only need to add one more insn's length */
162 if (s->thumb)
163 addr = (long)s->pc + 2;
164 else
165 addr = (long)s->pc + 4;
166 tcg_gen_movi_i32(var, addr);
167 } else {
168 tcg_gen_mov_i32(var, cpu_R[reg]);
172 /* Create a new temporary and set it to the value of a CPU register. */
173 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
175 TCGv_i32 tmp = tcg_temp_new_i32();
176 load_reg_var(s, tmp, reg);
177 return tmp;
180 /* Set a CPU register. The source must be a temporary and will be
181 marked as dead. */
182 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
184 if (reg == 15) {
185 tcg_gen_andi_i32(var, var, ~1);
186 s->is_jmp = DISAS_JUMP;
188 tcg_gen_mov_i32(cpu_R[reg], var);
189 tcg_temp_free_i32(var);
192 /* Value extensions. */
193 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
194 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
195 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
196 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
198 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
199 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
202 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
204 TCGv_i32 tmp_mask = tcg_const_i32(mask);
205 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
206 tcg_temp_free_i32(tmp_mask);
208 /* Set NZCV flags from the high 4 bits of var. */
209 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
211 static void gen_exception_internal(int excp)
213 TCGv_i32 tcg_excp = tcg_const_i32(excp);
215 assert(excp_is_internal(excp));
216 gen_helper_exception_internal(cpu_env, tcg_excp);
217 tcg_temp_free_i32(tcg_excp);
220 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
222 TCGv_i32 tcg_excp = tcg_const_i32(excp);
223 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
224 TCGv_i32 tcg_el = tcg_const_i32(target_el);
226 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
227 tcg_syn, tcg_el);
229 tcg_temp_free_i32(tcg_el);
230 tcg_temp_free_i32(tcg_syn);
231 tcg_temp_free_i32(tcg_excp);
234 static void gen_ss_advance(DisasContext *s)
236 /* If the singlestep state is Active-not-pending, advance to
237 * Active-pending.
239 if (s->ss_active) {
240 s->pstate_ss = 0;
241 gen_helper_clear_pstate_ss(cpu_env);
245 static void gen_step_complete_exception(DisasContext *s)
247 /* We have just completed a single step of an insn. Move from Active-not-pending
248 * to Active-pending, and then also take the swstep exception.
249 * This corresponds to making the (IMPDEF) choice to prioritize
250 * swstep exceptions over asynchronous exceptions taken to an exception
251 * level where debug is disabled. This choice has the advantage that
252 * we do not need to maintain internal state corresponding to the
253 * ISV/EX syndrome bits between completion of the step and generation
254 * of the exception, and our syndrome information is always correct.
256 gen_ss_advance(s);
257 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
258 default_exception_el(s));
259 s->is_jmp = DISAS_EXC;
262 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
264 TCGv_i32 tmp1 = tcg_temp_new_i32();
265 TCGv_i32 tmp2 = tcg_temp_new_i32();
266 tcg_gen_ext16s_i32(tmp1, a);
267 tcg_gen_ext16s_i32(tmp2, b);
268 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
269 tcg_temp_free_i32(tmp2);
270 tcg_gen_sari_i32(a, a, 16);
271 tcg_gen_sari_i32(b, b, 16);
272 tcg_gen_mul_i32(b, b, a);
273 tcg_gen_mov_i32(a, tmp1);
274 tcg_temp_free_i32(tmp1);
277 /* Byteswap each halfword. */
278 static void gen_rev16(TCGv_i32 var)
280 TCGv_i32 tmp = tcg_temp_new_i32();
281 tcg_gen_shri_i32(tmp, var, 8);
282 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
283 tcg_gen_shli_i32(var, var, 8);
284 tcg_gen_andi_i32(var, var, 0xff00ff00);
285 tcg_gen_or_i32(var, var, tmp);
286 tcg_temp_free_i32(tmp);
289 /* Byteswap low halfword and sign extend. */
290 static void gen_revsh(TCGv_i32 var)
292 tcg_gen_ext16u_i32(var, var);
293 tcg_gen_bswap16_i32(var, var);
294 tcg_gen_ext16s_i32(var, var);
297 /* Unsigned bitfield extract. */
298 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
300 if (shift)
301 tcg_gen_shri_i32(var, var, shift);
302 tcg_gen_andi_i32(var, var, mask);
305 /* Signed bitfield extract. */
306 static void gen_sbfx(TCGv_i32 var, int shift, int width)
308 uint32_t signbit;
310 if (shift)
311 tcg_gen_sari_i32(var, var, shift);
312 if (shift + width < 32) {
313 signbit = 1u << (width - 1);
314 tcg_gen_andi_i32(var, var, (1u << width) - 1);
315 tcg_gen_xori_i32(var, var, signbit);
316 tcg_gen_subi_i32(var, var, signbit);
320 /* Return (b << 32) + a. Mark inputs as dead. */
321 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
323 TCGv_i64 tmp64 = tcg_temp_new_i64();
325 tcg_gen_extu_i32_i64(tmp64, b);
326 tcg_temp_free_i32(b);
327 tcg_gen_shli_i64(tmp64, tmp64, 32);
328 tcg_gen_add_i64(a, tmp64, a);
330 tcg_temp_free_i64(tmp64);
331 return a;
334 /* Return (b << 32) - a. Mark inputs as dead. */
335 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
337 TCGv_i64 tmp64 = tcg_temp_new_i64();
339 tcg_gen_extu_i32_i64(tmp64, b);
340 tcg_temp_free_i32(b);
341 tcg_gen_shli_i64(tmp64, tmp64, 32);
342 tcg_gen_sub_i64(a, tmp64, a);
344 tcg_temp_free_i64(tmp64);
345 return a;
348 /* 32x32->64 multiply. Marks inputs as dead. */
349 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
351 TCGv_i32 lo = tcg_temp_new_i32();
352 TCGv_i32 hi = tcg_temp_new_i32();
353 TCGv_i64 ret;
355 tcg_gen_mulu2_i32(lo, hi, a, b);
356 tcg_temp_free_i32(a);
357 tcg_temp_free_i32(b);
359 ret = tcg_temp_new_i64();
360 tcg_gen_concat_i32_i64(ret, lo, hi);
361 tcg_temp_free_i32(lo);
362 tcg_temp_free_i32(hi);
364 return ret;
367 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
369 TCGv_i32 lo = tcg_temp_new_i32();
370 TCGv_i32 hi = tcg_temp_new_i32();
371 TCGv_i64 ret;
373 tcg_gen_muls2_i32(lo, hi, a, b);
374 tcg_temp_free_i32(a);
375 tcg_temp_free_i32(b);
377 ret = tcg_temp_new_i64();
378 tcg_gen_concat_i32_i64(ret, lo, hi);
379 tcg_temp_free_i32(lo);
380 tcg_temp_free_i32(hi);
382 return ret;
385 /* Swap low and high halfwords. */
386 static void gen_swap_half(TCGv_i32 var)
388 TCGv_i32 tmp = tcg_temp_new_i32();
389 tcg_gen_shri_i32(tmp, var, 16);
390 tcg_gen_shli_i32(var, var, 16);
391 tcg_gen_or_i32(var, var, tmp);
392 tcg_temp_free_i32(tmp);
395 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
396 tmp = (t0 ^ t1) & 0x8000;
397 t0 &= ~0x8000;
398 t1 &= ~0x8000;
399 t0 = (t0 + t1) ^ tmp;
402 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
404 TCGv_i32 tmp = tcg_temp_new_i32();
405 tcg_gen_xor_i32(tmp, t0, t1);
406 tcg_gen_andi_i32(tmp, tmp, 0x8000);
407 tcg_gen_andi_i32(t0, t0, ~0x8000);
408 tcg_gen_andi_i32(t1, t1, ~0x8000);
409 tcg_gen_add_i32(t0, t0, t1);
410 tcg_gen_xor_i32(t0, t0, tmp);
411 tcg_temp_free_i32(tmp);
412 tcg_temp_free_i32(t1);
415 /* Set CF to the top bit of var. */
416 static void gen_set_CF_bit31(TCGv_i32 var)
418 tcg_gen_shri_i32(cpu_CF, var, 31);
421 /* Set N and Z flags from var. */
422 static inline void gen_logic_CC(TCGv_i32 var)
424 tcg_gen_mov_i32(cpu_NF, var);
425 tcg_gen_mov_i32(cpu_ZF, var);
428 /* T0 += T1 + CF. */
429 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
431 tcg_gen_add_i32(t0, t0, t1);
432 tcg_gen_add_i32(t0, t0, cpu_CF);
435 /* dest = T0 + T1 + CF. */
436 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
438 tcg_gen_add_i32(dest, t0, t1);
439 tcg_gen_add_i32(dest, dest, cpu_CF);
442 /* dest = T0 - T1 + CF - 1. */
443 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
445 tcg_gen_sub_i32(dest, t0, t1);
446 tcg_gen_add_i32(dest, dest, cpu_CF);
447 tcg_gen_subi_i32(dest, dest, 1);
450 /* dest = T0 + T1. Compute C, N, V and Z flags */
451 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
453 TCGv_i32 tmp = tcg_temp_new_i32();
454 tcg_gen_movi_i32(tmp, 0);
455 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
456 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
457 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
460 tcg_temp_free_i32(tmp);
461 tcg_gen_mov_i32(dest, cpu_NF);
464 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
465 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
467 TCGv_i32 tmp = tcg_temp_new_i32();
468 if (TCG_TARGET_HAS_add2_i32) {
469 tcg_gen_movi_i32(tmp, 0);
470 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
471 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
472 } else {
473 TCGv_i64 q0 = tcg_temp_new_i64();
474 TCGv_i64 q1 = tcg_temp_new_i64();
475 tcg_gen_extu_i32_i64(q0, t0);
476 tcg_gen_extu_i32_i64(q1, t1);
477 tcg_gen_add_i64(q0, q0, q1);
478 tcg_gen_extu_i32_i64(q1, cpu_CF);
479 tcg_gen_add_i64(q0, q0, q1);
480 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
481 tcg_temp_free_i64(q0);
482 tcg_temp_free_i64(q1);
484 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
485 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
486 tcg_gen_xor_i32(tmp, t0, t1);
487 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
488 tcg_temp_free_i32(tmp);
489 tcg_gen_mov_i32(dest, cpu_NF);
492 /* dest = T0 - T1. Compute C, N, V and Z flags */
493 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
495 TCGv_i32 tmp;
496 tcg_gen_sub_i32(cpu_NF, t0, t1);
497 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
498 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
499 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
500 tmp = tcg_temp_new_i32();
501 tcg_gen_xor_i32(tmp, t0, t1);
502 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
503 tcg_temp_free_i32(tmp);
504 tcg_gen_mov_i32(dest, cpu_NF);
507 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
508 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
510 TCGv_i32 tmp = tcg_temp_new_i32();
511 tcg_gen_not_i32(tmp, t1);
512 gen_adc_CC(dest, t0, tmp);
513 tcg_temp_free_i32(tmp);
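/* Variable shifts by a register amount: only the bottom byte of the shift
 * register is used as the count, and counts of 32 or more produce a zero
 * result, as the architecture specifies for register-controlled LSL/LSR
 * (the flag-setting forms use the _cc helpers instead). */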
516 #define GEN_SHIFT(name) \
517 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
519 TCGv_i32 tmp1, tmp2, tmp3; \
520 tmp1 = tcg_temp_new_i32(); \
521 tcg_gen_andi_i32(tmp1, t1, 0xff); \
522 tmp2 = tcg_const_i32(0); \
523 tmp3 = tcg_const_i32(0x1f); \
524 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
525 tcg_temp_free_i32(tmp3); \
526 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
527 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
528 tcg_temp_free_i32(tmp2); \
529 tcg_temp_free_i32(tmp1); \
531 GEN_SHIFT(shl)
532 GEN_SHIFT(shr)
533 #undef GEN_SHIFT
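/* Variable ASR: shift counts of 32 or more are clamped to 31 so that every
 * result bit is a copy of the sign bit. */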
535 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
537 TCGv_i32 tmp1, tmp2;
538 tmp1 = tcg_temp_new_i32();
539 tcg_gen_andi_i32(tmp1, t1, 0xff);
540 tmp2 = tcg_const_i32(0x1f);
541 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
542 tcg_temp_free_i32(tmp2);
543 tcg_gen_sar_i32(dest, t0, tmp1);
544 tcg_temp_free_i32(tmp1);
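/* dest = |src|, computed branch-free with a conditional move. */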
547 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
549 TCGv_i32 c0 = tcg_const_i32(0);
550 TCGv_i32 tmp = tcg_temp_new_i32();
551 tcg_gen_neg_i32(tmp, src);
552 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
553 tcg_temp_free_i32(c0);
554 tcg_temp_free_i32(tmp);
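/* Set CF to the last bit shifted out, i.e. bit 'shift' of var (bit 0 when
 * shift is 0). */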
557 static void shifter_out_im(TCGv_i32 var, int shift)
559 if (shift == 0) {
560 tcg_gen_andi_i32(cpu_CF, var, 1);
561 } else {
562 tcg_gen_shri_i32(cpu_CF, var, shift);
563 if (shift != 31) {
564 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
569 /* Shift by immediate. Includes special handling for shift == 0. */
570 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
571 int shift, int flags)
573 switch (shiftop) {
574 case 0: /* LSL */
575 if (shift != 0) {
576 if (flags)
577 shifter_out_im(var, 32 - shift);
578 tcg_gen_shli_i32(var, var, shift);
580 break;
581 case 1: /* LSR */
582 if (shift == 0) {
583 if (flags) {
584 tcg_gen_shri_i32(cpu_CF, var, 31);
586 tcg_gen_movi_i32(var, 0);
587 } else {
588 if (flags)
589 shifter_out_im(var, shift - 1);
590 tcg_gen_shri_i32(var, var, shift);
592 break;
593 case 2: /* ASR */
594 if (shift == 0)
595 shift = 32;
596 if (flags)
597 shifter_out_im(var, shift - 1);
598 if (shift == 32)
599 shift = 31;
600 tcg_gen_sari_i32(var, var, shift);
601 break;
602 case 3: /* ROR/RRX */
603 if (shift != 0) {
604 if (flags)
605 shifter_out_im(var, shift - 1);
606 tcg_gen_rotri_i32(var, var, shift); break;
607 } else {
608 TCGv_i32 tmp = tcg_temp_new_i32();
609 tcg_gen_shli_i32(tmp, cpu_CF, 31);
610 if (flags)
611 shifter_out_im(var, 0);
612 tcg_gen_shri_i32(var, var, 1);
613 tcg_gen_or_i32(var, var, tmp);
614 tcg_temp_free_i32(tmp);
619 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
620 TCGv_i32 shift, int flags)
622 if (flags) {
623 switch (shiftop) {
624 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
625 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
626 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
627 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
629 } else {
630 switch (shiftop) {
631 case 0:
632 gen_shl(var, var, shift);
633 break;
634 case 1:
635 gen_shr(var, var, shift);
636 break;
637 case 2:
638 gen_sar(var, var, shift);
639 break;
640 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
641 tcg_gen_rotr_i32(var, var, shift); break;
644 tcg_temp_free_i32(shift);
647 #define PAS_OP(pfx) \
648 switch (op2) { \
649 case 0: gen_pas_helper(glue(pfx,add16)); break; \
650 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
651 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
652 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
653 case 4: gen_pas_helper(glue(pfx,add8)); break; \
654 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
656 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
658 TCGv_ptr tmp;
660 switch (op1) {
661 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
662 case 1:
663 tmp = tcg_temp_new_ptr();
664 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
665 PAS_OP(s)
666 tcg_temp_free_ptr(tmp);
667 break;
668 case 5:
669 tmp = tcg_temp_new_ptr();
670 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
671 PAS_OP(u)
672 tcg_temp_free_ptr(tmp);
673 break;
674 #undef gen_pas_helper
675 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
676 case 2:
677 PAS_OP(q);
678 break;
679 case 3:
680 PAS_OP(sh);
681 break;
682 case 6:
683 PAS_OP(uq);
684 break;
685 case 7:
686 PAS_OP(uh);
687 break;
688 #undef gen_pas_helper
691 #undef PAS_OP
693 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
694 #define PAS_OP(pfx) \
695 switch (op1) { \
696 case 0: gen_pas_helper(glue(pfx,add8)); break; \
697 case 1: gen_pas_helper(glue(pfx,add16)); break; \
698 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
699 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
700 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
701 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
703 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
705 TCGv_ptr tmp;
707 switch (op2) {
708 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
709 case 0:
710 tmp = tcg_temp_new_ptr();
711 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
712 PAS_OP(s)
713 tcg_temp_free_ptr(tmp);
714 break;
715 case 4:
716 tmp = tcg_temp_new_ptr();
717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
718 PAS_OP(u)
719 tcg_temp_free_ptr(tmp);
720 break;
721 #undef gen_pas_helper
722 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
723 case 1:
724 PAS_OP(q);
725 break;
726 case 2:
727 PAS_OP(sh);
728 break;
729 case 5:
730 PAS_OP(uq);
731 break;
732 case 6:
733 PAS_OP(uh);
734 break;
735 #undef gen_pas_helper
738 #undef PAS_OP
741 * Generate a conditional based on ARM condition code cc.
742 * This is common between ARM and AArch64 targets.
744 void arm_test_cc(DisasCompare *cmp, int cc)
746 TCGv_i32 value;
747 TCGCond cond;
748 bool global = true;
750 switch (cc) {
751 case 0: /* eq: Z */
752 case 1: /* ne: !Z */
753 cond = TCG_COND_EQ;
754 value = cpu_ZF;
755 break;
757 case 2: /* cs: C */
758 case 3: /* cc: !C */
759 cond = TCG_COND_NE;
760 value = cpu_CF;
761 break;
763 case 4: /* mi: N */
764 case 5: /* pl: !N */
765 cond = TCG_COND_LT;
766 value = cpu_NF;
767 break;
769 case 6: /* vs: V */
770 case 7: /* vc: !V */
771 cond = TCG_COND_LT;
772 value = cpu_VF;
773 break;
775 case 8: /* hi: C && !Z */
776 case 9: /* ls: !C || Z -> !(C && !Z) */
777 cond = TCG_COND_NE;
778 value = tcg_temp_new_i32();
779 global = false;
780 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
781 ZF is non-zero for !Z; so AND the two subexpressions. */
782 tcg_gen_neg_i32(value, cpu_CF);
783 tcg_gen_and_i32(value, value, cpu_ZF);
784 break;
786 case 10: /* ge: N == V -> N ^ V == 0 */
787 case 11: /* lt: N != V -> N ^ V != 0 */
788 /* Since we're only interested in the sign bit, == 0 is >= 0. */
789 cond = TCG_COND_GE;
790 value = tcg_temp_new_i32();
791 global = false;
792 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
793 break;
795 case 12: /* gt: !Z && N == V */
796 case 13: /* le: Z || N != V */
797 cond = TCG_COND_NE;
798 value = tcg_temp_new_i32();
799 global = false;
800 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
801 * the sign bit then AND with ZF to yield the result. */
802 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
803 tcg_gen_sari_i32(value, value, 31);
804 tcg_gen_andc_i32(value, cpu_ZF, value);
805 break;
807 case 14: /* always */
808 case 15: /* always */
809 /* Use the ALWAYS condition, which will fold early.
810 * It doesn't matter what we use for the value. */
811 cond = TCG_COND_ALWAYS;
812 value = cpu_ZF;
813 goto no_invert;
815 default:
816 fprintf(stderr, "Bad condition code 0x%x\n", cc);
817 abort();
820 if (cc & 1) {
821 cond = tcg_invert_cond(cond);
824 no_invert:
825 cmp->cond = cond;
826 cmp->value = value;
827 cmp->value_global = global;
830 void arm_free_cc(DisasCompare *cmp)
832 if (!cmp->value_global) {
833 tcg_temp_free_i32(cmp->value);
837 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
839 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
842 void arm_gen_test_cc(int cc, TCGLabel *label)
844 DisasCompare cmp;
845 arm_test_cc(&cmp, cc);
846 arm_jump_cc(&cmp, label);
847 arm_free_cc(&cmp);
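/* Indexed by data-processing opcode: 1 for the logical ops whose S variant
 * sets only N and Z from the result (plus the shifter carry), 0 for the
 * arithmetic ops that compute full NZCV. */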
850 static const uint8_t table_logic_cc[16] = {
851 1, /* and */
852 1, /* xor */
853 0, /* sub */
854 0, /* rsb */
855 0, /* add */
856 0, /* adc */
857 0, /* sbc */
858 0, /* rsc */
859 1, /* andl */
860 1, /* xorl */
861 0, /* cmp */
862 0, /* cmn */
863 1, /* orr */
864 1, /* mov */
865 1, /* bic */
866 1, /* mvn */
869 /* Set PC and Thumb state from an immediate address. */
870 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
872 TCGv_i32 tmp;
874 s->is_jmp = DISAS_JUMP;
875 if (s->thumb != (addr & 1)) {
876 tmp = tcg_temp_new_i32();
877 tcg_gen_movi_i32(tmp, addr & 1);
878 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
879 tcg_temp_free_i32(tmp);
881 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
884 /* Set PC and Thumb state from var. var is marked as dead. */
885 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
887 s->is_jmp = DISAS_JUMP;
888 tcg_gen_andi_i32(cpu_R[15], var, ~1);
889 tcg_gen_andi_i32(var, var, 1);
890 store_cpu_field(var, thumb);
893 /* Variant of store_reg which uses branch&exchange logic when storing
894 to r15 in ARM architecture v7 and above. The source must be a temporary
895 and will be marked as dead. */
896 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
898 if (reg == 15 && ENABLE_ARCH_7) {
899 gen_bx(s, var);
900 } else {
901 store_reg(s, reg, var);
905 /* Variant of store_reg which uses branch&exchange logic when storing
906 * to r15 in ARM architecture v5T and above. This is used for storing
907 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
908 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
909 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
911 if (reg == 15 && ENABLE_ARCH_5) {
912 gen_bx(s, var);
913 } else {
914 store_reg(s, reg, var);
918 /* Abstractions of "generate code to do a guest load/store for
919 * AArch32", where a vaddr is always 32 bits (and is zero
920 * extended if we're a 64 bit core) and data is also
921 * 32 bits unless specifically doing a 64 bit access.
922 * These functions work like tcg_gen_qemu_{ld,st}* except
923 * that the address argument is TCGv_i32 rather than TCGv.
925 #if TARGET_LONG_BITS == 32
927 #define DO_GEN_LD(SUFF, OPC) \
928 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
930 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
933 #define DO_GEN_ST(SUFF, OPC) \
934 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
936 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
939 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
941 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
944 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
946 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
949 #else
951 #define DO_GEN_LD(SUFF, OPC) \
952 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
954 TCGv addr64 = tcg_temp_new(); \
955 tcg_gen_extu_i32_i64(addr64, addr); \
956 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
957 tcg_temp_free(addr64); \
960 #define DO_GEN_ST(SUFF, OPC) \
961 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
963 TCGv addr64 = tcg_temp_new(); \
964 tcg_gen_extu_i32_i64(addr64, addr); \
965 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
966 tcg_temp_free(addr64); \
969 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
971 TCGv addr64 = tcg_temp_new();
972 tcg_gen_extu_i32_i64(addr64, addr);
973 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
974 tcg_temp_free(addr64);
977 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
979 TCGv addr64 = tcg_temp_new();
980 tcg_gen_extu_i32_i64(addr64, addr);
981 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
982 tcg_temp_free(addr64);
985 #endif
987 DO_GEN_LD(8s, MO_SB)
988 DO_GEN_LD(8u, MO_UB)
989 DO_GEN_LD(16s, MO_TESW)
990 DO_GEN_LD(16u, MO_TEUW)
991 DO_GEN_LD(32u, MO_TEUL)
992 DO_GEN_ST(8, MO_UB)
993 DO_GEN_ST(16, MO_TEUW)
994 DO_GEN_ST(32, MO_TEUL)
996 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
998 tcg_gen_movi_i32(cpu_R[15], val);
1001 static inline void gen_hvc(DisasContext *s, int imm16)
1003 /* The pre HVC helper handles cases when HVC gets trapped
1004 * as an undefined insn by runtime configuration (i.e. before
1005 * the insn really executes).
1007 gen_set_pc_im(s, s->pc - 4);
1008 gen_helper_pre_hvc(cpu_env);
1009 /* Otherwise we will treat this as a real exception which
1010 * happens after execution of the insn. (The distinction matters
1011 * for the PC value reported to the exception handler and also
1012 * for single stepping.)
1014 s->svc_imm = imm16;
1015 gen_set_pc_im(s, s->pc);
1016 s->is_jmp = DISAS_HVC;
1019 static inline void gen_smc(DisasContext *s)
1021 /* As with HVC, we may take an exception either before or after
1022 * the insn executes.
1024 TCGv_i32 tmp;
1026 gen_set_pc_im(s, s->pc - 4);
1027 tmp = tcg_const_i32(syn_aa32_smc());
1028 gen_helper_pre_smc(cpu_env, tmp);
1029 tcg_temp_free_i32(tmp);
1030 gen_set_pc_im(s, s->pc);
1031 s->is_jmp = DISAS_SMC;
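/* Write the current IT-block state back to condexec_bits in the CPU state
 * so it is up to date if an exception is taken by the code generated below. */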
1034 static inline void
1035 gen_set_condexec (DisasContext *s)
1037 if (s->condexec_mask) {
1038 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1039 TCGv_i32 tmp = tcg_temp_new_i32();
1040 tcg_gen_movi_i32(tmp, val);
1041 store_cpu_field(tmp, condexec_bits);
1045 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1047 gen_set_condexec(s);
1048 gen_set_pc_im(s, s->pc - offset);
1049 gen_exception_internal(excp);
1050 s->is_jmp = DISAS_JUMP;
1053 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1054 int syn, uint32_t target_el)
1056 gen_set_condexec(s);
1057 gen_set_pc_im(s, s->pc - offset);
1058 gen_exception(excp, syn, target_el);
1059 s->is_jmp = DISAS_JUMP;
1062 /* Emit an inline alignment check, which raises an exception if the given
1063 * address is not aligned according to "size" (which must be a power of 2). */
1064 static void gen_alignment_check(DisasContext *s, int pc_offset,
1065 target_ulong size, TCGv addr)
1067 #ifdef CONFIG_ALIGNMENT_EXCEPTIONS
1068 TCGLabel *alignok_label = gen_new_label();
1069 TCGv tmp = tcg_temp_new();
1071 /* check alignment, branch to alignok_label if aligned */
1072 tcg_gen_andi_tl(tmp, addr, size - 1);
1073 tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, 0, alignok_label);
1075 /* emit alignment exception */
1076 gen_set_pc_im(s, s->pc - pc_offset);
1077 gen_helper_alignment_exception(cpu_env, addr);
1079 gen_set_label(alignok_label);
1080 tcg_temp_free(tmp);
1081 #endif
1084 /* Force a TB lookup after an instruction that changes the CPU state. */
1085 static inline void gen_lookup_tb(DisasContext *s)
1087 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1088 s->is_jmp = DISAS_JUMP;
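/* Apply the addressing-mode offset of a load/store word/unsigned byte insn
 * to var: a 12-bit immediate or a shifted register, added or subtracted
 * according to the U bit (bit 23). */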
1091 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1092 TCGv_i32 var)
1094 int val, rm, shift, shiftop;
1095 TCGv_i32 offset;
1097 if (!(insn & (1 << 25))) {
1098 /* immediate */
1099 val = insn & 0xfff;
1100 if (!(insn & (1 << 23)))
1101 val = -val;
1102 if (val != 0)
1103 tcg_gen_addi_i32(var, var, val);
1104 } else {
1105 /* shift/register */
1106 rm = (insn) & 0xf;
1107 shift = (insn >> 7) & 0x1f;
1108 shiftop = (insn >> 5) & 3;
1109 offset = load_reg(s, rm);
1110 gen_arm_shift_im(offset, shiftop, shift, 0);
1111 if (!(insn & (1 << 23)))
1112 tcg_gen_sub_i32(var, var, offset);
1113 else
1114 tcg_gen_add_i32(var, var, offset);
1115 tcg_temp_free_i32(offset);
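/* As above, but for the halfword/doubleword addressing mode: the offset is
 * either a split 8-bit immediate or a plain register, again with bit 23
 * selecting add versus subtract. */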
1119 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1120 int extra, TCGv_i32 var)
1122 int val, rm;
1123 TCGv_i32 offset;
1125 if (insn & (1 << 22)) {
1126 /* immediate */
1127 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1128 if (!(insn & (1 << 23)))
1129 val = -val;
1130 val += extra;
1131 if (val != 0)
1132 tcg_gen_addi_i32(var, var, val);
1133 } else {
1134 /* register */
1135 if (extra)
1136 tcg_gen_addi_i32(var, var, extra);
1137 rm = (insn) & 0xf;
1138 offset = load_reg(s, rm);
1139 if (!(insn & (1 << 23)))
1140 tcg_gen_sub_i32(var, var, offset);
1141 else
1142 tcg_gen_add_i32(var, var, offset);
1143 tcg_temp_free_i32(offset);
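/* Return a pointer to the float_status to use: the Neon "standard FP"
 * status when neon is nonzero, the ordinary VFP status otherwise. */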
1147 static TCGv_ptr get_fpstatus_ptr(int neon)
1149 TCGv_ptr statusptr = tcg_temp_new_ptr();
1150 int offset;
1151 if (neon) {
1152 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1153 } else {
1154 offset = offsetof(CPUARMState, vfp.fp_status);
1156 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1157 return statusptr;
1160 #define VFP_OP2(name) \
1161 static inline void gen_vfp_##name(int dp) \
1163 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1164 if (dp) { \
1165 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1166 } else { \
1167 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1169 tcg_temp_free_ptr(fpst); \
1172 VFP_OP2(add)
1173 VFP_OP2(sub)
1174 VFP_OP2(mul)
1175 VFP_OP2(div)
1177 #undef VFP_OP2
1179 static inline void gen_vfp_F1_mul(int dp)
1181 /* Like gen_vfp_mul() but put result in F1 */
1182 TCGv_ptr fpst = get_fpstatus_ptr(0);
1183 if (dp) {
1184 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1185 } else {
1186 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1188 tcg_temp_free_ptr(fpst);
1191 static inline void gen_vfp_F1_neg(int dp)
1193 /* Like gen_vfp_neg() but put result in F1 */
1194 if (dp) {
1195 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1196 } else {
1197 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1201 static inline void gen_vfp_abs(int dp)
1203 if (dp)
1204 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1205 else
1206 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1209 static inline void gen_vfp_neg(int dp)
1211 if (dp)
1212 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1213 else
1214 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1217 static inline void gen_vfp_sqrt(int dp)
1219 if (dp)
1220 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1221 else
1222 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1225 static inline void gen_vfp_cmp(int dp)
1227 if (dp)
1228 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1229 else
1230 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1233 static inline void gen_vfp_cmpe(int dp)
1235 if (dp)
1236 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1237 else
1238 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1241 static inline void gen_vfp_F1_ld0(int dp)
1243 if (dp)
1244 tcg_gen_movi_i64(cpu_F1d, 0);
1245 else
1246 tcg_gen_movi_i32(cpu_F1s, 0);
1249 #define VFP_GEN_ITOF(name) \
1250 static inline void gen_vfp_##name(int dp, int neon) \
1252 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1253 if (dp) { \
1254 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1255 } else { \
1256 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1258 tcg_temp_free_ptr(statusptr); \
1261 VFP_GEN_ITOF(uito)
1262 VFP_GEN_ITOF(sito)
1263 #undef VFP_GEN_ITOF
1265 #define VFP_GEN_FTOI(name) \
1266 static inline void gen_vfp_##name(int dp, int neon) \
1268 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1269 if (dp) { \
1270 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1271 } else { \
1272 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1274 tcg_temp_free_ptr(statusptr); \
1277 VFP_GEN_FTOI(toui)
1278 VFP_GEN_FTOI(touiz)
1279 VFP_GEN_FTOI(tosi)
1280 VFP_GEN_FTOI(tosiz)
1281 #undef VFP_GEN_FTOI
1283 #define VFP_GEN_FIX(name, round) \
1284 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1286 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1287 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1288 if (dp) { \
1289 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1290 statusptr); \
1291 } else { \
1292 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1293 statusptr); \
1295 tcg_temp_free_i32(tmp_shift); \
1296 tcg_temp_free_ptr(statusptr); \
1298 VFP_GEN_FIX(tosh, _round_to_zero)
1299 VFP_GEN_FIX(tosl, _round_to_zero)
1300 VFP_GEN_FIX(touh, _round_to_zero)
1301 VFP_GEN_FIX(toul, _round_to_zero)
1302 VFP_GEN_FIX(shto, )
1303 VFP_GEN_FIX(slto, )
1304 VFP_GEN_FIX(uhto, )
1305 VFP_GEN_FIX(ulto, )
1306 #undef VFP_GEN_FIX
1308 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1310 if (dp) {
1311 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
1312 } else {
1313 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
1317 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1319 if (dp) {
1320 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
1321 } else {
1322 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
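/* Offset of a VFP register in CPUARMState: a whole D register when dp is
 * set, otherwise an S register addressed as the low or high half of a
 * D register. */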
1326 static inline long
1327 vfp_reg_offset (int dp, int reg)
1329 if (dp)
1330 return offsetof(CPUARMState, vfp.regs[reg]);
1331 else if (reg & 1) {
1332 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1333 + offsetof(CPU_DoubleU, l.upper);
1334 } else {
1335 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1336 + offsetof(CPU_DoubleU, l.lower);
1340 /* Return the offset of a 32-bit piece of a NEON register.
1341 zero is the least significant end of the register. */
1342 static inline long
1343 neon_reg_offset (int reg, int n)
1345 int sreg;
1346 sreg = reg * 2 + n;
1347 return vfp_reg_offset(0, sreg);
1350 static TCGv_i32 neon_load_reg(int reg, int pass)
1352 TCGv_i32 tmp = tcg_temp_new_i32();
1353 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1354 return tmp;
1357 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1359 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1360 tcg_temp_free_i32(var);
1363 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1365 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1368 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1370 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1373 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1374 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1375 #define tcg_gen_st_f32 tcg_gen_st_i32
1376 #define tcg_gen_st_f64 tcg_gen_st_i64
1378 static inline void gen_mov_F0_vreg(int dp, int reg)
1380 if (dp)
1381 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1382 else
1383 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1386 static inline void gen_mov_F1_vreg(int dp, int reg)
1388 if (dp)
1389 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1390 else
1391 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1394 static inline void gen_mov_vreg_F0(int dp, int reg)
1396 if (dp)
1397 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1398 else
1399 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1402 #define ARM_CP_RW_BIT (1 << 20)
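/* The L bit of a coprocessor insn: set for loads into the coprocessor and
 * for transfers from coprocessor to core (WLDR*, TMRRC below), clear for
 * the store/write direction. */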
1404 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1406 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1409 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1411 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1414 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1416 TCGv_i32 var = tcg_temp_new_i32();
1417 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1418 return var;
1421 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1423 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1424 tcg_temp_free_i32(var);
1427 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1429 iwmmxt_store_reg(cpu_M0, rn);
1432 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1434 iwmmxt_load_reg(cpu_M0, rn);
1437 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1439 iwmmxt_load_reg(cpu_V1, rn);
1440 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1443 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1445 iwmmxt_load_reg(cpu_V1, rn);
1446 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1449 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1451 iwmmxt_load_reg(cpu_V1, rn);
1452 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1455 #define IWMMXT_OP(name) \
1456 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1458 iwmmxt_load_reg(cpu_V1, rn); \
1459 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1462 #define IWMMXT_OP_ENV(name) \
1463 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1465 iwmmxt_load_reg(cpu_V1, rn); \
1466 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1469 #define IWMMXT_OP_ENV_SIZE(name) \
1470 IWMMXT_OP_ENV(name##b) \
1471 IWMMXT_OP_ENV(name##w) \
1472 IWMMXT_OP_ENV(name##l)
1474 #define IWMMXT_OP_ENV1(name) \
1475 static inline void gen_op_iwmmxt_##name##_M0(void) \
1477 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1480 IWMMXT_OP(maddsq)
1481 IWMMXT_OP(madduq)
1482 IWMMXT_OP(sadb)
1483 IWMMXT_OP(sadw)
1484 IWMMXT_OP(mulslw)
1485 IWMMXT_OP(mulshw)
1486 IWMMXT_OP(mululw)
1487 IWMMXT_OP(muluhw)
1488 IWMMXT_OP(macsw)
1489 IWMMXT_OP(macuw)
1491 IWMMXT_OP_ENV_SIZE(unpackl)
1492 IWMMXT_OP_ENV_SIZE(unpackh)
1494 IWMMXT_OP_ENV1(unpacklub)
1495 IWMMXT_OP_ENV1(unpackluw)
1496 IWMMXT_OP_ENV1(unpacklul)
1497 IWMMXT_OP_ENV1(unpackhub)
1498 IWMMXT_OP_ENV1(unpackhuw)
1499 IWMMXT_OP_ENV1(unpackhul)
1500 IWMMXT_OP_ENV1(unpacklsb)
1501 IWMMXT_OP_ENV1(unpacklsw)
1502 IWMMXT_OP_ENV1(unpacklsl)
1503 IWMMXT_OP_ENV1(unpackhsb)
1504 IWMMXT_OP_ENV1(unpackhsw)
1505 IWMMXT_OP_ENV1(unpackhsl)
1507 IWMMXT_OP_ENV_SIZE(cmpeq)
1508 IWMMXT_OP_ENV_SIZE(cmpgtu)
1509 IWMMXT_OP_ENV_SIZE(cmpgts)
1511 IWMMXT_OP_ENV_SIZE(mins)
1512 IWMMXT_OP_ENV_SIZE(minu)
1513 IWMMXT_OP_ENV_SIZE(maxs)
1514 IWMMXT_OP_ENV_SIZE(maxu)
1516 IWMMXT_OP_ENV_SIZE(subn)
1517 IWMMXT_OP_ENV_SIZE(addn)
1518 IWMMXT_OP_ENV_SIZE(subu)
1519 IWMMXT_OP_ENV_SIZE(addu)
1520 IWMMXT_OP_ENV_SIZE(subs)
1521 IWMMXT_OP_ENV_SIZE(adds)
1523 IWMMXT_OP_ENV(avgb0)
1524 IWMMXT_OP_ENV(avgb1)
1525 IWMMXT_OP_ENV(avgw0)
1526 IWMMXT_OP_ENV(avgw1)
1528 IWMMXT_OP_ENV(packuw)
1529 IWMMXT_OP_ENV(packul)
1530 IWMMXT_OP_ENV(packuq)
1531 IWMMXT_OP_ENV(packsw)
1532 IWMMXT_OP_ENV(packsl)
1533 IWMMXT_OP_ENV(packsq)
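/* The two helpers below maintain the wCon update flags: set_mup ORs in
 * bit 1 after a wRn data register is written, set_cup ORs in bit 0 after a
 * control register update. */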
1535 static void gen_op_iwmmxt_set_mup(void)
1537 TCGv_i32 tmp;
1538 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1539 tcg_gen_ori_i32(tmp, tmp, 2);
1540 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1543 static void gen_op_iwmmxt_set_cup(void)
1545 TCGv_i32 tmp;
1546 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1547 tcg_gen_ori_i32(tmp, tmp, 1);
1548 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1551 static void gen_op_iwmmxt_setpsr_nz(void)
1553 TCGv_i32 tmp = tcg_temp_new_i32();
1554 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1555 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1558 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1560 iwmmxt_load_reg(cpu_V1, rn);
1561 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1562 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
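/* Decode the addressing mode of an iwMMXt load/store, leaving the effective
 * address in dest and performing any base-register writeback; returns 1 for
 * an invalid encoding. */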
1565 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1566 TCGv_i32 dest)
1568 int rd;
1569 uint32_t offset;
1570 TCGv_i32 tmp;
1572 rd = (insn >> 16) & 0xf;
1573 tmp = load_reg(s, rd);
1575 offset = (insn & 0xff) << ((insn >> 7) & 2);
1576 if (insn & (1 << 24)) {
1577 /* Pre indexed */
1578 if (insn & (1 << 23))
1579 tcg_gen_addi_i32(tmp, tmp, offset);
1580 else
1581 tcg_gen_addi_i32(tmp, tmp, -offset);
1582 tcg_gen_mov_i32(dest, tmp);
1583 if (insn & (1 << 21))
1584 store_reg(s, rd, tmp);
1585 else
1586 tcg_temp_free_i32(tmp);
1587 } else if (insn & (1 << 21)) {
1588 /* Post indexed */
1589 tcg_gen_mov_i32(dest, tmp);
1590 if (insn & (1 << 23))
1591 tcg_gen_addi_i32(tmp, tmp, offset);
1592 else
1593 tcg_gen_addi_i32(tmp, tmp, -offset);
1594 store_reg(s, rd, tmp);
1595 } else if (!(insn & (1 << 23)))
1596 return 1;
1597 return 0;
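/* Fetch the shift amount for an iwMMXt shift insn, either from one of the
 * wCGR control registers or from the low half of a wR register, masked with
 * 'mask'; returns 1 for an invalid encoding. */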
1600 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1602 int rd = (insn >> 0) & 0xf;
1603 TCGv_i32 tmp;
1605 if (insn & (1 << 8)) {
1606 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1607 return 1;
1608 } else {
1609 tmp = iwmmxt_load_creg(rd);
1611 } else {
1612 tmp = tcg_temp_new_i32();
1613 iwmmxt_load_reg(cpu_V0, rd);
1614 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1616 tcg_gen_andi_i32(tmp, tmp, mask);
1617 tcg_gen_mov_i32(dest, tmp);
1618 tcg_temp_free_i32(tmp);
1619 return 0;
1622 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1623 (i.e. an undefined instruction). */
1624 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1626 int rd, wrd;
1627 int rdhi, rdlo, rd0, rd1, i;
1628 TCGv_i32 addr;
1629 TCGv_i32 tmp, tmp2, tmp3;
1631 if ((insn & 0x0e000e00) == 0x0c000000) {
1632 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1633 wrd = insn & 0xf;
1634 rdlo = (insn >> 12) & 0xf;
1635 rdhi = (insn >> 16) & 0xf;
1636 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1637 iwmmxt_load_reg(cpu_V0, wrd);
1638 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1639 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1640 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1641 } else { /* TMCRR */
1642 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1643 iwmmxt_store_reg(cpu_V0, wrd);
1644 gen_op_iwmmxt_set_mup();
1646 return 0;
1649 wrd = (insn >> 12) & 0xf;
1650 addr = tcg_temp_new_i32();
1651 if (gen_iwmmxt_address(s, insn, addr)) {
1652 tcg_temp_free_i32(addr);
1653 return 1;
1655 if (insn & ARM_CP_RW_BIT) {
1656 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1657 tmp = tcg_temp_new_i32();
1658 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1659 iwmmxt_store_creg(wrd, tmp);
1660 } else {
1661 i = 1;
1662 if (insn & (1 << 8)) {
1663 if (insn & (1 << 22)) { /* WLDRD */
1664 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
1665 i = 0;
1666 } else { /* WLDRW wRd */
1667 tmp = tcg_temp_new_i32();
1668 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1670 } else {
1671 tmp = tcg_temp_new_i32();
1672 if (insn & (1 << 22)) { /* WLDRH */
1673 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
1674 } else { /* WLDRB */
1675 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
1678 if (i) {
1679 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1680 tcg_temp_free_i32(tmp);
1682 gen_op_iwmmxt_movq_wRn_M0(wrd);
1684 } else {
1685 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1686 tmp = iwmmxt_load_creg(wrd);
1687 gen_aa32_st32(tmp, addr, get_mem_index(s));
1688 } else {
1689 gen_op_iwmmxt_movq_M0_wRn(wrd);
1690 tmp = tcg_temp_new_i32();
1691 if (insn & (1 << 8)) {
1692 if (insn & (1 << 22)) { /* WSTRD */
1693 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
1694 } else { /* WSTRW wRd */
1695 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1696 gen_aa32_st32(tmp, addr, get_mem_index(s));
1698 } else {
1699 if (insn & (1 << 22)) { /* WSTRH */
1700 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1701 gen_aa32_st16(tmp, addr, get_mem_index(s));
1702 } else { /* WSTRB */
1703 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1704 gen_aa32_st8(tmp, addr, get_mem_index(s));
1708 tcg_temp_free_i32(tmp);
1710 tcg_temp_free_i32(addr);
1711 return 0;
1714 if ((insn & 0x0f000000) != 0x0e000000)
1715 return 1;
1717 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1718 case 0x000: /* WOR */
1719 wrd = (insn >> 12) & 0xf;
1720 rd0 = (insn >> 0) & 0xf;
1721 rd1 = (insn >> 16) & 0xf;
1722 gen_op_iwmmxt_movq_M0_wRn(rd0);
1723 gen_op_iwmmxt_orq_M0_wRn(rd1);
1724 gen_op_iwmmxt_setpsr_nz();
1725 gen_op_iwmmxt_movq_wRn_M0(wrd);
1726 gen_op_iwmmxt_set_mup();
1727 gen_op_iwmmxt_set_cup();
1728 break;
1729 case 0x011: /* TMCR */
1730 if (insn & 0xf)
1731 return 1;
1732 rd = (insn >> 12) & 0xf;
1733 wrd = (insn >> 16) & 0xf;
1734 switch (wrd) {
1735 case ARM_IWMMXT_wCID:
1736 case ARM_IWMMXT_wCASF:
1737 break;
1738 case ARM_IWMMXT_wCon:
1739 gen_op_iwmmxt_set_cup();
1740 /* Fall through. */
1741 case ARM_IWMMXT_wCSSF:
1742 tmp = iwmmxt_load_creg(wrd);
1743 tmp2 = load_reg(s, rd);
1744 tcg_gen_andc_i32(tmp, tmp, tmp2);
1745 tcg_temp_free_i32(tmp2);
1746 iwmmxt_store_creg(wrd, tmp);
1747 break;
1748 case ARM_IWMMXT_wCGR0:
1749 case ARM_IWMMXT_wCGR1:
1750 case ARM_IWMMXT_wCGR2:
1751 case ARM_IWMMXT_wCGR3:
1752 gen_op_iwmmxt_set_cup();
1753 tmp = load_reg(s, rd);
1754 iwmmxt_store_creg(wrd, tmp);
1755 break;
1756 default:
1757 return 1;
1759 break;
1760 case 0x100: /* WXOR */
1761 wrd = (insn >> 12) & 0xf;
1762 rd0 = (insn >> 0) & 0xf;
1763 rd1 = (insn >> 16) & 0xf;
1764 gen_op_iwmmxt_movq_M0_wRn(rd0);
1765 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1766 gen_op_iwmmxt_setpsr_nz();
1767 gen_op_iwmmxt_movq_wRn_M0(wrd);
1768 gen_op_iwmmxt_set_mup();
1769 gen_op_iwmmxt_set_cup();
1770 break;
1771 case 0x111: /* TMRC */
1772 if (insn & 0xf)
1773 return 1;
1774 rd = (insn >> 12) & 0xf;
1775 wrd = (insn >> 16) & 0xf;
1776 tmp = iwmmxt_load_creg(wrd);
1777 store_reg(s, rd, tmp);
1778 break;
1779 case 0x300: /* WANDN */
1780 wrd = (insn >> 12) & 0xf;
1781 rd0 = (insn >> 0) & 0xf;
1782 rd1 = (insn >> 16) & 0xf;
1783 gen_op_iwmmxt_movq_M0_wRn(rd0);
1784 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1785 gen_op_iwmmxt_andq_M0_wRn(rd1);
1786 gen_op_iwmmxt_setpsr_nz();
1787 gen_op_iwmmxt_movq_wRn_M0(wrd);
1788 gen_op_iwmmxt_set_mup();
1789 gen_op_iwmmxt_set_cup();
1790 break;
1791 case 0x200: /* WAND */
1792 wrd = (insn >> 12) & 0xf;
1793 rd0 = (insn >> 0) & 0xf;
1794 rd1 = (insn >> 16) & 0xf;
1795 gen_op_iwmmxt_movq_M0_wRn(rd0);
1796 gen_op_iwmmxt_andq_M0_wRn(rd1);
1797 gen_op_iwmmxt_setpsr_nz();
1798 gen_op_iwmmxt_movq_wRn_M0(wrd);
1799 gen_op_iwmmxt_set_mup();
1800 gen_op_iwmmxt_set_cup();
1801 break;
1802 case 0x810: case 0xa10: /* WMADD */
1803 wrd = (insn >> 12) & 0xf;
1804 rd0 = (insn >> 0) & 0xf;
1805 rd1 = (insn >> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0);
1807 if (insn & (1 << 21))
1808 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1809 else
1810 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1811 gen_op_iwmmxt_movq_wRn_M0(wrd);
1812 gen_op_iwmmxt_set_mup();
1813 break;
1814 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1815 wrd = (insn >> 12) & 0xf;
1816 rd0 = (insn >> 16) & 0xf;
1817 rd1 = (insn >> 0) & 0xf;
1818 gen_op_iwmmxt_movq_M0_wRn(rd0);
1819 switch ((insn >> 22) & 3) {
1820 case 0:
1821 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1822 break;
1823 case 1:
1824 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1825 break;
1826 case 2:
1827 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1828 break;
1829 case 3:
1830 return 1;
1832 gen_op_iwmmxt_movq_wRn_M0(wrd);
1833 gen_op_iwmmxt_set_mup();
1834 gen_op_iwmmxt_set_cup();
1835 break;
1836 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1837 wrd = (insn >> 12) & 0xf;
1838 rd0 = (insn >> 16) & 0xf;
1839 rd1 = (insn >> 0) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 switch ((insn >> 22) & 3) {
1842 case 0:
1843 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1844 break;
1845 case 1:
1846 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1847 break;
1848 case 2:
1849 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1850 break;
1851 case 3:
1852 return 1;
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 gen_op_iwmmxt_set_cup();
1857 break;
1858 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 16) & 0xf;
1861 rd1 = (insn >> 0) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 if (insn & (1 << 22))
1864 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1865 else
1866 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1867 if (!(insn & (1 << 20)))
1868 gen_op_iwmmxt_addl_M0_wRn(wrd);
1869 gen_op_iwmmxt_movq_wRn_M0(wrd);
1870 gen_op_iwmmxt_set_mup();
1871 break;
1872 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1873 wrd = (insn >> 12) & 0xf;
1874 rd0 = (insn >> 16) & 0xf;
1875 rd1 = (insn >> 0) & 0xf;
1876 gen_op_iwmmxt_movq_M0_wRn(rd0);
1877 if (insn & (1 << 21)) {
1878 if (insn & (1 << 20))
1879 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1880 else
1881 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1882 } else {
1883 if (insn & (1 << 20))
1884 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1885 else
1886 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1888 gen_op_iwmmxt_movq_wRn_M0(wrd);
1889 gen_op_iwmmxt_set_mup();
1890 break;
1891 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1892 wrd = (insn >> 12) & 0xf;
1893 rd0 = (insn >> 16) & 0xf;
1894 rd1 = (insn >> 0) & 0xf;
1895 gen_op_iwmmxt_movq_M0_wRn(rd0);
1896 if (insn & (1 << 21))
1897 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1898 else
1899 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1900 if (!(insn & (1 << 20))) {
1901 iwmmxt_load_reg(cpu_V1, wrd);
1902 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1904 gen_op_iwmmxt_movq_wRn_M0(wrd);
1905 gen_op_iwmmxt_set_mup();
1906 break;
1907 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1915 break;
1916 case 1:
1917 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1918 break;
1919 case 2:
1920 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1921 break;
1922 case 3:
1923 return 1;
1925 gen_op_iwmmxt_movq_wRn_M0(wrd);
1926 gen_op_iwmmxt_set_mup();
1927 gen_op_iwmmxt_set_cup();
1928 break;
1929 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1930 wrd = (insn >> 12) & 0xf;
1931 rd0 = (insn >> 16) & 0xf;
1932 rd1 = (insn >> 0) & 0xf;
1933 gen_op_iwmmxt_movq_M0_wRn(rd0);
1934 if (insn & (1 << 22)) {
1935 if (insn & (1 << 20))
1936 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1937 else
1938 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1939 } else {
1940 if (insn & (1 << 20))
1941 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1942 else
1943 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1945 gen_op_iwmmxt_movq_wRn_M0(wrd);
1946 gen_op_iwmmxt_set_mup();
1947 gen_op_iwmmxt_set_cup();
1948 break;
1949 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1950 wrd = (insn >> 12) & 0xf;
1951 rd0 = (insn >> 16) & 0xf;
1952 rd1 = (insn >> 0) & 0xf;
1953 gen_op_iwmmxt_movq_M0_wRn(rd0);
1954 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1955 tcg_gen_andi_i32(tmp, tmp, 7);
1956 iwmmxt_load_reg(cpu_V1, rd1);
1957 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1958 tcg_temp_free_i32(tmp);
1959 gen_op_iwmmxt_movq_wRn_M0(wrd);
1960 gen_op_iwmmxt_set_mup();
1961 break;
1962 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1963 if (((insn >> 6) & 3) == 3)
1964 return 1;
1965 rd = (insn >> 12) & 0xf;
1966 wrd = (insn >> 16) & 0xf;
1967 tmp = load_reg(s, rd);
1968 gen_op_iwmmxt_movq_M0_wRn(wrd);
1969 switch ((insn >> 6) & 3) {
1970 case 0:
1971 tmp2 = tcg_const_i32(0xff);
1972 tmp3 = tcg_const_i32((insn & 7) << 3);
1973 break;
1974 case 1:
1975 tmp2 = tcg_const_i32(0xffff);
1976 tmp3 = tcg_const_i32((insn & 3) << 4);
1977 break;
1978 case 2:
1979 tmp2 = tcg_const_i32(0xffffffff);
1980 tmp3 = tcg_const_i32((insn & 1) << 5);
1981 break;
1982 default:
1983 TCGV_UNUSED_I32(tmp2);
1984 TCGV_UNUSED_I32(tmp3);
1986 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1987 tcg_temp_free_i32(tmp3);
1988 tcg_temp_free_i32(tmp2);
1989 tcg_temp_free_i32(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 break;
1993 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1994 rd = (insn >> 12) & 0xf;
1995 wrd = (insn >> 16) & 0xf;
1996 if (rd == 15 || ((insn >> 22) & 3) == 3)
1997 return 1;
1998 gen_op_iwmmxt_movq_M0_wRn(wrd);
1999 tmp = tcg_temp_new_i32();
2000 switch ((insn >> 22) & 3) {
2001 case 0:
2002 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2003 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2004 if (insn & 8) {
2005 tcg_gen_ext8s_i32(tmp, tmp);
2006 } else {
2007 tcg_gen_andi_i32(tmp, tmp, 0xff);
2009 break;
2010 case 1:
2011 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2012 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2013 if (insn & 8) {
2014 tcg_gen_ext16s_i32(tmp, tmp);
2015 } else {
2016 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2018 break;
2019 case 2:
2020 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2021 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2022 break;
2024 store_reg(s, rd, tmp);
2025 break;
2026 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2027 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2028 return 1;
2029 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2030 switch ((insn >> 22) & 3) {
2031 case 0:
2032 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2033 break;
2034 case 1:
2035 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2036 break;
2037 case 2:
2038 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2039 break;
2041 tcg_gen_shli_i32(tmp, tmp, 28);
2042 gen_set_nzcv(tmp);
2043 tcg_temp_free_i32(tmp);
2044 break;
2045 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2046 if (((insn >> 6) & 3) == 3)
2047 return 1;
2048 rd = (insn >> 12) & 0xf;
2049 wrd = (insn >> 16) & 0xf;
2050 tmp = load_reg(s, rd);
2051 switch ((insn >> 6) & 3) {
2052 case 0:
2053 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2054 break;
2055 case 1:
2056 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2057 break;
2058 case 2:
2059 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2060 break;
2062 tcg_temp_free_i32(tmp);
2063 gen_op_iwmmxt_movq_wRn_M0(wrd);
2064 gen_op_iwmmxt_set_mup();
2065 break;
2066 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2067 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2068 return 1;
2069 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2070 tmp2 = tcg_temp_new_i32();
2071 tcg_gen_mov_i32(tmp2, tmp);
2072 switch ((insn >> 22) & 3) {
2073 case 0:
2074 for (i = 0; i < 7; i ++) {
2075 tcg_gen_shli_i32(tmp2, tmp2, 4);
2076 tcg_gen_and_i32(tmp, tmp, tmp2);
2078 break;
2079 case 1:
2080 for (i = 0; i < 3; i ++) {
2081 tcg_gen_shli_i32(tmp2, tmp2, 8);
2082 tcg_gen_and_i32(tmp, tmp, tmp2);
2084 break;
2085 case 2:
2086 tcg_gen_shli_i32(tmp2, tmp2, 16);
2087 tcg_gen_and_i32(tmp, tmp, tmp2);
2088 break;
2090 gen_set_nzcv(tmp);
2091 tcg_temp_free_i32(tmp2);
2092 tcg_temp_free_i32(tmp);
2093 break;
2094 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2095 wrd = (insn >> 12) & 0xf;
2096 rd0 = (insn >> 16) & 0xf;
2097 gen_op_iwmmxt_movq_M0_wRn(rd0);
2098 switch ((insn >> 22) & 3) {
2099 case 0:
2100 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2101 break;
2102 case 1:
2103 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2104 break;
2105 case 2:
2106 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2107 break;
2108 case 3:
2109 return 1;
2111 gen_op_iwmmxt_movq_wRn_M0(wrd);
2112 gen_op_iwmmxt_set_mup();
2113 break;
2114 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2115 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2116 return 1;
2117 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2118 tmp2 = tcg_temp_new_i32();
2119 tcg_gen_mov_i32(tmp2, tmp);
2120 switch ((insn >> 22) & 3) {
2121 case 0:
2122 for (i = 0; i < 7; i ++) {
2123 tcg_gen_shli_i32(tmp2, tmp2, 4);
2124 tcg_gen_or_i32(tmp, tmp, tmp2);
2126 break;
2127 case 1:
2128 for (i = 0; i < 3; i ++) {
2129 tcg_gen_shli_i32(tmp2, tmp2, 8);
2130 tcg_gen_or_i32(tmp, tmp, tmp2);
2132 break;
2133 case 2:
2134 tcg_gen_shli_i32(tmp2, tmp2, 16);
2135 tcg_gen_or_i32(tmp, tmp, tmp2);
2136 break;
2138 gen_set_nzcv(tmp);
2139 tcg_temp_free_i32(tmp2);
2140 tcg_temp_free_i32(tmp);
2141 break;
2142 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2143 rd = (insn >> 12) & 0xf;
2144 rd0 = (insn >> 16) & 0xf;
2145 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2146 return 1;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 tmp = tcg_temp_new_i32();
2149 switch ((insn >> 22) & 3) {
2150 case 0:
2151 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2152 break;
2153 case 1:
2154 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2155 break;
2156 case 2:
2157 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2158 break;
2160 store_reg(s, rd, tmp);
2161 break;
2162 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2163 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 rd1 = (insn >> 0) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
2168 switch ((insn >> 22) & 3) {
2169 case 0:
2170 if (insn & (1 << 21))
2171 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2172 else
2173 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2174 break;
2175 case 1:
2176 if (insn & (1 << 21))
2177 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2178 else
2179 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2180 break;
2181 case 2:
2182 if (insn & (1 << 21))
2183 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2184 else
2185 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2186 break;
2187 case 3:
2188 return 1;
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
2194 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2195 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2196 wrd = (insn >> 12) & 0xf;
2197 rd0 = (insn >> 16) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 switch ((insn >> 22) & 3) {
2200 case 0:
2201 if (insn & (1 << 21))
2202 gen_op_iwmmxt_unpacklsb_M0();
2203 else
2204 gen_op_iwmmxt_unpacklub_M0();
2205 break;
2206 case 1:
2207 if (insn & (1 << 21))
2208 gen_op_iwmmxt_unpacklsw_M0();
2209 else
2210 gen_op_iwmmxt_unpackluw_M0();
2211 break;
2212 case 2:
2213 if (insn & (1 << 21))
2214 gen_op_iwmmxt_unpacklsl_M0();
2215 else
2216 gen_op_iwmmxt_unpacklul_M0();
2217 break;
2218 case 3:
2219 return 1;
2221 gen_op_iwmmxt_movq_wRn_M0(wrd);
2222 gen_op_iwmmxt_set_mup();
2223 gen_op_iwmmxt_set_cup();
2224 break;
2225 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2226 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2227 wrd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 gen_op_iwmmxt_movq_M0_wRn(rd0);
2230 switch ((insn >> 22) & 3) {
2231 case 0:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_unpackhsb_M0();
2234 else
2235 gen_op_iwmmxt_unpackhub_M0();
2236 break;
2237 case 1:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_unpackhsw_M0();
2240 else
2241 gen_op_iwmmxt_unpackhuw_M0();
2242 break;
2243 case 2:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_unpackhsl_M0();
2246 else
2247 gen_op_iwmmxt_unpackhul_M0();
2248 break;
2249 case 3:
2250 return 1;
2252 gen_op_iwmmxt_movq_wRn_M0(wrd);
2253 gen_op_iwmmxt_set_mup();
2254 gen_op_iwmmxt_set_cup();
2255 break;
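/* The shift-by-amount forms below (WSRL, WSRA, WSLL, WROR) share one
 * pattern: gen_iwmmxt_shift(), defined earlier in this file, loads the
 * shift amount into tmp and returns nonzero for an invalid encoding, in
 * which case the instruction UNDEFs.  The mask argument (0xff here, or
 * 0xf/0x1f/0x3f per element size for WROR) appears to bound the amount
 * to what is meaningful for the element width.
 */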
2256 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2257 case 0x214: case 0x614: case 0xa14: case 0xe14:
2258 if (((insn >> 22) & 3) == 0)
2259 return 1;
2260 wrd = (insn >> 12) & 0xf;
2261 rd0 = (insn >> 16) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
2263 tmp = tcg_temp_new_i32();
2264 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2265 tcg_temp_free_i32(tmp);
2266 return 1;
2268 switch ((insn >> 22) & 3) {
2269 case 1:
2270 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2271 break;
2272 case 2:
2273 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2274 break;
2275 case 3:
2276 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2277 break;
2279 tcg_temp_free_i32(tmp);
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 gen_op_iwmmxt_set_cup();
2283 break;
2284 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2285 case 0x014: case 0x414: case 0x814: case 0xc14:
2286 if (((insn >> 22) & 3) == 0)
2287 return 1;
2288 wrd = (insn >> 12) & 0xf;
2289 rd0 = (insn >> 16) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
2291 tmp = tcg_temp_new_i32();
2292 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2293 tcg_temp_free_i32(tmp);
2294 return 1;
2296 switch ((insn >> 22) & 3) {
2297 case 1:
2298 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2299 break;
2300 case 2:
2301 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2302 break;
2303 case 3:
2304 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2305 break;
2307 tcg_temp_free_i32(tmp);
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 gen_op_iwmmxt_set_cup();
2311 break;
2312 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2313 case 0x114: case 0x514: case 0x914: case 0xd14:
2314 if (((insn >> 22) & 3) == 0)
2315 return 1;
2316 wrd = (insn >> 12) & 0xf;
2317 rd0 = (insn >> 16) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
2319 tmp = tcg_temp_new_i32();
2320 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2321 tcg_temp_free_i32(tmp);
2322 return 1;
2324 switch ((insn >> 22) & 3) {
2325 case 1:
2326 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2327 break;
2328 case 2:
2329 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2330 break;
2331 case 3:
2332 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2333 break;
2335 tcg_temp_free_i32(tmp);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2341 case 0x314: case 0x714: case 0xb14: case 0xf14:
2342 if (((insn >> 22) & 3) == 0)
2343 return 1;
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 tmp = tcg_temp_new_i32();
2348 switch ((insn >> 22) & 3) {
2349 case 1:
2350 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2351 tcg_temp_free_i32(tmp);
2352 return 1;
2354 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2355 break;
2356 case 2:
2357 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2358 tcg_temp_free_i32(tmp);
2359 return 1;
2361 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2362 break;
2363 case 3:
2364 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2365 tcg_temp_free_i32(tmp);
2366 return 1;
2368 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2369 break;
2371 tcg_temp_free_i32(tmp);
2372 gen_op_iwmmxt_movq_wRn_M0(wrd);
2373 gen_op_iwmmxt_set_mup();
2374 gen_op_iwmmxt_set_cup();
2375 break;
2376 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2377 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2378 wrd = (insn >> 12) & 0xf;
2379 rd0 = (insn >> 16) & 0xf;
2380 rd1 = (insn >> 0) & 0xf;
2381 gen_op_iwmmxt_movq_M0_wRn(rd0);
2382 switch ((insn >> 22) & 3) {
2383 case 0:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2386 else
2387 gen_op_iwmmxt_minub_M0_wRn(rd1);
2388 break;
2389 case 1:
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2392 else
2393 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2394 break;
2395 case 2:
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2398 else
2399 gen_op_iwmmxt_minul_M0_wRn(rd1);
2400 break;
2401 case 3:
2402 return 1;
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 break;
2407 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2408 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2409 wrd = (insn >> 12) & 0xf;
2410 rd0 = (insn >> 16) & 0xf;
2411 rd1 = (insn >> 0) & 0xf;
2412 gen_op_iwmmxt_movq_M0_wRn(rd0);
2413 switch ((insn >> 22) & 3) {
2414 case 0:
2415 if (insn & (1 << 21))
2416 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2417 else
2418 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2419 break;
2420 case 1:
2421 if (insn & (1 << 21))
2422 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2423 else
2424 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2425 break;
2426 case 2:
2427 if (insn & (1 << 21))
2428 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2429 else
2430 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2431 break;
2432 case 3:
2433 return 1;
2435 gen_op_iwmmxt_movq_wRn_M0(wrd);
2436 gen_op_iwmmxt_set_mup();
2437 break;
2438 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2439 case 0x402: case 0x502: case 0x602: case 0x702:
2440 wrd = (insn >> 12) & 0xf;
2441 rd0 = (insn >> 16) & 0xf;
2442 rd1 = (insn >> 0) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 tmp = tcg_const_i32((insn >> 20) & 3);
2445 iwmmxt_load_reg(cpu_V1, rd1);
2446 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2447 tcg_temp_free_i32(tmp);
2448 gen_op_iwmmxt_movq_wRn_M0(wrd);
2449 gen_op_iwmmxt_set_mup();
2450 break;
2451 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2452 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2453 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2454 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2455 wrd = (insn >> 12) & 0xf;
2456 rd0 = (insn >> 16) & 0xf;
2457 rd1 = (insn >> 0) & 0xf;
2458 gen_op_iwmmxt_movq_M0_wRn(rd0);
2459 switch ((insn >> 20) & 0xf) {
2460 case 0x0:
2461 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2462 break;
2463 case 0x1:
2464 gen_op_iwmmxt_subub_M0_wRn(rd1);
2465 break;
2466 case 0x3:
2467 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2468 break;
2469 case 0x4:
2470 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2471 break;
2472 case 0x5:
2473 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2474 break;
2475 case 0x7:
2476 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2477 break;
2478 case 0x8:
2479 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2480 break;
2481 case 0x9:
2482 gen_op_iwmmxt_subul_M0_wRn(rd1);
2483 break;
2484 case 0xb:
2485 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2486 break;
2487 default:
2488 return 1;
2490 gen_op_iwmmxt_movq_wRn_M0(wrd);
2491 gen_op_iwmmxt_set_mup();
2492 gen_op_iwmmxt_set_cup();
2493 break;
2494 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2495 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2496 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2497 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2498 wrd = (insn >> 12) & 0xf;
2499 rd0 = (insn >> 16) & 0xf;
2500 gen_op_iwmmxt_movq_M0_wRn(rd0);
2501 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2502 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2503 tcg_temp_free_i32(tmp);
2504 gen_op_iwmmxt_movq_wRn_M0(wrd);
2505 gen_op_iwmmxt_set_mup();
2506 gen_op_iwmmxt_set_cup();
2507 break;
2508 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2509 case 0x418: case 0x518: case 0x618: case 0x718:
2510 case 0x818: case 0x918: case 0xa18: case 0xb18:
2511 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2512 wrd = (insn >> 12) & 0xf;
2513 rd0 = (insn >> 16) & 0xf;
2514 rd1 = (insn >> 0) & 0xf;
2515 gen_op_iwmmxt_movq_M0_wRn(rd0);
2516 switch ((insn >> 20) & 0xf) {
2517 case 0x0:
2518 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2519 break;
2520 case 0x1:
2521 gen_op_iwmmxt_addub_M0_wRn(rd1);
2522 break;
2523 case 0x3:
2524 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2525 break;
2526 case 0x4:
2527 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2528 break;
2529 case 0x5:
2530 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2531 break;
2532 case 0x7:
2533 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2534 break;
2535 case 0x8:
2536 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2537 break;
2538 case 0x9:
2539 gen_op_iwmmxt_addul_M0_wRn(rd1);
2540 break;
2541 case 0xb:
2542 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2543 break;
2544 default:
2545 return 1;
2547 gen_op_iwmmxt_movq_wRn_M0(wrd);
2548 gen_op_iwmmxt_set_mup();
2549 gen_op_iwmmxt_set_cup();
2550 break;
2551 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2552 case 0x408: case 0x508: case 0x608: case 0x708:
2553 case 0x808: case 0x908: case 0xa08: case 0xb08:
2554 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2555 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2556 return 1;
2557 wrd = (insn >> 12) & 0xf;
2558 rd0 = (insn >> 16) & 0xf;
2559 rd1 = (insn >> 0) & 0xf;
2560 gen_op_iwmmxt_movq_M0_wRn(rd0);
2561 switch ((insn >> 22) & 3) {
2562 case 1:
2563 if (insn & (1 << 21))
2564 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2565 else
2566 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2567 break;
2568 case 2:
2569 if (insn & (1 << 21))
2570 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2571 else
2572 gen_op_iwmmxt_packul_M0_wRn(rd1);
2573 break;
2574 case 3:
2575 if (insn & (1 << 21))
2576 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2577 else
2578 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2579 break;
2581 gen_op_iwmmxt_movq_wRn_M0(wrd);
2582 gen_op_iwmmxt_set_mup();
2583 gen_op_iwmmxt_set_cup();
2584 break;
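/* TMIA, TMIAPH, TMIAxy: multiply two ARM core registers and accumulate
 * the result into the 64-bit wRd named by bits [8:5] of the encoding.
 * The xy forms first select the top or bottom halfword of each operand.
 */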
2585 case 0x201: case 0x203: case 0x205: case 0x207:
2586 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2587 case 0x211: case 0x213: case 0x215: case 0x217:
2588 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2589 wrd = (insn >> 5) & 0xf;
2590 rd0 = (insn >> 12) & 0xf;
2591 rd1 = (insn >> 0) & 0xf;
2592 if (rd0 == 0xf || rd1 == 0xf)
2593 return 1;
2594 gen_op_iwmmxt_movq_M0_wRn(wrd);
2595 tmp = load_reg(s, rd0);
2596 tmp2 = load_reg(s, rd1);
2597 switch ((insn >> 16) & 0xf) {
2598 case 0x0: /* TMIA */
2599 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2600 break;
2601 case 0x8: /* TMIAPH */
2602 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2603 break;
2604 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2605 if (insn & (1 << 16))
2606 tcg_gen_shri_i32(tmp, tmp, 16);
2607 if (insn & (1 << 17))
2608 tcg_gen_shri_i32(tmp2, tmp2, 16);
2609 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2610 break;
2611 default:
2612 tcg_temp_free_i32(tmp2);
2613 tcg_temp_free_i32(tmp);
2614 return 1;
2616 tcg_temp_free_i32(tmp2);
2617 tcg_temp_free_i32(tmp);
2618 gen_op_iwmmxt_movq_wRn_M0(wrd);
2619 gen_op_iwmmxt_set_mup();
2620 break;
2621 default:
2622 return 1;
2625 return 0;
2628 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2629 (i.e. an undefined instruction). */
2630 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2632 int acc, rd0, rd1, rdhi, rdlo;
2633 TCGv_i32 tmp, tmp2;
2635 if ((insn & 0x0ff00f10) == 0x0e200010) {
2636 /* Multiply with Internal Accumulate Format */
2637 rd0 = (insn >> 12) & 0xf;
2638 rd1 = insn & 0xf;
2639 acc = (insn >> 5) & 7;
2641 if (acc != 0)
2642 return 1;
2644 tmp = load_reg(s, rd0);
2645 tmp2 = load_reg(s, rd1);
2646 switch ((insn >> 16) & 0xf) {
2647 case 0x0: /* MIA */
2648 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2649 break;
2650 case 0x8: /* MIAPH */
2651 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2652 break;
2653 case 0xc: /* MIABB */
2654 case 0xd: /* MIABT */
2655 case 0xe: /* MIATB */
2656 case 0xf: /* MIATT */
2657 if (insn & (1 << 16))
2658 tcg_gen_shri_i32(tmp, tmp, 16);
2659 if (insn & (1 << 17))
2660 tcg_gen_shri_i32(tmp2, tmp2, 16);
2661 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2662 break;
2663 default:
2664 return 1;
2666 tcg_temp_free_i32(tmp2);
2667 tcg_temp_free_i32(tmp);
2669 gen_op_iwmmxt_movq_wRn_M0(acc);
2670 return 0;
2673 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2674 /* Internal Accumulator Access Format */
2675 rdhi = (insn >> 16) & 0xf;
2676 rdlo = (insn >> 12) & 0xf;
2677 acc = insn & 7;
2679 if (acc != 0)
2680 return 1;
2682 if (insn & ARM_CP_RW_BIT) { /* MRA */
2683 iwmmxt_load_reg(cpu_V0, acc);
2684 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2685 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2686 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2687 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2688 } else { /* MAR */
2689 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2690 iwmmxt_store_reg(cpu_V0, acc);
2692 return 0;
2695 return 1;
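/* Helpers for extracting VFP register numbers from instruction fields.
 * A single-precision register number is five bits: a four-bit field plus
 * one extra bit which forms the LSB.  For double-precision registers the
 * extra bit is the MSB instead, and is only valid when VFP3 (which adds
 * D16-D31) is present; otherwise a set extra bit makes the encoding UNDEF.
 * For example, VFP_DREG_D(rd, insn) on a VFP3 core yields
 *     rd = extract32(insn, 12, 4) | (extract32(insn, 22, 1) << 4);
 */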
2698 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2699 #define VFP_SREG(insn, bigbit, smallbit) \
2700 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2701 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2702 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2703 reg = (((insn) >> (bigbit)) & 0x0f) \
2704 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2705 } else { \
2706 if (insn & (1 << (smallbit))) \
2707 return 1; \
2708 reg = ((insn) >> (bigbit)) & 0x0f; \
2709 }} while (0)
2711 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2712 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2713 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2714 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2715 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2716 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2718 /* Move between integer and VFP cores. */
2719 static TCGv_i32 gen_vfp_mrs(void)
2721 TCGv_i32 tmp = tcg_temp_new_i32();
2722 tcg_gen_mov_i32(tmp, cpu_F0s);
2723 return tmp;
2726 static void gen_vfp_msr(TCGv_i32 tmp)
2728 tcg_gen_mov_i32(cpu_F0s, tmp);
2729 tcg_temp_free_i32(tmp);
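/* The gen_neon_dup_* helpers below replicate an 8- or 16-bit value across
 * all lanes of a 32-bit TCG value; they are used for Neon VDUP and for
 * element loads that broadcast to a whole register.
 */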
2732 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2734 TCGv_i32 tmp = tcg_temp_new_i32();
2735 if (shift)
2736 tcg_gen_shri_i32(var, var, shift);
2737 tcg_gen_ext8u_i32(var, var);
2738 tcg_gen_shli_i32(tmp, var, 8);
2739 tcg_gen_or_i32(var, var, tmp);
2740 tcg_gen_shli_i32(tmp, var, 16);
2741 tcg_gen_or_i32(var, var, tmp);
2742 tcg_temp_free_i32(tmp);
2745 static void gen_neon_dup_low16(TCGv_i32 var)
2747 TCGv_i32 tmp = tcg_temp_new_i32();
2748 tcg_gen_ext16u_i32(var, var);
2749 tcg_gen_shli_i32(tmp, var, 16);
2750 tcg_gen_or_i32(var, var, tmp);
2751 tcg_temp_free_i32(tmp);
2754 static void gen_neon_dup_high16(TCGv_i32 var)
2756 TCGv_i32 tmp = tcg_temp_new_i32();
2757 tcg_gen_andi_i32(var, var, 0xffff0000);
2758 tcg_gen_shri_i32(tmp, var, 16);
2759 tcg_gen_or_i32(var, var, tmp);
2760 tcg_temp_free_i32(tmp);
2763 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2765 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2766 TCGv_i32 tmp = tcg_temp_new_i32();
2767 switch (size) {
2768 case 0:
2769 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2770 gen_neon_dup_u8(tmp, 0);
2771 break;
2772 case 1:
2773 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2774 gen_neon_dup_low16(tmp);
2775 break;
2776 case 2:
2777 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2778 break;
2779 default: /* Avoid compiler warnings. */
2780 abort();
2782 return tmp;
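/* VSEL (ARMv8 floating-point conditional select): copy Sn/Dn to Sd/Dd if
 * the condition holds, otherwise copy Sm/Dm.  The two-bit cc field selects
 * EQ, VS, GE or GT, evaluated from the cached NZCV flag values (widened to
 * 64 bits for the double-precision path so movcond_i64 can be used).
 */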
2785 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2786 uint32_t dp)
2788 uint32_t cc = extract32(insn, 20, 2);
2790 if (dp) {
2791 TCGv_i64 frn, frm, dest;
2792 TCGv_i64 tmp, zero, zf, nf, vf;
2794 zero = tcg_const_i64(0);
2796 frn = tcg_temp_new_i64();
2797 frm = tcg_temp_new_i64();
2798 dest = tcg_temp_new_i64();
2800 zf = tcg_temp_new_i64();
2801 nf = tcg_temp_new_i64();
2802 vf = tcg_temp_new_i64();
2804 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2805 tcg_gen_ext_i32_i64(nf, cpu_NF);
2806 tcg_gen_ext_i32_i64(vf, cpu_VF);
2808 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2809 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2810 switch (cc) {
2811 case 0: /* eq: Z */
2812 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2813 frn, frm);
2814 break;
2815 case 1: /* vs: V */
2816 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2817 frn, frm);
2818 break;
2819 case 2: /* ge: N == V -> N ^ V == 0 */
2820 tmp = tcg_temp_new_i64();
2821 tcg_gen_xor_i64(tmp, vf, nf);
2822 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2823 frn, frm);
2824 tcg_temp_free_i64(tmp);
2825 break;
2826 case 3: /* gt: !Z && N == V */
2827 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2828 frn, frm);
2829 tmp = tcg_temp_new_i64();
2830 tcg_gen_xor_i64(tmp, vf, nf);
2831 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2832 dest, frm);
2833 tcg_temp_free_i64(tmp);
2834 break;
2836 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2837 tcg_temp_free_i64(frn);
2838 tcg_temp_free_i64(frm);
2839 tcg_temp_free_i64(dest);
2841 tcg_temp_free_i64(zf);
2842 tcg_temp_free_i64(nf);
2843 tcg_temp_free_i64(vf);
2845 tcg_temp_free_i64(zero);
2846 } else {
2847 TCGv_i32 frn, frm, dest;
2848 TCGv_i32 tmp, zero;
2850 zero = tcg_const_i32(0);
2852 frn = tcg_temp_new_i32();
2853 frm = tcg_temp_new_i32();
2854 dest = tcg_temp_new_i32();
2855 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2856 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2857 switch (cc) {
2858 case 0: /* eq: Z */
2859 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2860 frn, frm);
2861 break;
2862 case 1: /* vs: V */
2863 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2864 frn, frm);
2865 break;
2866 case 2: /* ge: N == V -> N ^ V == 0 */
2867 tmp = tcg_temp_new_i32();
2868 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2869 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2870 frn, frm);
2871 tcg_temp_free_i32(tmp);
2872 break;
2873 case 3: /* gt: !Z && N == V */
2874 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2875 frn, frm);
2876 tmp = tcg_temp_new_i32();
2877 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2878 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2879 dest, frm);
2880 tcg_temp_free_i32(tmp);
2881 break;
2883 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2884 tcg_temp_free_i32(frn);
2885 tcg_temp_free_i32(frm);
2886 tcg_temp_free_i32(dest);
2888 tcg_temp_free_i32(zero);
2891 return 0;
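/* VMINNM/VMAXNM (ARMv8): floating-point minimum/maximum using the
 * minNum/maxNum semantics of IEEE 754-2008, where a quiet NaN operand
 * loses against a numerical operand.  Bit 6 of the encoding selects
 * min versus max.
 */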
2894 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2895 uint32_t rm, uint32_t dp)
2897 uint32_t vmin = extract32(insn, 6, 1);
2898 TCGv_ptr fpst = get_fpstatus_ptr(0);
2900 if (dp) {
2901 TCGv_i64 frn, frm, dest;
2903 frn = tcg_temp_new_i64();
2904 frm = tcg_temp_new_i64();
2905 dest = tcg_temp_new_i64();
2907 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2908 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2909 if (vmin) {
2910 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2911 } else {
2912 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2914 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2915 tcg_temp_free_i64(frn);
2916 tcg_temp_free_i64(frm);
2917 tcg_temp_free_i64(dest);
2918 } else {
2919 TCGv_i32 frn, frm, dest;
2921 frn = tcg_temp_new_i32();
2922 frm = tcg_temp_new_i32();
2923 dest = tcg_temp_new_i32();
2925 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2926 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2927 if (vmin) {
2928 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2929 } else {
2930 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2932 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2933 tcg_temp_free_i32(frn);
2934 tcg_temp_free_i32(frm);
2935 tcg_temp_free_i32(dest);
2938 tcg_temp_free_ptr(fpst);
2939 return 0;
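/* VRINTA/VRINTN/VRINTP/VRINTM (ARMv8): round to integral in floating-point
 * using an explicitly encoded rounding mode rather than the FPSCR one.
 * gen_helper_set_rmode swaps the requested mode into the FP status and
 * returns the previous mode in the same temporary, so the second call at
 * the end restores the original rounding mode.
 */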
2942 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2943 int rounding)
2945 TCGv_ptr fpst = get_fpstatus_ptr(0);
2946 TCGv_i32 tcg_rmode;
2948 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2949 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2951 if (dp) {
2952 TCGv_i64 tcg_op;
2953 TCGv_i64 tcg_res;
2954 tcg_op = tcg_temp_new_i64();
2955 tcg_res = tcg_temp_new_i64();
2956 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2957 gen_helper_rintd(tcg_res, tcg_op, fpst);
2958 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2959 tcg_temp_free_i64(tcg_op);
2960 tcg_temp_free_i64(tcg_res);
2961 } else {
2962 TCGv_i32 tcg_op;
2963 TCGv_i32 tcg_res;
2964 tcg_op = tcg_temp_new_i32();
2965 tcg_res = tcg_temp_new_i32();
2966 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2967 gen_helper_rints(tcg_res, tcg_op, fpst);
2968 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2969 tcg_temp_free_i32(tcg_op);
2970 tcg_temp_free_i32(tcg_res);
2973 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2974 tcg_temp_free_i32(tcg_rmode);
2976 tcg_temp_free_ptr(fpst);
2977 return 0;
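/* VCVTA/VCVTN/VCVTP/VCVTM (ARMv8): convert floating-point to a 32-bit
 * signed or unsigned integer using a directed rounding mode.  The result
 * is always written to a single-precision register, even when the source
 * is double precision, and the fixed-point shift is zero.
 */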
2980 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2981 int rounding)
2983 bool is_signed = extract32(insn, 7, 1);
2984 TCGv_ptr fpst = get_fpstatus_ptr(0);
2985 TCGv_i32 tcg_rmode, tcg_shift;
2987 tcg_shift = tcg_const_i32(0);
2989 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2990 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2992 if (dp) {
2993 TCGv_i64 tcg_double, tcg_res;
2994 TCGv_i32 tcg_tmp;
2995 /* Rd is encoded as a single precision register even when the source
2996 * is double precision.
2997 */
2998 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2999 tcg_double = tcg_temp_new_i64();
3000 tcg_res = tcg_temp_new_i64();
3001 tcg_tmp = tcg_temp_new_i32();
3002 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3003 if (is_signed) {
3004 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3005 } else {
3006 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3008 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3009 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3010 tcg_temp_free_i32(tcg_tmp);
3011 tcg_temp_free_i64(tcg_res);
3012 tcg_temp_free_i64(tcg_double);
3013 } else {
3014 TCGv_i32 tcg_single, tcg_res;
3015 tcg_single = tcg_temp_new_i32();
3016 tcg_res = tcg_temp_new_i32();
3017 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3018 if (is_signed) {
3019 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3020 } else {
3021 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3023 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3024 tcg_temp_free_i32(tcg_res);
3025 tcg_temp_free_i32(tcg_single);
3028 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3029 tcg_temp_free_i32(tcg_rmode);
3031 tcg_temp_free_i32(tcg_shift);
3033 tcg_temp_free_ptr(fpst);
3035 return 0;
3038 /* Table for converting the most common AArch32 encoding of
3039 * rounding mode to arm_fprounding order (which matches the
3040 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3041 */
3042 static const uint8_t fp_decode_rm[] = {
3043 FPROUNDING_TIEAWAY,
3044 FPROUNDING_TIEEVEN,
3045 FPROUNDING_POSINF,
3046 FPROUNDING_NEGINF,
3049 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3051 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3053 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3054 return 1;
3057 if (dp) {
3058 VFP_DREG_D(rd, insn);
3059 VFP_DREG_N(rn, insn);
3060 VFP_DREG_M(rm, insn);
3061 } else {
3062 rd = VFP_SREG_D(insn);
3063 rn = VFP_SREG_N(insn);
3064 rm = VFP_SREG_M(insn);
3067 if ((insn & 0x0f800e50) == 0x0e000a00) {
3068 return handle_vsel(insn, rd, rn, rm, dp);
3069 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3070 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3071 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3072 /* VRINTA, VRINTN, VRINTP, VRINTM */
3073 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3074 return handle_vrint(insn, rd, rm, dp, rounding);
3075 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3076 /* VCVTA, VCVTN, VCVTP, VCVTM */
3077 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3078 return handle_vcvt(insn, rd, rm, dp, rounding);
3080 return 1;
3083 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3084 (i.e. an undefined instruction). */
3085 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3087 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3088 int dp, veclen;
3089 TCGv_i32 addr;
3090 TCGv_i32 tmp;
3091 TCGv_i32 tmp2;
3093 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3094 return 1;
3097 /* FIXME: this access check should not take precedence over UNDEF
3098 * for invalid encodings; we will generate incorrect syndrome information
3099 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3100 */
3101 if (s->fp_excp_el) {
3102 gen_exception_insn(s, 4, EXCP_UDEF,
3103 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
3104 return 0;
3107 if (!s->vfp_enabled) {
3108 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3109 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3110 return 1;
3111 rn = (insn >> 16) & 0xf;
3112 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3113 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3114 return 1;
3118 if (extract32(insn, 28, 4) == 0xf) {
3119 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3120 * only used in v8 and above.
3121 */
3122 return disas_vfp_v8_insn(s, insn);
3125 dp = ((insn & 0xf00) == 0xb00);
3126 switch ((insn >> 24) & 0xf) {
3127 case 0xe:
3128 if (insn & (1 << 4)) {
3129 /* single register transfer */
3130 rd = (insn >> 12) & 0xf;
3131 if (dp) {
3132 int size;
3133 int pass;
3135 VFP_DREG_N(rn, insn);
3136 if (insn & 0xf)
3137 return 1;
3138 if (insn & 0x00c00060
3139 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3140 return 1;
3143 pass = (insn >> 21) & 1;
3144 if (insn & (1 << 22)) {
3145 size = 0;
3146 offset = ((insn >> 5) & 3) * 8;
3147 } else if (insn & (1 << 5)) {
3148 size = 1;
3149 offset = (insn & (1 << 6)) ? 16 : 0;
3150 } else {
3151 size = 2;
3152 offset = 0;
3154 if (insn & ARM_CP_RW_BIT) {
3155 /* vfp->arm */
3156 tmp = neon_load_reg(rn, pass);
3157 switch (size) {
3158 case 0:
3159 if (offset)
3160 tcg_gen_shri_i32(tmp, tmp, offset);
3161 if (insn & (1 << 23))
3162 gen_uxtb(tmp);
3163 else
3164 gen_sxtb(tmp);
3165 break;
3166 case 1:
3167 if (insn & (1 << 23)) {
3168 if (offset) {
3169 tcg_gen_shri_i32(tmp, tmp, 16);
3170 } else {
3171 gen_uxth(tmp);
3173 } else {
3174 if (offset) {
3175 tcg_gen_sari_i32(tmp, tmp, 16);
3176 } else {
3177 gen_sxth(tmp);
3180 break;
3181 case 2:
3182 break;
3184 store_reg(s, rd, tmp);
3185 } else {
3186 /* arm->vfp */
3187 tmp = load_reg(s, rd);
3188 if (insn & (1 << 23)) {
3189 /* VDUP */
3190 if (size == 0) {
3191 gen_neon_dup_u8(tmp, 0);
3192 } else if (size == 1) {
3193 gen_neon_dup_low16(tmp);
3195 for (n = 0; n <= pass * 2; n++) {
3196 tmp2 = tcg_temp_new_i32();
3197 tcg_gen_mov_i32(tmp2, tmp);
3198 neon_store_reg(rn, n, tmp2);
3200 neon_store_reg(rn, n, tmp);
3201 } else {
3202 /* VMOV */
3203 switch (size) {
3204 case 0:
3205 tmp2 = neon_load_reg(rn, pass);
3206 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3207 tcg_temp_free_i32(tmp2);
3208 break;
3209 case 1:
3210 tmp2 = neon_load_reg(rn, pass);
3211 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3212 tcg_temp_free_i32(tmp2);
3213 break;
3214 case 2:
3215 break;
3217 neon_store_reg(rn, pass, tmp);
3220 } else { /* !dp */
3221 if ((insn & 0x6f) != 0x00)
3222 return 1;
3223 rn = VFP_SREG_N(insn);
3224 if (insn & ARM_CP_RW_BIT) {
3225 /* vfp->arm */
3226 if (insn & (1 << 21)) {
3227 /* system register */
3228 rn >>= 1;
3230 switch (rn) {
3231 case ARM_VFP_FPSID:
3232 /* VFP2 allows access to FSID from userspace.
3233 VFP3 restricts all id registers to privileged
3234 accesses. */
3235 if (IS_USER(s)
3236 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3237 return 1;
3239 tmp = load_cpu_field(vfp.xregs[rn]);
3240 break;
3241 case ARM_VFP_FPEXC:
3242 if (IS_USER(s))
3243 return 1;
3244 tmp = load_cpu_field(vfp.xregs[rn]);
3245 break;
3246 case ARM_VFP_FPINST:
3247 case ARM_VFP_FPINST2:
3248 /* Not present in VFP3. */
3249 if (IS_USER(s)
3250 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3251 return 1;
3253 tmp = load_cpu_field(vfp.xregs[rn]);
3254 break;
3255 case ARM_VFP_FPSCR:
3256 if (rd == 15) {
3257 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3258 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3259 } else {
3260 tmp = tcg_temp_new_i32();
3261 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3263 break;
3264 case ARM_VFP_MVFR2:
3265 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3266 return 1;
3268 /* fall through */
3269 case ARM_VFP_MVFR0:
3270 case ARM_VFP_MVFR1:
3271 if (IS_USER(s)
3272 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3273 return 1;
3275 tmp = load_cpu_field(vfp.xregs[rn]);
3276 break;
3277 default:
3278 return 1;
3280 } else {
3281 gen_mov_F0_vreg(0, rn);
3282 tmp = gen_vfp_mrs();
3284 if (rd == 15) {
3285 /* Set the 4 flag bits in the CPSR. */
3286 gen_set_nzcv(tmp);
3287 tcg_temp_free_i32(tmp);
3288 } else {
3289 store_reg(s, rd, tmp);
3291 } else {
3292 /* arm->vfp */
3293 if (insn & (1 << 21)) {
3294 rn >>= 1;
3295 /* system register */
3296 switch (rn) {
3297 case ARM_VFP_FPSID:
3298 case ARM_VFP_MVFR0:
3299 case ARM_VFP_MVFR1:
3300 /* Writes are ignored. */
3301 break;
3302 case ARM_VFP_FPSCR:
3303 tmp = load_reg(s, rd);
3304 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3305 tcg_temp_free_i32(tmp);
3306 gen_lookup_tb(s);
3307 break;
3308 case ARM_VFP_FPEXC:
3309 if (IS_USER(s))
3310 return 1;
3311 /* TODO: VFP subarchitecture support.
3312 * For now, keep the EN bit only */
3313 tmp = load_reg(s, rd);
3314 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3315 store_cpu_field(tmp, vfp.xregs[rn]);
3316 gen_lookup_tb(s);
3317 break;
3318 case ARM_VFP_FPINST:
3319 case ARM_VFP_FPINST2:
3320 if (IS_USER(s)) {
3321 return 1;
3323 tmp = load_reg(s, rd);
3324 store_cpu_field(tmp, vfp.xregs[rn]);
3325 break;
3326 default:
3327 return 1;
3329 } else {
3330 tmp = load_reg(s, rd);
3331 gen_vfp_msr(tmp);
3332 gen_mov_vreg_F0(0, rn);
3336 } else {
3337 /* data processing */
3338 /* The opcode is in bits 23, 21, 20 and 6. */
3339 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3340 if (dp) {
3341 if (op == 15) {
3342 /* rn is opcode */
3343 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3344 } else {
3345 /* rn is register number */
3346 VFP_DREG_N(rn, insn);
3349 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3350 ((rn & 0x1e) == 0x6))) {
3351 /* Integer or single/half precision destination. */
3352 rd = VFP_SREG_D(insn);
3353 } else {
3354 VFP_DREG_D(rd, insn);
3356 if (op == 15 &&
3357 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3358 ((rn & 0x1e) == 0x4))) {
3359 /* VCVT from int or half precision is always from S reg
3360 * regardless of dp bit. VCVT with immediate frac_bits
3361 * has same format as SREG_M.
3362 */
3363 rm = VFP_SREG_M(insn);
3364 } else {
3365 VFP_DREG_M(rm, insn);
3367 } else {
3368 rn = VFP_SREG_N(insn);
3369 if (op == 15 && rn == 15) {
3370 /* Double precision destination. */
3371 VFP_DREG_D(rd, insn);
3372 } else {
3373 rd = VFP_SREG_D(insn);
3375 /* NB that we implicitly rely on the encoding for the frac_bits
3376 * in VCVT of fixed to float being the same as that of an SREG_M
3377 */
3378 rm = VFP_SREG_M(insn);
3381 veclen = s->vec_len;
3382 if (op == 15 && rn > 3)
3383 veclen = 0;
3385 /* Shut up compiler warnings. */
3386 delta_m = 0;
3387 delta_d = 0;
3388 bank_mask = 0;
3390 if (veclen > 0) {
3391 if (dp)
3392 bank_mask = 0xc;
3393 else
3394 bank_mask = 0x18;
3396 /* Figure out what type of vector operation this is. */
3397 if ((rd & bank_mask) == 0) {
3398 /* scalar */
3399 veclen = 0;
3400 } else {
3401 if (dp)
3402 delta_d = (s->vec_stride >> 1) + 1;
3403 else
3404 delta_d = s->vec_stride + 1;
3406 if ((rm & bank_mask) == 0) {
3407 /* mixed scalar/vector */
3408 delta_m = 0;
3409 } else {
3410 /* vector */
3411 delta_m = delta_d;
3416 /* Load the initial operands. */
3417 if (op == 15) {
3418 switch (rn) {
3419 case 16:
3420 case 17:
3421 /* Integer source */
3422 gen_mov_F0_vreg(0, rm);
3423 break;
3424 case 8:
3425 case 9:
3426 /* Compare */
3427 gen_mov_F0_vreg(dp, rd);
3428 gen_mov_F1_vreg(dp, rm);
3429 break;
3430 case 10:
3431 case 11:
3432 /* Compare with zero */
3433 gen_mov_F0_vreg(dp, rd);
3434 gen_vfp_F1_ld0(dp);
3435 break;
3436 case 20:
3437 case 21:
3438 case 22:
3439 case 23:
3440 case 28:
3441 case 29:
3442 case 30:
3443 case 31:
3444 /* Source and destination the same. */
3445 gen_mov_F0_vreg(dp, rd);
3446 break;
3447 case 4:
3448 case 5:
3449 case 6:
3450 case 7:
3451 /* VCVTB, VCVTT: only present with the halfprec extension
3452 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3453 * (we choose to UNDEF)
3454 */
3455 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3456 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3457 return 1;
3459 if (!extract32(rn, 1, 1)) {
3460 /* Half precision source. */
3461 gen_mov_F0_vreg(0, rm);
3462 break;
3464 /* Otherwise fall through */
3465 default:
3466 /* One source operand. */
3467 gen_mov_F0_vreg(dp, rm);
3468 break;
3470 } else {
3471 /* Two source operands. */
3472 gen_mov_F0_vreg(dp, rn);
3473 gen_mov_F1_vreg(dp, rm);
3476 for (;;) {
3477 /* Perform the calculation. */
3478 switch (op) {
3479 case 0: /* VMLA: fd + (fn * fm) */
3480 /* Note that order of inputs to the add matters for NaNs */
3481 gen_vfp_F1_mul(dp);
3482 gen_mov_F0_vreg(dp, rd);
3483 gen_vfp_add(dp);
3484 break;
3485 case 1: /* VMLS: fd + -(fn * fm) */
3486 gen_vfp_mul(dp);
3487 gen_vfp_F1_neg(dp);
3488 gen_mov_F0_vreg(dp, rd);
3489 gen_vfp_add(dp);
3490 break;
3491 case 2: /* VNMLS: -fd + (fn * fm) */
3492 /* Note that it isn't valid to replace (-A + B) with (B - A)
3493 * or similar plausible looking simplifications
3494 * because this will give wrong results for NaNs.
3495 */
3496 gen_vfp_F1_mul(dp);
3497 gen_mov_F0_vreg(dp, rd);
3498 gen_vfp_neg(dp);
3499 gen_vfp_add(dp);
3500 break;
3501 case 3: /* VNMLA: -fd + -(fn * fm) */
3502 gen_vfp_mul(dp);
3503 gen_vfp_F1_neg(dp);
3504 gen_mov_F0_vreg(dp, rd);
3505 gen_vfp_neg(dp);
3506 gen_vfp_add(dp);
3507 break;
3508 case 4: /* mul: fn * fm */
3509 gen_vfp_mul(dp);
3510 break;
3511 case 5: /* nmul: -(fn * fm) */
3512 gen_vfp_mul(dp);
3513 gen_vfp_neg(dp);
3514 break;
3515 case 6: /* add: fn + fm */
3516 gen_vfp_add(dp);
3517 break;
3518 case 7: /* sub: fn - fm */
3519 gen_vfp_sub(dp);
3520 break;
3521 case 8: /* div: fn / fm */
3522 gen_vfp_div(dp);
3523 break;
3524 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3525 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3526 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3527 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3528 /* These are fused multiply-add, and must be done as one
3529 * floating point operation with no rounding between the
3530 * multiplication and addition steps.
3531 * NB that doing the negations here as separate steps is
3532 * correct: an input NaN should come out with its sign bit
3533 * flipped if it is a negated-input.
3534 */
3535 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3536 return 1;
3538 if (dp) {
3539 TCGv_ptr fpst;
3540 TCGv_i64 frd;
3541 if (op & 1) {
3542 /* VFNMS, VFMS */
3543 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3545 frd = tcg_temp_new_i64();
3546 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3547 if (op & 2) {
3548 /* VFNMA, VFNMS */
3549 gen_helper_vfp_negd(frd, frd);
3551 fpst = get_fpstatus_ptr(0);
3552 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3553 cpu_F1d, frd, fpst);
3554 tcg_temp_free_ptr(fpst);
3555 tcg_temp_free_i64(frd);
3556 } else {
3557 TCGv_ptr fpst;
3558 TCGv_i32 frd;
3559 if (op & 1) {
3560 /* VFNMS, VFMS */
3561 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3563 frd = tcg_temp_new_i32();
3564 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3565 if (op & 2) {
3566 gen_helper_vfp_negs(frd, frd);
3568 fpst = get_fpstatus_ptr(0);
3569 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3570 cpu_F1s, frd, fpst);
3571 tcg_temp_free_ptr(fpst);
3572 tcg_temp_free_i32(frd);
3574 break;
3575 case 14: /* fconst */
3576 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3577 return 1;
3580 n = (insn << 12) & 0x80000000;
3581 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3582 if (dp) {
3583 if (i & 0x40)
3584 i |= 0x3f80;
3585 else
3586 i |= 0x4000;
3587 n |= i << 16;
3588 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3589 } else {
3590 if (i & 0x40)
3591 i |= 0x780;
3592 else
3593 i |= 0x800;
3594 n |= i << 19;
3595 tcg_gen_movi_i32(cpu_F0s, n);
3597 break;
3598 case 15: /* extension space */
3599 switch (rn) {
3600 case 0: /* cpy */
3601 /* no-op */
3602 break;
3603 case 1: /* abs */
3604 gen_vfp_abs(dp);
3605 break;
3606 case 2: /* neg */
3607 gen_vfp_neg(dp);
3608 break;
3609 case 3: /* sqrt */
3610 gen_vfp_sqrt(dp);
3611 break;
3612 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3613 tmp = gen_vfp_mrs();
3614 tcg_gen_ext16u_i32(tmp, tmp);
3615 if (dp) {
3616 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3617 cpu_env);
3618 } else {
3619 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3620 cpu_env);
3622 tcg_temp_free_i32(tmp);
3623 break;
3624 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3625 tmp = gen_vfp_mrs();
3626 tcg_gen_shri_i32(tmp, tmp, 16);
3627 if (dp) {
3628 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3629 cpu_env);
3630 } else {
3631 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3632 cpu_env);
3634 tcg_temp_free_i32(tmp);
3635 break;
3636 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3637 tmp = tcg_temp_new_i32();
3638 if (dp) {
3639 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3640 cpu_env);
3641 } else {
3642 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3643 cpu_env);
3645 gen_mov_F0_vreg(0, rd);
3646 tmp2 = gen_vfp_mrs();
3647 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3648 tcg_gen_or_i32(tmp, tmp, tmp2);
3649 tcg_temp_free_i32(tmp2);
3650 gen_vfp_msr(tmp);
3651 break;
3652 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3653 tmp = tcg_temp_new_i32();
3654 if (dp) {
3655 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3656 cpu_env);
3657 } else {
3658 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3659 cpu_env);
3661 tcg_gen_shli_i32(tmp, tmp, 16);
3662 gen_mov_F0_vreg(0, rd);
3663 tmp2 = gen_vfp_mrs();
3664 tcg_gen_ext16u_i32(tmp2, tmp2);
3665 tcg_gen_or_i32(tmp, tmp, tmp2);
3666 tcg_temp_free_i32(tmp2);
3667 gen_vfp_msr(tmp);
3668 break;
3669 case 8: /* cmp */
3670 gen_vfp_cmp(dp);
3671 break;
3672 case 9: /* cmpe */
3673 gen_vfp_cmpe(dp);
3674 break;
3675 case 10: /* cmpz */
3676 gen_vfp_cmp(dp);
3677 break;
3678 case 11: /* cmpez */
3679 gen_vfp_F1_ld0(dp);
3680 gen_vfp_cmpe(dp);
3681 break;
3682 case 12: /* vrintr */
3684 TCGv_ptr fpst = get_fpstatus_ptr(0);
3685 if (dp) {
3686 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3687 } else {
3688 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3690 tcg_temp_free_ptr(fpst);
3691 break;
3693 case 13: /* vrintz */
3695 TCGv_ptr fpst = get_fpstatus_ptr(0);
3696 TCGv_i32 tcg_rmode;
3697 tcg_rmode = tcg_const_i32(float_round_to_zero);
3698 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3699 if (dp) {
3700 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3701 } else {
3702 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3704 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3705 tcg_temp_free_i32(tcg_rmode);
3706 tcg_temp_free_ptr(fpst);
3707 break;
3709 case 14: /* vrintx */
3711 TCGv_ptr fpst = get_fpstatus_ptr(0);
3712 if (dp) {
3713 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3714 } else {
3715 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3717 tcg_temp_free_ptr(fpst);
3718 break;
3720 case 15: /* single<->double conversion */
3721 if (dp)
3722 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3723 else
3724 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3725 break;
3726 case 16: /* fuito */
3727 gen_vfp_uito(dp, 0);
3728 break;
3729 case 17: /* fsito */
3730 gen_vfp_sito(dp, 0);
3731 break;
3732 case 20: /* fshto */
3733 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3734 return 1;
3736 gen_vfp_shto(dp, 16 - rm, 0);
3737 break;
3738 case 21: /* fslto */
3739 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3740 return 1;
3742 gen_vfp_slto(dp, 32 - rm, 0);
3743 break;
3744 case 22: /* fuhto */
3745 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3746 return 1;
3748 gen_vfp_uhto(dp, 16 - rm, 0);
3749 break;
3750 case 23: /* fulto */
3751 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3752 return 1;
3754 gen_vfp_ulto(dp, 32 - rm, 0);
3755 break;
3756 case 24: /* ftoui */
3757 gen_vfp_toui(dp, 0);
3758 break;
3759 case 25: /* ftouiz */
3760 gen_vfp_touiz(dp, 0);
3761 break;
3762 case 26: /* ftosi */
3763 gen_vfp_tosi(dp, 0);
3764 break;
3765 case 27: /* ftosiz */
3766 gen_vfp_tosiz(dp, 0);
3767 break;
3768 case 28: /* ftosh */
3769 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3770 return 1;
3772 gen_vfp_tosh(dp, 16 - rm, 0);
3773 break;
3774 case 29: /* ftosl */
3775 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3776 return 1;
3778 gen_vfp_tosl(dp, 32 - rm, 0);
3779 break;
3780 case 30: /* ftouh */
3781 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3782 return 1;
3784 gen_vfp_touh(dp, 16 - rm, 0);
3785 break;
3786 case 31: /* ftoul */
3787 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3788 return 1;
3790 gen_vfp_toul(dp, 32 - rm, 0);
3791 break;
3792 default: /* undefined */
3793 return 1;
3795 break;
3796 default: /* undefined */
3797 return 1;
3800 /* Write back the result. */
3801 if (op == 15 && (rn >= 8 && rn <= 11)) {
3802 /* Comparison, do nothing. */
3803 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3804 (rn & 0x1e) == 0x6)) {
3805 /* VCVT double to int: always integer result.
3806 * VCVT double to half precision is always a single
3807 * precision result.
3809 gen_mov_vreg_F0(0, rd);
3810 } else if (op == 15 && rn == 15) {
3811 /* conversion */
3812 gen_mov_vreg_F0(!dp, rd);
3813 } else {
3814 gen_mov_vreg_F0(dp, rd);
3817 /* break out of the loop if we have finished */
3818 if (veclen == 0)
3819 break;
3821 if (op == 15 && delta_m == 0) {
3822 /* single source one-many */
3823 while (veclen--) {
3824 rd = ((rd + delta_d) & (bank_mask - 1))
3825 | (rd & bank_mask);
3826 gen_mov_vreg_F0(dp, rd);
3828 break;
3830 /* Setup the next operands. */
3831 veclen--;
3832 rd = ((rd + delta_d) & (bank_mask - 1))
3833 | (rd & bank_mask);
3835 if (op == 15) {
3836 /* One source operand. */
3837 rm = ((rm + delta_m) & (bank_mask - 1))
3838 | (rm & bank_mask);
3839 gen_mov_F0_vreg(dp, rm);
3840 } else {
3841 /* Two source operands. */
3842 rn = ((rn + delta_d) & (bank_mask - 1))
3843 | (rn & bank_mask);
3844 gen_mov_F0_vreg(dp, rn);
3845 if (delta_m) {
3846 rm = ((rm + delta_m) & (bank_mask - 1))
3847 | (rm & bank_mask);
3848 gen_mov_F1_vreg(dp, rm);
3853 break;
3854 case 0xc:
3855 case 0xd:
3856 if ((insn & 0x03e00000) == 0x00400000) {
3857 /* two-register transfer */
3858 rn = (insn >> 16) & 0xf;
3859 rd = (insn >> 12) & 0xf;
3860 if (dp) {
3861 VFP_DREG_M(rm, insn);
3862 } else {
3863 rm = VFP_SREG_M(insn);
3866 if (insn & ARM_CP_RW_BIT) {
3867 /* vfp->arm */
3868 if (dp) {
3869 gen_mov_F0_vreg(0, rm * 2);
3870 tmp = gen_vfp_mrs();
3871 store_reg(s, rd, tmp);
3872 gen_mov_F0_vreg(0, rm * 2 + 1);
3873 tmp = gen_vfp_mrs();
3874 store_reg(s, rn, tmp);
3875 } else {
3876 gen_mov_F0_vreg(0, rm);
3877 tmp = gen_vfp_mrs();
3878 store_reg(s, rd, tmp);
3879 gen_mov_F0_vreg(0, rm + 1);
3880 tmp = gen_vfp_mrs();
3881 store_reg(s, rn, tmp);
3883 } else {
3884 /* arm->vfp */
3885 if (dp) {
3886 tmp = load_reg(s, rd);
3887 gen_vfp_msr(tmp);
3888 gen_mov_vreg_F0(0, rm * 2);
3889 tmp = load_reg(s, rn);
3890 gen_vfp_msr(tmp);
3891 gen_mov_vreg_F0(0, rm * 2 + 1);
3892 } else {
3893 tmp = load_reg(s, rd);
3894 gen_vfp_msr(tmp);
3895 gen_mov_vreg_F0(0, rm);
3896 tmp = load_reg(s, rn);
3897 gen_vfp_msr(tmp);
3898 gen_mov_vreg_F0(0, rm + 1);
3901 } else {
3902 /* Load/store */
3903 rn = (insn >> 16) & 0xf;
3904 if (dp)
3905 VFP_DREG_D(rd, insn);
3906 else
3907 rd = VFP_SREG_D(insn);
3908 if ((insn & 0x01200000) == 0x01000000) {
3909 /* Single load/store */
3910 offset = (insn & 0xff) << 2;
3911 if ((insn & (1 << 23)) == 0)
3912 offset = -offset;
3913 if (s->thumb && rn == 15) {
3914 /* This is actually UNPREDICTABLE */
3915 addr = tcg_temp_new_i32();
3916 tcg_gen_movi_i32(addr, s->pc & ~2);
3917 } else {
3918 addr = load_reg(s, rn);
3920 tcg_gen_addi_i32(addr, addr, offset);
3921 if (insn & (1 << 20)) {
3922 gen_vfp_ld(s, dp, addr);
3923 gen_mov_vreg_F0(dp, rd);
3924 } else {
3925 gen_mov_F0_vreg(dp, rd);
3926 gen_vfp_st(s, dp, addr);
3928 tcg_temp_free_i32(addr);
3929 } else {
3930 /* load/store multiple */
3931 int w = insn & (1 << 21);
3932 if (dp)
3933 n = (insn >> 1) & 0x7f;
3934 else
3935 n = insn & 0xff;
3937 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3938 /* P == U , W == 1 => UNDEF */
3939 return 1;
3941 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3942 /* UNPREDICTABLE cases for bad immediates: we choose to
3943 * UNDEF to avoid generating huge numbers of TCG ops
3944 */
3945 return 1;
3947 if (rn == 15 && w) {
3948 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3949 return 1;
3952 if (s->thumb && rn == 15) {
3953 /* This is actually UNPREDICTABLE */
3954 addr = tcg_temp_new_i32();
3955 tcg_gen_movi_i32(addr, s->pc & ~2);
3956 } else {
3957 addr = load_reg(s, rn);
3959 if (insn & (1 << 24)) /* pre-decrement */
3960 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3962 if (dp)
3963 offset = 8;
3964 else
3965 offset = 4;
3966 for (i = 0; i < n; i++) {
3967 if (insn & ARM_CP_RW_BIT) {
3968 /* load */
3969 gen_vfp_ld(s, dp, addr);
3970 gen_mov_vreg_F0(dp, rd + i);
3971 } else {
3972 /* store */
3973 gen_mov_F0_vreg(dp, rd + i);
3974 gen_vfp_st(s, dp, addr);
3976 tcg_gen_addi_i32(addr, addr, offset);
3978 if (w) {
3979 /* writeback */
3980 if (insn & (1 << 24))
3981 offset = -offset * n;
3982 else if (dp && (insn & 1))
3983 offset = 4;
3984 else
3985 offset = 0;
3987 if (offset != 0)
3988 tcg_gen_addi_i32(addr, addr, offset);
3989 store_reg(s, rn, addr);
3990 } else {
3991 tcg_temp_free_i32(addr);
3995 break;
3996 default:
3997 /* Should never happen. */
3998 return 1;
4000 return 0;
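/* Emit a jump to a known destination.  Direct translation-block chaining
 * via goto_tb is only safe when the destination lies on the same guest
 * page as the current TB; otherwise the PC is written back and the TB is
 * exited so the main loop can look the destination up.
 */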
4003 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4005 TranslationBlock *tb;
4007 tb = s->tb;
4008 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
4009 tcg_gen_goto_tb(n);
4010 gen_set_pc_im(s, dest);
4011 tcg_gen_exit_tb((uintptr_t)tb + n);
4012 } else {
4013 gen_set_pc_im(s, dest);
4014 tcg_gen_exit_tb(0);
4018 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4020 if (unlikely(s->singlestep_enabled || s->ss_active)) {
4021 /* An indirect jump so that we still trigger the debug exception. */
4022 if (s->thumb)
4023 dest |= 1;
4024 gen_bx_im(s, dest);
4025 } else {
4026 gen_goto_tb(s, 0, dest);
4027 s->is_jmp = DISAS_TB_JUMP;
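/* 16x16->32 signed multiply of selected halfwords, as used by the
 * SMULxy/SMLAxy family: x and y pick the top (1) or bottom (0) halfword
 * of each operand before the multiply.
 */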
4031 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4033 if (x)
4034 tcg_gen_sari_i32(t0, t0, 16);
4035 else
4036 gen_sxth(t0);
4037 if (y)
4038 tcg_gen_sari_i32(t1, t1, 16);
4039 else
4040 gen_sxth(t1);
4041 tcg_gen_mul_i32(t0, t0, t1);
4044 /* Return the mask of PSR bits set by a MSR instruction. */
4045 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4047 uint32_t mask;
4049 mask = 0;
4050 if (flags & (1 << 0))
4051 mask |= 0xff;
4052 if (flags & (1 << 1))
4053 mask |= 0xff00;
4054 if (flags & (1 << 2))
4055 mask |= 0xff0000;
4056 if (flags & (1 << 3))
4057 mask |= 0xff000000;
4059 /* Mask out undefined bits. */
4060 mask &= ~CPSR_RESERVED;
4061 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4062 mask &= ~CPSR_T;
4064 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4065 mask &= ~CPSR_Q; /* V5TE in reality*/
4067 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4068 mask &= ~(CPSR_E | CPSR_GE);
4070 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4071 mask &= ~CPSR_IT;
4073 /* Mask out execution state and reserved bits. */
4074 if (!spsr) {
4075 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4077 /* Mask out privileged bits. */
4078 if (IS_USER(s))
4079 mask &= CPSR_USER;
4080 return mask;
4083 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4084 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4086 TCGv_i32 tmp;
4087 if (spsr) {
4088 /* ??? This is also undefined in system mode. */
4089 if (IS_USER(s))
4090 return 1;
4092 tmp = load_cpu_field(spsr);
4093 tcg_gen_andi_i32(tmp, tmp, ~mask);
4094 tcg_gen_andi_i32(t0, t0, mask);
4095 tcg_gen_or_i32(tmp, tmp, t0);
4096 store_cpu_field(tmp, spsr);
4097 } else {
4098 gen_set_cpsr(t0, mask);
4100 tcg_temp_free_i32(t0);
4101 gen_lookup_tb(s);
4102 return 0;
4105 /* Returns nonzero if access to the PSR is not permitted. */
4106 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4108 TCGv_i32 tmp;
4109 tmp = tcg_temp_new_i32();
4110 tcg_gen_movi_i32(tmp, val);
4111 return gen_set_psr(s, mask, spsr, tmp);
4114 /* Generate an old-style exception return. Marks pc as dead. */
4115 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4117 TCGv_i32 tmp;
4118 store_reg(s, 15, pc);
4119 tmp = load_cpu_field(spsr);
4120 gen_set_cpsr(tmp, CPSR_ERET_MASK);
4121 tcg_temp_free_i32(tmp);
4122 s->is_jmp = DISAS_JUMP;
4125 /* Generate a v6 exception return. Marks both values as dead. */
4126 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4128 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
4129 tcg_temp_free_i32(cpsr);
4130 store_reg(s, 15, pc);
4131 s->is_jmp = DISAS_JUMP;
4134 static void gen_nop_hint(DisasContext *s, int val)
4136 switch (val) {
4137 case 1: /* yield */
4138 gen_set_pc_im(s, s->pc);
4139 s->is_jmp = DISAS_YIELD;
4140 break;
4141 case 3: /* wfi */
4142 gen_set_pc_im(s, s->pc);
4143 s->is_jmp = DISAS_WFI;
4144 break;
4145 case 2: /* wfe */
4146 gen_set_pc_im(s, s->pc);
4147 s->is_jmp = DISAS_WFE;
4148 break;
4149 case 4: /* sev */
4150 case 5: /* sevl */
4151 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4152 default: /* nop */
4153 break;
4157 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4159 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4161 switch (size) {
4162 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4163 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4164 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4165 default: abort();
4169 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4171 switch (size) {
4172 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4173 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4174 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4175 default: return;
4179 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4180 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4181 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4182 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4183 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
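/* Dispatch an integer Neon helper on the (size, u) pair: cases 0-5 cover
 * signed/unsigned 8, 16 and 32-bit elements, anything else UNDEFs.  The
 * _ENV variant is for helpers that also take cpu_env, e.g. saturating ops
 * that may set the QC flag.
 */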
4185 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4186 switch ((size << 1) | u) { \
4187 case 0: \
4188 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4189 break; \
4190 case 1: \
4191 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4192 break; \
4193 case 2: \
4194 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4195 break; \
4196 case 3: \
4197 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4198 break; \
4199 case 4: \
4200 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4201 break; \
4202 case 5: \
4203 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4204 break; \
4205 default: return 1; \
4206 }} while (0)
4208 #define GEN_NEON_INTEGER_OP(name) do { \
4209 switch ((size << 1) | u) { \
4210 case 0: \
4211 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4212 break; \
4213 case 1: \
4214 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4215 break; \
4216 case 2: \
4217 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4218 break; \
4219 case 3: \
4220 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4221 break; \
4222 case 4: \
4223 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4224 break; \
4225 case 5: \
4226 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4227 break; \
4228 default: return 1; \
4229 }} while (0)
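/* Scratch register helpers: spill and reload 32-bit values through the
 * vfp.scratch[] slots in CPUARMState, for sequences that need more live
 * values than it is convenient to keep in TCG temporaries.
 */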
4231 static TCGv_i32 neon_load_scratch(int scratch)
4233 TCGv_i32 tmp = tcg_temp_new_i32();
4234 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4235 return tmp;
4238 static void neon_store_scratch(int scratch, TCGv_i32 var)
4240 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4241 tcg_temp_free_i32(var);
4244 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4246 TCGv_i32 tmp;
4247 if (size == 1) {
4248 tmp = neon_load_reg(reg & 7, reg >> 4);
4249 if (reg & 8) {
4250 gen_neon_dup_high16(tmp);
4251 } else {
4252 gen_neon_dup_low16(tmp);
4254 } else {
4255 tmp = neon_load_reg(reg & 15, reg >> 4);
4257 return tmp;
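/* VUZP/VZIP are implemented by helpers that operate on whole registers in
 * CPUARMState, so only the register indices are passed (as constant TCG
 * values).  The non-quadword forms with 32-bit elements are not valid and
 * return 1 so the caller can UNDEF.
 */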
4260 static int gen_neon_unzip(int rd, int rm, int size, int q)
4262 TCGv_i32 tmp, tmp2;
4263 if (!q && size == 2) {
4264 return 1;
4266 tmp = tcg_const_i32(rd);
4267 tmp2 = tcg_const_i32(rm);
4268 if (q) {
4269 switch (size) {
4270 case 0:
4271 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4272 break;
4273 case 1:
4274 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4275 break;
4276 case 2:
4277 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4278 break;
4279 default:
4280 abort();
4282 } else {
4283 switch (size) {
4284 case 0:
4285 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4286 break;
4287 case 1:
4288 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4289 break;
4290 default:
4291 abort();
4294 tcg_temp_free_i32(tmp);
4295 tcg_temp_free_i32(tmp2);
4296 return 0;
4299 static int gen_neon_zip(int rd, int rm, int size, int q)
4301 TCGv_i32 tmp, tmp2;
4302 if (!q && size == 2) {
4303 return 1;
4305 tmp = tcg_const_i32(rd);
4306 tmp2 = tcg_const_i32(rm);
4307 if (q) {
4308 switch (size) {
4309 case 0:
4310 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4311 break;
4312 case 1:
4313 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4314 break;
4315 case 2:
4316 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4317 break;
4318 default:
4319 abort();
4321 } else {
4322 switch (size) {
4323 case 0:
4324 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4325 break;
4326 case 1:
4327 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4328 break;
4329 default:
4330 abort();
4333 tcg_temp_free_i32(tmp);
4334 tcg_temp_free_i32(tmp2);
4335 return 0;
4338 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4340 TCGv_i32 rd, tmp;
4342 rd = tcg_temp_new_i32();
4343 tmp = tcg_temp_new_i32();
4345 tcg_gen_shli_i32(rd, t0, 8);
4346 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4347 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4348 tcg_gen_or_i32(rd, rd, tmp);
4350 tcg_gen_shri_i32(t1, t1, 8);
4351 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4352 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4353 tcg_gen_or_i32(t1, t1, tmp);
4354 tcg_gen_mov_i32(t0, rd);
4356 tcg_temp_free_i32(tmp);
4357 tcg_temp_free_i32(rd);
4360 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4362 TCGv_i32 rd, tmp;
4364 rd = tcg_temp_new_i32();
4365 tmp = tcg_temp_new_i32();
4367 tcg_gen_shli_i32(rd, t0, 16);
4368 tcg_gen_andi_i32(tmp, t1, 0xffff);
4369 tcg_gen_or_i32(rd, rd, tmp);
4370 tcg_gen_shri_i32(t1, t1, 16);
4371 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4372 tcg_gen_or_i32(t1, t1, tmp);
4373 tcg_gen_mov_i32(t0, rd);
4375 tcg_temp_free_i32(tmp);
4376 tcg_temp_free_i32(rd);
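/* Decode table for the VLD/VST "multiple structures" forms, indexed by
 * the op field (insn bits [11:8]): the number of D registers
 * transferred, the element interleave factor, and the register stride
 * applied when stepping to the next register.
 */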
4380 static struct {
4381 int nregs;
4382 int interleave;
4383 int spacing;
4384 } neon_ls_element_type[11] = {
4385 {4, 4, 1},
4386 {4, 4, 2},
4387 {4, 1, 1},
4388 {4, 2, 1},
4389 {3, 3, 1},
4390 {3, 3, 2},
4391 {3, 1, 1},
4392 {1, 1, 1},
4393 {2, 2, 1},
4394 {2, 2, 2},
4395 {2, 1, 1}
4398 /* Translate a NEON load/store element instruction. Return nonzero if the
4399 instruction is invalid. */
4400 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4402 int rd, rn, rm;
4403 int op;
4404 int nregs;
4405 int interleave;
4406 int spacing;
4407 int stride;
4408 int size;
4409 int reg;
4410 int pass;
4411 int load;
4412 int shift;
4413 int n;
4414 TCGv_i32 addr;
4415 TCGv_i32 tmp;
4416 TCGv_i32 tmp2;
4417 TCGv_i64 tmp64;
4419 /* FIXME: this access check should not take precedence over UNDEF
4420 * for invalid encodings; we will generate incorrect syndrome information
4421 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4423 if (s->fp_excp_el) {
4424 gen_exception_insn(s, 4, EXCP_UDEF,
4425 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
4426 return 0;
4429 if (!s->vfp_enabled)
4430 return 1;
4431 VFP_DREG_D(rd, insn);
4432 rn = (insn >> 16) & 0xf;
4433 rm = insn & 0xf;
4434 load = (insn & (1 << 21)) != 0;
4435 if ((insn & (1 << 23)) == 0) {
4436 /* Load store all elements. */
4437 op = (insn >> 8) & 0xf;
4438 size = (insn >> 6) & 3;
4439 if (op > 10)
4440 return 1;
4441 /* Catch UNDEF cases for bad values of align field */
4442 switch (op & 0xc) {
4443 case 4:
4444 if (((insn >> 5) & 1) == 1) {
4445 return 1;
4447 break;
4448 case 8:
4449 if (((insn >> 4) & 3) == 3) {
4450 return 1;
4452 break;
4453 default:
4454 break;
4456 nregs = neon_ls_element_type[op].nregs;
4457 interleave = neon_ls_element_type[op].interleave;
4458 spacing = neon_ls_element_type[op].spacing;
4459 if (size == 3 && (interleave | spacing) != 1)
4460 return 1;
4461 addr = tcg_temp_new_i32();
4462 load_reg_var(s, addr, rn);
4463 stride = (1 << size) * interleave;
4464 for (reg = 0; reg < nregs; reg++) {
4465 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4466 load_reg_var(s, addr, rn);
4467 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4468 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4469 load_reg_var(s, addr, rn);
4470 tcg_gen_addi_i32(addr, addr, 1 << size);
4472 if (size == 3) {
4473 tmp64 = tcg_temp_new_i64();
4474 if (load) {
4475 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4476 neon_store_reg64(tmp64, rd);
4477 } else {
4478 neon_load_reg64(tmp64, rd);
4479 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4481 tcg_temp_free_i64(tmp64);
4482 tcg_gen_addi_i32(addr, addr, stride);
4483 } else {
4484 for (pass = 0; pass < 2; pass++) {
4485 if (size == 2) {
4486 if (load) {
4487 tmp = tcg_temp_new_i32();
4488 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4489 neon_store_reg(rd, pass, tmp);
4490 } else {
4491 tmp = neon_load_reg(rd, pass);
4492 gen_aa32_st32(tmp, addr, get_mem_index(s));
4493 tcg_temp_free_i32(tmp);
4495 tcg_gen_addi_i32(addr, addr, stride);
4496 } else if (size == 1) {
4497 if (load) {
4498 tmp = tcg_temp_new_i32();
4499 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4500 tcg_gen_addi_i32(addr, addr, stride);
4501 tmp2 = tcg_temp_new_i32();
4502 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4503 tcg_gen_addi_i32(addr, addr, stride);
4504 tcg_gen_shli_i32(tmp2, tmp2, 16);
4505 tcg_gen_or_i32(tmp, tmp, tmp2);
4506 tcg_temp_free_i32(tmp2);
4507 neon_store_reg(rd, pass, tmp);
4508 } else {
4509 tmp = neon_load_reg(rd, pass);
4510 tmp2 = tcg_temp_new_i32();
4511 tcg_gen_shri_i32(tmp2, tmp, 16);
4512 gen_aa32_st16(tmp, addr, get_mem_index(s));
4513 tcg_temp_free_i32(tmp);
4514 tcg_gen_addi_i32(addr, addr, stride);
4515 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4516 tcg_temp_free_i32(tmp2);
4517 tcg_gen_addi_i32(addr, addr, stride);
4519 } else /* size == 0 */ {
4520 if (load) {
4521 TCGV_UNUSED_I32(tmp2);
4522 for (n = 0; n < 4; n++) {
4523 tmp = tcg_temp_new_i32();
4524 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4525 tcg_gen_addi_i32(addr, addr, stride);
4526 if (n == 0) {
4527 tmp2 = tmp;
4528 } else {
4529 tcg_gen_shli_i32(tmp, tmp, n * 8);
4530 tcg_gen_or_i32(tmp2, tmp2, tmp);
4531 tcg_temp_free_i32(tmp);
4534 neon_store_reg(rd, pass, tmp2);
4535 } else {
4536 tmp2 = neon_load_reg(rd, pass);
4537 for (n = 0; n < 4; n++) {
4538 tmp = tcg_temp_new_i32();
4539 if (n == 0) {
4540 tcg_gen_mov_i32(tmp, tmp2);
4541 } else {
4542 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4544 gen_aa32_st8(tmp, addr, get_mem_index(s));
4545 tcg_temp_free_i32(tmp);
4546 tcg_gen_addi_i32(addr, addr, stride);
4548 tcg_temp_free_i32(tmp2);
4553 rd += spacing;
4555 tcg_temp_free_i32(addr);
4556 stride = nregs * 8;
4557 } else {
4558 size = (insn >> 10) & 3;
4559 if (size == 3) {
4560 /* Load single element to all lanes. */
4561 int a = (insn >> 4) & 1;
4562 if (!load) {
4563 return 1;
4565 size = (insn >> 6) & 3;
4566 nregs = ((insn >> 8) & 3) + 1;
4568 if (size == 3) {
4569 if (nregs != 4 || a == 0) {
4570 return 1;
4572 /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
4573 size = 2;
4575 if (nregs == 1 && a == 1 && size == 0) {
4576 return 1;
4578 if (nregs == 3 && a == 1) {
4579 return 1;
4581 addr = tcg_temp_new_i32();
4582 load_reg_var(s, addr, rn);
4583 if (nregs == 1) {
4584 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4585 tmp = gen_load_and_replicate(s, addr, size);
4586 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4587 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4588 if (insn & (1 << 5)) {
4589 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4590 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4592 tcg_temp_free_i32(tmp);
4593 } else {
4594 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4595 stride = (insn & (1 << 5)) ? 2 : 1;
4596 for (reg = 0; reg < nregs; reg++) {
4597 tmp = gen_load_and_replicate(s, addr, size);
4598 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4599 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4600 tcg_temp_free_i32(tmp);
4601 tcg_gen_addi_i32(addr, addr, 1 << size);
4602 rd += stride;
4605 tcg_temp_free_i32(addr);
4606 stride = (1 << size) * nregs;
4607 } else {
4608 /* Single element. */
4609 int idx = (insn >> 4) & 0xf;
4610 pass = (insn >> 7) & 1;
4611 switch (size) {
4612 case 0:
4613 shift = ((insn >> 5) & 3) * 8;
4614 stride = 1;
4615 break;
4616 case 1:
4617 shift = ((insn >> 6) & 1) * 16;
4618 stride = (insn & (1 << 5)) ? 2 : 1;
4619 break;
4620 case 2:
4621 shift = 0;
4622 stride = (insn & (1 << 6)) ? 2 : 1;
4623 break;
4624 default:
4625 abort();
4627 nregs = ((insn >> 8) & 3) + 1;
4628 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4629 switch (nregs) {
4630 case 1:
4631 if (((idx & (1 << size)) != 0) ||
4632 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4633 return 1;
4635 break;
4636 case 3:
4637 if ((idx & 1) != 0) {
4638 return 1;
4640 /* fall through */
4641 case 2:
4642 if (size == 2 && (idx & 2) != 0) {
4643 return 1;
4645 break;
4646 case 4:
4647 if ((size == 2) && ((idx & 3) == 3)) {
4648 return 1;
4650 break;
4651 default:
4652 abort();
4654 if ((rd + stride * (nregs - 1)) > 31) {
4655 /* Attempts to write off the end of the register file
4656 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4657 * the neon_load_reg() would write off the end of the array.
4659 return 1;
4661 addr = tcg_temp_new_i32();
4662 load_reg_var(s, addr, rn);
4663 for (reg = 0; reg < nregs; reg++) {
4664 if (load) {
4665 tmp = tcg_temp_new_i32();
4666 switch (size) {
4667 case 0:
4668 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4669 break;
4670 case 1:
4671 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4672 break;
4673 case 2:
4674 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4675 break;
4676 default: /* Avoid compiler warnings. */
4677 abort();
4679 if (size != 2) {
4680 tmp2 = neon_load_reg(rd, pass);
4681 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4682 shift, size ? 16 : 8);
4683 tcg_temp_free_i32(tmp2);
4685 neon_store_reg(rd, pass, tmp);
4686 } else { /* Store */
4687 tmp = neon_load_reg(rd, pass);
4688 if (shift)
4689 tcg_gen_shri_i32(tmp, tmp, shift);
4690 switch (size) {
4691 case 0:
4692 gen_aa32_st8(tmp, addr, get_mem_index(s));
4693 break;
4694 case 1:
4695 gen_aa32_st16(tmp, addr, get_mem_index(s));
4696 break;
4697 case 2:
4698 gen_aa32_st32(tmp, addr, get_mem_index(s));
4699 break;
4701 tcg_temp_free_i32(tmp);
4703 rd += stride;
4704 tcg_gen_addi_i32(addr, addr, 1 << size);
4706 tcg_temp_free_i32(addr);
4707 stride = nregs * (1 << size);
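/* Base register writeback: rm == 15 means no writeback, rm == 13
 * post-increments the base by the amount transferred, and any other
 * rm adds that index register to the base.
 */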
4710 if (rm != 15) {
4711 TCGv_i32 base;
4713 base = load_reg(s, rn);
4714 if (rm == 13) {
4715 tcg_gen_addi_i32(base, base, stride);
4716 } else {
4717 TCGv_i32 index;
4718 index = load_reg(s, rm);
4719 tcg_gen_add_i32(base, base, index);
4720 tcg_temp_free_i32(index);
4722 store_reg(s, rn, base);
4724 return 0;
4727 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
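/* VBSL, VBIT and VBIF all reduce to this with the operands permuted;
 * see the NEON_3R_LOGIC cases below.
 */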
4728 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4730 tcg_gen_and_i32(t, t, c);
4731 tcg_gen_andc_i32(f, f, c);
4732 tcg_gen_or_i32(dest, t, f);
4735 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4737 switch (size) {
4738 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4739 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4740 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
4741 default: abort();
4745 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4747 switch (size) {
4748 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4749 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4750 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4751 default: abort();
4755 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4757 switch (size) {
4758 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4759 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4760 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4761 default: abort();
4765 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4767 switch (size) {
4768 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4769 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4770 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4771 default: abort();
4775 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4776 int q, int u)
4778 if (q) {
4779 if (u) {
4780 switch (size) {
4781 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4782 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4783 default: abort();
4785 } else {
4786 switch (size) {
4787 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4788 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4789 default: abort();
4792 } else {
4793 if (u) {
4794 switch (size) {
4795 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4796 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4797 default: abort();
4799 } else {
4800 switch (size) {
4801 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4802 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4803 default: abort();
4809 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4811 if (u) {
4812 switch (size) {
4813 case 0: gen_helper_neon_widen_u8(dest, src); break;
4814 case 1: gen_helper_neon_widen_u16(dest, src); break;
4815 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4816 default: abort();
4818 } else {
4819 switch (size) {
4820 case 0: gen_helper_neon_widen_s8(dest, src); break;
4821 case 1: gen_helper_neon_widen_s16(dest, src); break;
4822 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4823 default: abort();
4826 tcg_temp_free_i32(src);
4829 static inline void gen_neon_addl(int size)
4831 switch (size) {
4832 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4833 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4834 case 2: tcg_gen_add_i64(CPU_V001); break;
4835 default: abort();
4839 static inline void gen_neon_subl(int size)
4841 switch (size) {
4842 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4843 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4844 case 2: tcg_gen_sub_i64(CPU_V001); break;
4845 default: abort();
4849 static inline void gen_neon_negl(TCGv_i64 var, int size)
4851 switch (size) {
4852 case 0: gen_helper_neon_negl_u16(var, var); break;
4853 case 1: gen_helper_neon_negl_u32(var, var); break;
4854 case 2:
4855 tcg_gen_neg_i64(var, var);
4856 break;
4857 default: abort();
4861 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4863 switch (size) {
4864 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4865 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4866 default: abort();
4870 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4871 int size, int u)
4873 TCGv_i64 tmp;
4875 switch ((size << 1) | u) {
4876 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4877 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4878 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4879 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4880 case 4:
4881 tmp = gen_muls_i64_i32(a, b);
4882 tcg_gen_mov_i64(dest, tmp);
4883 tcg_temp_free_i64(tmp);
4884 break;
4885 case 5:
4886 tmp = gen_mulu_i64_i32(a, b);
4887 tcg_gen_mov_i64(dest, tmp);
4888 tcg_temp_free_i64(tmp);
4889 break;
4890 default: abort();
4893 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4894 Don't forget to clean them now. */
4895 if (size < 2) {
4896 tcg_temp_free_i32(a);
4897 tcg_temp_free_i32(b);
4901 static void gen_neon_narrow_op(int op, int u, int size,
4902 TCGv_i32 dest, TCGv_i64 src)
4904 if (op) {
4905 if (u) {
4906 gen_neon_unarrow_sats(size, dest, src);
4907 } else {
4908 gen_neon_narrow(size, dest, src);
4910 } else {
4911 if (u) {
4912 gen_neon_narrow_satu(size, dest, src);
4913 } else {
4914 gen_neon_narrow_sats(size, dest, src);
4919 /* Symbolic constants for op fields for Neon 3-register same-length.
4920 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4921 * table A7-9.
4923 #define NEON_3R_VHADD 0
4924 #define NEON_3R_VQADD 1
4925 #define NEON_3R_VRHADD 2
4926 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4927 #define NEON_3R_VHSUB 4
4928 #define NEON_3R_VQSUB 5
4929 #define NEON_3R_VCGT 6
4930 #define NEON_3R_VCGE 7
4931 #define NEON_3R_VSHL 8
4932 #define NEON_3R_VQSHL 9
4933 #define NEON_3R_VRSHL 10
4934 #define NEON_3R_VQRSHL 11
4935 #define NEON_3R_VMAX 12
4936 #define NEON_3R_VMIN 13
4937 #define NEON_3R_VABD 14
4938 #define NEON_3R_VABA 15
4939 #define NEON_3R_VADD_VSUB 16
4940 #define NEON_3R_VTST_VCEQ 17
4941 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4942 #define NEON_3R_VMUL 19
4943 #define NEON_3R_VPMAX 20
4944 #define NEON_3R_VPMIN 21
4945 #define NEON_3R_VQDMULH_VQRDMULH 22
4946 #define NEON_3R_VPADD 23
4947 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4948 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4949 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4950 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4951 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4952 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4953 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4954 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
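/* Each entry has bit n set if the op is allowed with size value n
 * (otherwise it UNDEFs); unallocated op values have no bits set and
 * so always UNDEF.  An entry of 0xf accepts any value of the size
 * field.
 */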
4956 static const uint8_t neon_3r_sizes[] = {
4957 [NEON_3R_VHADD] = 0x7,
4958 [NEON_3R_VQADD] = 0xf,
4959 [NEON_3R_VRHADD] = 0x7,
4960 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4961 [NEON_3R_VHSUB] = 0x7,
4962 [NEON_3R_VQSUB] = 0xf,
4963 [NEON_3R_VCGT] = 0x7,
4964 [NEON_3R_VCGE] = 0x7,
4965 [NEON_3R_VSHL] = 0xf,
4966 [NEON_3R_VQSHL] = 0xf,
4967 [NEON_3R_VRSHL] = 0xf,
4968 [NEON_3R_VQRSHL] = 0xf,
4969 [NEON_3R_VMAX] = 0x7,
4970 [NEON_3R_VMIN] = 0x7,
4971 [NEON_3R_VABD] = 0x7,
4972 [NEON_3R_VABA] = 0x7,
4973 [NEON_3R_VADD_VSUB] = 0xf,
4974 [NEON_3R_VTST_VCEQ] = 0x7,
4975 [NEON_3R_VML] = 0x7,
4976 [NEON_3R_VMUL] = 0x7,
4977 [NEON_3R_VPMAX] = 0x7,
4978 [NEON_3R_VPMIN] = 0x7,
4979 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4980 [NEON_3R_VPADD] = 0x7,
4981 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4982 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4983 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4984 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4985 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4986 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4987 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4988 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4991 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4992 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4993 * table A7-13.
4995 #define NEON_2RM_VREV64 0
4996 #define NEON_2RM_VREV32 1
4997 #define NEON_2RM_VREV16 2
4998 #define NEON_2RM_VPADDL 4
4999 #define NEON_2RM_VPADDL_U 5
5000 #define NEON_2RM_AESE 6 /* Includes AESD */
5001 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5002 #define NEON_2RM_VCLS 8
5003 #define NEON_2RM_VCLZ 9
5004 #define NEON_2RM_VCNT 10
5005 #define NEON_2RM_VMVN 11
5006 #define NEON_2RM_VPADAL 12
5007 #define NEON_2RM_VPADAL_U 13
5008 #define NEON_2RM_VQABS 14
5009 #define NEON_2RM_VQNEG 15
5010 #define NEON_2RM_VCGT0 16
5011 #define NEON_2RM_VCGE0 17
5012 #define NEON_2RM_VCEQ0 18
5013 #define NEON_2RM_VCLE0 19
5014 #define NEON_2RM_VCLT0 20
5015 #define NEON_2RM_SHA1H 21
5016 #define NEON_2RM_VABS 22
5017 #define NEON_2RM_VNEG 23
5018 #define NEON_2RM_VCGT0_F 24
5019 #define NEON_2RM_VCGE0_F 25
5020 #define NEON_2RM_VCEQ0_F 26
5021 #define NEON_2RM_VCLE0_F 27
5022 #define NEON_2RM_VCLT0_F 28
5023 #define NEON_2RM_VABS_F 30
5024 #define NEON_2RM_VNEG_F 31
5025 #define NEON_2RM_VSWP 32
5026 #define NEON_2RM_VTRN 33
5027 #define NEON_2RM_VUZP 34
5028 #define NEON_2RM_VZIP 35
5029 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5030 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5031 #define NEON_2RM_VSHLL 38
5032 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5033 #define NEON_2RM_VRINTN 40
5034 #define NEON_2RM_VRINTX 41
5035 #define NEON_2RM_VRINTA 42
5036 #define NEON_2RM_VRINTZ 43
5037 #define NEON_2RM_VCVT_F16_F32 44
5038 #define NEON_2RM_VRINTM 45
5039 #define NEON_2RM_VCVT_F32_F16 46
5040 #define NEON_2RM_VRINTP 47
5041 #define NEON_2RM_VCVTAU 48
5042 #define NEON_2RM_VCVTAS 49
5043 #define NEON_2RM_VCVTNU 50
5044 #define NEON_2RM_VCVTNS 51
5045 #define NEON_2RM_VCVTPU 52
5046 #define NEON_2RM_VCVTPS 53
5047 #define NEON_2RM_VCVTMU 54
5048 #define NEON_2RM_VCVTMS 55
5049 #define NEON_2RM_VRECPE 56
5050 #define NEON_2RM_VRSQRTE 57
5051 #define NEON_2RM_VRECPE_F 58
5052 #define NEON_2RM_VRSQRTE_F 59
5053 #define NEON_2RM_VCVT_FS 60
5054 #define NEON_2RM_VCVT_FU 61
5055 #define NEON_2RM_VCVT_SF 62
5056 #define NEON_2RM_VCVT_UF 63
5058 static int neon_2rm_is_float_op(int op)
5060 /* Return true if this neon 2reg-misc op is float-to-float */
5061 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5062 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5063 op == NEON_2RM_VRINTM ||
5064 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5065 op >= NEON_2RM_VRECPE_F);
5068 /* Each entry in this array has bit n set if the insn allows
5069 * size value n (otherwise it will UNDEF). Since unallocated
5070 * op values will have no bits set they always UNDEF.
5072 static const uint8_t neon_2rm_sizes[] = {
5073 [NEON_2RM_VREV64] = 0x7,
5074 [NEON_2RM_VREV32] = 0x3,
5075 [NEON_2RM_VREV16] = 0x1,
5076 [NEON_2RM_VPADDL] = 0x7,
5077 [NEON_2RM_VPADDL_U] = 0x7,
5078 [NEON_2RM_AESE] = 0x1,
5079 [NEON_2RM_AESMC] = 0x1,
5080 [NEON_2RM_VCLS] = 0x7,
5081 [NEON_2RM_VCLZ] = 0x7,
5082 [NEON_2RM_VCNT] = 0x1,
5083 [NEON_2RM_VMVN] = 0x1,
5084 [NEON_2RM_VPADAL] = 0x7,
5085 [NEON_2RM_VPADAL_U] = 0x7,
5086 [NEON_2RM_VQABS] = 0x7,
5087 [NEON_2RM_VQNEG] = 0x7,
5088 [NEON_2RM_VCGT0] = 0x7,
5089 [NEON_2RM_VCGE0] = 0x7,
5090 [NEON_2RM_VCEQ0] = 0x7,
5091 [NEON_2RM_VCLE0] = 0x7,
5092 [NEON_2RM_VCLT0] = 0x7,
5093 [NEON_2RM_SHA1H] = 0x4,
5094 [NEON_2RM_VABS] = 0x7,
5095 [NEON_2RM_VNEG] = 0x7,
5096 [NEON_2RM_VCGT0_F] = 0x4,
5097 [NEON_2RM_VCGE0_F] = 0x4,
5098 [NEON_2RM_VCEQ0_F] = 0x4,
5099 [NEON_2RM_VCLE0_F] = 0x4,
5100 [NEON_2RM_VCLT0_F] = 0x4,
5101 [NEON_2RM_VABS_F] = 0x4,
5102 [NEON_2RM_VNEG_F] = 0x4,
5103 [NEON_2RM_VSWP] = 0x1,
5104 [NEON_2RM_VTRN] = 0x7,
5105 [NEON_2RM_VUZP] = 0x7,
5106 [NEON_2RM_VZIP] = 0x7,
5107 [NEON_2RM_VMOVN] = 0x7,
5108 [NEON_2RM_VQMOVN] = 0x7,
5109 [NEON_2RM_VSHLL] = 0x7,
5110 [NEON_2RM_SHA1SU1] = 0x4,
5111 [NEON_2RM_VRINTN] = 0x4,
5112 [NEON_2RM_VRINTX] = 0x4,
5113 [NEON_2RM_VRINTA] = 0x4,
5114 [NEON_2RM_VRINTZ] = 0x4,
5115 [NEON_2RM_VCVT_F16_F32] = 0x2,
5116 [NEON_2RM_VRINTM] = 0x4,
5117 [NEON_2RM_VCVT_F32_F16] = 0x2,
5118 [NEON_2RM_VRINTP] = 0x4,
5119 [NEON_2RM_VCVTAU] = 0x4,
5120 [NEON_2RM_VCVTAS] = 0x4,
5121 [NEON_2RM_VCVTNU] = 0x4,
5122 [NEON_2RM_VCVTNS] = 0x4,
5123 [NEON_2RM_VCVTPU] = 0x4,
5124 [NEON_2RM_VCVTPS] = 0x4,
5125 [NEON_2RM_VCVTMU] = 0x4,
5126 [NEON_2RM_VCVTMS] = 0x4,
5127 [NEON_2RM_VRECPE] = 0x4,
5128 [NEON_2RM_VRSQRTE] = 0x4,
5129 [NEON_2RM_VRECPE_F] = 0x4,
5130 [NEON_2RM_VRSQRTE_F] = 0x4,
5131 [NEON_2RM_VCVT_FS] = 0x4,
5132 [NEON_2RM_VCVT_FU] = 0x4,
5133 [NEON_2RM_VCVT_SF] = 0x4,
5134 [NEON_2RM_VCVT_UF] = 0x4,
5137 /* Translate a NEON data processing instruction. Return nonzero if the
5138 instruction is invalid.
5139 We process data in a mixture of 32-bit and 64-bit chunks.
5140 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5142 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5144 int op;
5145 int q;
5146 int rd, rn, rm;
5147 int size;
5148 int shift;
5149 int pass;
5150 int count;
5151 int pairwise;
5152 int u;
5153 uint32_t imm, mask;
5154 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5155 TCGv_i64 tmp64;
5157 /* FIXME: this access check should not take precedence over UNDEF
5158 * for invalid encodings; we will generate incorrect syndrome information
5159 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5161 if (s->fp_excp_el) {
5162 gen_exception_insn(s, 4, EXCP_UDEF,
5163 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
5164 return 0;
5167 if (!s->vfp_enabled)
5168 return 1;
5169 q = (insn & (1 << 6)) != 0;
5170 u = (insn >> 24) & 1;
5171 VFP_DREG_D(rd, insn);
5172 VFP_DREG_N(rn, insn);
5173 VFP_DREG_M(rm, insn);
5174 size = (insn >> 20) & 3;
5175 if ((insn & (1 << 23)) == 0) {
5176 /* Three register same length. */
5177 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5178 /* Catch invalid op and bad size combinations: UNDEF */
5179 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5180 return 1;
5182 /* All insns of this form UNDEF for either this condition or the
5183 * superset of cases "Q==1"; we catch the latter later.
5185 if (q && ((rd | rn | rm) & 1)) {
5186 return 1;
5189 * The SHA-1/SHA-256 3-register instructions require special treatment
5190 * here, as their size field is overloaded as an op type selector, and
5191 * they all consume their input in a single pass.
5193 if (op == NEON_3R_SHA) {
5194 if (!q) {
5195 return 1;
5197 if (!u) { /* SHA-1 */
5198 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5199 return 1;
5201 tmp = tcg_const_i32(rd);
5202 tmp2 = tcg_const_i32(rn);
5203 tmp3 = tcg_const_i32(rm);
5204 tmp4 = tcg_const_i32(size);
5205 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5206 tcg_temp_free_i32(tmp4);
5207 } else { /* SHA-256 */
5208 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5209 return 1;
5211 tmp = tcg_const_i32(rd);
5212 tmp2 = tcg_const_i32(rn);
5213 tmp3 = tcg_const_i32(rm);
5214 switch (size) {
5215 case 0:
5216 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5217 break;
5218 case 1:
5219 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5220 break;
5221 case 2:
5222 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5223 break;
5226 tcg_temp_free_i32(tmp);
5227 tcg_temp_free_i32(tmp2);
5228 tcg_temp_free_i32(tmp3);
5229 return 0;
5231 if (size == 3 && op != NEON_3R_LOGIC) {
5232 /* 64-bit element instructions. */
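/* Only ops whose neon_3r_sizes entry allows size == 3 get this far,
 * and the logic and SHA groups were handled above, so this switch
 * only needs the saturating add/sub, the shifts and VADD/VSUB.
 */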
5233 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5234 neon_load_reg64(cpu_V0, rn + pass);
5235 neon_load_reg64(cpu_V1, rm + pass);
5236 switch (op) {
5237 case NEON_3R_VQADD:
5238 if (u) {
5239 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5240 cpu_V0, cpu_V1);
5241 } else {
5242 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5243 cpu_V0, cpu_V1);
5245 break;
5246 case NEON_3R_VQSUB:
5247 if (u) {
5248 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5249 cpu_V0, cpu_V1);
5250 } else {
5251 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5252 cpu_V0, cpu_V1);
5254 break;
5255 case NEON_3R_VSHL:
5256 if (u) {
5257 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5258 } else {
5259 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5261 break;
5262 case NEON_3R_VQSHL:
5263 if (u) {
5264 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5265 cpu_V1, cpu_V0);
5266 } else {
5267 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5268 cpu_V1, cpu_V0);
5270 break;
5271 case NEON_3R_VRSHL:
5272 if (u) {
5273 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5274 } else {
5275 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5277 break;
5278 case NEON_3R_VQRSHL:
5279 if (u) {
5280 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5281 cpu_V1, cpu_V0);
5282 } else {
5283 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5284 cpu_V1, cpu_V0);
5286 break;
5287 case NEON_3R_VADD_VSUB:
5288 if (u) {
5289 tcg_gen_sub_i64(CPU_V001);
5290 } else {
5291 tcg_gen_add_i64(CPU_V001);
5293 break;
5294 default:
5295 abort();
5297 neon_store_reg64(cpu_V0, rd + pass);
5299 return 0;
5301 pairwise = 0;
5302 switch (op) {
5303 case NEON_3R_VSHL:
5304 case NEON_3R_VQSHL:
5305 case NEON_3R_VRSHL:
5306 case NEON_3R_VQRSHL:
5308 int rtmp;
5309 /* Shift instruction operands are reversed. */
5310 rtmp = rn;
5311 rn = rm;
5312 rm = rtmp;
5314 break;
5315 case NEON_3R_VPADD:
5316 if (u) {
5317 return 1;
5319 /* Fall through */
5320 case NEON_3R_VPMAX:
5321 case NEON_3R_VPMIN:
5322 pairwise = 1;
5323 break;
5324 case NEON_3R_FLOAT_ARITH:
5325 pairwise = (u && size < 2); /* if VPADD (float) */
5326 break;
5327 case NEON_3R_FLOAT_MINMAX:
5328 pairwise = u; /* if VPMIN/VPMAX (float) */
5329 break;
5330 case NEON_3R_FLOAT_CMP:
5331 if (!u && size) {
5332 /* no encoding for U=0 C=1x */
5333 return 1;
5335 break;
5336 case NEON_3R_FLOAT_ACMP:
5337 if (!u) {
5338 return 1;
5340 break;
5341 case NEON_3R_FLOAT_MISC:
5342 /* VMAXNM/VMINNM in ARMv8 */
5343 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5344 return 1;
5346 break;
5347 case NEON_3R_VMUL:
5348 if (u && (size != 0)) {
5349 /* UNDEF on invalid size for polynomial subcase */
5350 return 1;
5352 break;
5353 case NEON_3R_VFM:
5354 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5355 return 1;
5357 break;
5358 default:
5359 break;
5362 if (pairwise && q) {
5363 /* All the pairwise insns UNDEF if Q is set */
5364 return 1;
5367 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5369 if (pairwise) {
5370 /* Pairwise. */
5371 if (pass < 1) {
5372 tmp = neon_load_reg(rn, 0);
5373 tmp2 = neon_load_reg(rn, 1);
5374 } else {
5375 tmp = neon_load_reg(rm, 0);
5376 tmp2 = neon_load_reg(rm, 1);
5378 } else {
5379 /* Elementwise. */
5380 tmp = neon_load_reg(rn, pass);
5381 tmp2 = neon_load_reg(rm, pass);
5383 switch (op) {
5384 case NEON_3R_VHADD:
5385 GEN_NEON_INTEGER_OP(hadd);
5386 break;
5387 case NEON_3R_VQADD:
5388 GEN_NEON_INTEGER_OP_ENV(qadd);
5389 break;
5390 case NEON_3R_VRHADD:
5391 GEN_NEON_INTEGER_OP(rhadd);
5392 break;
5393 case NEON_3R_LOGIC: /* Logic ops. */
5394 switch ((u << 2) | size) {
5395 case 0: /* VAND */
5396 tcg_gen_and_i32(tmp, tmp, tmp2);
5397 break;
5398 case 1: /* BIC */
5399 tcg_gen_andc_i32(tmp, tmp, tmp2);
5400 break;
5401 case 2: /* VORR */
5402 tcg_gen_or_i32(tmp, tmp, tmp2);
5403 break;
5404 case 3: /* VORN */
5405 tcg_gen_orc_i32(tmp, tmp, tmp2);
5406 break;
5407 case 4: /* VEOR */
5408 tcg_gen_xor_i32(tmp, tmp, tmp2);
5409 break;
5410 case 5: /* VBSL */
5411 tmp3 = neon_load_reg(rd, pass);
5412 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5413 tcg_temp_free_i32(tmp3);
5414 break;
5415 case 6: /* VBIT */
5416 tmp3 = neon_load_reg(rd, pass);
5417 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5418 tcg_temp_free_i32(tmp3);
5419 break;
5420 case 7: /* VBIF */
5421 tmp3 = neon_load_reg(rd, pass);
5422 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5423 tcg_temp_free_i32(tmp3);
5424 break;
5426 break;
5427 case NEON_3R_VHSUB:
5428 GEN_NEON_INTEGER_OP(hsub);
5429 break;
5430 case NEON_3R_VQSUB:
5431 GEN_NEON_INTEGER_OP_ENV(qsub);
5432 break;
5433 case NEON_3R_VCGT:
5434 GEN_NEON_INTEGER_OP(cgt);
5435 break;
5436 case NEON_3R_VCGE:
5437 GEN_NEON_INTEGER_OP(cge);
5438 break;
5439 case NEON_3R_VSHL:
5440 GEN_NEON_INTEGER_OP(shl);
5441 break;
5442 case NEON_3R_VQSHL:
5443 GEN_NEON_INTEGER_OP_ENV(qshl);
5444 break;
5445 case NEON_3R_VRSHL:
5446 GEN_NEON_INTEGER_OP(rshl);
5447 break;
5448 case NEON_3R_VQRSHL:
5449 GEN_NEON_INTEGER_OP_ENV(qrshl);
5450 break;
5451 case NEON_3R_VMAX:
5452 GEN_NEON_INTEGER_OP(max);
5453 break;
5454 case NEON_3R_VMIN:
5455 GEN_NEON_INTEGER_OP(min);
5456 break;
5457 case NEON_3R_VABD:
5458 GEN_NEON_INTEGER_OP(abd);
5459 break;
5460 case NEON_3R_VABA:
5461 GEN_NEON_INTEGER_OP(abd);
5462 tcg_temp_free_i32(tmp2);
5463 tmp2 = neon_load_reg(rd, pass);
5464 gen_neon_add(size, tmp, tmp2);
5465 break;
5466 case NEON_3R_VADD_VSUB:
5467 if (!u) { /* VADD */
5468 gen_neon_add(size, tmp, tmp2);
5469 } else { /* VSUB */
5470 switch (size) {
5471 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5472 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5473 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5474 default: abort();
5477 break;
5478 case NEON_3R_VTST_VCEQ:
5479 if (!u) { /* VTST */
5480 switch (size) {
5481 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5482 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5483 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5484 default: abort();
5486 } else { /* VCEQ */
5487 switch (size) {
5488 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5489 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5490 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5491 default: abort();
5494 break;
5495 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5496 switch (size) {
5497 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5498 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5499 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5500 default: abort();
5502 tcg_temp_free_i32(tmp2);
5503 tmp2 = neon_load_reg(rd, pass);
5504 if (u) { /* VMLS */
5505 gen_neon_rsb(size, tmp, tmp2);
5506 } else { /* VMLA */
5507 gen_neon_add(size, tmp, tmp2);
5509 break;
5510 case NEON_3R_VMUL:
5511 if (u) { /* polynomial */
5512 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5513 } else { /* Integer */
5514 switch (size) {
5515 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5516 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5517 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5518 default: abort();
5521 break;
5522 case NEON_3R_VPMAX:
5523 GEN_NEON_INTEGER_OP(pmax);
5524 break;
5525 case NEON_3R_VPMIN:
5526 GEN_NEON_INTEGER_OP(pmin);
5527 break;
5528 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5529 if (!u) { /* VQDMULH */
5530 switch (size) {
5531 case 1:
5532 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5533 break;
5534 case 2:
5535 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5536 break;
5537 default: abort();
5539 } else { /* VQRDMULH */
5540 switch (size) {
5541 case 1:
5542 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5543 break;
5544 case 2:
5545 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5546 break;
5547 default: abort();
5550 break;
5551 case NEON_3R_VPADD:
5552 switch (size) {
5553 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5554 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5555 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5556 default: abort();
5558 break;
5559 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5561 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5562 switch ((u << 2) | size) {
5563 case 0: /* VADD */
5564 case 4: /* VPADD */
5565 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5566 break;
5567 case 2: /* VSUB */
5568 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5569 break;
5570 case 6: /* VABD */
5571 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5572 break;
5573 default:
5574 abort();
5576 tcg_temp_free_ptr(fpstatus);
5577 break;
5579 case NEON_3R_FLOAT_MULTIPLY:
5581 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5582 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5583 if (!u) {
5584 tcg_temp_free_i32(tmp2);
5585 tmp2 = neon_load_reg(rd, pass);
5586 if (size == 0) {
5587 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5588 } else {
5589 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5592 tcg_temp_free_ptr(fpstatus);
5593 break;
5595 case NEON_3R_FLOAT_CMP:
5597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5598 if (!u) {
5599 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5600 } else {
5601 if (size == 0) {
5602 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5603 } else {
5604 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5607 tcg_temp_free_ptr(fpstatus);
5608 break;
5610 case NEON_3R_FLOAT_ACMP:
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 if (size == 0) {
5614 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5615 } else {
5616 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5618 tcg_temp_free_ptr(fpstatus);
5619 break;
5621 case NEON_3R_FLOAT_MINMAX:
5623 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5624 if (size == 0) {
5625 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5626 } else {
5627 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5629 tcg_temp_free_ptr(fpstatus);
5630 break;
5632 case NEON_3R_FLOAT_MISC:
5633 if (u) {
5634 /* VMAXNM/VMINNM */
5635 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5636 if (size == 0) {
5637 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5638 } else {
5639 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5641 tcg_temp_free_ptr(fpstatus);
5642 } else {
5643 if (size == 0) {
5644 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5645 } else {
5646 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5649 break;
5650 case NEON_3R_VFM:
5652 /* VFMA, VFMS: fused multiply-add */
5653 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5654 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5655 if (size) {
5656 /* VFMS */
5657 gen_helper_vfp_negs(tmp, tmp);
5659 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5660 tcg_temp_free_i32(tmp3);
5661 tcg_temp_free_ptr(fpstatus);
5662 break;
5664 default:
5665 abort();
5667 tcg_temp_free_i32(tmp2);
5669 /* Save the result. For elementwise operations we can put it
5670 straight into the destination register. For pairwise operations
5671 we have to be careful to avoid clobbering the source operands. */
5672 if (pairwise && rd == rm) {
5673 neon_store_scratch(pass, tmp);
5674 } else {
5675 neon_store_reg(rd, pass, tmp);
5678 } /* for pass */
5679 if (pairwise && rd == rm) {
5680 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5681 tmp = neon_load_scratch(pass);
5682 neon_store_reg(rd, pass, tmp);
5685 /* End of 3 register same size operations. */
5686 } else if (insn & (1 << 4)) {
5687 if ((insn & 0x00380080) != 0) {
5688 /* Two registers and shift. */
5689 op = (insn >> 8) & 0xf;
5690 if (insn & (1 << 7)) {
5691 /* 64-bit shift. */
5692 if (op > 7) {
5693 return 1;
5695 size = 3;
5696 } else {
5697 size = 2;
5698 while ((insn & (1 << (size + 19))) == 0)
5699 size--;
5701 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5702 /* To avoid excessive duplication of ops we implement shift
5703 by immediate using the variable shift operations. */
5704 if (op < 8) {
5705 /* Shift by immediate:
5706 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5707 if (q && ((rd | rm) & 1)) {
5708 return 1;
5710 if (!u && (op == 4 || op == 6)) {
5711 return 1;
5713 /* Right shifts are encoded as N - shift, where N is the
5714 element size in bits. */
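/* For example, an 8-bit VSHR #1 arrives here with shift == 7, which
 * becomes 7 - 8 == -1; the variable-shift helpers used below treat a
 * negative count as a right shift, as VSHL (register) does.
 */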
5715 if (op <= 4)
5716 shift = shift - (1 << (size + 3));
5717 if (size == 3) {
5718 count = q + 1;
5719 } else {
5720 count = q ? 4 : 2;
5722 switch (size) {
5723 case 0:
5724 imm = (uint8_t) shift;
5725 imm |= imm << 8;
5726 imm |= imm << 16;
5727 break;
5728 case 1:
5729 imm = (uint16_t) shift;
5730 imm |= imm << 16;
5731 break;
5732 case 2:
5733 case 3:
5734 imm = shift;
5735 break;
5736 default:
5737 abort();
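/* For element sizes below 64 bits, imm now holds the (possibly
 * negative) shift count replicated into every element of a 32-bit
 * lane, so a single variable-shift helper call shifts all of them;
 * 64-bit elements use imm unreplicated via cpu_V1.
 */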
5740 for (pass = 0; pass < count; pass++) {
5741 if (size == 3) {
5742 neon_load_reg64(cpu_V0, rm + pass);
5743 tcg_gen_movi_i64(cpu_V1, imm);
5744 switch (op) {
5745 case 0: /* VSHR */
5746 case 1: /* VSRA */
5747 if (u)
5748 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5749 else
5750 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5751 break;
5752 case 2: /* VRSHR */
5753 case 3: /* VRSRA */
5754 if (u)
5755 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5756 else
5757 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5758 break;
5759 case 4: /* VSRI */
5760 case 5: /* VSHL, VSLI */
5761 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5762 break;
5763 case 6: /* VQSHLU */
5764 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5765 cpu_V0, cpu_V1);
5766 break;
5767 case 7: /* VQSHL */
5768 if (u) {
5769 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5770 cpu_V0, cpu_V1);
5771 } else {
5772 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5773 cpu_V0, cpu_V1);
5775 break;
5777 if (op == 1 || op == 3) {
5778 /* Accumulate. */
5779 neon_load_reg64(cpu_V1, rd + pass);
5780 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5781 } else if (op == 4 || (op == 5 && u)) {
5782 /* Insert */
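/* The mask covers the bits produced by the shift: for VSRI (op == 4)
 * the top -shift bits of the old destination survive, for VSLI the
 * low 'shift' bits do.
 */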
5783 neon_load_reg64(cpu_V1, rd + pass);
5784 uint64_t mask;
5785 if (shift < -63 || shift > 63) {
5786 mask = 0;
5787 } else {
5788 if (op == 4) {
5789 mask = 0xffffffffffffffffull >> -shift;
5790 } else {
5791 mask = 0xffffffffffffffffull << shift;
5794 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5795 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5797 neon_store_reg64(cpu_V0, rd + pass);
5798 } else { /* size < 3 */
5799 /* Operands in T0 and T1. */
5800 tmp = neon_load_reg(rm, pass);
5801 tmp2 = tcg_temp_new_i32();
5802 tcg_gen_movi_i32(tmp2, imm);
5803 switch (op) {
5804 case 0: /* VSHR */
5805 case 1: /* VSRA */
5806 GEN_NEON_INTEGER_OP(shl);
5807 break;
5808 case 2: /* VRSHR */
5809 case 3: /* VRSRA */
5810 GEN_NEON_INTEGER_OP(rshl);
5811 break;
5812 case 4: /* VSRI */
5813 case 5: /* VSHL, VSLI */
5814 switch (size) {
5815 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5816 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5817 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5818 default: abort();
5820 break;
5821 case 6: /* VQSHLU */
5822 switch (size) {
5823 case 0:
5824 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5825 tmp, tmp2);
5826 break;
5827 case 1:
5828 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5829 tmp, tmp2);
5830 break;
5831 case 2:
5832 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5833 tmp, tmp2);
5834 break;
5835 default:
5836 abort();
5838 break;
5839 case 7: /* VQSHL */
5840 GEN_NEON_INTEGER_OP_ENV(qshl);
5841 break;
5843 tcg_temp_free_i32(tmp2);
5845 if (op == 1 || op == 3) {
5846 /* Accumulate. */
5847 tmp2 = neon_load_reg(rd, pass);
5848 gen_neon_add(size, tmp, tmp2);
5849 tcg_temp_free_i32(tmp2);
5850 } else if (op == 4 || (op == 5 && u)) {
5851 /* Insert */
5852 switch (size) {
5853 case 0:
5854 if (op == 4)
5855 mask = 0xff >> -shift;
5856 else
5857 mask = (uint8_t)(0xff << shift);
5858 mask |= mask << 8;
5859 mask |= mask << 16;
5860 break;
5861 case 1:
5862 if (op == 4)
5863 mask = 0xffff >> -shift;
5864 else
5865 mask = (uint16_t)(0xffff << shift);
5866 mask |= mask << 16;
5867 break;
5868 case 2:
5869 if (shift < -31 || shift > 31) {
5870 mask = 0;
5871 } else {
5872 if (op == 4)
5873 mask = 0xffffffffu >> -shift;
5874 else
5875 mask = 0xffffffffu << shift;
5877 break;
5878 default:
5879 abort();
5881 tmp2 = neon_load_reg(rd, pass);
5882 tcg_gen_andi_i32(tmp, tmp, mask);
5883 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5884 tcg_gen_or_i32(tmp, tmp, tmp2);
5885 tcg_temp_free_i32(tmp2);
5887 neon_store_reg(rd, pass, tmp);
5889 } /* for pass */
5890 } else if (op < 10) {
5891 /* Shift by immediate and narrow:
5892 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5893 int input_unsigned = (op == 8) ? !u : u;
5894 if (rm & 1) {
5895 return 1;
5897 shift = shift - (1 << (size + 3));
5898 size++;
5899 if (size == 3) {
5900 tmp64 = tcg_const_i64(shift);
5901 neon_load_reg64(cpu_V0, rm);
5902 neon_load_reg64(cpu_V1, rm + 1);
5903 for (pass = 0; pass < 2; pass++) {
5904 TCGv_i64 in;
5905 if (pass == 0) {
5906 in = cpu_V0;
5907 } else {
5908 in = cpu_V1;
5910 if (q) {
5911 if (input_unsigned) {
5912 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5913 } else {
5914 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5916 } else {
5917 if (input_unsigned) {
5918 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5919 } else {
5920 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5923 tmp = tcg_temp_new_i32();
5924 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5925 neon_store_reg(rd, pass, tmp);
5926 } /* for pass */
5927 tcg_temp_free_i64(tmp64);
5928 } else {
5929 if (size == 1) {
5930 imm = (uint16_t)shift;
5931 imm |= imm << 16;
5932 } else {
5933 /* size == 2 */
5934 imm = (uint32_t)shift;
5936 tmp2 = tcg_const_i32(imm);
5937 tmp4 = neon_load_reg(rm + 1, 0);
5938 tmp5 = neon_load_reg(rm + 1, 1);
5939 for (pass = 0; pass < 2; pass++) {
5940 if (pass == 0) {
5941 tmp = neon_load_reg(rm, 0);
5942 } else {
5943 tmp = tmp4;
5945 gen_neon_shift_narrow(size, tmp, tmp2, q,
5946 input_unsigned);
5947 if (pass == 0) {
5948 tmp3 = neon_load_reg(rm, 1);
5949 } else {
5950 tmp3 = tmp5;
5952 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5953 input_unsigned);
5954 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5955 tcg_temp_free_i32(tmp);
5956 tcg_temp_free_i32(tmp3);
5957 tmp = tcg_temp_new_i32();
5958 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5959 neon_store_reg(rd, pass, tmp);
5960 } /* for pass */
5961 tcg_temp_free_i32(tmp2);
5963 } else if (op == 10) {
5964 /* VSHLL, VMOVL */
5965 if (q || (rd & 1)) {
5966 return 1;
5968 tmp = neon_load_reg(rm, 0);
5969 tmp2 = neon_load_reg(rm, 1);
5970 for (pass = 0; pass < 2; pass++) {
5971 if (pass == 1)
5972 tmp = tmp2;
5974 gen_neon_widen(cpu_V0, tmp, size, u);
5976 if (shift != 0) {
5977 /* The shift is less than the width of the source
5978 type, so we can just shift the whole register. */
5979 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5980 /* Widen the result of shift: we need to clear
5981 * the potential overflow bits resulting from
5982 * left bits of the narrow input appearing as
5983 * right bits of left the neighbour narrow
5984 * input. */
5985 if (size < 2 || !u) {
5986 uint64_t imm64;
5987 if (size == 0) {
5988 imm = (0xffu >> (8 - shift));
5989 imm |= imm << 16;
5990 } else if (size == 1) {
5991 imm = 0xffff >> (16 - shift);
5992 } else {
5993 /* size == 2 */
5994 imm = 0xffffffff >> (32 - shift);
5996 if (size < 2) {
5997 imm64 = imm | (((uint64_t)imm) << 32);
5998 } else {
5999 imm64 = imm;
6001 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6004 neon_store_reg64(cpu_V0, rd + pass);
6006 } else if (op >= 14) {
6007 /* VCVT fixed-point. */
6008 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6009 return 1;
6011 /* We have already masked out the must-be-1 top bit of imm6,
6012 * hence this 32-shift where the ARM ARM has 64-imm6.
6014 shift = 32 - shift;
6015 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6016 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6017 if (!(op & 1)) {
6018 if (u)
6019 gen_vfp_ulto(0, shift, 1);
6020 else
6021 gen_vfp_slto(0, shift, 1);
6022 } else {
6023 if (u)
6024 gen_vfp_toul(0, shift, 1);
6025 else
6026 gen_vfp_tosl(0, shift, 1);
6028 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6030 } else {
6031 return 1;
6033 } else { /* (insn & 0x00380080) == 0 */
6034 int invert;
6035 if (q && (rd & 1)) {
6036 return 1;
6039 op = (insn >> 8) & 0xf;
6040 /* One register and immediate. */
6041 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6042 invert = (insn & (1 << 5)) != 0;
6043 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6044 * We choose to not special-case this and will behave as if a
6045 * valid constant encoding of 0 had been given.
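/* Expand the 8-bit immediate according to the cmode value held in
 * 'op': e.g. op 2/3 place it in byte 1 of each 32-bit lane, op 12
 * yields 0x0000XXFF, and op 15 builds the 8-bit floating-point
 * constant expansion.
 */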
6047 switch (op) {
6048 case 0: case 1:
6049 /* no-op */
6050 break;
6051 case 2: case 3:
6052 imm <<= 8;
6053 break;
6054 case 4: case 5:
6055 imm <<= 16;
6056 break;
6057 case 6: case 7:
6058 imm <<= 24;
6059 break;
6060 case 8: case 9:
6061 imm |= imm << 16;
6062 break;
6063 case 10: case 11:
6064 imm = (imm << 8) | (imm << 24);
6065 break;
6066 case 12:
6067 imm = (imm << 8) | 0xff;
6068 break;
6069 case 13:
6070 imm = (imm << 16) | 0xffff;
6071 break;
6072 case 14:
6073 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6074 if (invert)
6075 imm = ~imm;
6076 break;
6077 case 15:
6078 if (invert) {
6079 return 1;
6081 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6082 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6083 break;
6085 if (invert)
6086 imm = ~imm;
6088 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6089 if (op & 1 && op < 12) {
6090 tmp = neon_load_reg(rd, pass);
6091 if (invert) {
6092 /* The immediate value has already been inverted, so
6093 BIC becomes AND. */
6094 tcg_gen_andi_i32(tmp, tmp, imm);
6095 } else {
6096 tcg_gen_ori_i32(tmp, tmp, imm);
6098 } else {
6099 /* VMOV, VMVN. */
6100 tmp = tcg_temp_new_i32();
6101 if (op == 14 && invert) {
6102 int n;
6103 uint32_t val;
6104 val = 0;
6105 for (n = 0; n < 4; n++) {
6106 if (imm & (1 << (n + (pass & 1) * 4)))
6107 val |= 0xff << (n * 8);
6109 tcg_gen_movi_i32(tmp, val);
6110 } else {
6111 tcg_gen_movi_i32(tmp, imm);
6114 neon_store_reg(rd, pass, tmp);
6117 } else { /* (insn & 0x00800010 == 0x00800000) */
6118 if (size != 3) {
6119 op = (insn >> 8) & 0xf;
6120 if ((insn & (1 << 6)) == 0) {
6121 /* Three registers of different lengths. */
6122 int src1_wide;
6123 int src2_wide;
6124 int prewiden;
6125 /* undefreq: bit 0 : UNDEF if size == 0
6126 * bit 1 : UNDEF if size == 1
6127 * bit 2 : UNDEF if size == 2
6128 * bit 3 : UNDEF if U == 1
6129 * Note that [2:0] set implies 'always UNDEF'
6131 int undefreq;
6132 /* prewiden, src1_wide, src2_wide, undefreq */
6133 static const int neon_3reg_wide[16][4] = {
6134 {1, 0, 0, 0}, /* VADDL */
6135 {1, 1, 0, 0}, /* VADDW */
6136 {1, 0, 0, 0}, /* VSUBL */
6137 {1, 1, 0, 0}, /* VSUBW */
6138 {0, 1, 1, 0}, /* VADDHN */
6139 {0, 0, 0, 0}, /* VABAL */
6140 {0, 1, 1, 0}, /* VSUBHN */
6141 {0, 0, 0, 0}, /* VABDL */
6142 {0, 0, 0, 0}, /* VMLAL */
6143 {0, 0, 0, 9}, /* VQDMLAL */
6144 {0, 0, 0, 0}, /* VMLSL */
6145 {0, 0, 0, 9}, /* VQDMLSL */
6146 {0, 0, 0, 0}, /* Integer VMULL */
6147 {0, 0, 0, 1}, /* VQDMULL */
6148 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6149 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6152 prewiden = neon_3reg_wide[op][0];
6153 src1_wide = neon_3reg_wide[op][1];
6154 src2_wide = neon_3reg_wide[op][2];
6155 undefreq = neon_3reg_wide[op][3];
6157 if ((undefreq & (1 << size)) ||
6158 ((undefreq & 8) && u)) {
6159 return 1;
6161 if ((src1_wide && (rn & 1)) ||
6162 (src2_wide && (rm & 1)) ||
6163 (!src2_wide && (rd & 1))) {
6164 return 1;
6167 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6168 * outside the loop below as it only performs a single pass.
6170 if (op == 14 && size == 2) {
6171 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6173 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6174 return 1;
6176 tcg_rn = tcg_temp_new_i64();
6177 tcg_rm = tcg_temp_new_i64();
6178 tcg_rd = tcg_temp_new_i64();
6179 neon_load_reg64(tcg_rn, rn);
6180 neon_load_reg64(tcg_rm, rm);
6181 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6182 neon_store_reg64(tcg_rd, rd);
6183 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6184 neon_store_reg64(tcg_rd, rd + 1);
6185 tcg_temp_free_i64(tcg_rn);
6186 tcg_temp_free_i64(tcg_rm);
6187 tcg_temp_free_i64(tcg_rd);
6188 return 0;
6191 /* Avoid overlapping operands. Wide source operands are
6192 always aligned so will never overlap with wide
6193 destinations in problematic ways. */
6194 if (rd == rm && !src2_wide) {
6195 tmp = neon_load_reg(rm, 1);
6196 neon_store_scratch(2, tmp);
6197 } else if (rd == rn && !src1_wide) {
6198 tmp = neon_load_reg(rn, 1);
6199 neon_store_scratch(2, tmp);
6201 TCGV_UNUSED_I32(tmp3);
6202 for (pass = 0; pass < 2; pass++) {
6203 if (src1_wide) {
6204 neon_load_reg64(cpu_V0, rn + pass);
6205 TCGV_UNUSED_I32(tmp);
6206 } else {
6207 if (pass == 1 && rd == rn) {
6208 tmp = neon_load_scratch(2);
6209 } else {
6210 tmp = neon_load_reg(rn, pass);
6212 if (prewiden) {
6213 gen_neon_widen(cpu_V0, tmp, size, u);
6216 if (src2_wide) {
6217 neon_load_reg64(cpu_V1, rm + pass);
6218 TCGV_UNUSED_I32(tmp2);
6219 } else {
6220 if (pass == 1 && rd == rm) {
6221 tmp2 = neon_load_scratch(2);
6222 } else {
6223 tmp2 = neon_load_reg(rm, pass);
6225 if (prewiden) {
6226 gen_neon_widen(cpu_V1, tmp2, size, u);
6229 switch (op) {
6230 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6231 gen_neon_addl(size);
6232 break;
6233 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6234 gen_neon_subl(size);
6235 break;
6236 case 5: case 7: /* VABAL, VABDL */
6237 switch ((size << 1) | u) {
6238 case 0:
6239 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6240 break;
6241 case 1:
6242 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6243 break;
6244 case 2:
6245 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6246 break;
6247 case 3:
6248 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6249 break;
6250 case 4:
6251 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6252 break;
6253 case 5:
6254 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6255 break;
6256 default: abort();
6258 tcg_temp_free_i32(tmp2);
6259 tcg_temp_free_i32(tmp);
6260 break;
6261 case 8: case 9: case 10: case 11: case 12: case 13:
6262 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6263 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6264 break;
6265 case 14: /* Polynomial VMULL */
6266 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6267 tcg_temp_free_i32(tmp2);
6268 tcg_temp_free_i32(tmp);
6269 break;
6270 default: /* 15 is RESERVED: caught earlier */
6271 abort();
6273 if (op == 13) {
6274 /* VQDMULL */
6275 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6276 neon_store_reg64(cpu_V0, rd + pass);
6277 } else if (op == 5 || (op >= 8 && op <= 11)) {
6278 /* Accumulate. */
6279 neon_load_reg64(cpu_V1, rd + pass);
6280 switch (op) {
6281 case 10: /* VMLSL */
6282 gen_neon_negl(cpu_V0, size);
6283 /* Fall through */
6284 case 5: case 8: /* VABAL, VMLAL */
6285 gen_neon_addl(size);
6286 break;
6287 case 9: case 11: /* VQDMLAL, VQDMLSL */
6288 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6289 if (op == 11) {
6290 gen_neon_negl(cpu_V0, size);
6292 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6293 break;
6294 default:
6295 abort();
6297 neon_store_reg64(cpu_V0, rd + pass);
6298 } else if (op == 4 || op == 6) {
6299 /* Narrowing operation. */
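/* VADDHN/VSUBHN keep the high half of each double-width result; the
 * u == 1 forms (VRADDHN/VRSUBHN) round first, via the *_round_high
 * helpers or by adding 1 << 31 for 32-bit halves.
 */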
6300 tmp = tcg_temp_new_i32();
6301 if (!u) {
6302 switch (size) {
6303 case 0:
6304 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6305 break;
6306 case 1:
6307 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6308 break;
6309 case 2:
6310 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6311 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6312 break;
6313 default: abort();
6315 } else {
6316 switch (size) {
6317 case 0:
6318 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6319 break;
6320 case 1:
6321 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6322 break;
6323 case 2:
6324 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6325 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6326 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6327 break;
6328 default: abort();
6331 if (pass == 0) {
6332 tmp3 = tmp;
6333 } else {
6334 neon_store_reg(rd, 0, tmp3);
6335 neon_store_reg(rd, 1, tmp);
6337 } else {
6338 /* Write back the result. */
6339 neon_store_reg64(cpu_V0, rd + pass);
6342 } else {
6343 /* Two registers and a scalar. NB that for ops of this form
6344 * the ARM ARM labels bit 24 as Q, but it is in our variable
6345 * 'u', not 'q'.
6347 if (size == 0) {
6348 return 1;
6350 switch (op) {
6351 case 1: /* Float VMLA scalar */
6352 case 5: /* Floating point VMLS scalar */
6353 case 9: /* Floating point VMUL scalar */
6354 if (size == 1) {
6355 return 1;
6357 /* fall through */
6358 case 0: /* Integer VMLA scalar */
6359 case 4: /* Integer VMLS scalar */
6360 case 8: /* Integer VMUL scalar */
6361 case 12: /* VQDMULH scalar */
6362 case 13: /* VQRDMULH scalar */
6363 if (u && ((rd | rn) & 1)) {
6364 return 1;
6366 tmp = neon_get_scalar(size, rm);
6367 neon_store_scratch(0, tmp);
6368 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6369 tmp = neon_load_scratch(0);
6370 tmp2 = neon_load_reg(rn, pass);
6371 if (op == 12) {
6372 if (size == 1) {
6373 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6374 } else {
6375 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6377 } else if (op == 13) {
6378 if (size == 1) {
6379 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6380 } else {
6381 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6383 } else if (op & 1) {
6384 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6385 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6386 tcg_temp_free_ptr(fpstatus);
6387 } else {
6388 switch (size) {
6389 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6390 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6391 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6392 default: abort();
6395 tcg_temp_free_i32(tmp2);
6396 if (op < 8) {
6397 /* Accumulate. */
6398 tmp2 = neon_load_reg(rd, pass);
6399 switch (op) {
6400 case 0:
6401 gen_neon_add(size, tmp, tmp2);
6402 break;
6403 case 1:
6405 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6406 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6407 tcg_temp_free_ptr(fpstatus);
6408 break;
6410 case 4:
6411 gen_neon_rsb(size, tmp, tmp2);
6412 break;
6413 case 5:
6415 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6416 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6417 tcg_temp_free_ptr(fpstatus);
6418 break;
6420 default:
6421 abort();
6423 tcg_temp_free_i32(tmp2);
6425 neon_store_reg(rd, pass, tmp);
6427 break;
6428 case 3: /* VQDMLAL scalar */
6429 case 7: /* VQDMLSL scalar */
6430 case 11: /* VQDMULL scalar */
6431 if (u == 1) {
6432 return 1;
6434 /* fall through */
6435 case 2: /* VMLAL scalar */
6436 case 6: /* VMLSL scalar */
6437 case 10: /* VMULL scalar */
6438 if (rd & 1) {
6439 return 1;
6441 tmp2 = neon_get_scalar(size, rm);
6442 /* We need a copy of tmp2 because gen_neon_mull
6443 * frees it during pass 0. */
6444 tmp4 = tcg_temp_new_i32();
6445 tcg_gen_mov_i32(tmp4, tmp2);
6446 tmp3 = neon_load_reg(rn, 1);
6448 for (pass = 0; pass < 2; pass++) {
6449 if (pass == 0) {
6450 tmp = neon_load_reg(rn, 0);
6451 } else {
6452 tmp = tmp3;
6453 tmp2 = tmp4;
6455 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6456 if (op != 11) {
6457 neon_load_reg64(cpu_V1, rd + pass);
6459 switch (op) {
6460 case 6:
6461 gen_neon_negl(cpu_V0, size);
6462 /* Fall through */
6463 case 2:
6464 gen_neon_addl(size);
6465 break;
6466 case 3: case 7:
6467 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6468 if (op == 7) {
6469 gen_neon_negl(cpu_V0, size);
6471 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6472 break;
6473 case 10:
6474 /* no-op */
6475 break;
6476 case 11:
6477 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6478 break;
6479 default:
6480 abort();
6482 neon_store_reg64(cpu_V0, rd + pass);
6486 break;
6487 default: /* 14 and 15 are RESERVED */
6488 return 1;
6491 } else { /* size == 3 */
6492 if (!u) {
6493 /* Extract. */
6494 imm = (insn >> 8) & 0xf;
6496 if (imm > 7 && !q)
6497 return 1;
6499 if (q && ((rd | rn | rm) & 1)) {
6500 return 1;
6503 if (imm == 0) {
6504 neon_load_reg64(cpu_V0, rn);
6505 if (q) {
6506 neon_load_reg64(cpu_V1, rn + 1);
6508 } else if (imm == 8) {
6509 neon_load_reg64(cpu_V0, rn + 1);
6510 if (q) {
6511 neon_load_reg64(cpu_V1, rm);
6513 } else if (q) {
6514 tmp64 = tcg_temp_new_i64();
6515 if (imm < 8) {
6516 neon_load_reg64(cpu_V0, rn);
6517 neon_load_reg64(tmp64, rn + 1);
6518 } else {
6519 neon_load_reg64(cpu_V0, rn + 1);
6520 neon_load_reg64(tmp64, rm);
6522 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6523 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6524 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6525 if (imm < 8) {
6526 neon_load_reg64(cpu_V1, rm);
6527 } else {
6528 neon_load_reg64(cpu_V1, rm + 1);
6529 imm -= 8;
6531 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6532 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6533 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6534 tcg_temp_free_i64(tmp64);
6535 } else {
6536 /* BUGFIX */
6537 neon_load_reg64(cpu_V0, rn);
6538 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6539 neon_load_reg64(cpu_V1, rm);
6540 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6541 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6543 neon_store_reg64(cpu_V0, rd);
6544 if (q) {
6545 neon_store_reg64(cpu_V1, rd + 1);
6547 } else if ((insn & (1 << 11)) == 0) {
6548 /* Two register misc. */
6549 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6550 size = (insn >> 18) & 3;
6551 /* UNDEF for unknown op values and bad op-size combinations */
6552 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6553 return 1;
6555 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6556 q && ((rm | rd) & 1)) {
6557 return 1;
6559 switch (op) {
6560 case NEON_2RM_VREV64:
6561 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6562 tmp = neon_load_reg(rm, pass * 2);
6563 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6564 switch (size) {
6565 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6566 case 1: gen_swap_half(tmp); break;
6567 case 2: /* no-op */ break;
6568 default: abort();
6570 neon_store_reg(rd, pass * 2 + 1, tmp);
6571 if (size == 2) {
6572 neon_store_reg(rd, pass * 2, tmp2);
6573 } else {
6574 switch (size) {
6575 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6576 case 1: gen_swap_half(tmp2); break;
6577 default: abort();
6579 neon_store_reg(rd, pass * 2, tmp2);
6582 break;
6583 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6584 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6585 for (pass = 0; pass < q + 1; pass++) {
6586 tmp = neon_load_reg(rm, pass * 2);
6587 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6588 tmp = neon_load_reg(rm, pass * 2 + 1);
6589 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6590 switch (size) {
6591 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6592 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6593 case 2: tcg_gen_add_i64(CPU_V001); break;
6594 default: abort();
6596 if (op >= NEON_2RM_VPADAL) {
6597 /* Accumulate. */
6598 neon_load_reg64(cpu_V1, rd + pass);
6599 gen_neon_addl(size);
6601 neon_store_reg64(cpu_V0, rd + pass);
6603 break;
6604 case NEON_2RM_VTRN:
6605 if (size == 2) {
6606 int n;
6607 for (n = 0; n < (q ? 4 : 2); n += 2) {
6608 tmp = neon_load_reg(rm, n);
6609 tmp2 = neon_load_reg(rd, n + 1);
6610 neon_store_reg(rm, n, tmp2);
6611 neon_store_reg(rd, n + 1, tmp);
6613 } else {
6614 goto elementwise;
6616 break;
6617 case NEON_2RM_VUZP:
6618 if (gen_neon_unzip(rd, rm, size, q)) {
6619 return 1;
6621 break;
6622 case NEON_2RM_VZIP:
6623 if (gen_neon_zip(rd, rm, size, q)) {
6624 return 1;
6626 break;
6627 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6628 /* also VQMOVUN; op field and mnemonics don't line up */
6629 if (rm & 1) {
6630 return 1;
6632 TCGV_UNUSED_I32(tmp2);
6633 for (pass = 0; pass < 2; pass++) {
6634 neon_load_reg64(cpu_V0, rm + pass);
6635 tmp = tcg_temp_new_i32();
6636 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6637 tmp, cpu_V0);
6638 if (pass == 0) {
6639 tmp2 = tmp;
6640 } else {
6641 neon_store_reg(rd, 0, tmp2);
6642 neon_store_reg(rd, 1, tmp);
6645 break;
6646 case NEON_2RM_VSHLL:
6647 if (q || (rd & 1)) {
6648 return 1;
6650 tmp = neon_load_reg(rm, 0);
6651 tmp2 = neon_load_reg(rm, 1);
6652 for (pass = 0; pass < 2; pass++) {
6653 if (pass == 1)
6654 tmp = tmp2;
6655 gen_neon_widen(cpu_V0, tmp, size, 1);
6656 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6657 neon_store_reg64(cpu_V0, rd + pass);
6659 break;
6660 case NEON_2RM_VCVT_F16_F32:
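/* Narrowing conversion: the four single-precision elements of Qm are
 * converted to half precision and packed into Dd, two results per
 * 32-bit word with the lower-numbered element in the low half.
 */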
6661 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6662 q || (rm & 1)) {
6663 return 1;
6665 tmp = tcg_temp_new_i32();
6666 tmp2 = tcg_temp_new_i32();
6667 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6668 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6669 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6670 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6671 tcg_gen_shli_i32(tmp2, tmp2, 16);
6672 tcg_gen_or_i32(tmp2, tmp2, tmp);
6673 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6674 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6675 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6676 neon_store_reg(rd, 0, tmp2);
6677 tmp2 = tcg_temp_new_i32();
6678 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6679 tcg_gen_shli_i32(tmp2, tmp2, 16);
6680 tcg_gen_or_i32(tmp2, tmp2, tmp);
6681 neon_store_reg(rd, 1, tmp2);
6682 tcg_temp_free_i32(tmp);
6683 break;
6684 case NEON_2RM_VCVT_F32_F16:
6685 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6686 q || (rd & 1)) {
6687 return 1;
6689 tmp3 = tcg_temp_new_i32();
6690 tmp = neon_load_reg(rm, 0);
6691 tmp2 = neon_load_reg(rm, 1);
6692 tcg_gen_ext16u_i32(tmp3, tmp);
6693 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6694 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6695 tcg_gen_shri_i32(tmp3, tmp, 16);
6696 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6697 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6698 tcg_temp_free_i32(tmp);
6699 tcg_gen_ext16u_i32(tmp3, tmp2);
6700 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6701 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6702 tcg_gen_shri_i32(tmp3, tmp2, 16);
6703 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6704 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6705 tcg_temp_free_i32(tmp2);
6706 tcg_temp_free_i32(tmp3);
6707 break;
6708 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6709 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6710 || ((rm | rd) & 1)) {
6711 return 1;
6713 tmp = tcg_const_i32(rd);
6714 tmp2 = tcg_const_i32(rm);
6716 /* Bit 6 is the lowest opcode bit; it distinguishes between
6717 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6719 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6721 if (op == NEON_2RM_AESE) {
6722 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6723 } else {
6724 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6726 tcg_temp_free_i32(tmp);
6727 tcg_temp_free_i32(tmp2);
6728 tcg_temp_free_i32(tmp3);
6729 break;
6730 case NEON_2RM_SHA1H:
6731 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6732 || ((rm | rd) & 1)) {
6733 return 1;
6735 tmp = tcg_const_i32(rd);
6736 tmp2 = tcg_const_i32(rm);
6738 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6740 tcg_temp_free_i32(tmp);
6741 tcg_temp_free_i32(tmp2);
6742 break;
6743 case NEON_2RM_SHA1SU1:
6744 if ((rm | rd) & 1) {
6745 return 1;
6747 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6748 if (q) {
6749 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6750 return 1;
6752 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6753 return 1;
6755 tmp = tcg_const_i32(rd);
6756 tmp2 = tcg_const_i32(rm);
6757 if (q) {
6758 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6759 } else {
6760 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6762 tcg_temp_free_i32(tmp);
6763 tcg_temp_free_i32(tmp2);
6764 break;
6765 default:
6766 elementwise:
6767 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6768 if (neon_2rm_is_float_op(op)) {
6769 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6770 neon_reg_offset(rm, pass));
6771 TCGV_UNUSED_I32(tmp);
6772 } else {
6773 tmp = neon_load_reg(rm, pass);
6775 switch (op) {
6776 case NEON_2RM_VREV32:
6777 switch (size) {
6778 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6779 case 1: gen_swap_half(tmp); break;
6780 default: abort();
6782 break;
6783 case NEON_2RM_VREV16:
6784 gen_rev16(tmp);
6785 break;
6786 case NEON_2RM_VCLS:
6787 switch (size) {
6788 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6789 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6790 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6791 default: abort();
6793 break;
6794 case NEON_2RM_VCLZ:
6795 switch (size) {
6796 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6797 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6798 case 2: gen_helper_clz(tmp, tmp); break;
6799 default: abort();
6801 break;
6802 case NEON_2RM_VCNT:
6803 gen_helper_neon_cnt_u8(tmp, tmp);
6804 break;
6805 case NEON_2RM_VMVN:
6806 tcg_gen_not_i32(tmp, tmp);
6807 break;
6808 case NEON_2RM_VQABS:
6809 switch (size) {
6810 case 0:
6811 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6812 break;
6813 case 1:
6814 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6815 break;
6816 case 2:
6817 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6818 break;
6819 default: abort();
6821 break;
6822 case NEON_2RM_VQNEG:
6823 switch (size) {
6824 case 0:
6825 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6826 break;
6827 case 1:
6828 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6829 break;
6830 case 2:
6831 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6832 break;
6833 default: abort();
6835 break;
6836 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6837 tmp2 = tcg_const_i32(0);
6838 switch(size) {
6839 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6840 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6841 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6842 default: abort();
6844 tcg_temp_free_i32(tmp2);
6845 if (op == NEON_2RM_VCLE0) {
6846 tcg_gen_not_i32(tmp, tmp);
6848 break;
6849 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6850 tmp2 = tcg_const_i32(0);
6851 switch(size) {
6852 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6853 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6854 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6855 default: abort();
6857 tcg_temp_free_i32(tmp2);
6858 if (op == NEON_2RM_VCLT0) {
6859 tcg_gen_not_i32(tmp, tmp);
6861 break;
6862 case NEON_2RM_VCEQ0:
6863 tmp2 = tcg_const_i32(0);
6864 switch(size) {
6865 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6866 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6867 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6868 default: abort();
6870 tcg_temp_free_i32(tmp2);
6871 break;
6872 case NEON_2RM_VABS:
6873 switch(size) {
6874 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6875 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6876 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6877 default: abort();
6879 break;
6880 case NEON_2RM_VNEG:
6881 tmp2 = tcg_const_i32(0);
6882 gen_neon_rsb(size, tmp, tmp2);
6883 tcg_temp_free_i32(tmp2);
6884 break;
6885 case NEON_2RM_VCGT0_F:
6887 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6888 tmp2 = tcg_const_i32(0);
6889 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6890 tcg_temp_free_i32(tmp2);
6891 tcg_temp_free_ptr(fpstatus);
6892 break;
6894 case NEON_2RM_VCGE0_F:
6896 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6897 tmp2 = tcg_const_i32(0);
6898 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6899 tcg_temp_free_i32(tmp2);
6900 tcg_temp_free_ptr(fpstatus);
6901 break;
6903 case NEON_2RM_VCEQ0_F:
6905 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6906 tmp2 = tcg_const_i32(0);
6907 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6908 tcg_temp_free_i32(tmp2);
6909 tcg_temp_free_ptr(fpstatus);
6910 break;
6912 case NEON_2RM_VCLE0_F:
6914 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6915 tmp2 = tcg_const_i32(0);
6916 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6917 tcg_temp_free_i32(tmp2);
6918 tcg_temp_free_ptr(fpstatus);
6919 break;
6921 case NEON_2RM_VCLT0_F:
6923 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6924 tmp2 = tcg_const_i32(0);
6925 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6926 tcg_temp_free_i32(tmp2);
6927 tcg_temp_free_ptr(fpstatus);
6928 break;
6930 case NEON_2RM_VABS_F:
6931 gen_vfp_abs(0);
6932 break;
6933 case NEON_2RM_VNEG_F:
6934 gen_vfp_neg(0);
6935 break;
6936 case NEON_2RM_VSWP:
6937 tmp2 = neon_load_reg(rd, pass);
6938 neon_store_reg(rm, pass, tmp2);
6939 break;
6940 case NEON_2RM_VTRN:
6941 tmp2 = neon_load_reg(rd, pass);
6942 switch (size) {
6943 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6944 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6945 default: abort();
6947 neon_store_reg(rm, pass, tmp2);
6948 break;
6949 case NEON_2RM_VRINTN:
6950 case NEON_2RM_VRINTA:
6951 case NEON_2RM_VRINTM:
6952 case NEON_2RM_VRINTP:
6953 case NEON_2RM_VRINTZ:
6955 TCGv_i32 tcg_rmode;
6956 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6957 int rmode;
6959 if (op == NEON_2RM_VRINTZ) {
6960 rmode = FPROUNDING_ZERO;
6961 } else {
6962 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6965 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6966 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6967 cpu_env);
6968 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6969 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6970 cpu_env);
6971 tcg_temp_free_ptr(fpstatus);
6972 tcg_temp_free_i32(tcg_rmode);
6973 break;
6975 case NEON_2RM_VRINTX:
6977 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6978 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6979 tcg_temp_free_ptr(fpstatus);
6980 break;
6982 case NEON_2RM_VCVTAU:
6983 case NEON_2RM_VCVTAS:
6984 case NEON_2RM_VCVTNU:
6985 case NEON_2RM_VCVTNS:
6986 case NEON_2RM_VCVTPU:
6987 case NEON_2RM_VCVTPS:
6988 case NEON_2RM_VCVTMU:
6989 case NEON_2RM_VCVTMS:
6991 bool is_signed = !extract32(insn, 7, 1);
6992 TCGv_ptr fpst = get_fpstatus_ptr(1);
6993 TCGv_i32 tcg_rmode, tcg_shift;
6994 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6996 tcg_shift = tcg_const_i32(0);
6997 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6998 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6999 cpu_env);
7001 if (is_signed) {
7002 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7003 tcg_shift, fpst);
7004 } else {
7005 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7006 tcg_shift, fpst);
7009 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7010 cpu_env);
7011 tcg_temp_free_i32(tcg_rmode);
7012 tcg_temp_free_i32(tcg_shift);
7013 tcg_temp_free_ptr(fpst);
7014 break;
7016 case NEON_2RM_VRECPE:
7018 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7019 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7020 tcg_temp_free_ptr(fpstatus);
7021 break;
7023 case NEON_2RM_VRSQRTE:
7025 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7026 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7027 tcg_temp_free_ptr(fpstatus);
7028 break;
7030 case NEON_2RM_VRECPE_F:
7032 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7033 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7034 tcg_temp_free_ptr(fpstatus);
7035 break;
7037 case NEON_2RM_VRSQRTE_F:
7039 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7040 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7041 tcg_temp_free_ptr(fpstatus);
7042 break;
7044 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7045 gen_vfp_sito(0, 1);
7046 break;
7047 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7048 gen_vfp_uito(0, 1);
7049 break;
7050 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7051 gen_vfp_tosiz(0, 1);
7052 break;
7053 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7054 gen_vfp_touiz(0, 1);
7055 break;
7056 default:
7057 /* Reserved op values were caught by the
7058 * neon_2rm_sizes[] check earlier.
7060 abort();
7062 if (neon_2rm_is_float_op(op)) {
7063 tcg_gen_st_f32(cpu_F0s, cpu_env,
7064 neon_reg_offset(rd, pass));
7065 } else {
7066 neon_store_reg(rd, pass, tmp);
7069 break;
7071 } else if ((insn & (1 << 10)) == 0) {
7072 /* VTBL, VTBX. */
7073 int n = ((insn >> 8) & 3) + 1;
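/* The len field encodes how many consecutive D registers, starting at Dn,
 * make up the lookup table (1..4).
 */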
7074 if ((rn + n) > 32) {
7075 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7076 * helper function running off the end of the register file.
7078 return 1;
7080 n <<= 3;
7081 if (insn & (1 << 6)) {
7082 tmp = neon_load_reg(rd, 0);
7083 } else {
7084 tmp = tcg_temp_new_i32();
7085 tcg_gen_movi_i32(tmp, 0);
7087 tmp2 = neon_load_reg(rm, 0);
7088 tmp4 = tcg_const_i32(rn);
7089 tmp5 = tcg_const_i32(n);
7090 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7091 tcg_temp_free_i32(tmp);
7092 if (insn & (1 << 6)) {
7093 tmp = neon_load_reg(rd, 1);
7094 } else {
7095 tmp = tcg_temp_new_i32();
7096 tcg_gen_movi_i32(tmp, 0);
7098 tmp3 = neon_load_reg(rm, 1);
7099 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7100 tcg_temp_free_i32(tmp5);
7101 tcg_temp_free_i32(tmp4);
7102 neon_store_reg(rd, 0, tmp2);
7103 neon_store_reg(rd, 1, tmp3);
7104 tcg_temp_free_i32(tmp);
7105 } else if ((insn & 0x380) == 0) {
7106 /* VDUP */
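/* Scalar duplicate: bits [19:16] select the element - bit 16 set selects a
 * byte, else bit 17 set selects a halfword, else bit 18 (which must then be
 * set) selects a 32-bit word; the higher bits of the field give the element
 * index within Dm.
 */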
7107 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7108 return 1;
7110 if (insn & (1 << 19)) {
7111 tmp = neon_load_reg(rm, 1);
7112 } else {
7113 tmp = neon_load_reg(rm, 0);
7115 if (insn & (1 << 16)) {
7116 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7117 } else if (insn & (1 << 17)) {
7118 if ((insn >> 18) & 1)
7119 gen_neon_dup_high16(tmp);
7120 else
7121 gen_neon_dup_low16(tmp);
7123 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7124 tmp2 = tcg_temp_new_i32();
7125 tcg_gen_mov_i32(tmp2, tmp);
7126 neon_store_reg(rd, pass, tmp2);
7128 tcg_temp_free_i32(tmp);
7129 } else {
7130 return 1;
7134 return 0;
7137 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7139 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7140 const ARMCPRegInfo *ri;
7142 cpnum = (insn >> 8) & 0xf;
7144 /* First check for coprocessor space used for XScale/iwMMXt insns */
7145 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7146 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7147 return 1;
7149 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7150 return disas_iwmmxt_insn(s, insn);
7151 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7152 return disas_dsp_insn(s, insn);
7154 return 1;
7157 /* Otherwise treat as a generic register access */
7158 is64 = (insn & (1 << 25)) == 0;
7159 if (!is64 && ((insn & (1 << 4)) == 0)) {
7160 /* cdp */
7161 return 1;
7164 crm = insn & 0xf;
7165 if (is64) {
7166 crn = 0;
7167 opc1 = (insn >> 4) & 0xf;
7168 opc2 = 0;
7169 rt2 = (insn >> 16) & 0xf;
7170 } else {
7171 crn = (insn >> 16) & 0xf;
7172 opc1 = (insn >> 21) & 7;
7173 opc2 = (insn >> 5) & 7;
7174 rt2 = 0;
7176 isread = (insn >> 20) & 1;
7177 rt = (insn >> 12) & 0xf;
7179 ri = get_arm_cp_reginfo(s->cp_regs,
7180 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7181 if (ri) {
7182 /* Check access permissions */
7183 if (!cp_access_ok(s->current_el, ri, isread)) {
7184 return 1;
7187 if (ri->accessfn ||
7188 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7189 /* Emit code to perform further access permissions checks at
7190 * runtime; this may result in an exception.
7191 * Note that on XScale all cp0..cp13 registers do an access check
7192 * call in order to handle c15_cpar.
7194 TCGv_ptr tmpptr;
7195 TCGv_i32 tcg_syn;
7196 uint32_t syndrome;
7198 /* Note that since we are an implementation which takes an
7199 * exception on a trapped conditional instruction only if the
7200 * instruction passes its condition code check, we can take
7201 * advantage of the clause in the ARM ARM that allows us to set
7202 * the COND field in the instruction to 0xE in all cases.
7203 * We could fish the actual condition out of the insn (ARM)
7204 * or the condexec bits (Thumb) but it isn't necessary.
7206 switch (cpnum) {
7207 case 14:
7208 if (is64) {
7209 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7210 isread, s->thumb);
7211 } else {
7212 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7213 rt, isread, s->thumb);
7215 break;
7216 case 15:
7217 if (is64) {
7218 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7219 isread, s->thumb);
7220 } else {
7221 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7222 rt, isread, s->thumb);
7224 break;
7225 default:
7226 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7227 * so this can only happen if this is an ARMv7 or earlier CPU,
7228 * in which case the syndrome information won't actually be
7229 * guest visible.
7231 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7232 syndrome = syn_uncategorized();
7233 break;
7236 gen_set_pc_im(s, s->pc - 4);
7237 tmpptr = tcg_const_ptr(ri);
7238 tcg_syn = tcg_const_i32(syndrome);
7239 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
7240 tcg_temp_free_ptr(tmpptr);
7241 tcg_temp_free_i32(tcg_syn);
7244 /* Handle special cases first */
7245 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7246 case ARM_CP_NOP:
7247 return 0;
7248 case ARM_CP_WFI:
7249 if (isread) {
7250 return 1;
7252 gen_set_pc_im(s, s->pc);
7253 s->is_jmp = DISAS_WFI;
7254 return 0;
7255 default:
7256 break;
7259 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7260 gen_io_start();
7263 if (isread) {
7264 /* Read */
7265 if (is64) {
7266 TCGv_i64 tmp64;
7267 TCGv_i32 tmp;
7268 if (ri->type & ARM_CP_CONST) {
7269 tmp64 = tcg_const_i64(ri->resetvalue);
7270 } else if (ri->readfn) {
7271 TCGv_ptr tmpptr;
7272 tmp64 = tcg_temp_new_i64();
7273 tmpptr = tcg_const_ptr(ri);
7274 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7275 tcg_temp_free_ptr(tmpptr);
7276 } else {
7277 tmp64 = tcg_temp_new_i64();
7278 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7280 tmp = tcg_temp_new_i32();
7281 tcg_gen_extrl_i64_i32(tmp, tmp64);
7282 store_reg(s, rt, tmp);
7283 tcg_gen_shri_i64(tmp64, tmp64, 32);
7284 tmp = tcg_temp_new_i32();
7285 tcg_gen_extrl_i64_i32(tmp, tmp64);
7286 tcg_temp_free_i64(tmp64);
7287 store_reg(s, rt2, tmp);
7288 } else {
7289 TCGv_i32 tmp;
7290 if (ri->type & ARM_CP_CONST) {
7291 tmp = tcg_const_i32(ri->resetvalue);
7292 } else if (ri->readfn) {
7293 TCGv_ptr tmpptr;
7294 tmp = tcg_temp_new_i32();
7295 tmpptr = tcg_const_ptr(ri);
7296 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7297 tcg_temp_free_ptr(tmpptr);
7298 } else {
7299 tmp = load_cpu_offset(ri->fieldoffset);
7301 if (rt == 15) {
7302 /* Destination register of r15 for 32 bit loads sets
7303 * the condition codes from the high 4 bits of the value
7305 gen_set_nzcv(tmp);
7306 tcg_temp_free_i32(tmp);
7307 } else {
7308 store_reg(s, rt, tmp);
7311 } else {
7312 /* Write */
7313 if (ri->type & ARM_CP_CONST) {
7314 /* If not forbidden by access permissions, treat as WI */
7315 return 0;
7318 if (is64) {
7319 TCGv_i32 tmplo, tmphi;
7320 TCGv_i64 tmp64 = tcg_temp_new_i64();
7321 tmplo = load_reg(s, rt);
7322 tmphi = load_reg(s, rt2);
7323 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7324 tcg_temp_free_i32(tmplo);
7325 tcg_temp_free_i32(tmphi);
7326 if (ri->writefn) {
7327 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7328 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7329 tcg_temp_free_ptr(tmpptr);
7330 } else {
7331 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7333 tcg_temp_free_i64(tmp64);
7334 } else {
7335 if (ri->writefn) {
7336 TCGv_i32 tmp;
7337 TCGv_ptr tmpptr;
7338 tmp = load_reg(s, rt);
7339 tmpptr = tcg_const_ptr(ri);
7340 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7341 tcg_temp_free_ptr(tmpptr);
7342 tcg_temp_free_i32(tmp);
7343 } else {
7344 TCGv_i32 tmp = load_reg(s, rt);
7345 store_cpu_offset(tmp, ri->fieldoffset);
7350 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7351 /* I/O operations must end the TB here (whether read or write) */
7352 gen_io_end();
7353 gen_lookup_tb(s);
7354 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7355 /* We default to ending the TB on a coprocessor register write,
7356 * but allow this to be suppressed by the register definition
7357 * (usually only necessary to work around guest bugs).
7359 gen_lookup_tb(s);
7362 return 0;
7365 /* Unknown register; this might be a guest error or a QEMU
7366 * unimplemented feature.
7368 if (is64) {
7369 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7370 "64 bit system register cp:%d opc1: %d crm:%d "
7371 "(%s)\n",
7372 isread ? "read" : "write", cpnum, opc1, crm,
7373 s->ns ? "non-secure" : "secure");
7374 } else {
7375 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7376 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7377 "(%s)\n",
7378 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7379 s->ns ? "non-secure" : "secure");
7382 return 1;
7386 /* Store a 64-bit value to a register pair. Clobbers val. */
7387 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7389 TCGv_i32 tmp;
7390 tmp = tcg_temp_new_i32();
7391 tcg_gen_extrl_i64_i32(tmp, val);
7392 store_reg(s, rlow, tmp);
7393 tmp = tcg_temp_new_i32();
7394 tcg_gen_shri_i64(val, val, 32);
7395 tcg_gen_extrl_i64_i32(tmp, val);
7396 store_reg(s, rhigh, tmp);
7399 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7400 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7402 TCGv_i64 tmp;
7403 TCGv_i32 tmp2;
7405 /* Load value and extend to 64 bits. */
7406 tmp = tcg_temp_new_i64();
7407 tmp2 = load_reg(s, rlow);
7408 tcg_gen_extu_i32_i64(tmp, tmp2);
7409 tcg_temp_free_i32(tmp2);
7410 tcg_gen_add_i64(val, val, tmp);
7411 tcg_temp_free_i64(tmp);
7414 /* load and add a 64-bit value from a register pair. */
7415 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7417 TCGv_i64 tmp;
7418 TCGv_i32 tmpl;
7419 TCGv_i32 tmph;
7421 /* Load 64-bit value rd:rn. */
7422 tmpl = load_reg(s, rlow);
7423 tmph = load_reg(s, rhigh);
7424 tmp = tcg_temp_new_i64();
7425 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7426 tcg_temp_free_i32(tmpl);
7427 tcg_temp_free_i32(tmph);
7428 tcg_gen_add_i64(val, val, tmp);
7429 tcg_temp_free_i64(tmp);
7432 /* Set N and Z flags from hi|lo. */
7433 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7435 tcg_gen_mov_i32(cpu_NF, hi);
7436 tcg_gen_or_i32(cpu_ZF, lo, hi);
7439 /* Load/Store exclusive instructions are implemented by remembering
7440 the value/address loaded, and seeing if these are the same
7441 when the store is performed. This should be sufficient to implement
7442 the architecturally mandated semantics, and avoids having to monitor
7443 regular stores.
7445 In system emulation mode only one CPU will be running at once, so
7446 this sequence is effectively atomic. In user emulation mode we
7447 throw an exception and handle the atomic operation elsewhere. */
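/* For reference only (not something this translator emits): the guest-side
 * idiom these helpers exist to support is the classic load/store-exclusive
 * retry loop, e.g. an atomic increment:
 *
 *   retry:
 *     ldrex   r1, [r0]        @ load [r0] and mark the address exclusive
 *     add     r1, r1, #1
 *     strex   r2, r1, [r0]    @ store iff still exclusive; r2 = 0 on success
 *     cmp     r2, #0
 *     bne     retry
 *
 * gen_load_exclusive() below records the address/value for the LDREX and
 * gen_store_exclusive() performs the conditional store for the STREX.
 */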
7448 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7449 TCGv_i32 addr, int size)
7451 TCGv_i32 tmp = tcg_temp_new_i32();
7453 s->is_ldex = true;
7455 /* emit alignment check if needed */
7456 if (size != 0) {
7457 /* NB: all LDREX variants (incl. thumb) occupy 4 bytes */
7458 gen_alignment_check(s, 4, (target_ulong)1 << size, addr);
7461 switch (size) {
7462 case 0:
7463 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7464 break;
7465 case 1:
7466 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7467 break;
7468 case 2:
7469 case 3:
7470 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7471 break;
7472 default:
7473 abort();
7476 if (size == 3) {
7477 TCGv_i32 tmp2 = tcg_temp_new_i32();
7478 TCGv_i32 tmp3 = tcg_temp_new_i32();
7480 tcg_gen_addi_i32(tmp2, addr, 4);
7481 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7482 tcg_temp_free_i32(tmp2);
7483 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7484 store_reg(s, rt2, tmp3);
7485 } else {
7486 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7489 store_reg(s, rt, tmp);
7490 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7493 static void gen_clrex(DisasContext *s)
7495 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7498 #ifdef CONFIG_USER_ONLY
7499 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7500 TCGv_i32 addr, int size)
7502 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7503 tcg_gen_movi_i32(cpu_exclusive_info,
7504 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7505 gen_exception_internal_insn(s, 4, EXCP_STREX);
7507 #else
7508 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7509 TCGv_i32 addr, int size)
7511 TCGv_i32 tmp;
7512 TCGv_i64 val64, extaddr;
7513 TCGLabel *done_label;
7514 TCGLabel *fail_label;
7516 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7517 [addr] = {Rt};
7518 {Rd} = 0;
7519 } else {
7520 {Rd} = 1;
7521 } */
7522 fail_label = gen_new_label();
7523 done_label = gen_new_label();
7524 extaddr = tcg_temp_new_i64();
7525 tcg_gen_extu_i32_i64(extaddr, addr);
7526 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7527 tcg_temp_free_i64(extaddr);
7529 tmp = tcg_temp_new_i32();
7530 switch (size) {
7531 case 0:
7532 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7533 break;
7534 case 1:
7535 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7536 break;
7537 case 2:
7538 case 3:
7539 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7540 break;
7541 default:
7542 abort();
7545 val64 = tcg_temp_new_i64();
7546 if (size == 3) {
7547 TCGv_i32 tmp2 = tcg_temp_new_i32();
7548 TCGv_i32 tmp3 = tcg_temp_new_i32();
7549 tcg_gen_addi_i32(tmp2, addr, 4);
7550 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7551 tcg_temp_free_i32(tmp2);
7552 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7553 tcg_temp_free_i32(tmp3);
7554 } else {
7555 tcg_gen_extu_i32_i64(val64, tmp);
7557 tcg_temp_free_i32(tmp);
7559 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7560 tcg_temp_free_i64(val64);
7562 tmp = load_reg(s, rt);
7563 switch (size) {
7564 case 0:
7565 gen_aa32_st8(tmp, addr, get_mem_index(s));
7566 break;
7567 case 1:
7568 gen_aa32_st16(tmp, addr, get_mem_index(s));
7569 break;
7570 case 2:
7571 case 3:
7572 gen_aa32_st32(tmp, addr, get_mem_index(s));
7573 break;
7574 default:
7575 abort();
7577 tcg_temp_free_i32(tmp);
7578 if (size == 3) {
7579 tcg_gen_addi_i32(addr, addr, 4);
7580 tmp = load_reg(s, rt2);
7581 gen_aa32_st32(tmp, addr, get_mem_index(s));
7582 tcg_temp_free_i32(tmp);
7584 tcg_gen_movi_i32(cpu_R[rd], 0);
7585 tcg_gen_br(done_label);
7586 gen_set_label(fail_label);
7587 tcg_gen_movi_i32(cpu_R[rd], 1);
7588 gen_set_label(done_label);
7589 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7591 #endif
7593 /* gen_srs:
7595 * @s: DisasContext
7596 * @mode: mode field from insn (which stack to store to)
7597 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7598 * @writeback: true if writeback bit set
7600 * Generate code for the SRS (Store Return State) insn.
7602 static void gen_srs(DisasContext *s,
7603 uint32_t mode, uint32_t amode, bool writeback)
7605 int32_t offset;
7606 TCGv_i32 addr = tcg_temp_new_i32();
7607 TCGv_i32 tmp = tcg_const_i32(mode);
7608 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7609 tcg_temp_free_i32(tmp);
7610 switch (amode) {
7611 case 0: /* DA */
7612 offset = -4;
7613 break;
7614 case 1: /* IA */
7615 offset = 0;
7616 break;
7617 case 2: /* DB */
7618 offset = -8;
7619 break;
7620 case 3: /* IB */
7621 offset = 4;
7622 break;
7623 default:
7624 abort();
7626 tcg_gen_addi_i32(addr, addr, offset);
7627 tmp = load_reg(s, 14);
7628 gen_aa32_st32(tmp, addr, get_mem_index(s));
7629 tcg_temp_free_i32(tmp);
7630 tmp = load_cpu_field(spsr);
7631 tcg_gen_addi_i32(addr, addr, 4);
7632 gen_aa32_st32(tmp, addr, get_mem_index(s));
7633 tcg_temp_free_i32(tmp);
7634 if (writeback) {
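/* The writeback value must account for the two words just stored and the +4
 * applied between the stores, so that SP ends up 8 below its original value
 * for the decrement modes (DA/DB) and 8 above it for the increment modes
 * (IA/IB).
 */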
7635 switch (amode) {
7636 case 0:
7637 offset = -8;
7638 break;
7639 case 1:
7640 offset = 4;
7641 break;
7642 case 2:
7643 offset = -4;
7644 break;
7645 case 3:
7646 offset = 0;
7647 break;
7648 default:
7649 abort();
7651 tcg_gen_addi_i32(addr, addr, offset);
7652 tmp = tcg_const_i32(mode);
7653 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7654 tcg_temp_free_i32(tmp);
7656 tcg_temp_free_i32(addr);
7659 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7661 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7662 TCGv_i32 tmp;
7663 TCGv_i32 tmp2;
7664 TCGv_i32 tmp3;
7665 TCGv_i32 addr;
7666 TCGv_i64 tmp64;
7668 /* M variants do not implement ARM mode. */
7669 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7670 goto illegal_op;
7672 cond = insn >> 28;
7673 if (cond == 0xf){
7674 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7675 * choose to UNDEF. In ARMv5 and above the space is used
7676 * for miscellaneous unconditional instructions.
7678 ARCH(5);
7680 /* Unconditional instructions. */
7681 if (((insn >> 25) & 7) == 1) {
7682 /* NEON Data processing. */
7683 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7684 goto illegal_op;
7687 if (disas_neon_data_insn(s, insn)) {
7688 goto illegal_op;
7690 return;
7692 if ((insn & 0x0f100000) == 0x04000000) {
7693 /* NEON load/store. */
7694 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7695 goto illegal_op;
7698 if (disas_neon_ls_insn(s, insn)) {
7699 goto illegal_op;
7701 return;
7703 if ((insn & 0x0f000e10) == 0x0e000a00) {
7704 /* VFP. */
7705 if (disas_vfp_insn(s, insn)) {
7706 goto illegal_op;
7708 return;
7710 if (((insn & 0x0f30f000) == 0x0510f000) ||
7711 ((insn & 0x0f30f010) == 0x0710f000)) {
7712 if ((insn & (1 << 22)) == 0) {
7713 /* PLDW; v7MP */
7714 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7715 goto illegal_op;
7718 /* Otherwise PLD; v5TE+ */
7719 ARCH(5TE);
7720 return;
7722 if (((insn & 0x0f70f000) == 0x0450f000) ||
7723 ((insn & 0x0f70f010) == 0x0650f000)) {
7724 ARCH(7);
7725 return; /* PLI; V7 */
7727 if (((insn & 0x0f700000) == 0x04100000) ||
7728 ((insn & 0x0f700010) == 0x06100000)) {
7729 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7730 goto illegal_op;
7732 return; /* v7MP: Unallocated memory hint: must NOP */
7735 if ((insn & 0x0ffffdff) == 0x01010000) {
7736 ARCH(6);
7737 /* setend */
7738 if (((insn >> 9) & 1) != s->bswap_code) {
7739 /* Dynamic endianness switching not implemented. */
7740 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7741 goto illegal_op;
7743 return;
7744 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7745 switch ((insn >> 4) & 0xf) {
7746 case 1: /* clrex */
7747 ARCH(6K);
7748 gen_clrex(s);
7749 return;
7750 case 4: /* dsb */
7751 case 5: /* dmb */
7752 ARCH(7);
7753 /* We don't emulate caches so these are a no-op. */
7754 return;
7755 case 6: /* isb */
7756 /* We need to break the TB after this insn to execute
7757 * self-modifying code correctly and also to take
7758 * any pending interrupts immediately.
7760 gen_lookup_tb(s);
7761 return;
7762 default:
7763 goto illegal_op;
7765 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7766 /* srs */
7767 if (IS_USER(s)) {
7768 goto illegal_op;
7770 ARCH(6);
7771 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7772 return;
7773 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7774 /* rfe */
7775 int32_t offset;
7776 if (IS_USER(s))
7777 goto illegal_op;
7778 ARCH(6);
7779 rn = (insn >> 16) & 0xf;
7780 addr = load_reg(s, rn);
7781 i = (insn >> 23) & 3;
7782 switch (i) {
7783 case 0: offset = -4; break; /* DA */
7784 case 1: offset = 0; break; /* IA */
7785 case 2: offset = -8; break; /* DB */
7786 case 3: offset = 4; break; /* IB */
7787 default: abort();
7789 if (offset)
7790 tcg_gen_addi_i32(addr, addr, offset);
7791 /* Load PC into tmp and CPSR into tmp2. */
7792 tmp = tcg_temp_new_i32();
7793 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7794 tcg_gen_addi_i32(addr, addr, 4);
7795 tmp2 = tcg_temp_new_i32();
7796 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7797 if (insn & (1 << 21)) {
7798 /* Base writeback. */
7799 switch (i) {
7800 case 0: offset = -8; break;
7801 case 1: offset = 4; break;
7802 case 2: offset = -4; break;
7803 case 3: offset = 0; break;
7804 default: abort();
7806 if (offset)
7807 tcg_gen_addi_i32(addr, addr, offset);
7808 store_reg(s, rn, addr);
7809 } else {
7810 tcg_temp_free_i32(addr);
7812 gen_rfe(s, tmp, tmp2);
7813 return;
7814 } else if ((insn & 0x0e000000) == 0x0a000000) {
7815 /* branch link and change to thumb (blx <offset>) */
7816 int32_t offset;
7818 val = (uint32_t)s->pc;
7819 tmp = tcg_temp_new_i32();
7820 tcg_gen_movi_i32(tmp, val);
7821 store_reg(s, 14, tmp);
7822 /* Sign-extend the 24-bit offset */
7823 offset = (((int32_t)insn) << 8) >> 8;
7824 /* offset * 4 + bit24 * 2 + (thumb bit) */
7825 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7826 /* pipeline offset */
7827 val += 4;
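/* i.e. target = address of this insn + 8 + (SignExtend(imm24) << 2)
 * + (H bit << 1), with bit 0 set so that gen_bx_im switches to Thumb state.
 */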
7828 /* protected by ARCH(5); above, near the start of uncond block */
7829 gen_bx_im(s, val);
7830 return;
7831 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7832 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7833 /* iWMMXt register transfer. */
7834 if (extract32(s->c15_cpar, 1, 1)) {
7835 if (!disas_iwmmxt_insn(s, insn)) {
7836 return;
7840 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7841 /* Coprocessor double register transfer. */
7842 ARCH(5TE);
7843 } else if ((insn & 0x0f000010) == 0x0e000010) {
7844 /* Additional coprocessor register transfer. */
7845 } else if ((insn & 0x0ff10020) == 0x01000000) {
7846 uint32_t mask;
7847 uint32_t val;
7848 /* cps (privileged) */
7849 if (IS_USER(s))
7850 return;
7851 mask = val = 0;
7852 if (insn & (1 << 19)) {
7853 if (insn & (1 << 8))
7854 mask |= CPSR_A;
7855 if (insn & (1 << 7))
7856 mask |= CPSR_I;
7857 if (insn & (1 << 6))
7858 mask |= CPSR_F;
7859 if (insn & (1 << 18))
7860 val |= mask;
7862 if (insn & (1 << 17)) {
7863 mask |= CPSR_M;
7864 val |= (insn & 0x1f);
7866 if (mask) {
7867 gen_set_psr_im(s, mask, 0, val);
7869 return;
7871 goto illegal_op;
7873 if (cond != 0xe) {
7874 /* if not always execute, we generate a conditional jump to
7875 next instruction */
7876 s->condlabel = gen_new_label();
7877 arm_gen_test_cc(cond ^ 1, s->condlabel);
7878 s->condjmp = 1;
7880 if ((insn & 0x0f900000) == 0x03000000) {
7881 if ((insn & (1 << 21)) == 0) {
7882 ARCH(6T2);
7883 rd = (insn >> 12) & 0xf;
7884 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7885 if ((insn & (1 << 22)) == 0) {
7886 /* MOVW */
7887 tmp = tcg_temp_new_i32();
7888 tcg_gen_movi_i32(tmp, val);
7889 } else {
7890 /* MOVT */
7891 tmp = load_reg(s, rd);
7892 tcg_gen_ext16u_i32(tmp, tmp);
7893 tcg_gen_ori_i32(tmp, tmp, val << 16);
7895 store_reg(s, rd, tmp);
7896 } else {
7897 if (((insn >> 12) & 0xf) != 0xf)
7898 goto illegal_op;
7899 if (((insn >> 16) & 0xf) == 0) {
7900 gen_nop_hint(s, insn & 0xff);
7901 } else {
7902 /* CPSR = immediate */
7903 val = insn & 0xff;
7904 shift = ((insn >> 8) & 0xf) * 2;
7905 if (shift)
7906 val = (val >> shift) | (val << (32 - shift));
7907 i = ((insn & (1 << 22)) != 0);
7908 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7909 i, val)) {
7910 goto illegal_op;
7914 } else if ((insn & 0x0f900000) == 0x01000000
7915 && (insn & 0x00000090) != 0x00000090) {
7916 /* miscellaneous instructions */
7917 op1 = (insn >> 21) & 3;
7918 sh = (insn >> 4) & 0xf;
7919 rm = insn & 0xf;
7920 switch (sh) {
7921 case 0x0: /* move program status register */
7922 if (op1 & 1) {
7923 /* PSR = reg */
7924 tmp = load_reg(s, rm);
7925 i = ((op1 & 2) != 0);
7926 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
7927 goto illegal_op;
7928 } else {
7929 /* reg = PSR */
7930 rd = (insn >> 12) & 0xf;
7931 if (op1 & 2) {
7932 if (IS_USER(s))
7933 goto illegal_op;
7934 tmp = load_cpu_field(spsr);
7935 } else {
7936 tmp = tcg_temp_new_i32();
7937 gen_helper_cpsr_read(tmp, cpu_env);
7939 store_reg(s, rd, tmp);
7941 break;
7942 case 0x1:
7943 if (op1 == 1) {
7944 /* branch/exchange thumb (bx). */
7945 ARCH(4T);
7946 tmp = load_reg(s, rm);
7947 gen_bx(s, tmp);
7948 } else if (op1 == 3) {
7949 /* clz */
7950 ARCH(5);
7951 rd = (insn >> 12) & 0xf;
7952 tmp = load_reg(s, rm);
7953 gen_helper_clz(tmp, tmp);
7954 store_reg(s, rd, tmp);
7955 } else {
7956 goto illegal_op;
7958 break;
7959 case 0x2:
7960 if (op1 == 1) {
7961 ARCH(5J); /* bxj */
7962 /* Trivial implementation equivalent to bx. */
7963 tmp = load_reg(s, rm);
7964 gen_bx(s, tmp);
7965 } else {
7966 goto illegal_op;
7968 break;
7969 case 0x3:
7970 if (op1 != 1)
7971 goto illegal_op;
7973 ARCH(5);
7974 /* branch link/exchange thumb (blx) */
7975 tmp = load_reg(s, rm);
7976 tmp2 = tcg_temp_new_i32();
7977 tcg_gen_movi_i32(tmp2, s->pc);
7978 store_reg(s, 14, tmp2);
7979 gen_bx(s, tmp);
7980 break;
7981 case 0x4:
7983 /* crc32/crc32c */
7984 uint32_t c = extract32(insn, 8, 4);
7986 /* Check this CPU supports ARMv8 CRC instructions.
7987 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7988 * Bits 8, 10 and 11 should be zero.
7990 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
7991 (c & 0xd) != 0) {
7992 goto illegal_op;
7995 rn = extract32(insn, 16, 4);
7996 rd = extract32(insn, 12, 4);
7998 tmp = load_reg(s, rn);
7999 tmp2 = load_reg(s, rm);
8000 if (op1 == 0) {
8001 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8002 } else if (op1 == 1) {
8003 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8005 tmp3 = tcg_const_i32(1 << op1);
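/* 1 << op1 is the operand width in bytes: 1, 2 or 4 for the B, H and W
 * forms of CRC32/CRC32C, matching the masking applied above.
 */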
8006 if (c & 0x2) {
8007 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8008 } else {
8009 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8011 tcg_temp_free_i32(tmp2);
8012 tcg_temp_free_i32(tmp3);
8013 store_reg(s, rd, tmp);
8014 break;
8016 case 0x5: /* saturating add/subtract */
8017 ARCH(5TE);
8018 rd = (insn >> 12) & 0xf;
8019 rn = (insn >> 16) & 0xf;
8020 tmp = load_reg(s, rm);
8021 tmp2 = load_reg(s, rn);
8022 if (op1 & 2)
8023 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8024 if (op1 & 1)
8025 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8026 else
8027 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8028 tcg_temp_free_i32(tmp2);
8029 store_reg(s, rd, tmp);
8030 break;
8031 case 7:
8033 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8034 switch (op1) {
8035 case 1:
8036 /* bkpt */
8037 ARCH(5);
8038 gen_exception_insn(s, 4, EXCP_BKPT,
8039 syn_aa32_bkpt(imm16, false),
8040 default_exception_el(s));
8041 break;
8042 case 2:
8043 /* Hypervisor call (v7) */
8044 ARCH(7);
8045 if (IS_USER(s)) {
8046 goto illegal_op;
8048 gen_hvc(s, imm16);
8049 break;
8050 case 3:
8051 /* Secure monitor call (v6+) */
8052 ARCH(6K);
8053 if (IS_USER(s)) {
8054 goto illegal_op;
8056 gen_smc(s);
8057 break;
8058 default:
8059 goto illegal_op;
8061 break;
8063 case 0x8: /* signed multiply */
8064 case 0xa:
8065 case 0xc:
8066 case 0xe:
8067 ARCH(5TE);
8068 rs = (insn >> 8) & 0xf;
8069 rn = (insn >> 12) & 0xf;
8070 rd = (insn >> 16) & 0xf;
8071 if (op1 == 1) {
8072 /* (32 * 16) >> 16 */
8073 tmp = load_reg(s, rm);
8074 tmp2 = load_reg(s, rs);
8075 if (sh & 4)
8076 tcg_gen_sari_i32(tmp2, tmp2, 16);
8077 else
8078 gen_sxth(tmp2);
8079 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8080 tcg_gen_shri_i64(tmp64, tmp64, 16);
8081 tmp = tcg_temp_new_i32();
8082 tcg_gen_extrl_i64_i32(tmp, tmp64);
8083 tcg_temp_free_i64(tmp64);
8084 if ((sh & 2) == 0) {
8085 tmp2 = load_reg(s, rn);
8086 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8087 tcg_temp_free_i32(tmp2);
8089 store_reg(s, rd, tmp);
8090 } else {
8091 /* 16 * 16 */
8092 tmp = load_reg(s, rm);
8093 tmp2 = load_reg(s, rs);
8094 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8095 tcg_temp_free_i32(tmp2);
8096 if (op1 == 2) {
8097 tmp64 = tcg_temp_new_i64();
8098 tcg_gen_ext_i32_i64(tmp64, tmp);
8099 tcg_temp_free_i32(tmp);
8100 gen_addq(s, tmp64, rn, rd);
8101 gen_storeq_reg(s, rn, rd, tmp64);
8102 tcg_temp_free_i64(tmp64);
8103 } else {
8104 if (op1 == 0) {
8105 tmp2 = load_reg(s, rn);
8106 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8107 tcg_temp_free_i32(tmp2);
8109 store_reg(s, rd, tmp);
8112 break;
8113 default:
8114 goto illegal_op;
8116 } else if (((insn & 0x0e000000) == 0 &&
8117 (insn & 0x00000090) != 0x90) ||
8118 ((insn & 0x0e000000) == (1 << 25))) {
8119 int set_cc, logic_cc, shiftop;
8121 op1 = (insn >> 21) & 0xf;
8122 set_cc = (insn >> 20) & 1;
8123 logic_cc = table_logic_cc[op1] & set_cc;
8125 /* data processing instruction */
8126 if (insn & (1 << 25)) {
8127 /* immediate operand */
8128 val = insn & 0xff;
8129 shift = ((insn >> 8) & 0xf) * 2;
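/* ARM modified immediate: an 8-bit value rotated right by twice the
 * 4-bit rotate field.
 */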
8130 if (shift) {
8131 val = (val >> shift) | (val << (32 - shift));
8133 tmp2 = tcg_temp_new_i32();
8134 tcg_gen_movi_i32(tmp2, val);
8135 if (logic_cc && shift) {
8136 gen_set_CF_bit31(tmp2);
8138 } else {
8139 /* register */
8140 rm = (insn) & 0xf;
8141 tmp2 = load_reg(s, rm);
8142 shiftop = (insn >> 5) & 3;
8143 if (!(insn & (1 << 4))) {
8144 shift = (insn >> 7) & 0x1f;
8145 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8146 } else {
8147 rs = (insn >> 8) & 0xf;
8148 tmp = load_reg(s, rs);
8149 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8152 if (op1 != 0x0f && op1 != 0x0d) {
8153 rn = (insn >> 16) & 0xf;
8154 tmp = load_reg(s, rn);
8155 } else {
8156 TCGV_UNUSED_I32(tmp);
8158 rd = (insn >> 12) & 0xf;
8159 switch(op1) {
8160 case 0x00:
8161 tcg_gen_and_i32(tmp, tmp, tmp2);
8162 if (logic_cc) {
8163 gen_logic_CC(tmp);
8165 store_reg_bx(s, rd, tmp);
8166 break;
8167 case 0x01:
8168 tcg_gen_xor_i32(tmp, tmp, tmp2);
8169 if (logic_cc) {
8170 gen_logic_CC(tmp);
8172 store_reg_bx(s, rd, tmp);
8173 break;
8174 case 0x02:
8175 if (set_cc && rd == 15) {
8176 /* SUBS r15, ... is used for exception return. */
8177 if (IS_USER(s)) {
8178 goto illegal_op;
8180 gen_sub_CC(tmp, tmp, tmp2);
8181 gen_exception_return(s, tmp);
8182 } else {
8183 if (set_cc) {
8184 gen_sub_CC(tmp, tmp, tmp2);
8185 } else {
8186 tcg_gen_sub_i32(tmp, tmp, tmp2);
8188 store_reg_bx(s, rd, tmp);
8190 break;
8191 case 0x03:
8192 if (set_cc) {
8193 gen_sub_CC(tmp, tmp2, tmp);
8194 } else {
8195 tcg_gen_sub_i32(tmp, tmp2, tmp);
8197 store_reg_bx(s, rd, tmp);
8198 break;
8199 case 0x04:
8200 if (set_cc) {
8201 gen_add_CC(tmp, tmp, tmp2);
8202 } else {
8203 tcg_gen_add_i32(tmp, tmp, tmp2);
8205 store_reg_bx(s, rd, tmp);
8206 break;
8207 case 0x05:
8208 if (set_cc) {
8209 gen_adc_CC(tmp, tmp, tmp2);
8210 } else {
8211 gen_add_carry(tmp, tmp, tmp2);
8213 store_reg_bx(s, rd, tmp);
8214 break;
8215 case 0x06:
8216 if (set_cc) {
8217 gen_sbc_CC(tmp, tmp, tmp2);
8218 } else {
8219 gen_sub_carry(tmp, tmp, tmp2);
8221 store_reg_bx(s, rd, tmp);
8222 break;
8223 case 0x07:
8224 if (set_cc) {
8225 gen_sbc_CC(tmp, tmp2, tmp);
8226 } else {
8227 gen_sub_carry(tmp, tmp2, tmp);
8229 store_reg_bx(s, rd, tmp);
8230 break;
8231 case 0x08:
8232 if (set_cc) {
8233 tcg_gen_and_i32(tmp, tmp, tmp2);
8234 gen_logic_CC(tmp);
8236 tcg_temp_free_i32(tmp);
8237 break;
8238 case 0x09:
8239 if (set_cc) {
8240 tcg_gen_xor_i32(tmp, tmp, tmp2);
8241 gen_logic_CC(tmp);
8243 tcg_temp_free_i32(tmp);
8244 break;
8245 case 0x0a:
8246 if (set_cc) {
8247 gen_sub_CC(tmp, tmp, tmp2);
8249 tcg_temp_free_i32(tmp);
8250 break;
8251 case 0x0b:
8252 if (set_cc) {
8253 gen_add_CC(tmp, tmp, tmp2);
8255 tcg_temp_free_i32(tmp);
8256 break;
8257 case 0x0c:
8258 tcg_gen_or_i32(tmp, tmp, tmp2);
8259 if (logic_cc) {
8260 gen_logic_CC(tmp);
8262 store_reg_bx(s, rd, tmp);
8263 break;
8264 case 0x0d:
8265 if (logic_cc && rd == 15) {
8266 /* MOVS r15, ... is used for exception return. */
8267 if (IS_USER(s)) {
8268 goto illegal_op;
8270 gen_exception_return(s, tmp2);
8271 } else {
8272 if (logic_cc) {
8273 gen_logic_CC(tmp2);
8275 store_reg_bx(s, rd, tmp2);
8277 break;
8278 case 0x0e:
8279 tcg_gen_andc_i32(tmp, tmp, tmp2);
8280 if (logic_cc) {
8281 gen_logic_CC(tmp);
8283 store_reg_bx(s, rd, tmp);
8284 break;
8285 default:
8286 case 0x0f:
8287 tcg_gen_not_i32(tmp2, tmp2);
8288 if (logic_cc) {
8289 gen_logic_CC(tmp2);
8291 store_reg_bx(s, rd, tmp2);
8292 break;
8294 if (op1 != 0x0f && op1 != 0x0d) {
8295 tcg_temp_free_i32(tmp2);
8297 } else {
8298 /* other instructions */
8299 op1 = (insn >> 24) & 0xf;
8300 switch(op1) {
8301 case 0x0:
8302 case 0x1:
8303 /* multiplies, extra load/stores */
8304 sh = (insn >> 5) & 3;
8305 if (sh == 0) {
8306 if (op1 == 0x0) {
8307 rd = (insn >> 16) & 0xf;
8308 rn = (insn >> 12) & 0xf;
8309 rs = (insn >> 8) & 0xf;
8310 rm = (insn) & 0xf;
8311 op1 = (insn >> 20) & 0xf;
8312 switch (op1) {
8313 case 0: case 1: case 2: case 3: case 6:
8314 /* 32 bit mul */
8315 tmp = load_reg(s, rs);
8316 tmp2 = load_reg(s, rm);
8317 tcg_gen_mul_i32(tmp, tmp, tmp2);
8318 tcg_temp_free_i32(tmp2);
8319 if (insn & (1 << 22)) {
8320 /* Subtract (mls) */
8321 ARCH(6T2);
8322 tmp2 = load_reg(s, rn);
8323 tcg_gen_sub_i32(tmp, tmp2, tmp);
8324 tcg_temp_free_i32(tmp2);
8325 } else if (insn & (1 << 21)) {
8326 /* Add */
8327 tmp2 = load_reg(s, rn);
8328 tcg_gen_add_i32(tmp, tmp, tmp2);
8329 tcg_temp_free_i32(tmp2);
8331 if (insn & (1 << 20))
8332 gen_logic_CC(tmp);
8333 store_reg(s, rd, tmp);
8334 break;
8335 case 4:
8336 /* 64 bit mul double accumulate (UMAAL) */
8337 ARCH(6);
8338 tmp = load_reg(s, rs);
8339 tmp2 = load_reg(s, rm);
8340 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8341 gen_addq_lo(s, tmp64, rn);
8342 gen_addq_lo(s, tmp64, rd);
8343 gen_storeq_reg(s, rn, rd, tmp64);
8344 tcg_temp_free_i64(tmp64);
8345 break;
8346 case 8: case 9: case 10: case 11:
8347 case 12: case 13: case 14: case 15:
8348 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8349 tmp = load_reg(s, rs);
8350 tmp2 = load_reg(s, rm);
8351 if (insn & (1 << 22)) {
8352 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8353 } else {
8354 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8356 if (insn & (1 << 21)) { /* mult accumulate */
8357 TCGv_i32 al = load_reg(s, rn);
8358 TCGv_i32 ah = load_reg(s, rd);
8359 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8360 tcg_temp_free_i32(al);
8361 tcg_temp_free_i32(ah);
8363 if (insn & (1 << 20)) {
8364 gen_logicq_cc(tmp, tmp2);
8366 store_reg(s, rn, tmp);
8367 store_reg(s, rd, tmp2);
8368 break;
8369 default:
8370 goto illegal_op;
8372 } else {
8373 rn = (insn >> 16) & 0xf;
8374 rd = (insn >> 12) & 0xf;
8375 if (insn & (1 << 23)) {
8376 /* load/store exclusive */
8377 int op2 = (insn >> 8) & 3;
8378 op1 = (insn >> 21) & 0x3;
8380 switch (op2) {
8381 case 0: /* lda/stl */
8382 if (op1 == 1) {
8383 goto illegal_op;
8385 ARCH(8);
8386 break;
8387 case 1: /* reserved */
8388 goto illegal_op;
8389 case 2: /* ldaex/stlex */
8390 ARCH(8);
8391 break;
8392 case 3: /* ldrex/strex */
8393 if (op1) {
8394 ARCH(6K);
8395 } else {
8396 ARCH(6);
8398 break;
8401 addr = tcg_temp_local_new_i32();
8402 load_reg_var(s, addr, rn);
8404 /* Since the emulation does not have barriers,
8405 the acquire/release semantics need no special
8406 handling */
8407 if (op2 == 0) {
8408 if (insn & (1 << 20)) {
8409 tmp = tcg_temp_new_i32();
8410 switch (op1) {
8411 case 0: /* lda */
8412 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8413 break;
8414 case 2: /* ldab */
8415 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8416 break;
8417 case 3: /* ldah */
8418 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8419 break;
8420 default:
8421 abort();
8423 store_reg(s, rd, tmp);
8424 } else {
8425 rm = insn & 0xf;
8426 tmp = load_reg(s, rm);
8427 switch (op1) {
8428 case 0: /* stl */
8429 gen_aa32_st32(tmp, addr, get_mem_index(s));
8430 break;
8431 case 2: /* stlb */
8432 gen_aa32_st8(tmp, addr, get_mem_index(s));
8433 break;
8434 case 3: /* stlh */
8435 gen_aa32_st16(tmp, addr, get_mem_index(s));
8436 break;
8437 default:
8438 abort();
8440 tcg_temp_free_i32(tmp);
8442 } else if (insn & (1 << 20)) {
8443 switch (op1) {
8444 case 0: /* ldrex */
8445 gen_load_exclusive(s, rd, 15, addr, 2);
8446 break;
8447 case 1: /* ldrexd */
8448 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8449 break;
8450 case 2: /* ldrexb */
8451 gen_load_exclusive(s, rd, 15, addr, 0);
8452 break;
8453 case 3: /* ldrexh */
8454 gen_load_exclusive(s, rd, 15, addr, 1);
8455 break;
8456 default:
8457 abort();
8459 } else {
8460 rm = insn & 0xf;
8461 switch (op1) {
8462 case 0: /* strex */
8463 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8464 break;
8465 case 1: /* strexd */
8466 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8467 break;
8468 case 2: /* strexb */
8469 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8470 break;
8471 case 3: /* strexh */
8472 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8473 break;
8474 default:
8475 abort();
8478 tcg_temp_free_i32(addr);
8479 } else {
8480 /* SWP instruction */
8481 rm = (insn) & 0xf;
8483 /* ??? This is not really atomic. However we know
8484 we never have multiple CPUs running in parallel,
8485 so it is good enough. */
8486 addr = load_reg(s, rn);
8487 tmp = load_reg(s, rm);
8488 tmp2 = tcg_temp_new_i32();
8489 if (insn & (1 << 22)) {
8490 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8491 gen_aa32_st8(tmp, addr, get_mem_index(s));
8492 } else {
8493 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8494 gen_aa32_st32(tmp, addr, get_mem_index(s));
8496 tcg_temp_free_i32(tmp);
8497 tcg_temp_free_i32(addr);
8498 store_reg(s, rd, tmp2);
8501 } else {
8502 int address_offset;
8503 bool load = insn & (1 << 20);
8504 bool doubleword = false;
8505 /* Misc load/store */
8506 rn = (insn >> 16) & 0xf;
8507 rd = (insn >> 12) & 0xf;
8509 if (!load && (sh & 2)) {
8510 /* doubleword */
8511 ARCH(5TE);
8512 if (rd & 1) {
8513 /* UNPREDICTABLE; we choose to UNDEF */
8514 goto illegal_op;
8516 load = (sh & 1) == 0;
8517 doubleword = true;
8520 addr = load_reg(s, rn);
8521 if (insn & (1 << 24))
8522 gen_add_datah_offset(s, insn, 0, addr);
8523 address_offset = 0;
8525 if (doubleword) {
8526 if (!load) {
8527 /* store */
8528 tmp = load_reg(s, rd);
8529 gen_aa32_st32(tmp, addr, get_mem_index(s));
8530 tcg_temp_free_i32(tmp);
8531 tcg_gen_addi_i32(addr, addr, 4);
8532 tmp = load_reg(s, rd + 1);
8533 gen_aa32_st32(tmp, addr, get_mem_index(s));
8534 tcg_temp_free_i32(tmp);
8535 } else {
8536 /* load */
8537 tmp = tcg_temp_new_i32();
8538 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8539 store_reg(s, rd, tmp);
8540 tcg_gen_addi_i32(addr, addr, 4);
8541 tmp = tcg_temp_new_i32();
8542 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8543 rd++;
8545 address_offset = -4;
8546 } else if (load) {
8547 /* load */
8548 tmp = tcg_temp_new_i32();
8549 switch (sh) {
8550 case 1:
8551 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8552 break;
8553 case 2:
8554 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8555 break;
8556 default:
8557 case 3:
8558 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8559 break;
8561 } else {
8562 /* store */
8563 tmp = load_reg(s, rd);
8564 gen_aa32_st16(tmp, addr, get_mem_index(s));
8565 tcg_temp_free_i32(tmp);
8567 /* Perform base writeback before the loaded value to
8568 ensure correct behavior with overlapping index registers.
8569 ldrd with base writeback is undefined if the
8570 destination and index registers overlap. */
8571 if (!(insn & (1 << 24))) {
8572 gen_add_datah_offset(s, insn, address_offset, addr);
8573 store_reg(s, rn, addr);
8574 } else if (insn & (1 << 21)) {
8575 if (address_offset)
8576 tcg_gen_addi_i32(addr, addr, address_offset);
8577 store_reg(s, rn, addr);
8578 } else {
8579 tcg_temp_free_i32(addr);
8581 if (load) {
8582 /* Complete the load. */
8583 store_reg(s, rd, tmp);
8586 break;
8587 case 0x4:
8588 case 0x5:
8589 goto do_ldst;
8590 case 0x6:
8591 case 0x7:
8592 if (insn & (1 << 4)) {
8593 ARCH(6);
8594 /* ARMv6 Media instructions. */
8595 rm = insn & 0xf;
8596 rn = (insn >> 16) & 0xf;
8597 rd = (insn >> 12) & 0xf;
8598 rs = (insn >> 8) & 0xf;
8599 switch ((insn >> 23) & 3) {
8600 case 0: /* Parallel add/subtract. */
8601 op1 = (insn >> 20) & 7;
8602 tmp = load_reg(s, rn);
8603 tmp2 = load_reg(s, rm);
8604 sh = (insn >> 5) & 7;
8605 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8606 goto illegal_op;
8607 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8608 tcg_temp_free_i32(tmp2);
8609 store_reg(s, rd, tmp);
8610 break;
8611 case 1:
8612 if ((insn & 0x00700020) == 0) {
8613 /* Halfword pack. */
8614 tmp = load_reg(s, rn);
8615 tmp2 = load_reg(s, rm);
8616 shift = (insn >> 7) & 0x1f;
8617 if (insn & (1 << 6)) {
8618 /* pkhtb */
8619 if (shift == 0)
8620 shift = 31;
8621 tcg_gen_sari_i32(tmp2, tmp2, shift);
8622 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8623 tcg_gen_ext16u_i32(tmp2, tmp2);
8624 } else {
8625 /* pkhbt */
8626 if (shift)
8627 tcg_gen_shli_i32(tmp2, tmp2, shift);
8628 tcg_gen_ext16u_i32(tmp, tmp);
8629 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8631 tcg_gen_or_i32(tmp, tmp, tmp2);
8632 tcg_temp_free_i32(tmp2);
8633 store_reg(s, rd, tmp);
8634 } else if ((insn & 0x00200020) == 0x00200000) {
8635 /* [us]sat */
8636 tmp = load_reg(s, rm);
8637 shift = (insn >> 7) & 0x1f;
8638 if (insn & (1 << 6)) {
8639 if (shift == 0)
8640 shift = 31;
8641 tcg_gen_sari_i32(tmp, tmp, shift);
8642 } else {
8643 tcg_gen_shli_i32(tmp, tmp, shift);
8645 sh = (insn >> 16) & 0x1f;
8646 tmp2 = tcg_const_i32(sh);
8647 if (insn & (1 << 22))
8648 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8649 else
8650 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8651 tcg_temp_free_i32(tmp2);
8652 store_reg(s, rd, tmp);
8653 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8654 /* [us]sat16 */
8655 tmp = load_reg(s, rm);
8656 sh = (insn >> 16) & 0x1f;
8657 tmp2 = tcg_const_i32(sh);
8658 if (insn & (1 << 22))
8659 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8660 else
8661 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8662 tcg_temp_free_i32(tmp2);
8663 store_reg(s, rd, tmp);
8664 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8665 /* Select bytes. */
8666 tmp = load_reg(s, rn);
8667 tmp2 = load_reg(s, rm);
8668 tmp3 = tcg_temp_new_i32();
8669 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8670 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8671 tcg_temp_free_i32(tmp3);
8672 tcg_temp_free_i32(tmp2);
8673 store_reg(s, rd, tmp);
8674 } else if ((insn & 0x000003e0) == 0x00000060) {
8675 tmp = load_reg(s, rm);
8676 shift = (insn >> 10) & 3;
8677 /* ??? In many cases it's not necessary to do a
8678 rotate; a shift is sufficient. */
8679 if (shift != 0)
8680 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8681 op1 = (insn >> 20) & 7;
8682 switch (op1) {
8683 case 0: gen_sxtb16(tmp); break;
8684 case 2: gen_sxtb(tmp); break;
8685 case 3: gen_sxth(tmp); break;
8686 case 4: gen_uxtb16(tmp); break;
8687 case 6: gen_uxtb(tmp); break;
8688 case 7: gen_uxth(tmp); break;
8689 default: goto illegal_op;
8691 if (rn != 15) {
8692 tmp2 = load_reg(s, rn);
8693 if ((op1 & 3) == 0) {
8694 gen_add16(tmp, tmp2);
8695 } else {
8696 tcg_gen_add_i32(tmp, tmp, tmp2);
8697 tcg_temp_free_i32(tmp2);
8700 store_reg(s, rd, tmp);
8701 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8702 /* rev */
8703 tmp = load_reg(s, rm);
8704 if (insn & (1 << 22)) {
8705 if (insn & (1 << 7)) {
8706 gen_revsh(tmp);
8707 } else {
8708 ARCH(6T2);
8709 gen_helper_rbit(tmp, tmp);
8711 } else {
8712 if (insn & (1 << 7))
8713 gen_rev16(tmp);
8714 else
8715 tcg_gen_bswap32_i32(tmp, tmp);
8717 store_reg(s, rd, tmp);
8718 } else {
8719 goto illegal_op;
8721 break;
8722 case 2: /* Multiplies (Type 3). */
8723 switch ((insn >> 20) & 0x7) {
8724 case 5:
8725 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8726 /* op2 not 00x or 11x : UNDEF */
8727 goto illegal_op;
8729 /* Signed multiply most significant [accumulate].
8730 (SMMUL, SMMLA, SMMLS) */
8731 tmp = load_reg(s, rm);
8732 tmp2 = load_reg(s, rs);
8733 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8735 if (rd != 15) {
8736 tmp = load_reg(s, rd);
8737 if (insn & (1 << 6)) {
8738 tmp64 = gen_subq_msw(tmp64, tmp);
8739 } else {
8740 tmp64 = gen_addq_msw(tmp64, tmp);
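/* If the round flag (bit 5) is set, add 0x80000000 before taking the
 * high word below, so that the most-significant 32 bits are rounded
 * to nearest rather than truncated.
 */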
8743 if (insn & (1 << 5)) {
8744 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8746 tcg_gen_shri_i64(tmp64, tmp64, 32);
8747 tmp = tcg_temp_new_i32();
8748 tcg_gen_extrl_i64_i32(tmp, tmp64);
8749 tcg_temp_free_i64(tmp64);
8750 store_reg(s, rn, tmp);
8751 break;
8752 case 0:
8753 case 4:
8754 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8755 if (insn & (1 << 7)) {
8756 goto illegal_op;
8758 tmp = load_reg(s, rm);
8759 tmp2 = load_reg(s, rs);
8760 if (insn & (1 << 5))
8761 gen_swap_half(tmp2);
8762 gen_smul_dual(tmp, tmp2);
8763 if (insn & (1 << 22)) {
8764 /* smlald, smlsld */
8765 TCGv_i64 tmp64_2;
8767 tmp64 = tcg_temp_new_i64();
8768 tmp64_2 = tcg_temp_new_i64();
8769 tcg_gen_ext_i32_i64(tmp64, tmp);
8770 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8771 tcg_temp_free_i32(tmp);
8772 tcg_temp_free_i32(tmp2);
8773 if (insn & (1 << 6)) {
8774 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8775 } else {
8776 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8778 tcg_temp_free_i64(tmp64_2);
8779 gen_addq(s, tmp64, rd, rn);
8780 gen_storeq_reg(s, rd, rn, tmp64);
8781 tcg_temp_free_i64(tmp64);
8782 } else {
8783 /* smuad, smusd, smlad, smlsd */
8784 if (insn & (1 << 6)) {
8785 /* This subtraction cannot overflow. */
8786 tcg_gen_sub_i32(tmp, tmp, tmp2);
8787 } else {
8788 /* This addition cannot overflow 32 bits;
8789 * however it may overflow considered as a
8790 * signed operation, in which case we must set
8791 * the Q flag.
8792 */
8793 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8795 tcg_temp_free_i32(tmp2);
8796 if (rd != 15)
8798 tmp2 = load_reg(s, rd);
8799 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8800 tcg_temp_free_i32(tmp2);
8802 store_reg(s, rn, tmp);
8804 break;
8805 case 1:
8806 case 3:
8807 /* SDIV, UDIV */
8808 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
8809 goto illegal_op;
8811 if (((insn >> 5) & 7) || (rd != 15)) {
8812 goto illegal_op;
8814 tmp = load_reg(s, rm);
8815 tmp2 = load_reg(s, rs);
8816 if (insn & (1 << 21)) {
8817 gen_helper_udiv(tmp, tmp, tmp2);
8818 } else {
8819 gen_helper_sdiv(tmp, tmp, tmp2);
8821 tcg_temp_free_i32(tmp2);
8822 store_reg(s, rn, tmp);
8823 break;
8824 default:
8825 goto illegal_op;
8827 break;
8828 case 3:
8829 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8830 switch (op1) {
8831 case 0: /* Unsigned sum of absolute differences. */
8832 ARCH(6);
8833 tmp = load_reg(s, rm);
8834 tmp2 = load_reg(s, rs);
8835 gen_helper_usad8(tmp, tmp, tmp2);
8836 tcg_temp_free_i32(tmp2);
8837 if (rd != 15) {
8838 tmp2 = load_reg(s, rd);
8839 tcg_gen_add_i32(tmp, tmp, tmp2);
8840 tcg_temp_free_i32(tmp2);
8842 store_reg(s, rn, tmp);
8843 break;
8844 case 0x20: case 0x24: case 0x28: case 0x2c:
8845 /* Bitfield insert/clear. */
8846 ARCH(6T2);
8847 shift = (insn >> 7) & 0x1f;
8848 i = (insn >> 16) & 0x1f;
8849 if (i < shift) {
8850 /* UNPREDICTABLE; we choose to UNDEF */
8851 goto illegal_op;
8853 i = i + 1 - shift;
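/* 'shift' is the lsb of the field and 'i' is now its width
 * (msb + 1 - lsb).  BFC is the rm == 15 form: the field is written
 * with zeroes rather than with bits from rm.
 */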
8854 if (rm == 15) {
8855 tmp = tcg_temp_new_i32();
8856 tcg_gen_movi_i32(tmp, 0);
8857 } else {
8858 tmp = load_reg(s, rm);
8860 if (i != 32) {
8861 tmp2 = load_reg(s, rd);
8862 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8863 tcg_temp_free_i32(tmp2);
8865 store_reg(s, rd, tmp);
8866 break;
8867 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8868 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8869 ARCH(6T2);
8870 tmp = load_reg(s, rm);
8871 shift = (insn >> 7) & 0x1f;
8872 i = ((insn >> 16) & 0x1f) + 1;
8873 if (shift + i > 32)
8874 goto illegal_op;
8875 if (i < 32) {
8876 if (op1 & 0x20) {
8877 gen_ubfx(tmp, shift, (1u << i) - 1);
8878 } else {
8879 gen_sbfx(tmp, shift, i);
8882 store_reg(s, rd, tmp);
8883 break;
8884 default:
8885 goto illegal_op;
8887 break;
8889 break;
8891 do_ldst:
8892 /* Check for undefined extension instructions
8893 * per the ARM Bible, i.e.:
8894 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8895 */
8896 sh = (0xf << 20) | (0xf << 4);
8897 if (op1 == 0x7 && ((insn & sh) == sh))
8899 goto illegal_op;
8901 /* load/store byte/word */
8902 rn = (insn >> 16) & 0xf;
8903 rd = (insn >> 12) & 0xf;
8904 tmp2 = load_reg(s, rn);
8905 if ((insn & 0x01200000) == 0x00200000) {
8906 /* ldrt/strt */
8907 i = get_a32_user_mem_index(s);
8908 } else {
8909 i = get_mem_index(s);
8911 if (insn & (1 << 24))
8912 gen_add_data_offset(s, insn, tmp2);
8913 if (insn & (1 << 20)) {
8914 /* load */
8915 tmp = tcg_temp_new_i32();
8916 if (insn & (1 << 22)) {
8917 gen_aa32_ld8u(tmp, tmp2, i);
8918 } else {
8919 gen_aa32_ld32u(tmp, tmp2, i);
8921 } else {
8922 /* store */
8923 tmp = load_reg(s, rd);
8924 if (insn & (1 << 22)) {
8925 gen_aa32_st8(tmp, tmp2, i);
8926 } else {
8927 gen_aa32_st32(tmp, tmp2, i);
8929 tcg_temp_free_i32(tmp);
8931 if (!(insn & (1 << 24))) {
8932 gen_add_data_offset(s, insn, tmp2);
8933 store_reg(s, rn, tmp2);
8934 } else if (insn & (1 << 21)) {
8935 store_reg(s, rn, tmp2);
8936 } else {
8937 tcg_temp_free_i32(tmp2);
8939 if (insn & (1 << 20)) {
8940 /* Complete the load. */
8941 store_reg_from_load(s, rd, tmp);
8943 break;
8944 case 0x08:
8945 case 0x09:
8947 int j, n, loaded_base;
8948 bool exc_return = false;
8949 bool is_load = extract32(insn, 20, 1);
8950 bool user = false;
8951 TCGv_i32 loaded_var;
8952 /* load/store multiple words */
8953 /* XXX: store correct base if write back */
8954 if (insn & (1 << 22)) {
8955 /* LDM (user), LDM (exception return) and STM (user) */
8956 if (IS_USER(s))
8957 goto illegal_op; /* only usable in supervisor mode */
8959 if (is_load && extract32(insn, 15, 1)) {
8960 exc_return = true;
8961 } else {
8962 user = true;
8965 rn = (insn >> 16) & 0xf;
8966 addr = load_reg(s, rn);
8968 /* compute total size */
8969 loaded_base = 0;
8970 TCGV_UNUSED_I32(loaded_var);
8971 n = 0;
8972 for (i = 0; i < 16; i++) {
8973 if (insn & (1 << i))
8974 n++;
8976 /* XXX: test invalid n == 0 case ? */
8977 if (insn & (1 << 23)) {
8978 if (insn & (1 << 24)) {
8979 /* pre increment */
8980 tcg_gen_addi_i32(addr, addr, 4);
8981 } else {
8982 /* post increment */
8984 } else {
8985 if (insn & (1 << 24)) {
8986 /* pre decrement */
8987 tcg_gen_addi_i32(addr, addr, -(n * 4));
8988 } else {
8989 /* post decrement */
8990 if (n != 1)
8991 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
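/* After these adjustments addr points at the lowest word to be
 * transferred; the loop below always walks upwards through the
 * register list.
 */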
8994 j = 0;
8995 for (i = 0; i < 16; i++) {
8996 if (insn & (1 << i)) {
8997 if (is_load) {
8998 /* load */
8999 tmp = tcg_temp_new_i32();
9000 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9001 if (user) {
9002 tmp2 = tcg_const_i32(i);
9003 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9004 tcg_temp_free_i32(tmp2);
9005 tcg_temp_free_i32(tmp);
9006 } else if (i == rn) {
9007 loaded_var = tmp;
9008 loaded_base = 1;
9009 } else {
9010 store_reg_from_load(s, i, tmp);
9012 } else {
9013 /* store */
9014 if (i == 15) {
9015 /* special case: r15 = PC + 8 */
9016 val = (long)s->pc + 4;
9017 tmp = tcg_temp_new_i32();
9018 tcg_gen_movi_i32(tmp, val);
9019 } else if (user) {
9020 tmp = tcg_temp_new_i32();
9021 tmp2 = tcg_const_i32(i);
9022 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9023 tcg_temp_free_i32(tmp2);
9024 } else {
9025 tmp = load_reg(s, i);
9027 gen_aa32_st32(tmp, addr, get_mem_index(s));
9028 tcg_temp_free_i32(tmp);
9030 j++;
9031 /* no need to add after the last transfer */
9032 if (j != n)
9033 tcg_gen_addi_i32(addr, addr, 4);
9036 if (insn & (1 << 21)) {
9037 /* write back */
9038 if (insn & (1 << 23)) {
9039 if (insn & (1 << 24)) {
9040 /* pre increment */
9041 } else {
9042 /* post increment */
9043 tcg_gen_addi_i32(addr, addr, 4);
9045 } else {
9046 if (insn & (1 << 24)) {
9047 /* pre decrement */
9048 if (n != 1)
9049 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9050 } else {
9051 /* post decrement */
9052 tcg_gen_addi_i32(addr, addr, -(n * 4));
9055 store_reg(s, rn, addr);
9056 } else {
9057 tcg_temp_free_i32(addr);
9059 if (loaded_base) {
9060 store_reg(s, rn, loaded_var);
9062 if (exc_return) {
9063 /* Restore CPSR from SPSR. */
9064 tmp = load_cpu_field(spsr);
9065 gen_set_cpsr(tmp, CPSR_ERET_MASK);
9066 tcg_temp_free_i32(tmp);
9067 s->is_jmp = DISAS_JUMP;
9070 break;
9071 case 0xa:
9072 case 0xb:
9074 int32_t offset;
9076 /* branch (and link) */
9077 val = (int32_t)s->pc;
9078 if (insn & (1 << 24)) {
9079 tmp = tcg_temp_new_i32();
9080 tcg_gen_movi_i32(tmp, val);
9081 store_reg(s, 14, tmp);
9083 offset = sextract32(insn << 2, 0, 26);
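/* The 24-bit immediate is a word offset: shifting the insn left by
 * 2 and sign-extracting 26 bits gives a byte offset.  s->pc is
 * already 4 past this insn, so adding offset + 4 below produces the
 * architectural PC+8-relative branch target.
 */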
9084 val += offset + 4;
9085 gen_jmp(s, val);
9087 break;
9088 case 0xc:
9089 case 0xd:
9090 case 0xe:
9091 if (((insn >> 8) & 0xe) == 10) {
9092 /* VFP. */
9093 if (disas_vfp_insn(s, insn)) {
9094 goto illegal_op;
9096 } else if (disas_coproc_insn(s, insn)) {
9097 /* Coprocessor. */
9098 goto illegal_op;
9100 break;
9101 case 0xf:
9102 /* swi */
9103 gen_set_pc_im(s, s->pc);
9104 s->svc_imm = extract32(insn, 0, 24);
9105 s->is_jmp = DISAS_SWI;
9106 break;
9107 default:
9108 illegal_op:
9109 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9110 default_exception_el(s));
9111 break;
9116 /* Return true if this is a Thumb-2 logical op. */
9117 static int
9118 thumb2_logic_op(int op)
9120 return (op < 8);
9123 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9124 then set condition code flags based on the result of the operation.
9125 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9126 to the high bit of T1.
9127 Returns zero if the opcode is valid. */
9129 static int
9130 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9131 TCGv_i32 t0, TCGv_i32 t1)
9133 int logic_cc;
9135 logic_cc = 0;
9136 switch (op) {
9137 case 0: /* and */
9138 tcg_gen_and_i32(t0, t0, t1);
9139 logic_cc = conds;
9140 break;
9141 case 1: /* bic */
9142 tcg_gen_andc_i32(t0, t0, t1);
9143 logic_cc = conds;
9144 break;
9145 case 2: /* orr */
9146 tcg_gen_or_i32(t0, t0, t1);
9147 logic_cc = conds;
9148 break;
9149 case 3: /* orn */
9150 tcg_gen_orc_i32(t0, t0, t1);
9151 logic_cc = conds;
9152 break;
9153 case 4: /* eor */
9154 tcg_gen_xor_i32(t0, t0, t1);
9155 logic_cc = conds;
9156 break;
9157 case 8: /* add */
9158 if (conds)
9159 gen_add_CC(t0, t0, t1);
9160 else
9161 tcg_gen_add_i32(t0, t0, t1);
9162 break;
9163 case 10: /* adc */
9164 if (conds)
9165 gen_adc_CC(t0, t0, t1);
9166 else
9167 gen_adc(t0, t1);
9168 break;
9169 case 11: /* sbc */
9170 if (conds) {
9171 gen_sbc_CC(t0, t0, t1);
9172 } else {
9173 gen_sub_carry(t0, t0, t1);
9175 break;
9176 case 13: /* sub */
9177 if (conds)
9178 gen_sub_CC(t0, t0, t1);
9179 else
9180 tcg_gen_sub_i32(t0, t0, t1);
9181 break;
9182 case 14: /* rsb */
9183 if (conds)
9184 gen_sub_CC(t0, t1, t0);
9185 else
9186 tcg_gen_sub_i32(t0, t1, t0);
9187 break;
9188 default: /* 5, 6, 7, 9, 12, 15. */
9189 return 1;
9191 if (logic_cc) {
9192 gen_logic_CC(t0);
9193 if (shifter_out)
9194 gen_set_CF_bit31(t1);
9196 return 0;
9199 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9200 is not legal. */
9201 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9203 uint32_t insn, imm, shift, offset;
9204 uint32_t rd, rn, rm, rs;
9205 TCGv_i32 tmp;
9206 TCGv_i32 tmp2;
9207 TCGv_i32 tmp3;
9208 TCGv_i32 addr;
9209 TCGv_i64 tmp64;
9210 int op;
9211 int shiftop;
9212 int conds;
9213 int logic_cc;
9215 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9216 || arm_dc_feature(s, ARM_FEATURE_M))) {
9217 /* Thumb-1 cores may need to treat bl and blx as a pair of
9218 16-bit instructions to get correct prefetch abort behavior. */
9219 insn = insn_hw1;
9220 if ((insn & (1 << 12)) == 0) {
9221 ARCH(5);
9222 /* Second half of blx. */
9223 offset = ((insn & 0x7ff) << 1);
9224 tmp = load_reg(s, 14);
9225 tcg_gen_addi_i32(tmp, tmp, offset);
9226 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9228 tmp2 = tcg_temp_new_i32();
9229 tcg_gen_movi_i32(tmp2, s->pc | 1);
9230 store_reg(s, 14, tmp2);
9231 gen_bx(s, tmp);
9232 return 0;
9234 if (insn & (1 << 11)) {
9235 /* Second half of bl. */
9236 offset = ((insn & 0x7ff) << 1) | 1;
9237 tmp = load_reg(s, 14);
9238 tcg_gen_addi_i32(tmp, tmp, offset);
9240 tmp2 = tcg_temp_new_i32();
9241 tcg_gen_movi_i32(tmp2, s->pc | 1);
9242 store_reg(s, 14, tmp2);
9243 gen_bx(s, tmp);
9244 return 0;
9246 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9247 /* Instruction spans a page boundary. Implement it as two
9248 16-bit instructions in case the second half causes a
9249 prefetch abort. */
9250 offset = ((int32_t)insn << 21) >> 9;
9251 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9252 return 0;
9254 /* Fall through to 32-bit decode. */
9257 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9258 s->pc += 2;
9259 insn |= (uint32_t)insn_hw1 << 16;
9261 if ((insn & 0xf800e800) != 0xf000e800) {
9262 ARCH(6T2);
9265 rn = (insn >> 16) & 0xf;
9266 rs = (insn >> 12) & 0xf;
9267 rd = (insn >> 8) & 0xf;
9268 rm = insn & 0xf;
9269 switch ((insn >> 25) & 0xf) {
9270 case 0: case 1: case 2: case 3:
9271 /* 16-bit instructions. Should never happen. */
9272 abort();
9273 case 4:
9274 if (insn & (1 << 22)) {
9275 /* Other load/store, table branch. */
9276 if (insn & 0x01200000) {
9277 /* Load/store doubleword. */
9278 if (rn == 15) {
9279 addr = tcg_temp_new_i32();
9280 tcg_gen_movi_i32(addr, s->pc & ~3);
9281 } else {
9282 addr = load_reg(s, rn);
9284 offset = (insn & 0xff) * 4;
9285 if ((insn & (1 << 23)) == 0)
9286 offset = -offset;
9287 if (insn & (1 << 24)) {
9288 tcg_gen_addi_i32(addr, addr, offset);
9289 offset = 0;
9291 if (insn & (1 << 20)) {
9292 /* ldrd */
9293 tmp = tcg_temp_new_i32();
9294 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9295 store_reg(s, rs, tmp);
9296 tcg_gen_addi_i32(addr, addr, 4);
9297 tmp = tcg_temp_new_i32();
9298 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9299 store_reg(s, rd, tmp);
9300 } else {
9301 /* strd */
9302 tmp = load_reg(s, rs);
9303 gen_aa32_st32(tmp, addr, get_mem_index(s));
9304 tcg_temp_free_i32(tmp);
9305 tcg_gen_addi_i32(addr, addr, 4);
9306 tmp = load_reg(s, rd);
9307 gen_aa32_st32(tmp, addr, get_mem_index(s));
9308 tcg_temp_free_i32(tmp);
9310 if (insn & (1 << 21)) {
9311 /* Base writeback. */
9312 if (rn == 15)
9313 goto illegal_op;
9314 tcg_gen_addi_i32(addr, addr, offset - 4);
9315 store_reg(s, rn, addr);
9316 } else {
9317 tcg_temp_free_i32(addr);
9319 } else if ((insn & (1 << 23)) == 0) {
9320 /* Load/store exclusive word. */
9321 addr = tcg_temp_local_new_i32();
9322 load_reg_var(s, addr, rn);
9323 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9324 if (insn & (1 << 20)) {
9325 gen_load_exclusive(s, rs, 15, addr, 2);
9326 } else {
9327 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9329 tcg_temp_free_i32(addr);
9330 } else if ((insn & (7 << 5)) == 0) {
9331 /* Table Branch. */
9332 if (rn == 15) {
9333 addr = tcg_temp_new_i32();
9334 tcg_gen_movi_i32(addr, s->pc);
9335 } else {
9336 addr = load_reg(s, rn);
9338 tmp = load_reg(s, rm);
9339 tcg_gen_add_i32(addr, addr, tmp);
9340 if (insn & (1 << 4)) {
9341 /* tbh */
9342 tcg_gen_add_i32(addr, addr, tmp);
9343 tcg_temp_free_i32(tmp);
9344 tmp = tcg_temp_new_i32();
9345 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9346 } else { /* tbb */
9347 tcg_temp_free_i32(tmp);
9348 tmp = tcg_temp_new_i32();
9349 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9351 tcg_temp_free_i32(addr);
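/* The table entry is a halfword count: double it and add the
 * current PC (this insn + 4) to form the branch target.
 */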
9352 tcg_gen_shli_i32(tmp, tmp, 1);
9353 tcg_gen_addi_i32(tmp, tmp, s->pc);
9354 store_reg(s, 15, tmp);
9355 } else {
9356 int op2 = (insn >> 6) & 0x3;
9357 op = (insn >> 4) & 0x3;
9358 switch (op2) {
9359 case 0:
9360 goto illegal_op;
9361 case 1:
9362 /* Load/store exclusive byte/halfword/doubleword */
9363 if (op == 2) {
9364 goto illegal_op;
9366 ARCH(7);
9367 break;
9368 case 2:
9369 /* Load-acquire/store-release */
9370 if (op == 3) {
9371 goto illegal_op;
9373 /* Fall through */
9374 case 3:
9375 /* Load-acquire/store-release exclusive */
9376 ARCH(8);
9377 break;
9379 addr = tcg_temp_local_new_i32();
9380 load_reg_var(s, addr, rn);
9381 if (!(op2 & 1)) {
9382 if (insn & (1 << 20)) {
9383 tmp = tcg_temp_new_i32();
9384 switch (op) {
9385 case 0: /* ldab */
9386 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9387 break;
9388 case 1: /* ldah */
9389 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9390 break;
9391 case 2: /* lda */
9392 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9393 break;
9394 default:
9395 abort();
9397 store_reg(s, rs, tmp);
9398 } else {
9399 tmp = load_reg(s, rs);
9400 switch (op) {
9401 case 0: /* stlb */
9402 gen_aa32_st8(tmp, addr, get_mem_index(s));
9403 break;
9404 case 1: /* stlh */
9405 gen_aa32_st16(tmp, addr, get_mem_index(s));
9406 break;
9407 case 2: /* stl */
9408 gen_aa32_st32(tmp, addr, get_mem_index(s));
9409 break;
9410 default:
9411 abort();
9413 tcg_temp_free_i32(tmp);
9415 } else if (insn & (1 << 20)) {
9416 gen_load_exclusive(s, rs, rd, addr, op);
9417 } else {
9418 gen_store_exclusive(s, rm, rs, rd, addr, op);
9420 tcg_temp_free_i32(addr);
9422 } else {
9423 /* Load/store multiple, RFE, SRS. */
9424 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9425 /* RFE, SRS: not available in user mode or on M profile */
9426 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9427 goto illegal_op;
9429 if (insn & (1 << 20)) {
9430 /* rfe */
9431 addr = load_reg(s, rn);
9432 if ((insn & (1 << 24)) == 0)
9433 tcg_gen_addi_i32(addr, addr, -8);
9434 /* Load PC into tmp and CPSR into tmp2. */
9435 tmp = tcg_temp_new_i32();
9436 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9437 tcg_gen_addi_i32(addr, addr, 4);
9438 tmp2 = tcg_temp_new_i32();
9439 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9440 if (insn & (1 << 21)) {
9441 /* Base writeback. */
9442 if (insn & (1 << 24)) {
9443 tcg_gen_addi_i32(addr, addr, 4);
9444 } else {
9445 tcg_gen_addi_i32(addr, addr, -4);
9447 store_reg(s, rn, addr);
9448 } else {
9449 tcg_temp_free_i32(addr);
9451 gen_rfe(s, tmp, tmp2);
9452 } else {
9453 /* srs */
9454 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9455 insn & (1 << 21));
9457 } else {
9458 int i, loaded_base = 0;
9459 TCGv_i32 loaded_var;
9460 /* Load/store multiple. */
9461 addr = load_reg(s, rn);
9462 offset = 0;
9463 for (i = 0; i < 16; i++) {
9464 if (insn & (1 << i))
9465 offset += 4;
9467 if (insn & (1 << 24)) {
9468 tcg_gen_addi_i32(addr, addr, -offset);
9471 TCGV_UNUSED_I32(loaded_var);
9472 for (i = 0; i < 16; i++) {
9473 if ((insn & (1 << i)) == 0)
9474 continue;
9475 if (insn & (1 << 20)) {
9476 /* Load. */
9477 tmp = tcg_temp_new_i32();
9478 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9479 if (i == 15) {
9480 gen_bx(s, tmp);
9481 } else if (i == rn) {
9482 loaded_var = tmp;
9483 loaded_base = 1;
9484 } else {
9485 store_reg(s, i, tmp);
9487 } else {
9488 /* Store. */
9489 tmp = load_reg(s, i);
9490 gen_aa32_st32(tmp, addr, get_mem_index(s));
9491 tcg_temp_free_i32(tmp);
9493 tcg_gen_addi_i32(addr, addr, 4);
9495 if (loaded_base) {
9496 store_reg(s, rn, loaded_var);
9498 if (insn & (1 << 21)) {
9499 /* Base register writeback. */
9500 if (insn & (1 << 24)) {
9501 tcg_gen_addi_i32(addr, addr, -offset);
9503 /* Fault if writeback register is in register list. */
9504 if (insn & (1 << rn))
9505 goto illegal_op;
9506 store_reg(s, rn, addr);
9507 } else {
9508 tcg_temp_free_i32(addr);
9512 break;
9513 case 5:
9515 op = (insn >> 21) & 0xf;
9516 if (op == 6) {
9517 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9518 goto illegal_op;
9520 /* Halfword pack. */
9521 tmp = load_reg(s, rn);
9522 tmp2 = load_reg(s, rm);
9523 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9524 if (insn & (1 << 5)) {
9525 /* pkhtb */
9526 if (shift == 0)
9527 shift = 31;
9528 tcg_gen_sari_i32(tmp2, tmp2, shift);
9529 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9530 tcg_gen_ext16u_i32(tmp2, tmp2);
9531 } else {
9532 /* pkhbt */
9533 if (shift)
9534 tcg_gen_shli_i32(tmp2, tmp2, shift);
9535 tcg_gen_ext16u_i32(tmp, tmp);
9536 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9538 tcg_gen_or_i32(tmp, tmp, tmp2);
9539 tcg_temp_free_i32(tmp2);
9540 store_reg(s, rd, tmp);
9541 } else {
9542 /* Data processing register constant shift. */
9543 if (rn == 15) {
9544 tmp = tcg_temp_new_i32();
9545 tcg_gen_movi_i32(tmp, 0);
9546 } else {
9547 tmp = load_reg(s, rn);
9549 tmp2 = load_reg(s, rm);
9551 shiftop = (insn >> 4) & 3;
9552 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9553 conds = (insn & (1 << 20)) != 0;
9554 logic_cc = (conds && thumb2_logic_op(op));
9555 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9556 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9557 goto illegal_op;
9558 tcg_temp_free_i32(tmp2);
9559 if (rd != 15) {
9560 store_reg(s, rd, tmp);
9561 } else {
9562 tcg_temp_free_i32(tmp);
9565 break;
9566 case 13: /* Misc data processing. */
9567 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9568 if (op < 4 && (insn & 0xf000) != 0xf000)
9569 goto illegal_op;
9570 switch (op) {
9571 case 0: /* Register controlled shift. */
9572 tmp = load_reg(s, rn);
9573 tmp2 = load_reg(s, rm);
9574 if ((insn & 0x70) != 0)
9575 goto illegal_op;
9576 op = (insn >> 21) & 3;
9577 logic_cc = (insn & (1 << 20)) != 0;
9578 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9579 if (logic_cc)
9580 gen_logic_CC(tmp);
9581 store_reg_bx(s, rd, tmp);
9582 break;
9583 case 1: /* Sign/zero extend. */
9584 op = (insn >> 20) & 7;
9585 switch (op) {
9586 case 0: /* SXTAH, SXTH */
9587 case 1: /* UXTAH, UXTH */
9588 case 4: /* SXTAB, SXTB */
9589 case 5: /* UXTAB, UXTB */
9590 break;
9591 case 2: /* SXTAB16, SXTB16 */
9592 case 3: /* UXTAB16, UXTB16 */
9593 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9594 goto illegal_op;
9596 break;
9597 default:
9598 goto illegal_op;
9600 if (rn != 15) {
9601 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9602 goto illegal_op;
9605 tmp = load_reg(s, rm);
9606 shift = (insn >> 4) & 3;
9607 /* ??? In many cases it's not necessary to do a
9608 rotate; a shift is sufficient. */
9609 if (shift != 0)
9610 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9611 op = (insn >> 20) & 7;
9612 switch (op) {
9613 case 0: gen_sxth(tmp); break;
9614 case 1: gen_uxth(tmp); break;
9615 case 2: gen_sxtb16(tmp); break;
9616 case 3: gen_uxtb16(tmp); break;
9617 case 4: gen_sxtb(tmp); break;
9618 case 5: gen_uxtb(tmp); break;
9619 default:
9620 g_assert_not_reached();
9622 if (rn != 15) {
9623 tmp2 = load_reg(s, rn);
9624 if ((op >> 1) == 1) {
9625 gen_add16(tmp, tmp2);
9626 } else {
9627 tcg_gen_add_i32(tmp, tmp, tmp2);
9628 tcg_temp_free_i32(tmp2);
9631 store_reg(s, rd, tmp);
9632 break;
9633 case 2: /* SIMD add/subtract. */
9634 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9635 goto illegal_op;
9637 op = (insn >> 20) & 7;
9638 shift = (insn >> 4) & 7;
9639 if ((op & 3) == 3 || (shift & 3) == 3)
9640 goto illegal_op;
9641 tmp = load_reg(s, rn);
9642 tmp2 = load_reg(s, rm);
9643 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9644 tcg_temp_free_i32(tmp2);
9645 store_reg(s, rd, tmp);
9646 break;
9647 case 3: /* Other data processing. */
9648 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9649 if (op < 4) {
9650 /* Saturating add/subtract. */
9651 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9652 goto illegal_op;
9654 tmp = load_reg(s, rn);
9655 tmp2 = load_reg(s, rm);
9656 if (op & 1)
9657 gen_helper_double_saturate(tmp, cpu_env, tmp);
9658 if (op & 2)
9659 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9660 else
9661 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9662 tcg_temp_free_i32(tmp2);
9663 } else {
9664 switch (op) {
9665 case 0x0a: /* rbit */
9666 case 0x08: /* rev */
9667 case 0x09: /* rev16 */
9668 case 0x0b: /* revsh */
9669 case 0x18: /* clz */
9670 break;
9671 case 0x10: /* sel */
9672 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9673 goto illegal_op;
9675 break;
9676 case 0x20: /* crc32/crc32c */
9677 case 0x21:
9678 case 0x22:
9679 case 0x28:
9680 case 0x29:
9681 case 0x2a:
9682 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9683 goto illegal_op;
9685 break;
9686 default:
9687 goto illegal_op;
9689 tmp = load_reg(s, rn);
9690 switch (op) {
9691 case 0x0a: /* rbit */
9692 gen_helper_rbit(tmp, tmp);
9693 break;
9694 case 0x08: /* rev */
9695 tcg_gen_bswap32_i32(tmp, tmp);
9696 break;
9697 case 0x09: /* rev16 */
9698 gen_rev16(tmp);
9699 break;
9700 case 0x0b: /* revsh */
9701 gen_revsh(tmp);
9702 break;
9703 case 0x10: /* sel */
9704 tmp2 = load_reg(s, rm);
9705 tmp3 = tcg_temp_new_i32();
9706 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9707 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9708 tcg_temp_free_i32(tmp3);
9709 tcg_temp_free_i32(tmp2);
9710 break;
9711 case 0x18: /* clz */
9712 gen_helper_clz(tmp, tmp);
9713 break;
9714 case 0x20:
9715 case 0x21:
9716 case 0x22:
9717 case 0x28:
9718 case 0x29:
9719 case 0x2a:
9721 /* crc32/crc32c */
9722 uint32_t sz = op & 0x3;
9723 uint32_t c = op & 0x8;
9725 tmp2 = load_reg(s, rm);
9726 if (sz == 0) {
9727 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9728 } else if (sz == 1) {
9729 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9731 tmp3 = tcg_const_i32(1 << sz);
9732 if (c) {
9733 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9734 } else {
9735 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9737 tcg_temp_free_i32(tmp2);
9738 tcg_temp_free_i32(tmp3);
9739 break;
9741 default:
9742 g_assert_not_reached();
9745 store_reg(s, rd, tmp);
9746 break;
9747 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9748 switch ((insn >> 20) & 7) {
9749 case 0: /* 32 x 32 -> 32 */
9750 case 7: /* Unsigned sum of absolute differences. */
9751 break;
9752 case 1: /* 16 x 16 -> 32 */
9753 case 2: /* Dual multiply add. */
9754 case 3: /* 32 * 16 -> 32msb */
9755 case 4: /* Dual multiply subtract. */
9756 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9757 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9758 goto illegal_op;
9760 break;
9762 op = (insn >> 4) & 0xf;
9763 tmp = load_reg(s, rn);
9764 tmp2 = load_reg(s, rm);
9765 switch ((insn >> 20) & 7) {
9766 case 0: /* 32 x 32 -> 32 */
9767 tcg_gen_mul_i32(tmp, tmp, tmp2);
9768 tcg_temp_free_i32(tmp2);
9769 if (rs != 15) {
9770 tmp2 = load_reg(s, rs);
9771 if (op)
9772 tcg_gen_sub_i32(tmp, tmp2, tmp);
9773 else
9774 tcg_gen_add_i32(tmp, tmp, tmp2);
9775 tcg_temp_free_i32(tmp2);
9777 break;
9778 case 1: /* 16 x 16 -> 32 */
9779 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9780 tcg_temp_free_i32(tmp2);
9781 if (rs != 15) {
9782 tmp2 = load_reg(s, rs);
9783 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9784 tcg_temp_free_i32(tmp2);
9786 break;
9787 case 2: /* Dual multiply add. */
9788 case 4: /* Dual multiply subtract. */
9789 if (op)
9790 gen_swap_half(tmp2);
9791 gen_smul_dual(tmp, tmp2);
9792 if (insn & (1 << 22)) {
9793 /* This subtraction cannot overflow. */
9794 tcg_gen_sub_i32(tmp, tmp, tmp2);
9795 } else {
9796 /* This addition cannot overflow 32 bits;
9797 * however it may overflow considered as a signed
9798 * operation, in which case we must set the Q flag.
9799 */
9800 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9802 tcg_temp_free_i32(tmp2);
9803 if (rs != 15)
9805 tmp2 = load_reg(s, rs);
9806 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9807 tcg_temp_free_i32(tmp2);
9809 break;
9810 case 3: /* 32 * 16 -> 32msb */
9811 if (op)
9812 tcg_gen_sari_i32(tmp2, tmp2, 16);
9813 else
9814 gen_sxth(tmp2);
9815 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9816 tcg_gen_shri_i64(tmp64, tmp64, 16);
9817 tmp = tcg_temp_new_i32();
9818 tcg_gen_extrl_i64_i32(tmp, tmp64);
9819 tcg_temp_free_i64(tmp64);
9820 if (rs != 15)
9822 tmp2 = load_reg(s, rs);
9823 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9824 tcg_temp_free_i32(tmp2);
9826 break;
9827 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9828 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9829 if (rs != 15) {
9830 tmp = load_reg(s, rs);
9831 if (insn & (1 << 20)) {
9832 tmp64 = gen_addq_msw(tmp64, tmp);
9833 } else {
9834 tmp64 = gen_subq_msw(tmp64, tmp);
9837 if (insn & (1 << 4)) {
9838 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9840 tcg_gen_shri_i64(tmp64, tmp64, 32);
9841 tmp = tcg_temp_new_i32();
9842 tcg_gen_extrl_i64_i32(tmp, tmp64);
9843 tcg_temp_free_i64(tmp64);
9844 break;
9845 case 7: /* Unsigned sum of absolute differences. */
9846 gen_helper_usad8(tmp, tmp, tmp2);
9847 tcg_temp_free_i32(tmp2);
9848 if (rs != 15) {
9849 tmp2 = load_reg(s, rs);
9850 tcg_gen_add_i32(tmp, tmp, tmp2);
9851 tcg_temp_free_i32(tmp2);
9853 break;
9855 store_reg(s, rd, tmp);
9856 break;
9857 case 6: case 7: /* 64-bit multiply, Divide. */
9858 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9859 tmp = load_reg(s, rn);
9860 tmp2 = load_reg(s, rm);
9861 if ((op & 0x50) == 0x10) {
9862 /* sdiv, udiv */
9863 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9864 goto illegal_op;
9866 if (op & 0x20)
9867 gen_helper_udiv(tmp, tmp, tmp2);
9868 else
9869 gen_helper_sdiv(tmp, tmp, tmp2);
9870 tcg_temp_free_i32(tmp2);
9871 store_reg(s, rd, tmp);
9872 } else if ((op & 0xe) == 0xc) {
9873 /* Dual multiply accumulate long. */
9874 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9875 tcg_temp_free_i32(tmp);
9876 tcg_temp_free_i32(tmp2);
9877 goto illegal_op;
9879 if (op & 1)
9880 gen_swap_half(tmp2);
9881 gen_smul_dual(tmp, tmp2);
9882 if (op & 0x10) {
9883 tcg_gen_sub_i32(tmp, tmp, tmp2);
9884 } else {
9885 tcg_gen_add_i32(tmp, tmp, tmp2);
9887 tcg_temp_free_i32(tmp2);
9888 /* BUGFIX */
9889 tmp64 = tcg_temp_new_i64();
9890 tcg_gen_ext_i32_i64(tmp64, tmp);
9891 tcg_temp_free_i32(tmp);
9892 gen_addq(s, tmp64, rs, rd);
9893 gen_storeq_reg(s, rs, rd, tmp64);
9894 tcg_temp_free_i64(tmp64);
9895 } else {
9896 if (op & 0x20) {
9897 /* Unsigned 64-bit multiply */
9898 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9899 } else {
9900 if (op & 8) {
9901 /* smlalxy */
9902 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9903 tcg_temp_free_i32(tmp2);
9904 tcg_temp_free_i32(tmp);
9905 goto illegal_op;
9907 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9908 tcg_temp_free_i32(tmp2);
9909 tmp64 = tcg_temp_new_i64();
9910 tcg_gen_ext_i32_i64(tmp64, tmp);
9911 tcg_temp_free_i32(tmp);
9912 } else {
9913 /* Signed 64-bit multiply */
9914 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9917 if (op & 4) {
9918 /* umaal */
9919 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9920 tcg_temp_free_i64(tmp64);
9921 goto illegal_op;
9923 gen_addq_lo(s, tmp64, rs);
9924 gen_addq_lo(s, tmp64, rd);
9925 } else if (op & 0x40) {
9926 /* 64-bit accumulate. */
9927 gen_addq(s, tmp64, rs, rd);
9929 gen_storeq_reg(s, rs, rd, tmp64);
9930 tcg_temp_free_i64(tmp64);
9932 break;
9934 break;
9935 case 6: case 7: case 14: case 15:
9936 /* Coprocessor. */
9937 if (((insn >> 24) & 3) == 3) {
9938 /* Translate into the equivalent ARM encoding. */
9939 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9940 if (disas_neon_data_insn(s, insn)) {
9941 goto illegal_op;
9943 } else if (((insn >> 8) & 0xe) == 10) {
9944 if (disas_vfp_insn(s, insn)) {
9945 goto illegal_op;
9947 } else {
9948 if (insn & (1 << 28))
9949 goto illegal_op;
9950 if (disas_coproc_insn(s, insn)) {
9951 goto illegal_op;
9954 break;
9955 case 8: case 9: case 10: case 11:
9956 if (insn & (1 << 15)) {
9957 /* Branches, misc control. */
9958 if (insn & 0x5000) {
9959 /* Unconditional branch. */
9960 /* signextend(hw1[10:0]) -> offset[31:12]. */
9961 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9962 /* hw2[10:0] -> offset[11:1]. */
9963 offset |= (insn & 0x7ff) << 1;
9964 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9965 offset[24:22] already have the same value because of the
9966 sign extension above. */
9967 offset ^= ((~insn) & (1 << 13)) << 10;
9968 offset ^= ((~insn) & (1 << 11)) << 11;
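/* i.e. offset[23] = I1 = NOT(J1 XOR S) and offset[22] = I2 =
 * NOT(J2 XOR S), where J1 and J2 are bits 13 and 11 of the second
 * halfword.
 */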
9970 if (insn & (1 << 14)) {
9971 /* Branch and link. */
9972 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9975 offset += s->pc;
9976 if (insn & (1 << 12)) {
9977 /* b/bl */
9978 gen_jmp(s, offset);
9979 } else {
9980 /* blx */
9981 offset &= ~(uint32_t)2;
9982 /* thumb2 bx, no need to check */
9983 gen_bx_im(s, offset);
9985 } else if (((insn >> 23) & 7) == 7) {
9986 /* Misc control */
9987 if (insn & (1 << 13))
9988 goto illegal_op;
9990 if (insn & (1 << 26)) {
9991 if (!(insn & (1 << 20))) {
9992 /* Hypervisor call (v7) */
9993 int imm16 = extract32(insn, 16, 4) << 12
9994 | extract32(insn, 0, 12);
9995 ARCH(7);
9996 if (IS_USER(s)) {
9997 goto illegal_op;
9999 gen_hvc(s, imm16);
10000 } else {
10001 /* Secure monitor call (v6+) */
10002 ARCH(6K);
10003 if (IS_USER(s)) {
10004 goto illegal_op;
10006 gen_smc(s);
10008 } else {
10009 op = (insn >> 20) & 7;
10010 switch (op) {
10011 case 0: /* msr cpsr. */
10012 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10013 tmp = load_reg(s, rn);
10014 addr = tcg_const_i32(insn & 0xff);
10015 gen_helper_v7m_msr(cpu_env, addr, tmp);
10016 tcg_temp_free_i32(addr);
10017 tcg_temp_free_i32(tmp);
10018 gen_lookup_tb(s);
10019 break;
10021 /* fall through */
10022 case 1: /* msr spsr. */
10023 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10024 goto illegal_op;
10026 tmp = load_reg(s, rn);
10027 if (gen_set_psr(s,
10028 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10029 op == 1, tmp))
10030 goto illegal_op;
10031 break;
10032 case 2: /* cps, nop-hint. */
10033 if (((insn >> 8) & 7) == 0) {
10034 gen_nop_hint(s, insn & 0xff);
10036 /* Implemented as NOP in user mode. */
10037 if (IS_USER(s))
10038 break;
10039 offset = 0;
10040 imm = 0;
10041 if (insn & (1 << 10)) {
10042 if (insn & (1 << 7))
10043 offset |= CPSR_A;
10044 if (insn & (1 << 6))
10045 offset |= CPSR_I;
10046 if (insn & (1 << 5))
10047 offset |= CPSR_F;
10048 if (insn & (1 << 9))
10049 imm = CPSR_A | CPSR_I | CPSR_F;
10051 if (insn & (1 << 8)) {
10052 offset |= 0x1f;
10053 imm |= (insn & 0x1f);
10055 if (offset) {
10056 gen_set_psr_im(s, offset, 0, imm);
10058 break;
10059 case 3: /* Special control operations. */
10060 ARCH(7);
10061 op = (insn >> 4) & 0xf;
10062 switch (op) {
10063 case 2: /* clrex */
10064 gen_clrex(s);
10065 break;
10066 case 4: /* dsb */
10067 case 5: /* dmb */
10068 /* These execute as NOPs. */
10069 break;
10070 case 6: /* isb */
10071 /* We need to break the TB after this insn
10072 * to execute self-modifying code correctly
10073 * and also to take any pending interrupts
10074 * immediately.
10075 */
10076 gen_lookup_tb(s);
10077 break;
10078 default:
10079 goto illegal_op;
10081 break;
10082 case 4: /* bxj */
10083 /* Trivial implementation equivalent to bx. */
10084 tmp = load_reg(s, rn);
10085 gen_bx(s, tmp);
10086 break;
10087 case 5: /* Exception return. */
10088 if (IS_USER(s)) {
10089 goto illegal_op;
10091 if (rn != 14 || rd != 15) {
10092 goto illegal_op;
10094 tmp = load_reg(s, rn);
10095 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10096 gen_exception_return(s, tmp);
10097 break;
10098 case 6: /* mrs cpsr. */
10099 tmp = tcg_temp_new_i32();
10100 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10101 addr = tcg_const_i32(insn & 0xff);
10102 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10103 tcg_temp_free_i32(addr);
10104 } else {
10105 gen_helper_cpsr_read(tmp, cpu_env);
10107 store_reg(s, rd, tmp);
10108 break;
10109 case 7: /* mrs spsr. */
10110 /* Not accessible in user mode. */
10111 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10112 goto illegal_op;
10114 tmp = load_cpu_field(spsr);
10115 store_reg(s, rd, tmp);
10116 break;
10119 } else {
10120 /* Conditional branch. */
10121 op = (insn >> 22) & 0xf;
10122 /* Generate a conditional jump to next instruction. */
10123 s->condlabel = gen_new_label();
10124 arm_gen_test_cc(op ^ 1, s->condlabel);
10125 s->condjmp = 1;
10127 /* offset[11:1] = insn[10:0] */
10128 offset = (insn & 0x7ff) << 1;
10129 /* offset[17:12] = insn[21:16]. */
10130 offset |= (insn & 0x003f0000) >> 4;
10131 /* offset[31:20] = insn[26]. */
10132 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10133 /* offset[18] = insn[13]. */
10134 offset |= (insn & (1 << 13)) << 5;
10135 /* offset[19] = insn[11]. */
10136 offset |= (insn & (1 << 11)) << 8;
10138 /* jump to the offset */
10139 gen_jmp(s, s->pc + offset);
10141 } else {
10142 /* Data processing immediate. */
10143 if (insn & (1 << 25)) {
10144 if (insn & (1 << 24)) {
10145 if (insn & (1 << 20))
10146 goto illegal_op;
10147 /* Bitfield/Saturate. */
10148 op = (insn >> 21) & 7;
10149 imm = insn & 0x1f;
10150 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10151 if (rn == 15) {
10152 tmp = tcg_temp_new_i32();
10153 tcg_gen_movi_i32(tmp, 0);
10154 } else {
10155 tmp = load_reg(s, rn);
10157 switch (op) {
10158 case 2: /* Signed bitfield extract. */
10159 imm++;
10160 if (shift + imm > 32)
10161 goto illegal_op;
10162 if (imm < 32)
10163 gen_sbfx(tmp, shift, imm);
10164 break;
10165 case 6: /* Unsigned bitfield extract. */
10166 imm++;
10167 if (shift + imm > 32)
10168 goto illegal_op;
10169 if (imm < 32)
10170 gen_ubfx(tmp, shift, (1u << imm) - 1);
10171 break;
10172 case 3: /* Bitfield insert/clear. */
10173 if (imm < shift)
10174 goto illegal_op;
10175 imm = imm + 1 - shift;
10176 if (imm != 32) {
10177 tmp2 = load_reg(s, rd);
10178 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10179 tcg_temp_free_i32(tmp2);
10181 break;
10182 case 7:
10183 goto illegal_op;
10184 default: /* Saturate. */
10185 if (shift) {
10186 if (op & 1)
10187 tcg_gen_sari_i32(tmp, tmp, shift);
10188 else
10189 tcg_gen_shli_i32(tmp, tmp, shift);
10191 tmp2 = tcg_const_i32(imm);
10192 if (op & 4) {
10193 /* Unsigned. */
10194 if ((op & 1) && shift == 0) {
10195 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10196 tcg_temp_free_i32(tmp);
10197 tcg_temp_free_i32(tmp2);
10198 goto illegal_op;
10200 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10201 } else {
10202 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10204 } else {
10205 /* Signed. */
10206 if ((op & 1) && shift == 0) {
10207 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10208 tcg_temp_free_i32(tmp);
10209 tcg_temp_free_i32(tmp2);
10210 goto illegal_op;
10212 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10213 } else {
10214 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10217 tcg_temp_free_i32(tmp2);
10218 break;
10220 store_reg(s, rd, tmp);
10221 } else {
10222 imm = ((insn & 0x04000000) >> 15)
10223 | ((insn & 0x7000) >> 4) | (insn & 0xff);
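/* Gather the i:imm3:imm8 fields into bits 11:0: insn bit 26 becomes
 * imm[11], bits 14:12 become imm[10:8] and bits 7:0 are imm[7:0].
 */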
10224 if (insn & (1 << 22)) {
10225 /* 16-bit immediate. */
10226 imm |= (insn >> 4) & 0xf000;
10227 if (insn & (1 << 23)) {
10228 /* movt */
10229 tmp = load_reg(s, rd);
10230 tcg_gen_ext16u_i32(tmp, tmp);
10231 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10232 } else {
10233 /* movw */
10234 tmp = tcg_temp_new_i32();
10235 tcg_gen_movi_i32(tmp, imm);
10237 } else {
10238 /* Add/sub 12-bit immediate. */
10239 if (rn == 15) {
10240 offset = s->pc & ~(uint32_t)3;
10241 if (insn & (1 << 23))
10242 offset -= imm;
10243 else
10244 offset += imm;
10245 tmp = tcg_temp_new_i32();
10246 tcg_gen_movi_i32(tmp, offset);
10247 } else {
10248 tmp = load_reg(s, rn);
10249 if (insn & (1 << 23))
10250 tcg_gen_subi_i32(tmp, tmp, imm);
10251 else
10252 tcg_gen_addi_i32(tmp, tmp, imm);
10255 store_reg(s, rd, tmp);
10257 } else {
10258 int shifter_out = 0;
10259 /* modified 12-bit immediate. */
10260 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10261 imm = (insn & 0xff);
10262 switch (shift) {
10263 case 0: /* XY */
10264 /* Nothing to do. */
10265 break;
10266 case 1: /* 00XY00XY */
10267 imm |= imm << 16;
10268 break;
10269 case 2: /* XY00XY00 */
10270 imm |= imm << 16;
10271 imm <<= 8;
10272 break;
10273 case 3: /* XYXYXYXY */
10274 imm |= imm << 16;
10275 imm |= imm << 8;
10276 break;
10277 default: /* Rotated constant. */
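/* The remaining encodings are an 8-bit value with its top bit forced
 * to 1, rotated right by a 5-bit count.  The count is always >= 8
 * here, so a left shift by (32 - count) is equivalent to the
 * rotation.  shifter_out tells the data op to copy bit 31 of the
 * immediate into the carry flag when flags are being set.
 */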
10278 shift = (shift << 1) | (imm >> 7);
10279 imm |= 0x80;
10280 imm = imm << (32 - shift);
10281 shifter_out = 1;
10282 break;
10284 tmp2 = tcg_temp_new_i32();
10285 tcg_gen_movi_i32(tmp2, imm);
10286 rn = (insn >> 16) & 0xf;
10287 if (rn == 15) {
10288 tmp = tcg_temp_new_i32();
10289 tcg_gen_movi_i32(tmp, 0);
10290 } else {
10291 tmp = load_reg(s, rn);
10293 op = (insn >> 21) & 0xf;
10294 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10295 shifter_out, tmp, tmp2))
10296 goto illegal_op;
10297 tcg_temp_free_i32(tmp2);
10298 rd = (insn >> 8) & 0xf;
10299 if (rd != 15) {
10300 store_reg(s, rd, tmp);
10301 } else {
10302 tcg_temp_free_i32(tmp);
10306 break;
10307 case 12: /* Load/store single data item. */
10309 int postinc = 0;
10310 int writeback = 0;
10311 int memidx;
10312 if ((insn & 0x01100000) == 0x01000000) {
10313 if (disas_neon_ls_insn(s, insn)) {
10314 goto illegal_op;
10316 break;
10318 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10319 if (rs == 15) {
10320 if (!(insn & (1 << 20))) {
10321 goto illegal_op;
10323 if (op != 2) {
10324 /* Byte or halfword load space with dest == r15 : memory hints.
10325 * Catch them early so we don't emit pointless addressing code.
10326 * This space is a mix of:
10327 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10328 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10329 * cores)
10330 * unallocated hints, which must be treated as NOPs
10331 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10332 * which is easiest for the decoding logic
10333 * Some space which must UNDEF
10334 */
10335 int op1 = (insn >> 23) & 3;
10336 int op2 = (insn >> 6) & 0x3f;
10337 if (op & 2) {
10338 goto illegal_op;
10340 if (rn == 15) {
10341 /* UNPREDICTABLE, unallocated hint or
10342 * PLD/PLDW/PLI (literal)
10343 */
10344 return 0;
10346 if (op1 & 1) {
10347 return 0; /* PLD/PLDW/PLI or unallocated hint */
10349 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10350 return 0; /* PLD/PLDW/PLI or unallocated hint */
10352 /* UNDEF space, or an UNPREDICTABLE */
10353 return 1;
10356 memidx = get_mem_index(s);
10357 if (rn == 15) {
10358 addr = tcg_temp_new_i32();
10359 /* PC relative. */
10360 /* s->pc has already been incremented by 4. */
10361 imm = s->pc & 0xfffffffc;
10362 if (insn & (1 << 23))
10363 imm += insn & 0xfff;
10364 else
10365 imm -= insn & 0xfff;
10366 tcg_gen_movi_i32(addr, imm);
10367 } else {
10368 addr = load_reg(s, rn);
10369 if (insn & (1 << 23)) {
10370 /* Positive offset. */
10371 imm = insn & 0xfff;
10372 tcg_gen_addi_i32(addr, addr, imm);
10373 } else {
10374 imm = insn & 0xff;
10375 switch ((insn >> 8) & 0xf) {
10376 case 0x0: /* Shifted Register. */
10377 shift = (insn >> 4) & 0xf;
10378 if (shift > 3) {
10379 tcg_temp_free_i32(addr);
10380 goto illegal_op;
10382 tmp = load_reg(s, rm);
10383 if (shift)
10384 tcg_gen_shli_i32(tmp, tmp, shift);
10385 tcg_gen_add_i32(addr, addr, tmp);
10386 tcg_temp_free_i32(tmp);
10387 break;
10388 case 0xc: /* Negative offset. */
10389 tcg_gen_addi_i32(addr, addr, -imm);
10390 break;
10391 case 0xe: /* User privilege. */
10392 tcg_gen_addi_i32(addr, addr, imm);
10393 memidx = get_a32_user_mem_index(s);
10394 break;
10395 case 0x9: /* Post-decrement. */
10396 imm = -imm;
10397 /* Fall through. */
10398 case 0xb: /* Post-increment. */
10399 postinc = 1;
10400 writeback = 1;
10401 break;
10402 case 0xd: /* Pre-decrement. */
10403 imm = -imm;
10404 /* Fall through. */
10405 case 0xf: /* Pre-increment. */
10406 tcg_gen_addi_i32(addr, addr, imm);
10407 writeback = 1;
10408 break;
10409 default:
10410 tcg_temp_free_i32(addr);
10411 goto illegal_op;
10415 if (insn & (1 << 20)) {
10416 /* Load. */
10417 tmp = tcg_temp_new_i32();
10418 switch (op) {
10419 case 0:
10420 gen_aa32_ld8u(tmp, addr, memidx);
10421 break;
10422 case 4:
10423 gen_aa32_ld8s(tmp, addr, memidx);
10424 break;
10425 case 1:
10426 gen_aa32_ld16u(tmp, addr, memidx);
10427 break;
10428 case 5:
10429 gen_aa32_ld16s(tmp, addr, memidx);
10430 break;
10431 case 2:
10432 gen_aa32_ld32u(tmp, addr, memidx);
10433 break;
10434 default:
10435 tcg_temp_free_i32(tmp);
10436 tcg_temp_free_i32(addr);
10437 goto illegal_op;
10439 if (rs == 15) {
10440 gen_bx(s, tmp);
10441 } else {
10442 store_reg(s, rs, tmp);
10444 } else {
10445 /* Store. */
10446 tmp = load_reg(s, rs);
10447 switch (op) {
10448 case 0:
10449 gen_aa32_st8(tmp, addr, memidx);
10450 break;
10451 case 1:
10452 gen_aa32_st16(tmp, addr, memidx);
10453 break;
10454 case 2:
10455 gen_aa32_st32(tmp, addr, memidx);
10456 break;
10457 default:
10458 tcg_temp_free_i32(tmp);
10459 tcg_temp_free_i32(addr);
10460 goto illegal_op;
10462 tcg_temp_free_i32(tmp);
10464 if (postinc)
10465 tcg_gen_addi_i32(addr, addr, imm);
10466 if (writeback) {
10467 store_reg(s, rn, addr);
10468 } else {
10469 tcg_temp_free_i32(addr);
10472 break;
10473 default:
10474 goto illegal_op;
10476 return 0;
10477 illegal_op:
10478 return 1;
10481 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10483 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10484 int32_t offset;
10485 int i;
10486 TCGv_i32 tmp;
10487 TCGv_i32 tmp2;
10488 TCGv_i32 addr;
10490 if (s->condexec_mask) {
10491 cond = s->condexec_cond;
10492 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10493 s->condlabel = gen_new_label();
10494 arm_gen_test_cc(cond ^ 1, s->condlabel);
10495 s->condjmp = 1;
10499 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10500 s->pc += 2;
10502 switch (insn >> 12) {
10503 case 0: case 1:
10505 rd = insn & 7;
10506 op = (insn >> 11) & 3;
10507 if (op == 3) {
10508 /* add/subtract */
10509 rn = (insn >> 3) & 7;
10510 tmp = load_reg(s, rn);
10511 if (insn & (1 << 10)) {
10512 /* immediate */
10513 tmp2 = tcg_temp_new_i32();
10514 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10515 } else {
10516 /* reg */
10517 rm = (insn >> 6) & 7;
10518 tmp2 = load_reg(s, rm);
10520 if (insn & (1 << 9)) {
10521 if (s->condexec_mask)
10522 tcg_gen_sub_i32(tmp, tmp, tmp2);
10523 else
10524 gen_sub_CC(tmp, tmp, tmp2);
10525 } else {
10526 if (s->condexec_mask)
10527 tcg_gen_add_i32(tmp, tmp, tmp2);
10528 else
10529 gen_add_CC(tmp, tmp, tmp2);
10531 tcg_temp_free_i32(tmp2);
10532 store_reg(s, rd, tmp);
10533 } else {
10534 /* shift immediate */
10535 rm = (insn >> 3) & 7;
10536 shift = (insn >> 6) & 0x1f;
10537 tmp = load_reg(s, rm);
10538 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10539 if (!s->condexec_mask)
10540 gen_logic_CC(tmp);
10541 store_reg(s, rd, tmp);
10543 break;
10544 case 2: case 3:
10545 /* arithmetic large immediate */
10546 op = (insn >> 11) & 3;
10547 rd = (insn >> 8) & 0x7;
10548 if (op == 0) { /* mov */
10549 tmp = tcg_temp_new_i32();
10550 tcg_gen_movi_i32(tmp, insn & 0xff);
10551 if (!s->condexec_mask)
10552 gen_logic_CC(tmp);
10553 store_reg(s, rd, tmp);
10554 } else {
10555 tmp = load_reg(s, rd);
10556 tmp2 = tcg_temp_new_i32();
10557 tcg_gen_movi_i32(tmp2, insn & 0xff);
10558 switch (op) {
10559 case 1: /* cmp */
10560 gen_sub_CC(tmp, tmp, tmp2);
10561 tcg_temp_free_i32(tmp);
10562 tcg_temp_free_i32(tmp2);
10563 break;
10564 case 2: /* add */
10565 if (s->condexec_mask)
10566 tcg_gen_add_i32(tmp, tmp, tmp2);
10567 else
10568 gen_add_CC(tmp, tmp, tmp2);
10569 tcg_temp_free_i32(tmp2);
10570 store_reg(s, rd, tmp);
10571 break;
10572 case 3: /* sub */
10573 if (s->condexec_mask)
10574 tcg_gen_sub_i32(tmp, tmp, tmp2);
10575 else
10576 gen_sub_CC(tmp, tmp, tmp2);
10577 tcg_temp_free_i32(tmp2);
10578 store_reg(s, rd, tmp);
10579 break;
10582 break;
10583 case 4:
10584 if (insn & (1 << 11)) {
10585 rd = (insn >> 8) & 7;
10586 /* load pc-relative. Bit 1 of PC is ignored. */
10587 val = s->pc + 2 + ((insn & 0xff) * 4);
10588 val &= ~(uint32_t)2;
10589 addr = tcg_temp_new_i32();
10590 tcg_gen_movi_i32(addr, val);
10591 tmp = tcg_temp_new_i32();
10592 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10593 tcg_temp_free_i32(addr);
10594 store_reg(s, rd, tmp);
10595 break;
10597 if (insn & (1 << 10)) {
10598 /* data processing extended or blx */
10599 rd = (insn & 7) | ((insn >> 4) & 8);
10600 rm = (insn >> 3) & 0xf;
10601 op = (insn >> 8) & 3;
10602 switch (op) {
10603 case 0: /* add */
10604 tmp = load_reg(s, rd);
10605 tmp2 = load_reg(s, rm);
10606 tcg_gen_add_i32(tmp, tmp, tmp2);
10607 tcg_temp_free_i32(tmp2);
10608 store_reg(s, rd, tmp);
10609 break;
10610 case 1: /* cmp */
10611 tmp = load_reg(s, rd);
10612 tmp2 = load_reg(s, rm);
10613 gen_sub_CC(tmp, tmp, tmp2);
10614 tcg_temp_free_i32(tmp2);
10615 tcg_temp_free_i32(tmp);
10616 break;
10617 case 2: /* mov/cpy */
10618 tmp = load_reg(s, rm);
10619 store_reg(s, rd, tmp);
10620 break;
10621 case 3:/* branch [and link] exchange thumb register */
10622 tmp = load_reg(s, rm);
10623 if (insn & (1 << 7)) {
10624 ARCH(5);
10625 val = (uint32_t)s->pc | 1;
10626 tmp2 = tcg_temp_new_i32();
10627 tcg_gen_movi_i32(tmp2, val);
10628 store_reg(s, 14, tmp2);
10630 /* already thumb, no need to check */
10631 gen_bx(s, tmp);
10632 break;
10634 break;
10637 /* data processing register */
10638 rd = insn & 7;
10639 rm = (insn >> 3) & 7;
10640 op = (insn >> 6) & 0xf;
10641 if (op == 2 || op == 3 || op == 4 || op == 7) {
10642 /* the shift/rotate ops want the operands backwards */
10643 val = rm;
10644 rm = rd;
10645 rd = val;
10646 val = 1;
10647 } else {
10648 val = 0;
10651 if (op == 9) { /* neg */
10652 tmp = tcg_temp_new_i32();
10653 tcg_gen_movi_i32(tmp, 0);
10654 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10655 tmp = load_reg(s, rd);
10656 } else {
10657 TCGV_UNUSED_I32(tmp);
10660 tmp2 = load_reg(s, rm);
10661 switch (op) {
10662 case 0x0: /* and */
10663 tcg_gen_and_i32(tmp, tmp, tmp2);
10664 if (!s->condexec_mask)
10665 gen_logic_CC(tmp);
10666 break;
10667 case 0x1: /* eor */
10668 tcg_gen_xor_i32(tmp, tmp, tmp2);
10669 if (!s->condexec_mask)
10670 gen_logic_CC(tmp);
10671 break;
10672 case 0x2: /* lsl */
10673 if (s->condexec_mask) {
10674 gen_shl(tmp2, tmp2, tmp);
10675 } else {
10676 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10677 gen_logic_CC(tmp2);
10679 break;
10680 case 0x3: /* lsr */
10681 if (s->condexec_mask) {
10682 gen_shr(tmp2, tmp2, tmp);
10683 } else {
10684 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10685 gen_logic_CC(tmp2);
10687 break;
10688 case 0x4: /* asr */
10689 if (s->condexec_mask) {
10690 gen_sar(tmp2, tmp2, tmp);
10691 } else {
10692 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10693 gen_logic_CC(tmp2);
10695 break;
10696 case 0x5: /* adc */
10697 if (s->condexec_mask) {
10698 gen_adc(tmp, tmp2);
10699 } else {
10700 gen_adc_CC(tmp, tmp, tmp2);
10702 break;
10703 case 0x6: /* sbc */
10704 if (s->condexec_mask) {
10705 gen_sub_carry(tmp, tmp, tmp2);
10706 } else {
10707 gen_sbc_CC(tmp, tmp, tmp2);
10709 break;
10710 case 0x7: /* ror */
10711 if (s->condexec_mask) {
10712 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10713 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10714 } else {
10715 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10716 gen_logic_CC(tmp2);
10718 break;
10719 case 0x8: /* tst */
10720 tcg_gen_and_i32(tmp, tmp, tmp2);
10721 gen_logic_CC(tmp);
10722 rd = 16;
10723 break;
10724 case 0x9: /* neg */
10725 if (s->condexec_mask)
10726 tcg_gen_neg_i32(tmp, tmp2);
10727 else
10728 gen_sub_CC(tmp, tmp, tmp2);
10729 break;
10730 case 0xa: /* cmp */
10731 gen_sub_CC(tmp, tmp, tmp2);
10732 rd = 16;
10733 break;
10734 case 0xb: /* cmn */
10735 gen_add_CC(tmp, tmp, tmp2);
10736 rd = 16;
10737 break;
10738 case 0xc: /* orr */
10739 tcg_gen_or_i32(tmp, tmp, tmp2);
10740 if (!s->condexec_mask)
10741 gen_logic_CC(tmp);
10742 break;
10743 case 0xd: /* mul */
10744 tcg_gen_mul_i32(tmp, tmp, tmp2);
10745 if (!s->condexec_mask)
10746 gen_logic_CC(tmp);
10747 break;
10748 case 0xe: /* bic */
10749 tcg_gen_andc_i32(tmp, tmp, tmp2);
10750 if (!s->condexec_mask)
10751 gen_logic_CC(tmp);
10752 break;
10753 case 0xf: /* mvn */
10754 tcg_gen_not_i32(tmp2, tmp2);
10755 if (!s->condexec_mask)
10756 gen_logic_CC(tmp2);
10757 val = 1;
10758 rm = rd;
10759 break;
10761 if (rd != 16) {
10762 if (val) {
10763 store_reg(s, rm, tmp2);
10764 if (op != 0xf)
10765 tcg_temp_free_i32(tmp);
10766 } else {
10767 store_reg(s, rd, tmp);
10768 tcg_temp_free_i32(tmp2);
10770 } else {
10771 tcg_temp_free_i32(tmp);
10772 tcg_temp_free_i32(tmp2);
10774 break;
10776 case 5:
10777 /* load/store register offset. */
10778 rd = insn & 7;
10779 rn = (insn >> 3) & 7;
10780 rm = (insn >> 6) & 7;
10781 op = (insn >> 9) & 7;
10782 addr = load_reg(s, rn);
10783 tmp = load_reg(s, rm);
10784 tcg_gen_add_i32(addr, addr, tmp);
10785 tcg_temp_free_i32(tmp);
10787 if (op < 3) { /* store */
10788 tmp = load_reg(s, rd);
10789 } else {
10790 tmp = tcg_temp_new_i32();
10793 switch (op) {
10794 case 0: /* str */
10795 gen_aa32_st32(tmp, addr, get_mem_index(s));
10796 break;
10797 case 1: /* strh */
10798 gen_aa32_st16(tmp, addr, get_mem_index(s));
10799 break;
10800 case 2: /* strb */
10801 gen_aa32_st8(tmp, addr, get_mem_index(s));
10802 break;
10803 case 3: /* ldrsb */
10804 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10805 break;
10806 case 4: /* ldr */
10807 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10808 break;
10809 case 5: /* ldrh */
10810 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10811 break;
10812 case 6: /* ldrb */
10813 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10814 break;
10815 case 7: /* ldrsh */
10816 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10817 break;
10819 if (op >= 3) { /* load */
10820 store_reg(s, rd, tmp);
10821 } else {
10822 tcg_temp_free_i32(tmp);
10824 tcg_temp_free_i32(addr);
10825 break;
10827 case 6:
10828 /* load/store word immediate offset */
10829 rd = insn & 7;
10830 rn = (insn >> 3) & 7;
10831 addr = load_reg(s, rn);
10832 val = (insn >> 4) & 0x7c;
10833 tcg_gen_addi_i32(addr, addr, val);
10835 if (insn & (1 << 11)) {
10836 /* load */
10837 tmp = tcg_temp_new_i32();
10838 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10839 store_reg(s, rd, tmp);
10840 } else {
10841 /* store */
10842 tmp = load_reg(s, rd);
10843 gen_aa32_st32(tmp, addr, get_mem_index(s));
10844 tcg_temp_free_i32(tmp);
10846 tcg_temp_free_i32(addr);
10847 break;
10849 case 7:
10850 /* load/store byte immediate offset */
10851 rd = insn & 7;
10852 rn = (insn >> 3) & 7;
10853 addr = load_reg(s, rn);
10854 val = (insn >> 6) & 0x1f;
10855 tcg_gen_addi_i32(addr, addr, val);
10857 if (insn & (1 << 11)) {
10858 /* load */
10859 tmp = tcg_temp_new_i32();
10860 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10861 store_reg(s, rd, tmp);
10862 } else {
10863 /* store */
10864 tmp = load_reg(s, rd);
10865 gen_aa32_st8(tmp, addr, get_mem_index(s));
10866 tcg_temp_free_i32(tmp);
10868 tcg_temp_free_i32(addr);
10869 break;
10871 case 8:
10872 /* load/store halfword immediate offset */
10873 rd = insn & 7;
10874 rn = (insn >> 3) & 7;
10875 addr = load_reg(s, rn);
10876 val = (insn >> 5) & 0x3e;
10877 tcg_gen_addi_i32(addr, addr, val);
10879 if (insn & (1 << 11)) {
10880 /* load */
10881 tmp = tcg_temp_new_i32();
10882 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10883 store_reg(s, rd, tmp);
10884 } else {
10885 /* store */
10886 tmp = load_reg(s, rd);
10887 gen_aa32_st16(tmp, addr, get_mem_index(s));
10888 tcg_temp_free_i32(tmp);
10890 tcg_temp_free_i32(addr);
10891 break;
10893 case 9:
10894 /* load/store from stack */
10895 rd = (insn >> 8) & 7;
10896 addr = load_reg(s, 13);
10897 val = (insn & 0xff) * 4;
10898 tcg_gen_addi_i32(addr, addr, val);
10900 if (insn & (1 << 11)) {
10901 /* load */
10902 tmp = tcg_temp_new_i32();
10903 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10904 store_reg(s, rd, tmp);
10905 } else {
10906 /* store */
10907 tmp = load_reg(s, rd);
10908 gen_aa32_st32(tmp, addr, get_mem_index(s));
10909 tcg_temp_free_i32(tmp);
10911 tcg_temp_free_i32(addr);
10912 break;
10914 case 10:
10915 /* add to high reg */
10916 rd = (insn >> 8) & 7;
10917 if (insn & (1 << 11)) {
10918 /* SP */
10919 tmp = load_reg(s, 13);
10920 } else {
10921 /* PC. bit 1 is ignored. */
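/* s->pc was already advanced past this 16-bit insn, so the
 * architectural PC (insn address + 4) is s->pc + 2; clearing bit 1
 * word-aligns it, which is the base value ADR-style adds use.
 */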
10922 tmp = tcg_temp_new_i32();
10923 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10925 val = (insn & 0xff) * 4;
10926 tcg_gen_addi_i32(tmp, tmp, val);
10927 store_reg(s, rd, tmp);
10928 break;
10930 case 11:
10931 /* misc */
10932 op = (insn >> 8) & 0xf;
10933 switch (op) {
10934 case 0:
10935 /* adjust stack pointer */
10936 tmp = load_reg(s, 13);
10937 val = (insn & 0x7f) * 4;
10938 if (insn & (1 << 7))
10939 val = -(int32_t)val;
10940 tcg_gen_addi_i32(tmp, tmp, val);
10941 store_reg(s, 13, tmp);
10942 break;
10944 case 2: /* sign/zero extend. */
10945 ARCH(6);
10946 rd = insn & 7;
10947 rm = (insn >> 3) & 7;
10948 tmp = load_reg(s, rm);
10949 switch ((insn >> 6) & 3) {
10950 case 0: gen_sxth(tmp); break;
10951 case 1: gen_sxtb(tmp); break;
10952 case 2: gen_uxth(tmp); break;
10953 case 3: gen_uxtb(tmp); break;
10955 store_reg(s, rd, tmp);
10956 break;
10957 case 4: case 5: case 0xc: case 0xd:
10958 /* push/pop */
10959 addr = load_reg(s, 13);
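/* Compute the total transfer size: 4 bytes per low register in the
 * list, plus 4 more when bit 8 adds LR (push) or PC (pop). A push
 * starts storing at SP - offset; the same value is used below to
 * compute the written-back SP.
 */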
10960 if (insn & (1 << 8))
10961 offset = 4;
10962 else
10963 offset = 0;
10964 for (i = 0; i < 8; i++) {
10965 if (insn & (1 << i))
10966 offset += 4;
10968 if ((insn & (1 << 11)) == 0) {
10969 tcg_gen_addi_i32(addr, addr, -offset);
10971 for (i = 0; i < 8; i++) {
10972 if (insn & (1 << i)) {
10973 if (insn & (1 << 11)) {
10974 /* pop */
10975 tmp = tcg_temp_new_i32();
10976 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10977 store_reg(s, i, tmp);
10978 } else {
10979 /* push */
10980 tmp = load_reg(s, i);
10981 gen_aa32_st32(tmp, addr, get_mem_index(s));
10982 tcg_temp_free_i32(tmp);
10984 /* advance to the next address. */
10985 tcg_gen_addi_i32(addr, addr, 4);
10988 TCGV_UNUSED_I32(tmp);
10989 if (insn & (1 << 8)) {
10990 if (insn & (1 << 11)) {
10991 /* pop pc */
10992 tmp = tcg_temp_new_i32();
10993 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10994 /* don't set the pc until the rest of the instruction
10995 has completed */
10996 } else {
10997 /* push lr */
10998 tmp = load_reg(s, 14);
10999 gen_aa32_st32(tmp, addr, get_mem_index(s));
11000 tcg_temp_free_i32(tmp);
11002 tcg_gen_addi_i32(addr, addr, 4);
11004 if ((insn & (1 << 11)) == 0) {
11005 tcg_gen_addi_i32(addr, addr, -offset);
11007 /* write back the new stack pointer */
11008 store_reg(s, 13, addr);
11009 /* set the new PC value */
11010 if ((insn & 0x0900) == 0x0900) {
11011 store_reg_from_load(s, 15, tmp);
11013 break;
11015 case 1: case 3: case 9: case 11: /* czb */
11016 rm = insn & 7;
11017 tmp = load_reg(s, rm);
11018 s->condlabel = gen_new_label();
11019 s->condjmp = 1;
11020 if (insn & (1 << 11))
11021 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11022 else
11023 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11024 tcg_temp_free_i32(tmp);
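/* CBZ/CBNZ (bit 11 set selects CBNZ): the immediate is i:imm5:'0'
 * with i in bit 9 and imm5 in bits 7:3, giving a forward-only
 * displacement of 0..126 bytes from the insn address + 4.
 */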
11025 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11026 val = (uint32_t)s->pc + 2;
11027 val += offset;
11028 gen_jmp(s, val);
11029 break;
11031 case 15: /* IT, nop-hint. */
11032 if ((insn & 0xf) == 0) {
11033 gen_nop_hint(s, (insn >> 4) & 0xf);
11034 break;
11036 /* If Then. */
11037 s->condexec_cond = (insn >> 4) & 0xe;
11038 s->condexec_mask = insn & 0x1f;
11039 /* No actual code generated for this insn, just setup state. */
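/* condexec_cond holds firstcond[3:1] (insn bits 7:5); firstcond[0]
 * becomes the top bit of condexec_mask and is shifted into the
 * condition one slot at a time by the IT-advance code in
 * gen_intermediate_code().
 */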
11040 break;
11042 case 0xe: /* bkpt */
11044 int imm8 = extract32(insn, 0, 8);
11045 ARCH(5);
11046 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11047 default_exception_el(s));
11048 break;
11051 case 0xa: /* rev */
11052 ARCH(6);
11053 rn = (insn >> 3) & 0x7;
11054 rd = insn & 0x7;
11055 tmp = load_reg(s, rn);
11056 switch ((insn >> 6) & 3) {
11057 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11058 case 1: gen_rev16(tmp); break;
11059 case 3: gen_revsh(tmp); break;
11060 default: goto illegal_op;
11062 store_reg(s, rd, tmp);
11063 break;
11065 case 6:
11066 switch ((insn >> 5) & 7) {
11067 case 2:
11068 /* setend */
11069 ARCH(6);
11070 if (((insn >> 3) & 1) != s->bswap_code) {
11071 /* Dynamic endianness switching not implemented. */
11072 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
11073 goto illegal_op;
11075 break;
11076 case 3:
11077 /* cps */
11078 ARCH(6);
11079 if (IS_USER(s)) {
11080 break;
11082 if (arm_dc_feature(s, ARM_FEATURE_M)) {
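/* M profile: CPS only touches PRIMASK and FAULTMASK. tmp is 1 for
 * CPSID (bit 4 set) and 0 for CPSIE; 19 and 16 below are the
 * register numbers helper_v7m_msr() uses for FAULTMASK and PRIMASK.
 */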
11083 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11084 /* FAULTMASK */
11085 if (insn & 1) {
11086 addr = tcg_const_i32(19);
11087 gen_helper_v7m_msr(cpu_env, addr, tmp);
11088 tcg_temp_free_i32(addr);
11090 /* PRIMASK */
11091 if (insn & 2) {
11092 addr = tcg_const_i32(16);
11093 gen_helper_v7m_msr(cpu_env, addr, tmp);
11094 tcg_temp_free_i32(addr);
11096 tcg_temp_free_i32(tmp);
11097 gen_lookup_tb(s);
11098 } else {
11099 if (insn & (1 << 4)) {
11100 shift = CPSR_A | CPSR_I | CPSR_F;
11101 } else {
11102 shift = 0;
11104 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11106 break;
11107 default:
11108 goto undef;
11110 break;
11112 default:
11113 goto undef;
11115 break;
11117 case 12:
11119 /* load/store multiple */
11120 TCGv_i32 loaded_var;
11121 TCGV_UNUSED_I32(loaded_var);
11122 rn = (insn >> 8) & 0x7;
11123 addr = load_reg(s, rn);
11124 for (i = 0; i < 8; i++) {
11125 if (insn & (1 << i)) {
11126 if (insn & (1 << 11)) {
11127 /* load */
11128 tmp = tcg_temp_new_i32();
11129 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
11130 if (i == rn) {
11131 loaded_var = tmp;
11132 } else {
11133 store_reg(s, i, tmp);
11135 } else {
11136 /* store */
11137 tmp = load_reg(s, i);
11138 gen_aa32_st32(tmp, addr, get_mem_index(s));
11139 tcg_temp_free_i32(tmp);
11141 /* advance to the next address */
11142 tcg_gen_addi_i32(addr, addr, 4);
11145 if ((insn & (1 << rn)) == 0) {
11146 /* base reg not in list: base register writeback */
11147 store_reg(s, rn, addr);
11148 } else {
11149 /* base reg in list: if load, complete it now */
11150 if (insn & (1 << 11)) {
11151 store_reg(s, rn, loaded_var);
11153 tcg_temp_free_i32(addr);
11155 break;
11157 case 13:
11158 /* conditional branch or swi */
11159 cond = (insn >> 8) & 0xf;
11160 if (cond == 0xe)
11161 goto undef;
11163 if (cond == 0xf) {
11164 /* swi */
11165 gen_set_pc_im(s, s->pc);
11166 s->svc_imm = extract32(insn, 0, 8);
11167 s->is_jmp = DISAS_SWI;
11168 break;
11170 /* generate a conditional jump to next instruction */
11171 s->condlabel = gen_new_label();
11172 arm_gen_test_cc(cond ^ 1, s->condlabel);
11173 s->condjmp = 1;
11175 /* jump to the offset */
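/* Target is this insn's address + 4 plus twice the sign-extended
 * 8-bit immediate; since s->pc already points past this halfword,
 * the base is s->pc + 2.
 */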
11176 val = (uint32_t)s->pc + 2;
11177 offset = ((int32_t)insn << 24) >> 24;
11178 val += offset << 1;
11179 gen_jmp(s, val);
11180 break;
11182 case 14:
11183 if (insn & (1 << 11)) {
11184 if (disas_thumb2_insn(env, s, insn))
11185 goto undef32;
11186 break;
11188 /* unconditional branch */
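/* Target is this insn's address + 4 plus twice the sign-extended
 * 11-bit immediate; the +2 below folds in the remaining half of the
 * +4, as s->pc already points past this halfword.
 */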
11189 val = (uint32_t)s->pc;
11190 offset = ((int32_t)insn << 21) >> 21;
11191 val += (offset << 1) + 2;
11192 gen_jmp(s, val);
11193 break;
11195 case 15:
11196 if (disas_thumb2_insn(env, s, insn))
11197 goto undef32;
11198 break;
11200 return;
11201 undef32:
11202 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11203 default_exception_el(s));
11204 return;
11205 illegal_op:
11206 undef:
11207 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11208 default_exception_el(s));
11211 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11213 /* Return true if the insn at s->pc might cross a page boundary.
11214 * (False positives are OK, false negatives are not.)
11216 uint16_t insn;
11218 if ((s->pc & 3) == 0) {
11219 /* At a 4-aligned address we can't be crossing a page */
11220 return false;
11223 /* This must be a Thumb insn */
11224 insn = arm_lduw_code(env, s->pc, s->bswap_code);
11226 if ((insn >> 11) >= 0x1d) {
11227 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11228 * first half of a 32-bit Thumb insn. Thumb-1 cores might
11229 * end up actually treating this as two 16-bit insns (see the
11230 * code at the start of disas_thumb2_insn()) but we don't bother
11231 * to check for that as it is unlikely, and false positives here
11232 * are harmless.
11234 return true;
11236 /* Definitely a 16-bit insn, can't be crossing a page. */
11237 return false;
11240 /* generate intermediate code for basic block 'tb'. */
11242 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11244 ARMCPU *cpu = arm_env_get_cpu(env);
11245 CPUState *cs = CPU(cpu);
11246 DisasContext dc1, *dc = &dc1;
11247 target_ulong pc_start;
11248 target_ulong next_page_start;
11249 int num_insns;
11250 int max_insns;
11251 bool end_of_page;
11253 /* generate intermediate code */
11255 /* The A64 decoder has its own top level loop, because it doesn't need
11256 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11258 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11259 gen_intermediate_code_a64(cpu, tb);
11260 return;
11263 pc_start = tb->pc;
11265 dc->tb = tb;
11267 dc->is_jmp = DISAS_NEXT;
11268 dc->pc = pc_start;
11269 dc->singlestep_enabled = cs->singlestep_enabled;
11270 dc->condjmp = 0;
11272 dc->aarch64 = 0;
11273 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11274 * there is no secure EL1, so we route exceptions to EL3.
11276 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11277 !arm_el_is_aa64(env, 3);
11278 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11279 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11280 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11281 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
11282 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11283 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11284 #if !defined(CONFIG_USER_ONLY)
11285 dc->user = (dc->current_el == 0);
11286 #endif
11287 dc->ns = ARM_TBFLAG_NS(tb->flags);
11288 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
11289 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11290 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11291 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11292 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11293 dc->cp_regs = cpu->cp_regs;
11294 dc->features = env->features;
11296 /* Single step state. The code-generation logic here is:
11297 * SS_ACTIVE == 0:
11298 * generate code with no special handling for single-stepping (except
11299 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11300 * this happens anyway because those changes are all system register or
11301 * PSTATE writes).
11302 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11303 * emit code for one insn
11304 * emit code to clear PSTATE.SS
11305 * emit code to generate software step exception for completed step
11306 * end TB (as usual for having generated an exception)
11307 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11308 * emit code to generate a software step exception
11309 * end the TB
11311 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11312 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11313 dc->is_ldex = false;
11314 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11316 cpu_F0s = tcg_temp_new_i32();
11317 cpu_F1s = tcg_temp_new_i32();
11318 cpu_F0d = tcg_temp_new_i64();
11319 cpu_F1d = tcg_temp_new_i64();
11320 cpu_V0 = cpu_F0d;
11321 cpu_V1 = cpu_F1d;
11322 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11323 cpu_M0 = tcg_temp_new_i64();
11324 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
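/* icount may request a maximum instruction count for this TB via
 * tb->cflags; zero means no request, so fall back to the mask value
 * and then clamp to TCG's per-TB limit.
 */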
11325 num_insns = 0;
11326 max_insns = tb->cflags & CF_COUNT_MASK;
11327 if (max_insns == 0) {
11328 max_insns = CF_COUNT_MASK;
11330 if (max_insns > TCG_MAX_INSNS) {
11331 max_insns = TCG_MAX_INSNS;
11334 gen_tb_start(tb);
11336 tcg_clear_temp_count();
11338 /* A note on handling of the condexec (IT) bits:
11340 * We want to avoid the overhead of having to write the updated condexec
11341 * bits back to the CPUARMState for every instruction in an IT block. So:
11342 * (1) if the condexec bits are not already zero then we write
11343 * zero back into the CPUARMState now. This avoids complications trying
11344 * to do it at the end of the block. (For example if we don't do this
11345 * it's hard to identify whether we can safely skip writing condexec
11346 * at the end of the TB, which we definitely want to do for the case
11347 * where a TB doesn't do anything with the IT state at all.)
11348 * (2) if we are going to leave the TB then we call gen_set_condexec()
11349 * which will write the correct value into CPUARMState if zero is wrong.
11350 * This is done both for leaving the TB at the end, and for leaving
11351 * it because of an exception we know will happen, which is done in
11352 * gen_exception_insn(). The latter is necessary because we need to
11353 * leave the TB with the PC/IT state just prior to execution of the
11354 * instruction which caused the exception.
11355 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11356 * then the CPUARMState will be wrong and we need to reset it.
11357 * This is handled in the same way as restoration of the
11358 * PC in these situations; we save the value of the condexec bits
11359 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11360 * then uses this to restore them after an exception.
11362 * Note that there are no instructions which can read the condexec
11363 * bits, and none which can write non-static values to them, so
11364 * we don't need to care about whether CPUARMState is correct in the
11365 * middle of a TB.
11368 /* Reset the conditional execution bits immediately. This avoids
11369 complications trying to do it at the end of the block. */
11370 if (dc->condexec_mask || dc->condexec_cond)
11372 TCGv_i32 tmp = tcg_temp_new_i32();
11373 tcg_gen_movi_i32(tmp, 0);
11374 store_cpu_field(tmp, condexec_bits);
11376 do {
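/* Record this insn's PC and its IT state, packed in the same layout
 * as env->condexec_bits; restore_state_to_opc() writes these values
 * back if we have to unwind mid-TB.
 */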
11377 tcg_gen_insn_start(dc->pc,
11378 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
11379 num_insns++;
11381 #ifdef CONFIG_USER_ONLY
11382 /* Intercept jump to the magic kernel page. */
11383 if (dc->pc >= 0xffff0000) {
11384 /* We always get here via a jump, so we know we are not in a
11385 conditional execution block. */
11386 gen_exception_internal(EXCP_KERNEL_TRAP);
11387 dc->is_jmp = DISAS_EXC;
11388 break;
11390 #else
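/* For M profile, addresses from 0xfffffff0 up are EXC_RETURN magic
 * values: a branch there is an exception return, not a real fetch.
 */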
11391 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11392 /* We always get here via a jump, so we know we are not in a
11393 conditional execution block. */
11394 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11395 dc->is_jmp = DISAS_EXC;
11396 break;
11398 #endif
11400 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11401 CPUBreakpoint *bp;
11402 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11403 if (bp->pc == dc->pc) {
11404 if (bp->flags & BP_CPU) {
11405 gen_set_pc_im(dc, dc->pc);
11406 gen_helper_check_breakpoints(cpu_env);
11407 /* End the TB early; it's likely not going to be executed */
11408 dc->is_jmp = DISAS_UPDATE;
11409 } else {
11410 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11411 /* The address covered by the breakpoint must be
11412 included in [tb->pc, tb->pc + tb->size) in order
11413 for it to be properly cleared -- thus we
11414 increment the PC here so that the logic setting
11415 tb->size below does the right thing. */
11416 /* TODO: Advance PC by correct instruction length to
11417 * avoid disassembler error messages */
11418 dc->pc += 2;
11419 goto done_generating;
11421 break;
11426 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
11427 gen_io_start();
11430 if (dc->ss_active && !dc->pstate_ss) {
11431 /* Singlestep state is Active-pending.
11432 * If we're in this state at the start of a TB then either
11433 * a) we just took an exception to an EL which is being debugged
11434 * and this is the first insn in the exception handler
11435 * b) debug exceptions were masked and we just unmasked them
11436 * without changing EL (eg by clearing PSTATE.D)
11437 * In either case we're going to take a swstep exception in the
11438 * "did not step an insn" case, and so the syndrome ISV and EX
11439 * bits should be zero.
11441 assert(num_insns == 1);
11442 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11443 default_exception_el(dc));
11444 goto done_generating;
11447 if (dc->thumb) {
11448 disas_thumb_insn(env, dc);
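/* Advance the IT state: the next slot's condition LSB comes from
 * the top bit of the mask, and the mask shifts left until the
 * block is exhausted.
 */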
11449 if (dc->condexec_mask) {
11450 dc->condexec_cond = (dc->condexec_cond & 0xe)
11451 | ((dc->condexec_mask >> 4) & 1);
11452 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11453 if (dc->condexec_mask == 0) {
11454 dc->condexec_cond = 0;
11457 } else {
11458 unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
11459 dc->pc += 4;
11460 disas_arm_insn(dc, insn);
11463 if (dc->condjmp && !dc->is_jmp) {
11464 gen_set_label(dc->condlabel);
11465 dc->condjmp = 0;
11468 if (tcg_check_temp_count()) {
11469 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11470 dc->pc);
11473 /* Translation stops when a conditional branch is encountered.
11474 * Otherwise the subsequent code could get translated several times.
11475 * Also stop translation when a page boundary is reached. This
11476 * ensures prefetch aborts occur at the right place. */
11478 /* We want to stop the TB if the next insn starts in a new page,
11479 * or if it spans between this page and the next. This means that
11480 * if we're looking at the last halfword in the page we need to
11481 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11482 * or a 32-bit Thumb insn (which won't).
11483 * This is to avoid generating a silly TB with a single 16-bit insn
11484 * in it at the end of this page (which would execute correctly
11485 * but isn't very efficient).
11487 end_of_page = (dc->pc >= next_page_start) ||
11488 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11490 } while (!dc->is_jmp && !tcg_op_buf_full() &&
11491 !cs->singlestep_enabled &&
11492 !singlestep &&
11493 !dc->ss_active &&
11494 !end_of_page &&
11495 num_insns < max_insns);
11497 if (tb->cflags & CF_LAST_IO) {
11498 if (dc->condjmp) {
11499 /* FIXME: This can theoretically happen with self-modifying
11500 code. */
11501 cpu_abort(cs, "IO on conditional branch instruction");
11503 gen_io_end();
11506 /* At this stage dc->condjmp will only be set when the skipped
11507 instruction was a conditional branch or trap, and the PC has
11508 already been written. */
11509 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11510 /* Make sure the pc is updated, and raise a debug exception. */
11511 if (dc->condjmp) {
11512 gen_set_condexec(dc);
11513 if (dc->is_jmp == DISAS_SWI) {
11514 gen_ss_advance(dc);
11515 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11516 default_exception_el(dc));
11517 } else if (dc->is_jmp == DISAS_HVC) {
11518 gen_ss_advance(dc);
11519 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11520 } else if (dc->is_jmp == DISAS_SMC) {
11521 gen_ss_advance(dc);
11522 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11523 } else if (dc->ss_active) {
11524 gen_step_complete_exception(dc);
11525 } else {
11526 gen_exception_internal(EXCP_DEBUG);
11528 gen_set_label(dc->condlabel);
11530 if (dc->condjmp || dc->is_jmp == DISAS_NEXT ||
11531 dc->is_jmp == DISAS_UPDATE) {
11532 gen_set_pc_im(dc, dc->pc);
11533 dc->condjmp = 0;
11535 gen_set_condexec(dc);
11536 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
11537 gen_ss_advance(dc);
11538 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11539 default_exception_el(dc));
11540 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11541 gen_ss_advance(dc);
11542 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11543 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11544 gen_ss_advance(dc);
11545 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11546 } else if (dc->ss_active) {
11547 gen_step_complete_exception(dc);
11548 } else {
11549 /* FIXME: Single stepping a WFI insn will not halt
11550 the CPU. */
11551 gen_exception_internal(EXCP_DEBUG);
11553 } else {
11554 /* While branches must always occur at the end of an IT block,
11555 there are a few other things that can cause us to terminate
11556 the TB in the middle of an IT block:
11557 - Exception generating instructions (bkpt, swi, undefined).
11558 - Page boundaries.
11559 - Hardware watchpoints.
11560 Hardware breakpoints have already been handled and skip this code.
11562 gen_set_condexec(dc);
11563 switch (dc->is_jmp) {
11564 case DISAS_NEXT:
11565 gen_goto_tb(dc, 1, dc->pc);
11566 break;
11567 case DISAS_UPDATE:
11568 gen_set_pc_im(dc, dc->pc);
11569 /* fall through */
11570 case DISAS_JUMP:
11571 default:
11572 /* indicate that the hash table must be used to find the next TB */
11573 tcg_gen_exit_tb(0);
11574 break;
11575 case DISAS_TB_JUMP:
11576 /* nothing more to generate */
11577 break;
11578 case DISAS_WFI:
11579 gen_helper_wfi(cpu_env);
11580 /* The helper doesn't necessarily throw an exception, but we
11581 * must go back to the main loop to check for interrupts anyway.
11583 tcg_gen_exit_tb(0);
11584 break;
11585 case DISAS_WFE:
11586 gen_helper_wfe(cpu_env);
11587 break;
11588 case DISAS_YIELD:
11589 gen_helper_yield(cpu_env);
11590 break;
11591 case DISAS_SWI:
11592 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11593 default_exception_el(dc));
11594 break;
11595 case DISAS_HVC:
11596 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11597 break;
11598 case DISAS_SMC:
11599 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11600 break;
11602 if (dc->condjmp) {
11603 gen_set_label(dc->condlabel);
11604 gen_set_condexec(dc);
11605 gen_goto_tb(dc, 1, dc->pc);
11606 dc->condjmp = 0;
11610 done_generating:
11611 gen_tb_end(tb, num_insns);
11613 #ifdef DEBUG_DISAS
11614 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
11615 qemu_log("----------------\n");
11616 qemu_log("IN: %s\n", lookup_symbol(pc_start));
11617 log_target_disas(cs, pc_start, dc->pc - pc_start,
11618 dc->thumb | (dc->bswap_code << 1));
11619 qemu_log("\n");
11621 #endif
11622 tb->size = dc->pc - pc_start;
11623 tb->icount = num_insns;
11626 static const char *cpu_mode_names[16] = {
11627 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11628 "???", "???", "hyp", "und", "???", "???", "???", "sys"
11631 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11632 int flags)
11634 ARMCPU *cpu = ARM_CPU(cs);
11635 CPUARMState *env = &cpu->env;
11636 int i;
11637 uint32_t psr;
11638 const char *ns_status;
11640 if (is_a64(env)) {
11641 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11642 return;
11645 for (i = 0; i < 16; i++) {
11646 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
11647 if ((i % 4) == 3)
11648 cpu_fprintf(f, "\n");
11649 else
11650 cpu_fprintf(f, " ");
11652 psr = cpsr_read(env);
11654 if (arm_feature(env, ARM_FEATURE_EL3) &&
11655 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
11656 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
11657 } else {
11658 ns_status = "";
11661 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
11662 psr,
11663 psr & (1 << 31) ? 'N' : '-',
11664 psr & (1 << 30) ? 'Z' : '-',
11665 psr & (1 << 29) ? 'C' : '-',
11666 psr & (1 << 28) ? 'V' : '-',
11667 psr & CPSR_T ? 'T' : 'A',
11668 ns_status,
11669 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
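/* The trailing 32/26 reflects bit 4 of the mode field: set for the
 * 32-bit modes, clear for the legacy 26-bit modes. */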
11671 if (flags & CPU_DUMP_FPU) {
11672 int numvfpregs = 0;
11673 if (arm_feature(env, ARM_FEATURE_VFP)) {
11674 numvfpregs += 16;
11676 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11677 numvfpregs += 16;
11679 for (i = 0; i < numvfpregs; i++) {
11680 uint64_t v = float64_val(env->vfp.regs[i]);
11681 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11682 i * 2, (uint32_t)v,
11683 i * 2 + 1, (uint32_t)(v >> 32),
11684 i, v);
11686 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
11690 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11691 target_ulong *data)
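/* data[] holds what tcg_gen_insn_start() recorded: data[0] is the PC
 * and data[1] the packed condexec (IT) bits.
 */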
11693 if (is_a64(env)) {
11694 env->pc = data[0];
11695 env->condexec_bits = 0;
11696 } else {
11697 env->regs[15] = data[0];
11698 env->condexec_bits = data[1];