tcg: Add INDEX_op_trunc_shr_i32
[qemu.git] / target-arm / op_helper.c
blob 57e7d9c48052835ef8662cbe7562085cd1f40931
/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "helper.h"
#include "internals.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}
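
/* Table lookup helper (used for the NEON VTBL/VTBX family): each byte of
 * ireg is an index into the table registers starting at rn; indexes at or
 * beyond maxindex take the corresponding byte of def instead.
 */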
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
#if !defined(CONFIG_USER_ONLY)

#include "exec/softmmu_exec.h"

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"
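
/* The SHIFT values 0..3 above instantiate the byte, halfword, word and
 * doubleword softmmu load/store helpers from exec/softmmu_template.h
 * (SHIFT is log2 of the access size).
 */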
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}
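
/* On signed overflow the saturating helpers below compute the result as
 * ~(((int32_t)a >> 31) ^ SIGNBIT): the arithmetic shift yields 0 for a
 * non-negative a and 0xffffffff for a negative one, so the expression
 * evaluates to 0x7fffffff (INT32_MAX) or 0x80000000 (INT32_MIN)
 * respectively, i.e. saturation in the direction of the overflow.
 */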
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}
uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
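
/* For example, with shift == 7 do_ssat() clamps val to the signed 8-bit
 * range [-128, 127]: in general the result is limited to
 * [-(1 << shift), (1 << shift) - 1], with QF set whenever clamping occurs.
 */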
/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
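
/* Likewise do_usat() clamps val to [0, (1 << shift) - 1]; e.g. shift == 8
 * limits the result to [0, 255], setting QF if val was outside that range.
 */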
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;
    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        /* imm carries the D, A, I, F bits in [3:0]; shifting left by 6
         * aligns them with PSTATE_DAIF (PSTATE bits [9:6]).
         */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
void HELPER(exception_return)(CPUARMState *env)
{
    uint32_t spsr = env->banked_spsr[0];
    int new_el, i;

    if (env->pstate & PSTATE_SP) {
        env->sp_el[1] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }

    env->exclusive_addr = -1;

    if (spsr & PSTATE_nRW) {
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        for (i = 0; i < 15; i++) {
            env->regs[i] = env->xregs[i];
        }

        env->regs[15] = env->elr_el1 & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > 1) {
            /* Return to unimplemented EL */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL1 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        env->xregs[31] = env->sp_el[new_el];
        env->pc = env->elr_el1;
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el1;
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */
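
/* Each helper returns the shifted value and leaves the ARM shifter
 * carry-out for that shift type in env->CF. For example,
 * shl_cc(env, 0x80000001, 1) sets CF to 1 (bit 31 shifted out) and
 * returns 0x00000002.
 */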
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}