target/arm/op_helper.c
/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}
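
/*
 * Illustration of the desc encoding above: for a two-register table,
 * (desc & 3) == 1, so maxindex == 16 and the table covers bytes 0..15
 * of d<base_reg> and d<base_reg + 1>. An index byte of e.g. 0x0a picks
 * byte (0x0a & 7) == 2 of d<base_reg + 1>. Any index byte >= maxindex
 * takes the corresponding byte of 'def' instead, which lets the caller
 * implement both VTBL (def == 0) and VTBX (def == the old destination).
 */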
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
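
/*
 * Worked example for the overflow test above: with a = 0x7fffffff and
 * b = 1, res = 0x80000000, so (res ^ a) has the sign bit set while
 * (a ^ b) does not: the addition overflowed. Because a is non-negative,
 * ~(((int32_t)a >> 31) ^ SIGNBIT) evaluates to 0x7fffffff, i.e. the
 * result saturates to INT32_MAX and QF is set.
 */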
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
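
/*
 * Unsigned examples: 0xfffffffe + 3 wraps to 0x00000001 < a, so
 * add_usaturate returns 0xffffffff with QF set; 3 - 5 wraps to
 * 0xfffffffe > a, so sub_usaturate returns 0 with QF set.
 */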
/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
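
/*
 * The 'shift' argument selects the saturation width: do_ssat clamps to
 * the (shift + 1)-bit signed range [-(1 << shift), (1 << shift) - 1],
 * and do_usat to the shift-bit unsigned range [0, (1 << shift) - 1].
 * For example, do_ssat(env, 200, 7) returns 127 (top == 1, so QF is set
 * and mask == 0x7f is returned), and do_usat(env, -5, 8) returns 0.
 */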
/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
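
/*
 * Halfword example: ssat16 with x = 0x7fff8000 and shift = 7 saturates
 * each half independently to the 8-bit signed range: the low half
 * (-32768) becomes 0xff80 (-128) and the high half (32767) becomes
 * 0x007f (127), giving 0x007fff80 with QF set.
 */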
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}
#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif
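
/*
 * Example of the priority order implemented by check_wfx_trap(): a WFE
 * executed at EL0 with SCTLR_EL1.nTWE clear traps to EL1 even if
 * HCR_EL2.TWE or SCR_EL3.TWE is also set; HCR_EL2 is only consulted
 * once the SCTLR-based EL1/EL3 trap has been ruled out, and SCR_EL3
 * last of all.
 */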
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}
void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }

    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags. */
    arm_rebuild_hflags(env);
}
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
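
/*
 * Banking example: in FIQ mode the architected user-mode r8-r12 are
 * held in env->usr_regs[], so get_user_reg(env, 9) reads
 * env->usr_regs[1] there, while in any other mode it simply reads
 * env->regs[9]. SP and LR (regno 13/14) always come from the
 * User/System bank.
 */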
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
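
/*
 * For instance, "MSR r13_usr" executed in System mode reaches the
 * regno == 13 case above with curmode == ARM_CPU_MODE_SYS and UNDEFs,
 * because System mode uses the User-mode registers and the banked
 * access is UNPREDICTABLE there; the same access from FIQ mode is
 * allowed.
 */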
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs not accessible at EL0 to have UNDEF-ed already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            target_el = 2;
            goto exept;
        }
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 is
         * a bug in the access function.
         */
        assert(arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

exept:
    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
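
/*
 * HSTR_EL2 example: with HSTR_EL2.T7 set, a 32-bit CP15 access with
 * CRn == 7 (e.g. a cache maintenance operation) executed below EL2
 * takes the trap-to-EL2 path above; for 64-bit (MCRR/MRRC) accesses
 * the CRm field indexes the trap bits instead.
 */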
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
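
/*
 * Note on the locking pattern above: registers flagged ARM_CP_IO can
 * touch device or timer state shared with the rest of QEMU, so their
 * readfn/writefn hooks run under the iothread mutex; ordinary system
 * registers only touch CPU-local state and skip the lock.
 */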
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1     !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2           PSCI Call
     *  Conduit SMC, inval call  Trap to EL2           Trap to EL3
     *  Conduit not SMC          Trap to EL2           Trap to EL3
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1     !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2           PSCI Call
     *  Conduit SMC, inval call  Trap to EL2           Undef insn
     *  Conduit not SMC          Trap to EL2           Undef insn
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1     !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2           PSCI Call
     *  Conduit SMC, inval call  Trap to EL2           Undef insn
     *  Conduit not SMC          Undef insn            Undef insn
     */

    /*
     * On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
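
/*
 * Carry-out example: shl_cc(env, 0x80000001, 1) sets CF from bit 31
 * (the last bit shifted out) and returns 0x00000002; a shift of exactly
 * 32 shifts the whole word out, so CF comes from bit 0 and the result
 * is 0.
 */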
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
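
/*
 * Rotate example: ror_cc(env, x, 32) has shift1 == 32 but shift == 0,
 * so the value is returned unchanged while CF is still set from bit 31,
 * matching the architected ROR-by-register behaviour for rotate amounts
 * that are non-zero multiples of 32.
 */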
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}
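
/*
 * The in_page computation above relies on two's complement: assuming
 * 4K pages, -(ptr | 0xfffff000) equals TARGET_PAGE_SIZE minus the page
 * offset of ptr, e.g. an offset of 0xff0 yields in_page == 0x10, so a
 * 16-byte access at that offset still takes the single-probe fast path
 * while anything larger is split at the page boundary.
 */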