/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}
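
/*
 * Table lookup helper for Neon VTBL/VTBX: each byte of ireg indexes into a
 * table of up to four D registers starting at base_reg (length and base are
 * both encoded in desc). Out-of-range indexes take the corresponding byte
 * of def instead (zeroes for VTBL, the old destination value for VTBX).
 */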
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
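
/* 32-bit add that sets QF on signed overflow but returns the unsaturated
 * result.
 */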
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}
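
/* Signed saturating add/subtract: on overflow, set QF and clamp the result
 * to INT32_MAX or INT32_MIN depending on the sign of the first operand.
 */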
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
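
/* Unsigned saturating add/subtract: on wrap-around, set QF and clamp to
 * UINT32_MAX or 0 respectively.
 */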
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}
uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
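
/*
 * Worked example (illustrative, not from the original source): with
 * shift == 7 the signed range is [-0x80, 0x7f], so do_ssat(env, 0x1234, 7)
 * returns 0x7f and sets QF; with shift == 8 the unsigned range is
 * [0, 0xff], so do_usat(env, -5, 8) returns 0 and sets QF.
 */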
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
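
/* SETEND: toggle the CPSR E (data endianness) bit. */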
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
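
/*
 * WFI: if there is no pending work, either take the configured WFx trap
 * or halt the CPU until it is woken by an interrupt.
 */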
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}
void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags. */
    arm_rebuild_hflags(env);
}
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
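
/* Set the SP of the given mode, using the banked copy unless that mode is
 * the current one.
 */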
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs non accessible at EL0 to have UNDEF-ed already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            /* Trap the access to EL2; raise_exception() does not return. */
            raise_exception(env, EXCP_UDEF, syndrome, 2);
        }
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 is
         * a bug in the access function.
         */
        assert(arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
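
/*
 * Read/write a coprocessor register via its ARMCPRegInfo hooks. Registers
 * marked ARM_CP_IO can touch device state, so those accesses are made with
 * the iothread lock held.
 */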
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}
uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}
uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */
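
/* Each *_cc helper below returns the shifted result and records the ARM
 * carry-out of the shift (the last bit shifted out, if any) in env->CF.
 */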
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
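
/*
 * Probe a range for the MMU: if it crosses a page boundary, probe each
 * page separately so both translations are checked.
 */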
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_SIZE);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}